diff --git "a/416.jsonl" "b/416.jsonl" new file mode 100644--- /dev/null +++ "b/416.jsonl" @@ -0,0 +1,1794 @@ +{"seq_id":"27062574599","text":"\nimport ephem\n#import datetime\n\nmars = ephem.Mars('2000/01/01')\nconstellation = ephem.constellation(mars)\nprint(constellation)\n\n#Тестирование кода для бота\n\"\"\"\ndef get_planet(user_message):\n try:\n #Получаем now из библиотеки datetime, возможно в переменая context седеожит дату\n now = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n #Вытаскиваем название планеты из сообщения\n name_planet = (user_message.split())[1].capitalize()\n #Присваиваем переменую name_planet в атрибут\n ephem_name_planet= getattr(ephem, name_planet)\n #Получаем координаты планеты\n planet = ephem_name_planet(now)\n #Запрос созвездия\n constellation = (ephem.constellation(planet))[1]\n print(f'{name_planet} in the constellation {constellation}')\n except AttributeError:\n print(\"Check planet name\")\n\nget_planet('/planet moon')\n\"\"\"","repo_name":"dmitryole/lesson2","sub_path":"mars.py","file_name":"mars.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9354939301","text":"import json\nimport pathlib, os\nfrom tqdm import tqdm\nfrom collections import defaultdict, Counter\nfrom torch.utils.data import DataLoader\nimport openai\n\ndef run_api_inference(args, dataset):\n # format examples\n print(f\"\\nPreparing dataset for API...\")\n examples = []\n dataloader = DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=True, timeout=60, num_workers=1, drop_last=False)\n for bix, data in tqdm(enumerate(dataloader)):\n for i in range(len(data[0])):\n text = data[0][i]\n label = data[1][i]\n index = data[-1][i]\n incontext=defaultdict(list)\n text = dataset.get_prompt(input=text, incontext={})\n examples.append({\n \"key\": bix,\n \"input\": text,\n \"gold\": label\n })\n\n # launch api\n MY_OPENAI_KEY = args.openai_key # FILL IN YOUR API KEY from https://openai.com/api/\n assert MY_OPENAI_KEY, print(f\"Please fill in your openai api key, you can obtain this from https://openai.com/api/\")\n openai.api_key = MY_OPENAI_KEY\n openai.Engine.list()\n engines = openai.Engine.list()\n\n # run inference\n if args.model == \"gpt175\":\n engine_name = \"text-davinci-002\"\n elif args.model == \"gpt6.7\":\n engine_name = \"text-curie-001\"\n\n prompt_str = args.prompt_choice\n if \"incontext\" in args.prompt_choice:\n prompt_str += str(args.num_incontext)\n expt_path = f\"{args.result_path}/{args.dataset}_{engine_name}_{args.client_subsample}clientsample_{prompt_str}_{args.num_incontext}incontext.json\"\n\n # reload if something crashed\n if os.path.exists(expt_path):\n with open(expt_path) as f:\n examples = json.load(f)\n\n print(f\"Running inference with {engine_name}...\")\n print(f\"Saving data to: {expt_path}\")\n for i, example in enumerate(examples):\n if \"pred\" not in example:\n completion = openai.Completion.create(engine=engine_name, prompt=example['input'], temperature=0, top_p=1)\n example['pred'] = completion.choices[0].text\n \n # periodic saving in case something crashes\n if i % 100 == 0:\n print(f\"Step: {i}\")\n with open(expt_path, \"w\") as f:\n json.dump(examples, f)\n print(\"Saved\")\n \n # save final results\n with open(expt_path, \"w\") as f:\n json.dump(examples, f)\n print(f\"Saved API inference results to path: {expt_path}\")\n\n # score the results\n results = []\n for ex in examples:\n if 'pred' in ex:\n results.append(ex['pred'])\n 
dataset.compute_accuracy(results, dataset, args)\n","repo_name":"simran-arora/focus","sub_path":"privacy/run_api_inference.py","file_name":"run_api_inference.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"67"} +{"seq_id":"10532680477","text":"# Load necessary packages\nimport pandas as pd\nimport numpy as np\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\n\n# Load example gene expression data\ndata = pd.read_csv(\"https://raw.githubusercontent.com/*/RNA-seq-analysis/master/DESeq2/data/counts.txt\", sep=\"\\t\", index_col=0)\n\n# Set conditions for differential expression analysis\ndata['condition'] = ['A', 'A', 'A', 'B', 'B', 'B']\nconditions = robjects.FactorVector(data['condition'].values.tolist())\n\n# Perform differential gene expression analysis using DESeq2\nDESeq2 = importr('DESeq2')\nr_dataframe = robjects.pandas2ri.py2rpy(data.iloc[:, :-1])\ndds = DESeq2.DESeqDataSetFromMatrix(countData=r_dataframe, colData=conditions, design= ~ condition)\ndds = DESeq2.DESeq(dds)\nres = DESeq2.results(dds)\n\n# Extract significant differentially expressed genes\nres_df = pd.DataFrame(np.array(res), index=res.names, columns=res.colnames)\nsig_genes = res_df[res_df['padj'] < 0.05].index.tolist()\n\n# Perform further analysis using edgeR\nedgeR = importr('edgeR')\ncounts = np.array(data.iloc[:, :-1])\ndge = edgeR.DGEList(counts=counts, group=data['condition'])\ndge = edgeR.calcNormFactors(dge)\ndesign_mat = np.vstack((np.ones(data.shape[0]), data['condition'] == 'B')).T\nedgeR.glmQLFit(dge, design_mat)\nedgeR.glmQLFTest(dge, coef=2)\nedgeR.topTags(edgeR.glmQLFTest(dge, coef=2))\n","repo_name":"Andersanjuan/RNA-Seq-gene-expression-analysis","sub_path":"Gene_expression.py","file_name":"Gene_expression.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"26723188063","text":"def dotProps(obj, keys, set_value=None, set_mode=False):\n if not keys or not obj or not isinstance(obj, dict) or not isinstance(keys, str):\n return None\n\n value = obj\n _keys = keys.split(\".\")\n keys_last_index = len(_keys) - 1\n for idx, key in enumerate(_keys):\n last_key = idx == keys_last_index\n if key in value:\n if last_key:\n if set_mode:\n value[key] = set_value\n elif not isinstance(value[key], dict):\n value[key] = {}\n\n value = value[key]\n else:\n if last_key:\n value[key] = set_value if set_mode else None\n else:\n value[key] = {}\n value = value[key]\n\n return value\n\n\nclass JSON(dict):\n # https://stackoverflow.com/a/3405143/190597\n def __missing__(self, key):\n value = self[key] = type(self)()\n return value\n\n def g(self, keys):\n return dotProps(self, keys)\n\n def s(self, keys, value):\n return dotProps(self, keys, value, True)\n","repo_name":"nicolasdao/pypuffy","sub_path":"src/puffy/object/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17482782513","text":"# © 2019 Numigi (tm) and all its contributors (https://bit.ly/numigiens)\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, fields, models\n\n\nclass ProductCategory(models.Model):\n\n _inherit = 'product.category'\n\n consignment = fields.Boolean(\n help=\"If checked, only one supplier can be selected in the supplier prices list \"\n \"for 
products of this category. \"\n \"The supplier will automatically be set as owner of the stock \"\n \"on receipt orders.\"\n )\n","repo_name":"Numigi/odoo-purchase-addons","sub_path":"purchase_consignment/models/product_category.py","file_name":"product_category.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"28173716619","text":"import pygame\nimport sys\n\nclass GameObject():\n def __init__(self,screen,width,height,location=[0,0]):\n self.screen= screen\n self.width=width\n self.height=height\n self.location=location\n self.rectangle=pygame.rect.Rect(self.location[0],self.location[1],self.width,self.height)\n\n\n\n def Draw(self):\n raise NotImplementedError\n\n","repo_name":"sametkaya/pygame_lessons","sub_path":"lesson9/venv/Include/GameObject.py","file_name":"GameObject.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"25061639619","text":"import logging\nimport os\nfrom logging.handlers import RotatingFileHandler\nfrom flask import Flask, request, render_template, redirect, url_for, flash\n\nfrom squad_maker_app.algorithms import make_squads_minimize_cumulative_delta_mean\n\nSETTINGS_ENV_VAR = 'SQUAD_MAKER_SETTINGS'\nPLAYER_SOURCE_CONFIG = 'PLAYER_SOURCE'\nNUM_SQUADS_REQUEST_ARG = 'numSquads'\nLOG_FILE = 'instance.log'\nMAX_LOG_FILE_BYTES = 10000\nMAX_LOG_FILE_BACKUPS = 1\n\n\ndef create_app():\n app = Flask(__name__)\n\n # configure logging\n log_handler = RotatingFileHandler(LOG_FILE, maxBytes=MAX_LOG_FILE_BYTES, backupCount=MAX_LOG_FILE_BACKUPS)\n log_handler.setLevel(logging.DEBUG if app.debug else logging.INFO)\n app.logger.addHandler(log_handler)\n\n # load default settings, and override with values from a custom config file, if present.\n app.config.from_object('squad_maker_app.default_settings')\n app.config.from_envvar(SETTINGS_ENV_VAR, silent=True)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n @app.route('/')\n def home():\n # initially all players are on the waiting list\n waiting_list = get_all_players()\n waiting_list.sort(key=total_rating, reverse=True)\n return render_template('home.html', waiting_list=waiting_list,\n num_squads_input_name=NUM_SQUADS_REQUEST_ARG)\n\n @app.route('/squad-maker')\n def make_squads():\n try:\n num_squads = get_num_squads_from_request(request)\n players = get_all_players()\n (squads, waiting_list) = make_squads_minimize_cumulative_delta_mean(num_squads, players)\n app.logger.info(\"Built %d squads with %d players on the waiting list\" % (len(squads), len(waiting_list)))\n for squad in squads:\n squad.players.sort(key=total_rating, reverse=True)\n waiting_list.sort(key=total_rating, reverse=True)\n return render_template('squads.html', squads=squads, waiting_list=waiting_list)\n except ValueError as e:\n # A ValueError indicates a problem with one or more of the input arguments. We\n # want to show these types of errors to the user. 
All other errors/exceptions should\n # trigger a 5XX error\n app.logger.info(\"Got a ValueError while building squads: %s\" % str(e))\n flash(str(e), 'error')\n return redirect(url_for('home'))\n\n @app.errorhandler(404)\n def handle_page_not_found(e):\n return render_template('not_found.html')\n\n def get_all_players():\n if PLAYER_SOURCE_CONFIG not in app.config:\n raise Exception(\"Missing required '%s' configuration variable\" % PLAYER_SOURCE_CONFIG)\n players = app.config[PLAYER_SOURCE_CONFIG]()\n app.logger.info(\"Sourced data for %d players\" % (len(players) if players else None))\n return players\n\n def total_rating(player):\n \"\"\" Sort players by cumulative skill rating \"\"\"\n return player.skating + player.shooting + player.checking\n\n return app\n\n\ndef get_num_squads_from_request(request):\n value = request.args.get(NUM_SQUADS_REQUEST_ARG, '')\n\n if value in ['', None]:\n raise ValueError(\"You must enter the number of squads to make.\")\n\n try:\n num_squads = int(value)\n except ValueError:\n raise ValueError(\"'%s' is not a valid number of squads.\" % value)\n\n if num_squads == 0:\n raise ValueError(\"You must build at least one squad.\")\n\n if num_squads < 0:\n raise ValueError(\"You cannot build a negative number of squads.\")\n return num_squads\n","repo_name":"arthurcode/squad-maker","sub_path":"squad_maker_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26174737264","text":"class Category:\n\n def __init__(self, category):\n self.name = category\n self.ledger = [] # Creating dictionary to store values\n\n\n def deposit(self, amount, description=''):\n # Function to append items to the ledger\n self.ledger.append({'amount':amount, 'description':description})\n\n def withdraw (self, amount, description=''):\n # Function to withdraw funds from the ledger\n if self.check_funds(amount):\n self.ledger.append({'amount':-amount, 'description':description})\n return True\n return False\n\n def get_withdrawals(self):\n withdraw = 0\n for l in self.ledger:\n if l['amount']<0:\n withdraw += -l['amount']\n return withdraw\n\n def get_balance(self):\n # It returns the current balance\n bal = 0\n for l in self.ledger:\n bal += l['amount']\n return bal\n\n \n def transfer(self, amount, category_object):\n \n if self.withdraw(amount, \"Transfer to \"+category_object.name):\n category_object.deposit(amount, \"Transfer from \"+self.name)\n return True\n return False\n\n def check_funds(self, amount):\n fund = 0\n for l in self.ledger:\n fund += l['amount']\n if amount>fund:\n return False\n return True\n\n def __str__(self): \n # Returns formatted output\n brk = '\\n'\n title = self.name.center(30, '*') + brk\n list = ''\n for l in self.ledger:\n list += '{:<23}'.format(l['description'])[:23]\n t = '{:.2f}'.format(l['amount'])\n # for display '#######' if amount is too long\n t = '{:>7}'.format(t)[:7]\n list += t + brk \n total = 'Total: ' + str(self.get_balance())\n return title + list + total\n\ndef create_spend_chart(categories):\n # Return a string that is a bar chart\n brk = '\\n'\n title = 'Percentage spent by category' + brk\n\n data = {} # sum of each category\n sum = 0 # sum over all categories\n longest = 0 \n for category in categories:\n if not category.name in data:\n data[category.name] = 0\n w = category.get_withdrawals()\n data[category.name] += w\n if len(category.name)>longest:\n longest = len(category.name)\n sum += 
w\n\n perc = {} \n tuples = data.items() # dictionary\n for k,v in tuples:\n perc[k] = int(v/sum * 10) * 10 # 75,4 -> 70\n \n # Creating chart\n list = ''\n for lp in range(100,-10,-10):\n list += \"{:>3}\".format(str(lp))+'| '\n for k,v in perc.items():\n if lp<=v:\n list += 'o '\n else:\n list += ' '\n list += brk\n \n sep = (' {:->'+str(4+2*len(categories))+'}').format('') + brk\n\n legend = ''\n for i in range(longest):\n legend += ' '\n for category in categories:\n if i full speed forward\n # 1ms pulse => full speed backward\n COUNTER_CLOCKWISE = (0.002 / (1 / FREQUENCY)) * 100\n CLOCKWISE = (0.001 / (1 / FREQUENCY)) * 100\n\n def __init__(self, signal_pin=14):\n self.signal_pin = signal_pin\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.signal_pin, GPIO.OUT)\n\n def dispense(self, amount=10):\n p = GPIO.PWM(self.signal_pin, self.FREQUENCY)\n\n # Start PWM with 0% duty cycle\n p.start(0)\n\n # Rotate motor for 0.05 sec clockwise\n p.ChangeDutyCycle(self.CLOCKWISE)\n time.sleep(0.05)\n\n # Cleanup\n p.stop()\n GPIO.cleanup()\n\nif __name__ == '__main__':\n ServoController().dispense()\n","repo_name":"jbargu/remote_pet_dispenser","sub_path":"servo_controller.py","file_name":"servo_controller.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36327757596","text":"import csv\nimport datetime\nimport json\nimport os\nimport sqlite3\nimport urllib.request\n\nimport click\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\n\nSPREADSHEET = 'src/db/data/ETH.csv'\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\ndef get_db():\n\t\"\"\" Connect to database \"\"\"\n\tif 'db' not in g:\n\t\tg.db = sqlite3.connect(\n\t\t\tcurrent_app.config['DATABASE'],\n\t\t\tdetect_types=sqlite3.PARSE_DECLTYPES\n\t\t)\n\t\tg.db.row_factory = sqlite3.Row\n\treturn g.db\n\ndef close_db(e=None):\n\t\"\"\" Close database connection \"\"\"\n\tdb = g.pop('db', None)\n\n\tif db is not None:\n\t\tdb.close()\n\ndef init_db():\n\t\"\"\" Clear existing data and create new tables \"\"\"\n\tdb = get_db()\n\n\twith current_app.open_resource('db/sql/schema.sql') as f:\n\t\tdb.executescript(f.read().decode('utf8'))\n\ndef insert_db():\n\t\"\"\" Add CSV data into database \"\"\"\n\tdb = get_db()\n\tcur = db.cursor()\n\tfile = open(SPREADSHEET)\t\n\tcontents = csv.reader(file)\n\n\twith current_app.open_resource('db/sql/insert.sql') as f:\n\t\tcur.executemany(f.read().decode('utf8'), contents)\n\t\tdb.commit()\n\ndef data_db():\n\t\"\"\" Query API and add data to CSV \"\"\"\n\tf = urllib.request.Request('https://min-api.cryptocompare.com/data/v2/histoday?fsym=ETH&tsym=USD&limit=2000', \n\t\t\t\t\t\t\t\tdata=None, headers={'authorization': SECRET_KEY})\n\twith urllib.request.urlopen(f) as r:\n\t\ttry:\n\t\t\t# Parse json response\n\t\t\tdata = json.loads(r.read().decode('utf-8'))\n\t\t\t# Dict write csv module\n\t\t\twith open(SPREADSHEET, 'w', newline='') as csvfile:\n\t\t\t\t# Sequence of keys that specify the write order to file \n\t\t\t\tfieldnames = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']\n\t\t\t\t# Dict write csv module\n\t\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\t\t\t# Create a dictionary of fieldnames\n\t\t\t\twriter.writeheader()\n\t\t\t\t# Iterate over the data list\n\t\t\t\tfor x in data['Data']['Data']:\n\t\t\t\t\t# Convert time stamp\n\t\t\t\t\tx['time'] = datetime.datetime.utcfromtimestamp(x['time']).isoformat()[0:10]\n\t\t\t\t\t# Write to 
file\n\t\t\t\t\twriter.writerow({'Date': x['time'],'Open': x['open'], 'High': x['high'], \n\t\t\t\t\t\t\t\t\t'Low': x['low'], 'Close': x['close'], 'Adj Close': x['close'],\n\t\t\t\t\t\t\t\t\t'Volume': x['volumefrom']})\n\n\t\texcept Exception as e:\n\t\t\tprint(\"Error getting coin information %s\" % str(e))\n\t\t\treturn None\t\n\n\t\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n\t\"\"\"Clear the existing data and create new tables.\"\"\"\n\tinit_db()\n\tclick.echo('Initialized the database.')\n\n@click.command('insert-db')\n@with_appcontext\ndef insert_db_command():\n\t\"\"\" Add csv file to database \"\"\"\n\tinsert_db()\n\tclick.echo('Data added successfully')\n\n@click.command('data-db')\n@with_appcontext\ndef data_db_command():\n\t\"\"\" Query API and add data to CSV \"\"\"\n\tdata_db()\n\tclick.echo('Upload to CSV complete')\n\n@click.command('db')\n@with_appcontext\ndef db_command():\n\t\"\"\" Run all DB commands \"\"\"\n\tinit_db()\n\tdata_db()\n\tinsert_db()\n\tclick.echo('Database operational')\n\ndef init_app(app):\n\t\"\"\" Register database functions with the Flask app \"\"\"\n\tapp.teardown_appcontext(close_db)\n\tapp.cli.add_command(init_db_command)\n\tapp.cli.add_command(insert_db_command)\n\tapp.cli.add_command(data_db_command)\n\tapp.cli.add_command(db_command)","repo_name":"brianfray/ethereum-price-json","sub_path":"src/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18813140842","text":"from typing import Dict\r\nfrom pydantic import BaseModel\r\nfrom fastapi import FastAPI\r\nimport os\r\nimport uvicorn\r\nfrom transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM\r\n\r\nsaved_tokenizer = AutoTokenizer.from_pretrained('./models/t5-base-finetuned_TextToSql')\r\nsaved_model = AutoModelForSeq2SeqLM.from_pretrained('./models/t5-base-finetuned_TextToSql')\r\n\r\napp = FastAPI()\r\n\r\nprint(\"loading tokenizer + model\")\r\n\r\nsaved_tokenizer = AutoTokenizer.from_pretrained('./models/t5-base-finetuned_TextToSql')\r\nsaved_model = AutoModelForSeq2SeqLM.from_pretrained('./models/t5-base-finetuned_TextToSql')\r\n\r\nprint(\"loaded tokenizer + model\")\r\n\r\nclass Request(BaseModel):\r\n text: str\r\n\r\nclass Response(BaseModel):\r\n Query: str\r\n\r\n@app.get('/')\r\nasync def index():\r\n return {\"message\" : \"Convert Text to Sql\"}\r\n\r\n\r\n@app.post(\"/predict\", response_model=Response)\r\nasync def predict(request: Request):\r\n conversion_text_sample = f'text to sql: {request.text}'\r\n\r\n #output = sorted(CLF(request.text)[0], key=lambda x: x['score'], reverse=True) # use our pipeline and sort results\r\n\r\n input_ids = saved_tokenizer(conversion_text_sample, return_tensors='pt').input_ids\r\n\r\n outputs = saved_model.generate(input_ids, max_length= 512)\r\n query = saved_tokenizer.decode(outputs[0], skip_special_tokens=True)\r\n\r\n return Response(\r\n Query = query\r\n )\r\n\r\n\r\n#if __name__ == '__main__':\r\n# uvicorn.run(\"app:app\", host=\"127.0.0.1\", port=5050, reload= True)\r\n\r\n# uvicorn api:app --reload : run this command\r\n# http://127.0.0.1:8000/docs : open this link\r\n","repo_name":"himanshu-pachori/my_work","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69816103895","text":"from airflow.utils.decorators import 
apply_defaults\nfrom airflow.contrib.operators.qubole_operator import QuboleOperator\nfrom airflow.contrib.hooks.qubole_hook import QuboleHook\nfrom cStringIO import StringIO\n\n\nclass QuboleResultOperator(QuboleOperator):\n \"\"\"\n Execute tasks (commands) on QDS (https://qubole.com).\n Push the results of these tasks to\n an xcom named 'qbol_cmd_results' if the number\n of rows in the results are less than or equal to 10\n \"\"\"\n\n @apply_defaults\n def __init__(self, *args, **kwargs):\n super(QuboleResultOperator, self).__init__(*args, **kwargs)\n\n def execute(self, context):\n\n self.hook = QuboleHook(*self.args, **self.kwargs)\n self.hook.execute(context)\n cmd = self.hook.cmd\n\n ti = context['ti']\n\n if cmd is not None:\n query_result_buffer = StringIO()\n cmd.get_results(fp=query_result_buffer, inline=True)\n query_result = query_result_buffer.getvalue().strip()\n query_result_buffer.close()\n row_list = filter(None, query_result.split('\\r\\n'))\n if len(row_list) <= 10:\n ti.xcom_push(key='qbol_cmd_results', value=query_result)\n","repo_name":"devj/conf_demo","sub_path":"operators/qubole_result_operator.py","file_name":"qubole_result_operator.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73495037652","text":"from models.shared import db\nfrom datetime import datetime\n\n\nclass LandData(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n parcel_number = db.Column(db.String, unique=True)\n county_reference = db.Column(db.String)\n county_id = db.Column(db.Integer, db.ForeignKey('county.id'))\n county = db.relationship('County', uselist=False, lazy='subquery')\n sale_date = db.Column(db.DateTime)\n sale_price = db.Column(db.String)\n sale_price_num = db.Column(db.Float)\n acres = db.Column(db.Float)\n current_use = db.Column(db.String)\n property_type = db.Column(db.String)\n state = db.Column(db.String, nullable=True)\n\n createdAt = db.Column(db.DateTime, nullable=False)\n updatedAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(LandData, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'parcelNumber': self.parcel_number,\n 'countyReference': self.county_reference,\n 'state': self.state,\n 'saleDate': self.sale_date,\n 'salePrice': self.sale_price,\n 'salePriceNum': self.sale_price_num,\n 'acres': self.acres,\n 'currentUse': self.current_use,\n 'propertyType': self.property_type,\n 'createdAt': self.createdAt,\n 'updatedAt': self.updatedAt,\n 'county': self.county.serialize\n }\n","repo_name":"dk-extdev/react_sass_flask_evaluation","sub_path":"models/LandData.py","file_name":"LandData.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38520946876","text":"\"\"\"\nverify that the filter() method behaves as expected\n\"\"\"\n\nimport dicttools\nimport unittest\n\nclass MaxTests(unittest.TestCase):\n\n def test_max_keys(self):\n \"\"\"\n verify that you can find the maximum value burried deep in the \n dictionary.\n \"\"\"\n dictionary = {\n 1: { 2: {}, 3: {} },\n 2: { 4: {}, 7: {} },\n }\n\n \n # filter out the odd elements\n actual = dicttools.max(dictionary)\n self.assertEquals(7, actual, msg=\"%s != %s\" % 
(actual, 7))\n\n def test_max_non_recursive_keys(self):\n \"\"\"\n verify that you can find the maximum value without recursion at the \n top level.\n \"\"\"\n dictionary = {\n 1: { 2: {}, 3: {} },\n 3: { 4: {}, 6: {} },\n }\n\n \n # filter out the odd elements\n actual = dicttools.max(dictionary, recursive=False)\n self.assertEquals(3, actual, msg=\"%s != %s\" % (actual, 3))\n\n","repo_name":"rlgomes/dicttools","sub_path":"test/maxtests.py","file_name":"maxtests.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35417892141","text":"from sys import stdin, setrecursionlimit\nsetrecursionlimit(10**6)\n\n# node class \nclass node():\n def __init__(self,data):\n self.data = data\n self.next = None\n# take input\ndef takeInput():\n head = None\n tail = None\n datas = list(map(int,stdin.readline().rstrip().split(\" \")))\n\n for currData in datas:\n if currData == -1:\n break\n newNode = node(currData)\n if head is None:\n head = newNode\n tail = newNode\n else:\n tail.next = newNode\n tail = newNode\n return head\n\n# print Linked list\ndef printLL(head):\n while head is not None:\n print(str(head.data),end=\" \")\n head = head.next\n print()\n\n# reverse linked list\ndef revesreLL(head):\n if head is None or head.next is None:\n return head\n smallHead = revesreLL(head.next)\n tail = head.next\n tail.next = head\n head.next = None\n return smallHead\n\n\n# Main function\n\nt = int(stdin.readline().rstrip())\n\nwhile t > 0:\n head = takeInput()\n printLL(head)\n head = revesreLL(head)\n printLL(head)\n t -= 1","repo_name":"lokeshvelayudham/DataStructureAlgorathim-in-Python","sub_path":"2.dataStructures/7.linkedList2/reverseLLR3.py","file_name":"reverseLLR3.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24092308741","text":"def main():\n with open('Dane_PR/liczby.txt', 'r') as file:\n arr = [int(x) for x in file.read().strip().split('\\n')]\n print(f'zad 4.1: {count_powers_of_3(arr)}')\n print(f'zad 4.2: {numbers_equal_to_factorials_sum(arr)}')\n print(f'zad 4.3:\\n{find_longest_series_in(arr)}')\n\n\ndef count_powers_of_3(arr: list):\n numbers = 0\n for num in arr:\n if is_power_of_3(num):\n numbers += 1\n return numbers\n\n\ndef is_power_of_3(num):\n if num == 1:\n return True\n elif num % 3 != 0:\n return False\n else:\n return is_power_of_3(num / 3)\n\n\ndef numbers_equal_to_factorials_sum(arr: list):\n result = []\n mem = {0: 1, 1: 1}\n for num in arr:\n factorials = [factorial(int(digit), mem) for digit in str(num)]\n if sum(factorials) == num:\n result.append(num)\n return result\n\n\ndef factorial(n: int, mem: dict):\n if mem.get(n) is None:\n mem[n] = n * factorial(n - 1, mem)\n return mem[n]\n\n\ndef find_longest_series_in(arr: list):\n longest_ans = [0, 0, 0]\n for i in range(len(arr)):\n ans = find_longest_series([arr[i]], arr[i], arr[i + 1:])\n longest_ans = ans if ans[1] > longest_ans[1] else longest_ans\n return f'pierwsza liczba: {longest_ans[0][0]}\\ndlugosc ciagu: {longest_ans[1]}\\nnwd: {longest_ans[2]}'\n\n\ndef find_longest_series(current_series: list, curr_nwd: int, rest: list):\n if len(rest) == 0 or nwd(curr_nwd, rest[0]) == 1:\n return [current_series, len(current_series), curr_nwd]\n current_series.append(rest[0])\n return find_longest_series(current_series, nwd(curr_nwd, rest[0]), rest[1:])\n\n\ndef nwd(a, b): return nwd(b, a % b) if b else a\n\n\nif 
__name__ == '__main__':\n main()\n","repo_name":"dotrooe/korki","sub_path":"maturki/2019/zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19450974808","text":"from email import message\nfrom unicodedata import name\nimport telebot\nfrom telebot import types\n\n#5624965452:AAGrdQGCu8O6AzbiHr7snSuJXi884-EEuHM\nbot = telebot.TeleBot(\"5624965452:AAGrdQGCu8O6AzbiHr7snSuJXi884-EEuHM\")\n\ntown_list = ['тбилиси', 'кутаиси', 'батуми', 'рустави', 'tbilisi', 'kutaisi', 'batumi', 'rustavi'] #Кобулети, Боржоми, Мцхета, и др. и кнопки\n\n@bot.message_handler(commands=[\"start\"])\ndef start(m, res=False):\n bot.send_message(m.chat.id, 'В каком городе вы ищете жилье?')\n\none_request_dict = {} #сформировать словарь ID/one_request_dict\n\n@bot.message_handler(content_types=[\"text\"])\ndef handle_town(message):\n if message.text.lower() not in town_list:\n bot.send_message(message.from_user.id, \"Попробуйте ввести название города, например, Тбили��и\")\n else:\n one_request_dict[\"town\"] = message.text\n if one_request_dict[\"town\"] == 'тбилиси' or 'tbilisi':\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"Глдани\")\n btn2 = types.KeyboardButton(\"Дидубе\")\n btn3 = types.KeyboardButton(\"Ваке\")\n btn4 = types.KeyboardButton(\"Исани\")\n btn5 = types.KeyboardButton(\"Крцанисси\")\n btn6 = types.KeyboardButton(\"Мтацминда\")\n btn7 = types.KeyboardButton(\"Надзаладеви\")\n btn8 = types.KeyboardButton(\"Сабуртало\")\n btn9 = types.KeyboardButton(\"Самгори\")\n btn10 = types.KeyboardButton(\"Чугурети\")\n btn11 = types.KeyboardButton(\"Окрестности Тбилиси\")\n markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10, btn11)\n if one_request_dict[\"town\"] == 'батуми' or 'batumi':\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"Аэропорт\")\n btn2 = types.KeyboardButton(\"Агмашенебели\")\n btn3 = types.KeyboardButton(\"Багратиони\")\n btn4 = types.KeyboardButton(\"Бони-Городокский район\")\n btn5 = types.KeyboardButton(\"Поселок Тамар\")\n btn6 = types.KeyboardButton(\"Кахаберийский район\")\n btn7 = types.KeyboardButton(\"Руставельский район\")\n btn8 = types.KeyboardButton(\"Старый Батуми\")\n btn9 = types.KeyboardButton(\"Химшиашвили\")\n btn10 = types.KeyboardButton(\"Джавахишвили Район\")\n markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10)\n if one_request_dict[\"town\"] == 'кутаиси' or 'kutaisi':\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"Поселок Авангарди\")\n btn2 = types.KeyboardButton(\"Поселок Автокархана\")\n btn3 = types.KeyboardButton(\"Поселок Асатиани\")\n btn4 = types.KeyboardButton(\"Пос. 
Агмашенебели\")\n btn5 = types.KeyboardButton(\"Балахвани\")\n btn6 = types.KeyboardButton(\"Бжолеби\")\n btn7 = types.KeyboardButton(\"Холм Габашвили\")\n btn8 = types.KeyboardButton(\"Гора Сакуслиа\")\n btn9 = types.KeyboardButton(\"Гуматеси\")\n btn10 = types.KeyboardButton(\"Вакисубани\")\n btn11 = types.KeyboardButton(\"Застава\")\n btn12 = types.KeyboardButton(\"Мепесутубани\")\n btn13 = types.KeyboardButton(\"Мцванеквавила\")\n btn14 = types.KeyboardButton(\"Поселок Никея\")\n btn15 = types.KeyboardButton(\"Ниноцминда\")\n btn16 = types.KeyboardButton(\"Рионгеси\")\n btn17 = types.KeyboardButton(\"Сафичхиа\")\n btn18 = types.KeyboardButton(\"Сагориа\")\n btn19 = types.KeyboardButton(\"Укимериони\")\n btn20 = types.KeyboardButton(\"Кроника\")\n btn21 = types.KeyboardButton(\"Укимериони\")\n btn22 = types.KeyboardButton(\"Кроника\")\n markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10, btn11, btn12, btn13, btn14, btn15, btn16, btn17, btn18, btn19, btn20, btn21, btn22)\n bot.send_message(message.from_user.id, \"Какой район \"+one_request_dict[\"town\"]+\" вас интересует?\".format(message.from_user), reply_markup=markup)\n bot.register_next_step_handler(message, handle_district)\ndef handle_district(message):\n one_request_dict[\"district\"] = message.text\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"Квартира\")\n btn2 = types.KeyboardButton(\"Комната\")\n btn3 = types.KeyboardButton(\"Дом\")\n markup.add(btn1, btn2, btn3)\n bot.send_message(message.chat.id, text=\"Вы ищете квартиру, комнату или дом?\".format(message.from_user), reply_markup=markup)\n bot.register_next_step_handler(message, handle_type_of_house)\ndef handle_type_of_house(message):\n one_request_dict[\"type_of_house\"] = message.text\n if one_request_dict[\"type_of_house\"] != \"Комната\":\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"1\")\n btn2 = types.KeyboardButton(\"2\")\n btn3 = types.KeyboardButton(\"3\")\n btn4 = types.KeyboardButton(\"4\")\n markup.add(btn1, btn2, btn3, btn4)\n bot.send_message(message.chat.id, text=\"Сколько вам нужно комнат?\".format(message.from_user), reply_markup=markup)\n bot.register_next_step_handler(message, handle_flat_quolity)\n else:\n one_request_dict[\"flat_quolity\"] = 1\n bot.send_message(message.from_user.id, \"Введите минимальный порог цены (в долларах)\")\n bot.register_next_step_handler(message, handle_min_prise) \ndef handle_flat_quolity(message):\n one_request_dict[\"flat_quolity\"] = int(message.text)\n bot.send_message(message.from_user.id, \"Введите минимальный порог цены (в долларах)\")\n bot.register_next_step_handler(message, handle_min_prise)\ndef handle_min_prise(message):\n one_request_dict[\"min_prise\"] = 0\n if message.text.isdigit():\n one_request_dict[\"min_prise\"] = int(message.text)\n bot.send_message(message.from_user.id, \"Введите максимальный порог цены (в долларах)\")\n bot.register_next_step_handler(message, handle_max_prise)\n else:\n bot.send_message(message.from_user.id, \"Цифрами, пожалуйста. 
Попробуйте еще раз\")\n bot.register_next_step_handler(message, handle_min_prise)\ndef handle_max_prise(message):\n one_request_dict[\"max_prise\"] = 0\n if message.text.isdigit():\n one_request_dict[\"max_prise\"] = int(message.text)\n if one_request_dict[\"max_prise\"] < one_request_dict[\"min_prise\"]:\n bot.send_message(message.from_user.id, 'Максимальный порог цены меньше минимального, попробуйте еще раз');\n bot.send_message(message.from_user.id, 'Введите минимальный порог цены (в долларах)');\n bot.register_next_step_handler(message, handle_min_prise)\n else:\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(\"Да\")\n btn2 = types.KeyboardButton(\"Нет\")\n markup.add(btn1, btn2)\n bot.send_message(message.chat.id, text=\"Показывать объявления только от собственника?\".format(message.from_user), reply_markup=markup)\n bot.register_next_step_handler(message, handle_owner)\n else:\n bot.send_message(message.from_user.id, \"Цифрами, пожалуйста. Попробуйте еще раз\")\n bot.register_next_step_handler(message, handle_max_prise)\ndef handle_owner(message):\n one_request_dict[\"handle_owner\"] = message.text\none_request_dict = {}\nbot.infinity_polling()\n\n","repo_name":"DedNikifor111/Bot_myhome","sub_path":"Bot_myhome/Bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1866377423","text":"\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\n\nfrom setuptools import setup\nimport platform\nimport sys\nimport warnings\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"darwin\":\n try:\n import py2app\n except ImportError:\n pass\n extra_includes = []\nelif current_platform == \"windows\":\n try:\n import py2exe\n except ImportError:\n pass\n import sys\n extra_includes = ['pyodbc', 'inspect']\n sys.path.append(\n \"C:\\\\Windows\\\\winsxs\\\\x86_microsoft.vc90.crt_1fc8b3b9a1e18e3b_9.0.21022.8_none_bcb86ed6ac711f91\")\nfrom __init__ import VERSION\n\n\ndef is_wxpython_installed():\n \"\"\"Returns True if wxpython is installed\"\"\"\n try:\n return __import__(\"wx\")\n except ImportError:\n return False\n\n\ndef clean_version(v):\n if v == 'master':\n return '1.0.0'\n return v.replace('v', '').replace('.rc', '').replace('.beta', '')\n\npackages = [\n 'retriever.lib',\n 'retriever.engines',\n 'retriever.app',\n 'retriever',\n]\n\nincludes = [\n 'xlrd',\n 'wx',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'optparse',\n 'getopt',\n 'pickle',\n 'calendar',\n 'pdb',\n 'inspect',\n 'email',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl',\n]\n\n\nwx_installed = is_wxpython_installed()\n\nif wx_installed is False:\n warnings.warn(\"\"\"wxpython is not installed.\n Retriever will not work in GUI mode.\n For retriever-gui install python-wxpython and\n run 'python setup.py install' again.\"\"\",\n UserWarning\n )\n\nsetup(name='retriever',\n version=clean_version(VERSION),\n description='EcoData Retriever',\n author='Ben Morris, Ethan White, Henry Senyondo',\n author_email='ethan@weecology.org',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 
2',],\n packages=packages,\n package_dir={\n 'retriever': ''\n },\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n ],\n\n # py2exe flags\n console=[{'script': \"__main__.py\",\n 'dest_base': \"retriever\",\n 'icon_resources': [(1, 'icon.ico')]\n }],\n zipfile=None,\n\n # py2app flags\n app=['__main__.py'],\n data_files=[('', ['CITATION'])],\n setup_requires=['py2app'] if current_platform == 'darwin' else [],\n\n # options\n # optimize is set to 1 of py2app to avoid errors with pymysql\n # bundle_files = 1 or 2 was causing failed builds so we moved\n # to bundle_files = 3 and Inno Setup\n options={'py2exe': {'bundle_files': 3,\n 'compressed': 2,\n 'optimize': 1,\n 'packages': packages,\n 'includes': includes,\n 'excludes': excludes,\n },\n 'py2app': {'packages': ['retriever'],\n 'includes': includes,\n 'site_packages': True,\n 'resources': [],\n 'optimize': 1,\n 'argv_emulation': True,\n 'no_chdir': True,\n 'iconfile': 'osx_icon.icns',\n },\n },\n )\n\n\ntry:\n from compile import compile\n compile()\nexcept:\n pass\n","repo_name":"ReyhanehA/GDP60","sub_path":"145845_setup.py_C__Users_user_Desktop_data_2_data_google_data_weecology_retriever.py","file_name":"145845_setup.py_C__Users_user_Desktop_data_2_data_google_data_weecology_retriever.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1382669829","text":"import pytest\nfrom mock import MagicMock, patch, call\n\nfrom pyolite.models.user import User\n\n\ndef set_mocks():\n mocks = {}\n\n for mock in ['initial_path', 'path', 'file', 'keys', 'git',\n 'first_key',\n 'second_key']:\n mocks[mock] = MagicMock()\n\n key_path = 'tests/fixtures/second_simple_key.pub'\n mocks['second_key'].__str__ = lambda x: key_path\n mocks['second_key'].isdir.return_value = True\n\n key_path = 'tests/fixtures/simple_key.pub'\n mocks['first_key'].__str__ = lambda x: key_path\n mocks['first_key'].isdir.return_value = False\n\n mocks['file'].walk.return_value = [mocks['first_key']]\n mocks['path'].return_value = mocks['file']\n\n return mocks\n\n\ndef test_if_a_user_can_be_retrieved_by_name():\n mocks = set_mocks()\n\n with patch.multiple('pyolite.models.user', Path=mocks['path'],\n ListKeys=mocks['keys']):\n user = User(mocks['initial_path'], mocks['git'], 'vtemian', repos=None,\n keys=[mocks['first_key']])\n test_user = User.get_by_name('vtemian', mocks['initial_path'],\n mocks['git'])\n\n assert test_user.name == user.name\n assert test_user.repos == user.repos\n assert test_user.keys == user.keys\n assert test_user.path == user.path\n assert test_user.git == user.git\n\n mocks['path'].has_calls([\n call('path', 'keydir'),\n call('path', 'conf/')\n ])\n\n assert str(test_user) == '< vtemian >'\n assert repr(test_user) == '< vtemian >'\n\n\ndef test_if_user_is_admin():\n mocks = set_mocks()\n\n with patch.multiple('pyolite.models.user', Path=mocks['path'],\n ListKeys=mocks['keys']):\n user = User(mocks['initial_path'], mocks['git'], 'vtemian', repos=None,\n keys=[mocks['first_key']])\n assert not user.is_admin\n\n user.repos = ['/path/to/gitolite/admin/gitolite.conf']\n assert user.is_admin\n\n\ndef test_get_user_by_nothing_it_should_raise_value_error():\n mocks = set_mocks()\n\n with patch.multiple('pyolite.models.user', Path=mocks['path'],\n ListKeys=mocks['keys']):\n with pytest.raises(ValueError):\n User.get(MagicMock(), mocks['git'], 
mocks['path'])\n","repo_name":"presslabs/pyolite","sub_path":"tests/models/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"67"} +{"seq_id":"74211189974","text":"import pytest\n\nfrom src.test.test_sensors.test_sensor import TestSensor as Sensor\nfrom src.sensors.dht11_sensor import DHT11_Sensor\n\nclass DHT11_Fake_Sensor(DHT11_Sensor):\n \"\"\" This is class to use as data class for DHT11 tests \"\"\"\n def __init__(self, pin=1, humidity=30, temperature=20, key_name=\"test\", sensor_data=10):\n self.pin = pin\n self.raw_data = self.RawData()\n self.raw_data.humidity = humidity\n self.raw_data.temperature = temperature\n self.raw_data.valid = True\n self.key_name = key_name\n self.sensor_data = sensor_data \n\n class RawData:\n def __init__(self):\n pass\n\n def is_valid(self):\n return self.valid\n\nclass TestDHT11_Sensor:\n \"\"\" General tests for DHT11_sensor functions \"\"\"\n def __init__(self):\n self._dht11_reader()\n\n def _dht11_reader(self):\n self.reader = DHT11_Fake_Sensor() #Leave default values\n\n def test_init(self):\n assert self.reader.pin == 1\n assert self.reader.raw_data.humidity == 30\n assert self.reader.raw_data.temperature == 20\n assert self.reader.key_name == \"test\"\n assert self.reader.sensor_data == 10\n\n def test_get_data(self):\n \"\"\" Function to test __str__() \"\"\"\n key1 = \"humidity\"\n key2 = \"temperature\"\n \n expected_data1 = {key1:self.reader.sensor_data}\n expected_data2 = {key2:self.reader.sensor_data}\n\n self.reader.key_name = key1\n data1 = self.get_data()\n\n assert data1 == expected_data1\n \n \n","repo_name":"atyu1/SSPro-Collector","sub_path":"src/test/test_sensors/test_dht11_sensor.py","file_name":"test_dht11_sensor.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23907086098","text":"#Aluno: Deivison rodrigues jordao\r\n# programa que mostra a divisao inteora e o resto da divisao de 2 numeros\r\n\r\n#Declaracao de variaveis\r\n\r\nnumero_1 = 0\r\nnumero_2 = 0\r\ndivisao_int = 0\r\nresto = 0\r\n\r\n#Processamento\r\n\r\nnumero_1 = float(input(\"Digite o primeiro numero(dividendo): \"))\r\nnumero_2 = float(input(\"Digite o segundo numero(divisor): \"))\r\n\r\nwhile(numero_1 > 0 ):\r\n numero_1 = numero_1 - numero_2\r\n if(numero_1 < 0):\r\n numero_1 = numero_1 + numero_2\r\n break\r\n divisao_int = divisao_int + 1 \r\nresto = numero_1\r\n\r\n#Saida \r\nprint(divisao_int)\r\nprint(resto)","repo_name":"deivisongithub/intro-a-programacao","sub_path":"lista 4/Respostas da lista 4 while/Questão 4.py","file_name":"Questão 4.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72035424213","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('sign-up/', views.sign_up_view, name='sign-up'),\n path('sign-in/', views.sign_in_view, name='sign-in'),\n path('logout/', views.logout, name='logout'),\n path('user/', views.user_view, name='user-list'),\n path('user/follow//', views.user_follow, name='user-follow'),\n]","repo_name":"kinghong97/mySpartaSns","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30762352115","text":"from Node import Node\n\n\nclass Tree:\n \"\"\"docstring for Tree\"\"\"\n def __init__(self, root):\n self.node = Node(list(root), None)\n\n def solve(self):\n self.create_moves(self.node.root)\n\n def create_moves(self, parent):\n result = []\n for i in range(len(parent)):\n if parent[i] == '_':\n if parent[i - 1] == '>' and i - 1 >= 0:\n temp = parent[:]\n temp[i], temp[i - 1] = temp[i - 1], temp[i]\n result.append(temp)\n if parent[i - 2] == '>' and i - 2 >= 0:\n temp = parent[:]\n temp[i], temp[i - 2] = temp[i - 2], temp[i]\n result.append(temp)\n if parent[i + 1] == '<' and i + 1 <= len(parent) - 1:\n temp = parent[:]\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n result.append(temp)\n if parent[i + 2] == '<' and i + 2 <= len(parent) - 1:\n temp = parent[:]\n temp[i], temp[i + 2] = temp[i + 2], temp[i]\n result.append(temp)\n if(len(result) == 0):\n return False\n return result\n\n def append_children(self):\n pass\n\n","repo_name":"mirchev1977/python-101-2016","sub_path":"week-06/03-frog-leap/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16538839988","text":"import os, sys\n\nos.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n \n \nimport pygame.transform\n \n \nif 1:\n #some platforms might need to init the display for some parts of pygame.\n import pygame.display\n pygame.display.init()\n screen = pygame.display.set_mode((1,1))\n\nfilename = \"\" #No dots allowed in filename except for extension\n\nnameonly = filename.split('.')[0]\nextension = filename.split('.')[1]\n\nimage = pygame.image.load(os.path.join(filename)).convert_alpha()\nimage = pygame.transform.scale(image, (image.get_width() * 2, image.get_height() * 2))\npygame.image.save(image, nameonly + \"2x.\" + extension)\n","repo_name":"GudniNathan/PixelArtDoubleUp","sub_path":"PixelArtDoubleUp.py","file_name":"PixelArtDoubleUp.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29141909490","text":"import pytest\nfrom unittest import TestCase\n\nfrom blogbuilder.templates.template import MissingTemplateValueError, Template\n\n\nclass TemplateTest(TestCase):\n def test_render_uses_the_template(self) -> None:\n \"\"\"\n given a template with a template string\n and no values to interpolate\n it renders the template string as is\n \"\"\"\n basic_template_str = \"sup yo\"\n\n template = Template(basic_template_str)\n result = template.render({})\n\n assert basic_template_str == result\n\n def test_render_interpolates_value(self) -> None:\n \"\"\"\n given a template with a template string\n and a value to interpolate\n it interpolates the value into the string\n \"\"\"\n basic_template_str = \"sup $name\"\n\n template = Template(basic_template_str)\n result = template.render({\"name\": \"terry\"})\n\n assert \"sup terry\" == result\n\n def 
test_render_missing_value(self) -> None:\n \"\"\"\n given insufficient values to populate the template string\n it raises an error\n \"\"\"\n template = Template(\"$nope\")\n with pytest.raises(\n MissingTemplateValueError, match=\"Missing template value 'nope'\"\n ):\n template.render({})\n","repo_name":"th3james/BlogBuilder","sub_path":"src/blogbuilder/tests/templates/test_template.py","file_name":"test_template.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14710072940","text":"import os\nimport sys\nfrom cryptography.fernet import Fernet\n\ndef decipher(channel_name, theKey):\n if len(os.listdir(\"./logs\")) != 0:\n for filename in os.listdir(\"./logs\"):\n filename_string_list = filename.split('-')\n channel_name_extracted = filename_string_list[1]\n if channel_name == channel_name_extracted:\n cipher = Fernet(theKey)\n encrypted_text = \"\"\n with open(os.path.join('./logs',filename), 'r') as log:\n encrypted_text = log.read()\n if encrypted_text != \"\":\n try:\n decrypted_text = cipher.decrypt(encrypted_text)\n with open(os.path.join('./logs',filename), 'w') as output:\n output.write(decrypted_text)\n print(\"Finish decrypting \" + filename + \"!\")\n except:\n print(\"Error: Invalid Token to be decrypted.\" + filename +\" is corrupted or incorrect symmetric key or it has been decrypted already.\")\n else:\n print(\"There is no log for the channel \" + channel_name + \".\")\n print(\"Finish log decryption.\")\n else:\n print(\"There is no entry for\" + channel_name + \" under the logs folder.\")\n\nif len(sys.argv) != 3:\n raise Exception(\"It needs exactly 2 arguments to use this decipher program, channel name and the key.\")\nelif sys.argv[2].endswith('.txt'):\n channel_name = sys.argv[1]\n logKey_file = sys.argv[2]\n key = \"\"\n with open(logKey_file, 'rb') as logkey:\n key = logkey.read()\n decipher(channel_name,key)\nelse:\n raise Exception(\"You need to run python decipher_logs.py \\\\<#channel_name> \")\n","repo_name":"zhanpengwang888/SquirrelChat","sub_path":"server/decipher_logs.py","file_name":"decipher_logs.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"28435935271","text":"'''\\\nCreated on May 5, 2020\n\nVarious binary properties.\n\n@author: Eric Mader\n'''\n\nfrom xml.etree.ElementTree import Element\n\nfrom .UCDProperties import UCDProperties\n\npropsList = [\n (\"whitespace\", \"WSpace\"),\n (\"dash\", \"Dash\"),\n (\"hyphen\", \"Hyphen\"),\n (\"quotationMark\", \"QMark\"),\n (\"terminalPunctuation\", \"Term\"),\n (\"sentenceTerminalPunctuation\", \"STerm\"),\n (\"diacritic\", \"Dia\"),\n (\"extender\", \"Ext\"),\n (\"prependedConcatenationMark\", \"PCM\"),\n (\"softDotted\", \"SD\"),\n (\"alphabetic\", \"Alpha\"),\n (\"otherAlphabetic\", \"OAlpha\"),\n (\"math\", \"Math\"),\n (\"otherMath\", \"OMath\"),\n (\"hexDigit\", \"Hex\"),\n (\"asciiHexDigit\", \"AHex\"),\n (\"defaultIgnorable\", \"DI\"),\n (\"otherDefaultIgnorable\", \"ODI\"),\n (\"logicalOrderException\", \"LOE\"),\n (\"regionalIndicator\", \"RI\"),\n (\"graphemeBase\", \"Gr_Base\"),\n (\"graphemeExtended\", \"Gr_Ext\"),\n (\"otherGraphemeExtended\", \"OGr_Ext\"),\n (\"graphemeLink\", \"Gr_Link\"),\n (\"ideographic\", \"Ideo\"),\n (\"unifiedIdeographic\", \"UIdeo\"),\n (\"idsBinaryOperator\", \"IDSB\"),\n (\"idsTrinaryOperator\", \"IDST\"),\n (\"radical\", \"Radical\"),\n (\"deprecated\", 
\"Dep\"),\n (\"variationSelector\", \"VS\"),\n (\"nonCharacterCodePoint\", \"NChar\"),\n (\"idStart\", \"IDS\"),\n (\"otherIDStart\", \"OIDS\"),\n (\"xIDStart\", \"XIDS\"),\n (\"idContinue\", \"IDC\"),\n (\"otherIDContinue\", \"OIDC\"),\n (\"xIDContinue\", \"XIDC\"),\n (\"patternSyntax\", \"Pat_Syn\"),\n (\"patternWhitespace\", \"Pat_WS\")\n]\n\nclass BinaryProperties(UCDProperties):\n def __init__(self, char: Element, group: Element):\n UCDProperties.__init__(self, char, group)\n\n for (field, tag) in propsList:\n value = self.getBooleanProperty(tag)\n setattr(self, field, value)\n\n del self._char\n del self._group\n\n","repo_name":"ermader/UnicodeData","sub_path":"UnicodeData/BinaryProperties.py","file_name":"BinaryProperties.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"798522812","text":"from ParticleFilter import Map, ParticleFilter_FIX\nfrom config.mapInfo import map_info\n\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\nfrom utils.metric import jaccard\n\n\ndef main(args):\n if not os.path.isdir(args.output) and args.render:\n os.mkdir(args.output)\n\n _map = Map(map_info[args.env])\n pf = ParticleFilter_FIX(100, _map, args.epsilon)\n pf.init_fixed_particles(*(map_info[args.env][\"init_particle\"]))\n\n features = pd.read_csv(args.input)\n i_max = 1\n for i, col in enumerate(features.columns):\n if 'feat' in col:\n i_max = i + 1 # feat0 부터 있으니까 +1 \n features, times = np.split(features.to_numpy(), [i_max], axis=1)\n features = np.array(features, dtype=np.float32)\n label = times[:,0]\n data_id = times[:,1]\n env = times[:,2]\n\n\n ###############################\n # Likelihood shaping function #\n ###############################\n features = np.power(features,4)\n features = features - np.quantile(features, 0.75, axis=1).reshape(-1,1)\n \n\n result = {'left': {'left':0, 'front':0, 'right':0},\n 'front':{'left':0, 'front' :0, 'right':0},\n 'right':{'left':0, 'front':0, 'right':0}}\n for i in tqdm(range(times.shape[0])):\n if env[i] == args.env and label[i] != 'none':\n pf.init_fixed_particles(*(map_info[args.env][\"init_particle\"]))\n pf.set_weight([1,0.5,0.5,0.3])\n feature = features[i]\n featlen = feature.shape[0]//2\n feature_L = [feature[:featlen], feature[:featlen]]\n\n feat = feature_L[0]\n if label[i] == 'front':\n feat = feature_L[1]\n\n pf.update(feat)\n if args.render:\n pf.render(args.output, env[i]+label[i]+data_id[i])\n pred = pf.predict()\n \n # try\n if label[i] == pred:\n result[label[i]][label[i]] += 1\n else:\n result[label[i]][pred] += 1\n\n jac = jaccard(result)\n print(args.env, \"result\")\n print(\"label:\\tN\\tJ\\tTP\\tFP\\tFN\")\n for key in ['left','front','right']:\n print(\"%s:\\t%d\\t%.3f\\t%d\\t%d\\t%d\"%(key, jac[key]['N'], jac[key]['J'], jac[key]['TP'], jac[key]['FP'], jac[key]['FN']))\n print('Total accuracy :', jac['accuracy'])\n\n if not os.path.isdir(\"results/classification\"):\n os.mkdir(\"results/classification\")\n with open(\"results/classification/result_{}.txt\".format(args.output), \"a\") as f:\n f.write(os.path.join('results/metric', args.env + \" result\\n\"))\n f.write(\"label:\\tN\\tJ\\tTP\\tFP\\tFN\\n\")\n for key in ['left','front','right']:\n f.write(\"%s:\\t%d\\t%.3f\\t%d\\t%d\\t%d\\n\"%(key, jac[key]['N'], jac[key]['J'], jac[key]['TP'], jac[key]['FP'], jac[key]['FN']))\n f.write('Total accuracy : %.3f\\n'%jac['accuracy'])\n f.write('\\n')\n \n\nif __name__ == '__main__':\n 
parser = argparse.ArgumentParser(\"ParticleFilter\")\n parser.add_argument('--input', type=str, required=True)\n parser.add_argument('--output', type=str, default='image')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--epsilon', type=float, default=0.2)\n parser.add_argument('--env', type=str, required=True, choices=['SA1','SA2','SB1','SB2','SB3'])\n args = parser.parse_args()\n main(args)","repo_name":"jackyoung96/ASPLE","sub_path":"classification_test.py","file_name":"classification_test.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41845329653","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom plotter import Ui_Form as Ui_Plot\nfrom submodule.visualise_motion import Visualiser\n\nfrom submodule.utils.conversion_utils import *\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt4agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\n\nclass Plotter(QWidget, Ui_Plot):\n trigger = pyqtSignal()\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.setupUi(self)\n\n self.btn_plot_update.clicked.connect(self.plot_data)\n self.plotter = Visualiser()\n self.start_figure()\n \n QToolTip.setFont(QFont(\"SansSerif\", 11))\n self.label_51.setToolTip(\"Wave direction expressed\\nin the mesh coordinate system.\\n0deg correspond to a wave travelling in the positive x-direction\")\n \n self.label_54.setToolTip(\"Wave direction expressed\\nin the North-South/West-East coordinate system.\\n0deg correspond to a wave travelling from North to South\")\n \n def set_data(self, data):\n self._data = data\n if 'hyd' in data.keys() and not 'p_fit' in data.keys():\n self.enable_plot_area([1])\n elif 'p_fit' in data.keys():\n self.enable_plot_area([1,2])\n else:\n self.enable_plot_area()\n \n def start_figure(self):\n fig = Figure()\n self.addmpl(fig)\n self.enable_plot_area()\n \n def rmmpl(self,):\n self.mpl_vl.removeWidget(self.canvas)\n self.canvas.close()\n self.mpl_vl.removeWidget(self.toolbar)\n self.toolbar.close()\n \n def addmpl(self, fig):\n self.canvas = FigureCanvas(fig)\n self.mpl_vl.addWidget(self.canvas)\n self.canvas.draw()\n self.toolbar = NavigationToolbar(self.canvas, self.mpl_window, coordinates=True)\n self.mpl_vl.addWidget(self.toolbar)\n \n def plot_excitation(self):\n fig = self.plotter.show_diffraction_problem(int(self.cb_dofi.currentIndex()),\n float(self.cb_angle.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n# \n def plot_radiation(self):\n fig = self.plotter.show_radiation_problem(int(self.cb_dofi.currentIndex()),\n float(self.cb_dofj.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_rao(self):\n fig = self.plotter.show_rao(int(self.cb_te.currentIndex()),\n int(self.cb_hm0.currentIndex()), \n int(self.cb_wavedir.currentIndex()),\n int(self.cb_dofi.currentIndex()),\n int(self.cb_angle.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_power_matrix(self):\n fig = self.plotter.show_power_matrix(int(self.cb_wavedir.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_k_fit(self):\n fig = self.plotter.show_k_fit(int(self.cb_te.currentIndex()),\n int(self.cb_hm0.currentIndex()), \n int(self.cb_wavedir.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_c_fit(self):\n fig = self.plotter.show_c_fit(int(self.cb_te.currentIndex()),\n int(self.cb_hm0.currentIndex()), \n 
int(self.cb_wavedir.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_original_power_matrix(self):\n fig = self.plotter.show_original_power_matrix(int(self.cb_wavedir.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_user_power_matrix(self):\n fig = self.plotter.show_user_power_matrix(int(self.cb_wavedir.currentIndex()))\n self.rmmpl()\n self.addmpl(fig)\n \n \n def plot_mass(self):\n fig = self.plotter.show_mass()\n self.rmmpl()\n self.addmpl(fig)\n \n def plot_hydrostatic(self):\n fig = self.plotter.show_hst()\n self.rmmpl()\n self.addmpl(fig)\n \n def enable_plot_area(self, index=[]):\n self.plotter.set_hydrodynamic_data(None)\n self.plotter.set_performance_fit_data(None)\n if index:\n self.groupBox_11.setEnabled(True)\n self.groupBox_9.setEnabled(True)\n self.btn_plot_update.setEnabled(True)\n for ind in index:\n if ind == 1:\n self.enable_hydrodynamic(True)\n self.set_hydrodynamic_dimensions(True)\n self.plotter.set_hydrodynamic_data(self._data['hyd'])\n elif ind == 2:\n self.enable_motion(True)\n self.set_motion_dimensions(True)\n self.plotter.set_performance_fit_data(self._data['p_fit'])\n else:\n pass\n else:\n self.groupBox_11.setEnabled(False)\n self.groupBox_9.setEnabled(False)\n self.btn_plot_update.setEnabled(False)\n self.enable_hydrodynamic(False)\n self.enable_motion(False)\n \n\n def enable_hydrodynamic(self, choice):\n self.rb_excitation.setEnabled(choice)\n self.rb_radiation.setEnabled(choice)\n self.rb_mass.setEnabled(choice)\n self.rb_stiffness.setEnabled(choice)\n self.rb_radiation.setChecked(True)\n \n def enable_motion(self, choice):\n self.c_fit.setEnabled(choice)\n self.k_fit.setEnabled(choice)\n self.rb_userpowermat.setEnabled(choice)\n self.rb_origpowermat.setEnabled(choice)\n self.rb_rao.setEnabled(choice)\n self.rb_powermat.setEnabled(choice)\n \n def set_motion_dimensions(self, stat):\n if stat:\n te = self._data['p_fit']['te'].tolist()\n te = [str(x) for x in te]\n hm0 = self._data['p_fit']['hm0'].tolist()\n hm0 = [str(x) for x in hm0]\n wave_dir = self._data['p_fit']['wave_dir']\n \n wave_dir_ne = angle_wrap(-wave_dir-np.pi/2,'r2r')*180/np.pi\n wave_dir = [str(x) for x in wave_dir_ne.tolist()]\n\n \n self.cb_te.clear()\n self.cb_te.addItems(te)\n self.cb_hm0.clear()\n self.cb_hm0.addItems(hm0)\n self.cb_wavedir.clear()\n self.cb_wavedir.addItems(wave_dir)\n else:\n self.cb_te.clear()\n self.cb_hm0.clear()\n self.cb_wavedir.clear()\n \n def set_hydrodynamic_dimensions(self, stat):\n if stat:\n angle = self._data['hyd']['directions'].tolist()\n angle = [str(x) for x in angle]\n dofi = range(self._data['hyd']['m_m'].shape[0])\n dofi = [str(x) for x in dofi]\n self.cb_angle.clear()\n self.cb_angle.addItems(angle)\n self.cb_dofi.clear()\n self.cb_dofi.addItems(dofi)\n self.cb_dofj.clear()\n self.cb_dofj.addItems(dofi)\n else:\n self.cb_angle.clear()\n self.cb_dofi.clear()\n self.cb_dofj.clear()\n \n def plot_data(self):\n # check which tab is active first\n active_rb = [ix for ix, x in enumerate(self.groupBox_9.children()) if ix>0 and x.isChecked()]\n \n if active_rb[0] == 1:\n self.plot_radiation()\n elif active_rb[0] == 2:\n self.plot_excitation()\n elif active_rb[0] == 3:\n self.plot_mass()\n elif active_rb[0] == 4:\n self.plot_hydrostatic()\n elif active_rb[0] == 5:\n self.plot_c_fit()\n elif active_rb[0] == 6:\n self.plot_k_fit()\n elif active_rb[0] == 7:\n self.plot_original_power_matrix()\n elif active_rb[0] == 8:\n self.plot_user_power_matrix()\n elif active_rb[0] == 9:\n self.plot_power_matrix()\n elif active_rb[0] == 10:\n 
self.plot_rao()\n        elif active_rb[0] == 11:\n            pass\n","repo_name":"DTOcean/dtocean-hydrodynamics","sub_path":"dtocean_wec/plot_form.py","file_name":"plot_form.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
+{"seq_id":"69836086614","text":"from scipy import interpolate\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.array([0.0, 0.1, 0.2, 0.3, 0.4])\ny = np.array([0.0, 0.078348, 0.13891, 0.192916, 0.244981])\nx_0 = 0.2\n\n# For the plot\nf1 = interpolate.interp1d(x, y, kind='quadratic')\nx_range = np.arange(0, 0.4, 0.001)\ny_range = f1(x_range)\n\n\np1 = interpolate.lagrange(x, y)\nderivative = np.polyder(p1)\n\ny_range_prim = derivative(x_range)\n\nprint(f\"f'(0.2) = {derivative(x_0)}\")\n\n\nplt.scatter(x, y)\nplt.plot(x_range, y_range, label='f(x)')\nplt.plot(x_range, y_range_prim, label='f\\'(x)')\nplt.scatter(x_0, derivative(x_0))\n\n\nplt.legend()\nplt.grid()\nplt.show()\n","repo_name":"kacpermisiek/Metody-numeryczne","sub_path":"lista6/zadanie2.py","file_name":"zadanie2.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"1841796273","text":"\"\"\"\nPython operations on JSON files are split into encoding and decoding\n\nEncoding:\ndumps - to a string\ndump - a JSON object, which can be written to a file through the fp file stream\n\n\nDecoding:\nload\nloads - operates on strings\n\"\"\"\n\n# Key points\n# import json\n#\n# str1 = \"[{'username': 'dachang,', 'age': 19}]\"\n# print(type(str1))\n#\n# json_str = json.dumps(str1, ensure_ascii=False)\n# print(json_str)\n# print(type(json_str))\n#\n# new_str = json.loads(json_str)\n# print(new_str)\n# print(type(new_str))\n\n# Example\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\n\nheaders = {\n    \"User-Agent\": \"Mozilla / 5.0(Macintosh;IntelMacOSX10_14_2) AppleWebKit / \\\n    537.36(KHTML, likeGecko) Chrome / 78.0.3904.87Safari / 537.36\"}\n\nurl = 'http://www.seputu.com/'\nrsp = requests.get(url, headers=headers)\n# print(rsp.text)\n\nsoup = BeautifulSoup(rsp.text, 'lxml')\n\ncontent = []\nfor mulu in soup.find_all(class_= \"mulu\"):\n    # print(mulu)\n    # Title\n    h2 = mulu.find('h2')\n    if h2 != None:\n        h2_title = h2.string\n        print(h2_title)\n\n        list1 = []\n\n        # Get the URL for each title entry\n        for a in mulu.find(class_=\"box\").find_all('a'):\n            href = a.get('href')\n            box_title = a.get('title')\n            # print(href)\n            # print(box_title)\n\n            list1.append({'href': href, 'box_title': box_title})\n\n        content.append({'title': h2_title, 'content': list1})\n\n\nwith open('daomubiji.json', 'a', encoding='utf-8') as f:\n    json.dump(content, fp=f, indent=4, ensure_ascii=False)\n","repo_name":"Dengdi21/myspider","sub_path":"Project/16-saving_data/one_to_json.py","file_name":"one_to_json.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13768683233","text":"import gevent\nfrom gevent import monkey\n\nmonkey.patch_socket()\nmonkey.patch_ssl()\nfrom gevent.pool import Group\nfrom gevent import Timeout\nimport multiprocessing\nimport multiprocessing.pool\nimport moon\nimport itertools\nimport confidential\n\nimport requests  # needed by sonuva_iscontent below\nimport urllib3\n\n\nclass NoDaemonProcess(multiprocessing.Process):\n    @property\n    def daemon(self):\n        return False\n\n    @daemon.setter\n    def daemon(self, value):\n        pass\n\n\nclass NoDaemonContext(type(multiprocessing.get_context())):\n    Process = NoDaemonProcess\n\n\nclass UrTooSlow(Exception):\n    print(\"Gotta go fast, n eat chilli dawgs\")\n\n\n# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool\n
# because the latter is only a wrapper function, not a proper class.\nclass MyPool(multiprocessing.pool.Pool):\n    def __init__(self, *args, **kwargs):\n        kwargs[\"context\"] = NoDaemonContext()\n        super(MyPool, self).__init__(*args, **kwargs)\n\n\ndef iterator_ception():\n    for i in range(0, 128):\n        yield itertools.combinations_with_replacement(moon.search_space, i)\n\n\ndef tater_masher(iter_tater):\n    p = Group()\n    output_set = set()\n\n    for returned_data in p.imap_unordered(urllib_iscontent, iter_tater, maxsize=10):\n        # if len(p) <= 5:\n        #     print(f'there are only {len(p)} waiting')\n        if returned_data[0]:\n            output_set.add(returned_data[1])\n            print(f\"the list of data i'm going to return is {len(output_set)}\")\n\n    return output_set\n\n\ndef sonuva_iscontent(request_string):\n    payload = \"\".join(request_string)\n    test_url = confidential.preamble + payload\n    trial_request = None\n\n    try:\n        # with Timeout(180, UrTooSlow):\n        trial_request = requests.get(test_url)\n    except requests.exceptions.ConnectionError:\n        # OSError is too broad, fishing for WindowsError()\n        return (False, test_url)\n    except requests.exceptions.ChunkedEncodingError:\n        print(\"how many times do we have to teach you this old man?\")\n    except OSError:\n        print(\"big fat cero\")\n        return (False, test_url)\n    except UrTooSlow:\n        print(\"help me\")\n        return (False, test_url)\n\n    if trial_request is None:\n        return (False, test_url)\n    # except IndexError:\n    #     print('wake me up, wake me up inside')\n    # this is where the request is made, is parallelized!\n    if trial_request.status_code not in (404, 400, 403, 504, 500):\n        # this is a list of 'bad' status codes\n        # so far 404 is generic URL not found\n        # 400 is something that should trigger an internal app but doesn't\n        # 403 is file denied access\n        # 504 is a weird bug, maybe rate limiting?\n        print(f\"{test_url} returned status code {trial_request.status_code}\")\n        # discovering new and exciting Status_codes\n        return (True, test_url)\n    elif trial_request.status_code in (403, 504, 500, 503):\n        print(f\"{test_url} failed with {trial_request.status_code}\")\n        return (False, test_url)\n    else:\n        return (False, test_url)\n\n\ndef urllib_iscontent(request_string):\n    payload = \"\".join(request_string)\n    test_url = confidential.preamble + payload\n    trial_request = None\n    http = urllib3.PoolManager(\n        maxsize=10, timeout=180, retries=urllib3.Retry(3, raise_on_redirect=False)\n    )\n\n    try:\n        trial_request = http.request(\"GET\", test_url)\n    except OSError:\n        print(\"URLLIB error, halp\")\n        return (False, test_url)\n\n    if trial_request is None:\n        return (False, test_url)\n\n    if trial_request.status not in (404, 400, 403, 504, 500):\n        print(f\"{test_url} returned status code {trial_request.status}\")\n        return (True, test_url)\n    elif trial_request.status in (504, 500, 503):  # 403 is kinda noisy\n        print(f\"{test_url} failed with {trial_request.status}\")\n        return (False, test_url)\n    else:\n        return (False, test_url)\n\n\ndef main():\n    process_count = 1\n    # start_length = 0\n    print(\"Hello world\")\n    Fought_the_law = iterator_ception()\n    print(\"iterator up\")\n    p = MyPool(process_count)\n    print(\"pool going\")\n\n    print(\"set engaged\")\n\n    for returned_stuff in p.imap_unordered(tater_masher, Fought_the_law):\n        outputsquared_set = set()\n        for _ in returned_stuff:\n            print(f\"egads, I found a {returned_stuff}\")\n            outputsquared_set.add(returned_stuff)\n        print(\"will this print?\")\n        with open(\"results_saver.txt\", \"a\") as file:\n            for item in outputsquared_set:\n
file.write(item)\n\n print(\"this will not print in the heatdeath of the universe\")\n print(\"so how are you doing, weary time traveler?\")\n print(\"pull up a chair, watch the stars die\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ace510/Python-stuff","sub_path":"cdn_discovery/guts/cluster_search.py","file_name":"cluster_search.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3143438783","text":"\"\"\"\nDBus Bus implementation\n\n@author: Tom Cocagne\n\"\"\"\n\nimport binascii\nimport os\n\nfrom twisted.python import log\n\nimport txdbus.protocol\nfrom txdbus import authentication, client, error, message, objects, router\nfrom txdbus import marshal\nfrom txdbus.interface import DBusInterface, Method, Signal\n\n\nclass DError(Exception):\n \"\"\"\n Used to signal anticipated errors\n \"\"\"\n\n def __init__(self, errorName, msg=None):\n self.dbusErrorName = errorName\n self.errorName = errorName\n self.errorMessage = msg\n\n def __str__(self):\n return self.errorMessage\n\n\nclass BusProtocol (txdbus.protocol.BasicDBusProtocol):\n \"\"\"\n Instances of this class handle connections to DBus clients\n\n @ivar bus: The L{Bus} instance associated with this connection\n @type bus: L{Bus}\n \"\"\"\n _client = False\n _called_hello = False\n bus = None\n authenticator = authentication.BusAuthenticator\n\n def connectionAuthenticated(self):\n self.username = self.guid\n self.uniqueName = None\n self.busNames = {} # name => allow_replacement\n self.bus = self.factory.bus\n self.matchRules = set()\n self.isConnected = True\n\n def connectionLost(self, reason):\n self.isConnected = False\n if self.bus is not None:\n self.bus.clientDisconnected(self)\n\n def rawDBusMessageReceived(self, raw_msg):\n msg = message.parseMessage(raw_msg)\n mt = msg._messageType\n\n if not self.uniqueName:\n self.bus.clientConnected(self)\n\n if not self._called_hello and mt == 1:\n if msg.destination == 'org.freedesktop.DBus':\n if msg.member == 'Hello':\n\n r = message.MethodReturnMessage(\n msg.serial,\n body=[self.uniqueName],\n signature='s',\n )\n\n self._called_hello = True\n self.sendMessage(r)\n\n return\n\n else:\n self.transport.loseConnection()\n\n msg.sender = self.uniqueName\n\n # re-marshal with the sender set and same serial number\n msg._marshal(False)\n\n self.bus.messageReceived(self, msg)\n\n\nclass Bus (objects.DBusObject):\n \"\"\"\n DBus Bus implementation.\n\n @ivar stdIface: L{interface.DBusInterface} containing the standard bus\n interface\n @type stdIface: L{interface.DBusInterface}\n \"\"\"\n stdIface = DBusInterface(\n 'org.freedesktop.DBus',\n Method('Hello', arguments='', returns='s'),\n Method('GetId', arguments='', returns='s'),\n Method(\n 'RequestName', arguments='su', returns='u'\n ),\n Method('ReleaseName', arguments='s', returns='u'),\n Method(\n 'ListQueuedOwners',\n arguments='s',\n returns='as'\n ),\n Method('AddMatch', arguments='s', returns=''),\n Method('RemoveMatch', arguments='s', returns=''),\n Method(\n 'GetNameOwner', arguments='s', returns='s'\n ),\n Method(\n 'GetConnectionUnixUser',\n arguments='s',\n returns='u'\n ),\n\n # Not Implemented Methods\n Method(\n 'GetConnectionUnixProcessId',\n arguments='s',\n returns='u'\n ),\n Method(\n 'ListActivatableNames',\n arguments='',\n returns='as'\n ),\n Method(\n 'UpdateActivationEnvironment',\n arguments='a{ss}',\n returns=''\n ),\n Method(\n 'StartServiceByName',\n arguments='su',\n returns='u'\n ),\n 
Method(\n 'GetAdtAuditSessionData',\n arguments='s',\n returns='u'\n ),\n Method(\n 'GetConnectionSELinuxSecurityContext',\n arguments='su',\n returns='ay'\n ),\n Method('ReloadConfig'),\n\n Signal('NameAcquired', arguments='s'),\n Signal('NameLost', arguments='s'),\n Signal('NameOwnerChanged', arguments='sss')\n )\n\n dbusInterfaces = [stdIface]\n\n def __init__(self):\n objects.DBusObject.__init__(self, '/org/freedesktop/DBus')\n self.uuid = binascii.hexlify(os.urandom(16))\n self.clients = {} # maps unique_bus_id to client connection\n self.busNames = {} # maps name to list of queued connections\n self.router = router.MessageRouter()\n self.next_id = 1\n self.obj_handler = objects.DBusObjectHandler(self)\n\n self.obj_handler.exportObject(self)\n\n # returns the new unique bus name for the client connection\n def clientConnected(self, proto):\n \"\"\"\n Called when a client connects to the bus. This method assigns the\n new connection a unique bus name.\n \"\"\"\n proto.uniqueName = ':1.%d' % (self.next_id,)\n self.next_id += 1\n self.clients[proto.uniqueName] = proto\n\n def clientDisconnected(self, proto):\n \"\"\"\n Called when a client disconnects from the bus\n \"\"\"\n for rule_id in proto.matchRules:\n self.router.delMatch(rule_id)\n\n for busName in proto.busNames.keys():\n self.dbus_ReleaseName(busName, proto.uniqueName)\n\n if proto.uniqueName:\n del self.clients[proto.uniqueName]\n\n def sendMessage(self, msg):\n \"\"\"\n Sends the supplied message to the correct destination. The\n @type msg: L{message.DBusMessage}\n @param msg: The 'destination' field of the message must be set for\n method calls and returns\n \"\"\"\n if msg._messageType in (1, 2):\n assert msg.destination, 'Failed to specify a message destination'\n\n if msg.destination is not None:\n if msg.destination[0] == ':':\n p = self.clients.get(msg.destination, None)\n else:\n p = self.busNames.get(msg.destination, None)\n if p:\n p = p[0]\n\n # print 'SND: ', msg._messageType, ' to ', p.uniqueName, 'serial',\n # msg.serial,\n\n if p:\n p.sendMessage(msg)\n else:\n log.msg(\n 'Invalid bus name in msg.destination: '\n + msg.destination\n )\n else:\n self.router.routeMessage(msg)\n\n def messageReceived(self, p, msg):\n mt = msg._messageType\n\n # print 'MSG: ', mt, ' from ', p.uniqueName, ' to ', msg.destination\n\n try:\n if mt == 1:\n self.methodCallReceived(p, msg)\n elif mt == 2:\n self.methodReturnReceived(p, msg)\n elif mt == 3:\n self.errorReceived(p, msg)\n elif mt == 4:\n self.signalReceived(p, msg)\n\n if (\n msg.destination\n and not msg.destination == 'org.freedesktop.DBus'\n ):\n self.sendMessage(msg)\n\n self.router.routeMessage(msg)\n except DError as e:\n sig = None\n body = None\n if e.errorMessage:\n sig = 's'\n body = [e.errorMessage]\n\n r = message.ErrorMessage(\n e.errorName,\n msg.serial,\n signature=sig,\n body=body,\n )\n p.sendMessage(r)\n\n def methodCallReceived(self, p, msg):\n if msg.destination == 'org.freedesktop.DBus':\n self.obj_handler.handleMethodCallMessage(msg)\n\n def methodReturnReceived(self, p, msg):\n pass\n\n def errorReceived(self, p, msg):\n pass\n\n def signalReceived(self, p, msg):\n pass\n\n def sendSignal(self, p, member, signature=None, body=None,\n path='/org/freedesktop/DBus',\n interface='org.freedesktop.DBus'):\n \"\"\"\n Sends a signal to a specific connection\n\n @type p: L{BusProtocol}\n @param p: L{BusProtocol} instance to send a signal to\n\n @type member: C{string}\n @param member: Name of the signal to send\n\n @type path: C{string}\n @param path: Path of 
the object emitting the signal. Defaults to\n            'org/freedesktop/DBus'\n\n        @type interface: C{string}\n        @param interface: If specified, this specifies the interface containing\n            the desired method. Defaults to 'org.freedesktop.DBus'\n\n        @type body: None or C{list}\n        @param body: If supplied, this is a list of signal arguments. The\n            contents of the list must match the signature.\n\n        @type signature: None or C{string}\n        @param signature: If specified, this specifies the DBus signature of\n            the body of the DBus Signal message. This string must be a valid\n            Signature string as defined by the DBus specification. If the body\n            argument is supplied, this parameter must be provided.\n        \"\"\"\n        if not isinstance(body, (list, tuple)):\n            body = [body]\n\n        s = message.SignalMessage(path, member, interface,\n                                  p.uniqueName, signature, body)\n        p.sendMessage(s)\n\n    def broadcastSignal(self, member, signature=None, body=None,\n                        path='/org/freedesktop/DBus',\n                        interface='org.freedesktop.DBus'):\n        \"\"\"\n        Sends a signal to all connections with registered interest\n\n        @type member: C{string}\n        @param member: Name of the signal to send\n\n        @type path: C{string}\n        @param path: Path of the object emitting the signal. Defaults to\n            'org/freedesktop/DBus'\n\n        @type interface: C{string}\n        @param interface: If specified, this specifies the interface containing\n            the desired method. Defaults to 'org.freedesktop.DBus'\n\n        @type body: None or C{list}\n        @param body: If supplied, this is a list of signal arguments. The\n            contents of the list must match the signature.\n\n        @type signature: None or C{string}\n        @param signature: If specified, this specifies the DBus signature of\n            the body of the DBus Signal message. This string must be a valid\n            Signature string as defined by the DBus specification. If the body\n
            argument is supplied, this parameter must be provided.\n        \"\"\"\n        if not isinstance(body, (list, tuple)):\n            body = [body]\n\n        s = message.SignalMessage(path, member, interface,\n                                  None, signature, body)\n        self.router.routeMessage(s)\n\n    # ----------------------------------------------------------------\n    # DBus Object Interface\n    #\n    def dbus_Hello(self, dbusCaller=None):\n        raise DError(\n            'org.freedesktop.DBus.Error.Failed',\n            'Already handled a Hello message',\n        )\n\n    def dbus_GetId(self):\n        return self.uuid\n\n    def dbus_RequestName(self, name, flags, dbusCaller=None):\n        caller = self.clients[dbusCaller]\n\n        allow_replacement = bool(flags & 0x1)\n        replace_existing = bool(flags & 0x2)\n        do_not_queue = bool(flags & 0x4)\n\n        if not name:\n            raise DError(\n                'org.freedesktop.DBus.Error.InvalidArgs',\n                'Empty string is not a valid bus name',\n            )\n\n        if name[0] == ':':\n            raise DError(\n                'org.freedesktop.DBus.Error.InvalidArgs',\n                'Cannot acquire a service starting with \\':\\' such as \"%s\"' %\n                (name,),\n            )\n\n        try:\n            marshal.validateBusName(name)\n        except error.MarshallingError as e:\n            raise DError('org.freedesktop.DBus.Error.InvalidArgs', str(e))\n\n        def signalAcq(old_owner_name):\n            self.sendSignal(caller, 'NameAcquired', 's', name)\n            self.broadcastSignal(\n                'NameOwnerChanged',\n                'sss',\n                [name, old_owner_name, caller.uniqueName],\n            )\n\n        if name not in self.busNames:\n            self.busNames[name] = [caller, ]\n            caller.busNames[name] = allow_replacement\n\n            signalAcq('')\n\n            return client.NAME_ACQUIRED\n        else:\n            queue = self.busNames[name]\n            owner = queue[0]\n\n            if owner is caller:\n                # Update the replacement flag\n                owner.busNames[name] = allow_replacement\n\n                return client.NAME_ALREADY_OWNER\n            else:\n                if not replace_existing:\n                    return client.NAME_IN_USE\n\n                if owner.busNames[name]:\n                    del queue[0]\n                    queue.insert(0, caller)\n                    del owner.busNames[name]\n                    caller.busNames[name] = allow_replacement\n                    self.sendSignal(owner, 'NameLost', 's', name)\n                    signalAcq(owner.uniqueName)\n                    return client.NAME_ACQUIRED\n                else:\n                    if do_not_queue:\n                        return client.NAME_IN_USE\n\n                    queue.append(caller)\n                    caller.busNames[name] = allow_replacement\n\n                    return client.NAME_IN_QUEUE\n\n    def dbus_ReleaseName(self, name, dbusCaller=None):\n        caller = self.clients[dbusCaller]\n\n        queue = self.busNames.get(name, None)\n\n        if queue is None:\n            return client.NAME_NON_EXISTENT\n\n        owner = queue[0]\n\n        if caller is not owner:\n            return client.NAME_NOT_OWNER\n\n        del queue[0]\n\n        if caller.isConnected:\n            self.sendSignal(caller, 'NameLost', 's', name)\n\n        if queue:\n            self.sendSignal(queue[0], 'NameAcquired', 's', name)\n        else:\n            del self.busNames[name]\n\n        return client.NAME_RELEASED\n\n    def dbus_ListQueuedOwners(self, name):\n        queue = self.busNames.get(name, None)\n        if queue:\n            return [p.uniqueName for p in queue]\n        else:\n            raise DError(\n                'org.freedesktop.DBus.Error.NameHasNoOwner',\n                'Could not get owners of name \\'%s\\': no such name' %\n                (name,),\n            )\n\n    def dbus_AddMatch(self, rule, dbusCaller=None):\n        caller = self.clients[dbusCaller]\n\n        kwargs = {\n            'mtype': None,\n            'sender': None,\n            'interface': None,\n            'member': None,\n            'path': None,\n            'path_namespace': None,\n            'destination': None,\n            'args': None,\n            'arg_paths': None,\n            'arg0namespace': None,\n        }\n\n        for item in rule.split(','):\n            k, v = item.split('=')\n\n            value = v[1:-1]\n\n            if k == 'type':\n                k = 'mtype'\n\n            if k in kwargs:\n                kwargs[k] = value\n\n            elif k.startswith('arg'):\n                if k.endswith('path'):\n                    if kwargs['arg_paths'] is None:\n                        kwargs['arg_paths'] = []\n                    kwargs['arg_paths'].append((int(k[3:-4]), value))\n
                else:\n                    if kwargs['args'] is None:\n                        kwargs['args'] = []\n                    kwargs['args'].append((int(k[3:]), value))\n\n        self.router.addMatch(caller.sendMessage, **kwargs)\n\n    def dbus_GetNameOwner(self, busName):\n        if busName.startswith(':'):\n            conn = self.clients.get(busName, None)\n        else:\n            conn = self.busNames.get(busName, None)\n            if conn:\n                conn = conn[0]\n\n        if conn is None:\n            raise DError(\n                \"org.freedesktop.DBus.Error.NameHasNoOwner\",\n                \"Could not get UID of name '%s': no such name\" %\n                (busName,),\n            )\n\n        return conn.uniqueName\n\n    def dbus_GetConnectionUnixUser(self, busName):\n        if busName.startswith(':'):\n            conn = self.clients.get(busName, None)\n        else:\n            conn = self.busNames.get(busName, None)\n            if conn:\n                conn = conn[0]\n\n        if conn is None:\n            raise DError(\n                \"org.freedesktop.DBus.Error.NameHasNoOwner\",\n                \"Could not get UID of name '%s': no such name\" %\n                (busName,),\n            )\n\n        try:\n            import pwd\n            return pwd.getpwnam(conn.username).pw_uid\n        except BaseException:\n            raise DError(\n                'org.freedesktop.DBus.Error',\n                \"Unable to determine unix user for bus '%s'\" %\n                (busName,),\n            )\n","repo_name":"cocagne/txdbus","sub_path":"txdbus/bus.py","file_name":"bus.py","file_ext":"py","file_size_in_byte":16714,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"67"}
+{"seq_id":"16880335492","text":"from models import Exposure\n\n\nclass ExposureTranslator:\n    def to_dict(self, model: Exposure) -> dict:\n        return {\n            \"source\": model.source,\n            \"type\": model.type,\n            \"database_identifiers\": model.database_identifiers,\n            \"title\": model.title,\n            \"description\": model.description,\n            \"date_publication\": model.date_publication\n        }\n","repo_name":"Entarudin/scraper","sub_path":"translators/exposure_translator.py","file_name":"exposure_translator.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74611378774","text":"import sys\n\n# number of adventurers\nn = int(sys.stdin.readline().rstrip())\n# array of fear levels\ndata = list(map(int, sys.stdin.readline().rstrip().split()))\n# sort the fear level array\ndata.sort()\n\n# total number of groups\nresult = 0\n# number of members in the current group\ncount = 0\n\n# scan the fear level array\nfor horror in data:\n    # add a member to the current group\n    count += 1\n    # if the current group size is at least the fear level just seen, form the group right away\n    if (count >= horror):\n        result += 1\n        count = 0 # a new group has to be formed, so reset the current group's member count\n\n# print the total number of groups\nprint(result)\n    \n","repo_name":"unhochoi/algorithm","sub_path":"greedy/모험가길드.py","file_name":"모험가길드.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"21084501523","text":"# 1463\n''' Time limit exceeded ..\nn = int(input())\ncount = 0\ndef temp(n):\n    global count\n    arr = [n]\n    if (n-1) % 3 == 0:\n        count += 1\n        arr[0] = n-1\n    if arr[0]%3 == 0 and arr[0] >= 3:\n        count += 1\n        arr.append(arr[0]//3)\n    elif arr[0]%2 == 0 and arr[0] >= 2:\n        count += 1\n        arr.append(arr[0]//2)\n    # print(arr)\n    return arr\nwhile(True):\n    if n == 1:\n        print(count)\n        break\n    tmp = temp(n)\n    n = min(tmp)\n'''\n\ndef calc(l):\n    global ans\n    ans+=1\n    nl=[]\n    for a in l:\n        nl.append(a-1)\n        if a%3==0 and a>=3:\n            nl.append(a/3)\n        if a%2==0 and a>=2:\n            nl.append(a/2)\n    return nl\n\nn=int(input())\nl=[]\nl.append(n)\nans=0\nwhile(1):\n    if min(l)==1:\n        break\n
l=calc(l)\nprint(ans)","repo_name":"HaloKim/self_study","sub_path":"1d1c/백준/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26509768702","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nx = np.loadtxt('../data/logisticX.csv',delimiter=',')\ny = np.loadtxt('../data/logisticY.csv').reshape(-1,1)\n\nx = (x - np.mean(x))/np.std(x) # Normalize\nx = np.concatenate((np.ones((x.shape[0],1)),x),axis=1)\n\n############ Part (a) ############\n\ntheta = np.zeros((x.shape[1],1))\nsigmoid = lambda z: 1/(1 + np.exp(z))\npred = sigmoid(np.dot(x,theta))\ndiff = y - pred\ncost = -1*np.sum(y*np.log(pred) + (1-y)*np.log(1-pred))\niterations = 0\n\nwhile(cost > 0.0001 and iterations < 10): # Stopping Criteria\n hessian = np.dot(np.dot(x.T,np.diag((pred*(1-pred)).reshape(-1,))),x)\n theta = theta - np.dot(np.linalg.pinv(hessian),np.dot(x.T,diff))\n pred = sigmoid(np.dot(x,theta))\n diff = y - pred\n cost = -1*np.sum(y*np.log(pred) + (1-y)*np.log(1-pred))\n iterations += 1\n print(iterations,\" - \",cost)\n\nprint(\"Final Cost - \",cost)\nprint(\"Final Parameters - {0},{1},{2}\".format(theta[0],theta[1],theta[2]))\n\n# theta[0] = -0.2130308\n# theta[1] = -2.65801937\n# theta[2] = 2.66106075\n\n##################################\n\n############ Part (b) ############\n\nsns.set()\nsns.regplot(x[(y == 1).reshape(-1,),1],x[(y == 1).reshape(-1,),2],fit_reg=False,marker='+',color='blue')\nsns.regplot(x[(y == 0).reshape(-1,),1],x[(y == 0).reshape(-1,),2],fit_reg=False,marker='.',color='red')\naxes = plt.gca()\nx_vals = np.array(axes.get_xlim())\ny_vals = -1*(theta[0] + theta[1] * x_vals)/theta[2]\nplt.plot(x_vals,y_vals,color='g')\nplt.title(\"Logistic Regression\")\nplt.xlabel(r'$X_1$')\nplt.ylabel(r'$X_2$')\nneg_patch = plt.plot([],[],marker=\".\",ms=10,ls=\"\",mec=None,color='red',label=\"Negative\")[0]\npos_patch = plt.plot([],[],marker=\"P\",ms=10,ls=\"\",mec=None,color='blue',label=\"Positive\")[0]\nplt.legend(handles=[pos_patch,neg_patch],loc=2)\nplt.show()\n\n##################################","repo_name":"messi313/COL-774-Assignments","sub_path":"Assignment 1/src/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34761061392","text":"import calculus\nimport matplotlib.pyplot as plt\nimport modul\nimport math\nimport numpy as np\ndef f4(x):\n return 5*x**3 - 2*x**2+2*x-3\nxk1 = -2\nxk2 = 2\nhk = 0.1\nx_l1 = np.arange(xk1,xk2,hk)\nd_l1 = []\nfor x in x_l1:\n d = 15*x**2 - 4*x +2\n d_l1.append(d)\n\na,b = calculus.derivacija(f4,0.1,-2,2)\nplt.plot(x_l1,d_l1)\nplt.scatter(a,b, s = 5, color = 'r')\nplt.show()\n\ndef ftrig(x):\n return math.sin(2*x) - math.cos(x)\nxt1 = -10\nxt2 = 10\nht = 0.01\nd_l2 = []\nx_l2 = np.arange(xt1,xt2,ht)\n\nfor x in x_l2:\n d = math.cos(2*x)*2+math.sin(x)\n d_l2.append(d)\n\nc,d = calculus.derivacija(ftrig,0.01,-10,10)\nplt.plot(x_l2,d_l2)\nplt.scatter(c,d, s = 1, color = 'r')\nplt.show()\n","repo_name":"BrunaMariani/PAF","sub_path":"vjezbe4/testiranjederivacija.py","file_name":"testiranjederivacija.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26035387270","text":"import re # regex\n\nfrom datetime import datetime, timezone\n\nfrom COMMON_abst.messaging import (\n 
ProcessImplementation,\n DeviceQueue,\n DeviceMessage,\n MsgType,\n)\n\nfrom bluepy.btle import (\n Peripheral,\n DefaultDelegate,\n Characteristic,\n ADDR_TYPE_PUBLIC,\n ADDR_TYPE_RANDOM,\n UUID,\n)\n\nfrom time import sleep as thread_sleep\n\nfrom func_timeout import func_timeout, FunctionTimedOut\n\n\nclass _Dev_callback_Impl(DefaultDelegate):\n @staticmethod\n def has_method(o, name):\n return callable(getattr(o, name, None))\n\n def __init__(self, receiver: \"Device\" = None):\n DefaultDelegate.__init__(self)\n if receiver and _Dev_callback_Impl.has_method(receiver, \"handleNotification\"):\n self._receiver = receiver\n else:\n raise (Exception(\"This class has no handleNotification function!\"))\n\n def handleNotification(self, cHandle, data):\n self._receiver.handleNotification(cHandle, data)\n\n\n\"\"\"Extend peripheral to include workaround when bluepy helper is stuck \"\"\"\n\n\nclass SafePeripheral(Peripheral):\n def __init__(self):\n super().__init__()\n\n def disconnect(self):\n try:\n func_timeout(5, super().disconnect)\n except BrokenPipeError:\n #print(\"Broken pipe\")\n if self._helper:\n #print(\"SafePer terminating helper\")\n self._helper.terminate() # forcefully close helper\n self._helper.wait(0.001)\n self._helper = None\n except FunctionTimedOut:\n #print(\"Timed out\")\n if self._helper:\n #print(\"SafePer terminating helper\")\n self._helper.terminate() # forcefully close helper\n self._helper.wait(0.001)\n self._helper = None\n\n except Exception as e:\n #print(\"Exception\")\n print(str(e))\n\n\n\"\"\"SAAM device interface module.\"\"\"\n\n\nclass Device(ProcessImplementation):\n def __init__(\n self,\n name,\n deviceType,\n deviceMac,\n outputQueue: DeviceQueue = None,\n args=(),\n iface=None,\n device_addr_public: bool = True,\n logger=None,\n ):\n \"\"\"Initialize device\n\n Every device extends this class, and provides it's run function\n\n private variables accessed via property decorator, so they are not changed after creation\n \"\"\"\n ProcessImplementation.__init__(self, name=name, args=args)\n\n # set up regex that only keeps hex characters\n self._regex = re.compile(\"[^0123456789abcdef]\")\n\n # if no mac address is given, give up\n if not deviceMac:\n raise Exception(\"DEV: mac address is not known!\")\n\n self.__deviceMac = deviceMac.lower()\n self.__deviceType = deviceType\n self.__deviceIface = iface\n self.__device_addr_type = (\n ADDR_TYPE_PUBLIC if device_addr_public else ADDR_TYPE_RANDOM\n )\n self._logger = logger\n\n # set up outbound comm channel\n self.__output = outputQueue\n\n # set up ble device\n self._peripheral = SafePeripheral().withDelegate(_Dev_callback_Impl(self))\n\n # set up receiving queue\n self.__input = DeviceQueue()\n\n @property\n def iface(self):\n return self.__deviceIface\n\n @property\n def deviceMac(self):\n return self.__deviceMac\n\n @property\n def deviceMacMinimal(self):\n return self._regex.sub(\"\", self.__deviceMac)\n\n @property\n def deviceType(self):\n return self.__deviceType\n\n @property\n def input(self) -> DeviceQueue:\n return self.__input\n\n @property\n def output(self) -> DeviceQueue:\n return self.__output\n\n @staticmethod\n def has_method(o, name):\n return callable(getattr(o, name, None))\n\n def _sendToOutput(self, msg: DeviceMessage):\n if not self.output:\n self._log(\"DEV: Output queue is not instanced\")\n return\n if not msg.sender:\n msg._sender = self\n self.output.push(msg)\n\n def _log(self, msg):\n if False:\n print(datetime.now(timezone.utc).strftime(\"%Y_%m_%d %T %f\"), msg)\n if 
self._logger:\n            self._logger.debug(\n                \"%s %s\", datetime.now(timezone.utc).strftime(\"%Y_%m_%d %T %f\"), msg\n            )\n\n    # helpers for logging (shorthand for log string to output queue)\n    def _sendToOutputInfo(self, msg):\n        self._sendToOutput(DeviceMessage(self, MsgType.LogInfo, data=msg))\n\n    def _sendToOutputDebug(self, msg):\n        self._sendToOutput(DeviceMessage(self, MsgType.LogDebug, data=msg))\n\n    def _sendToOutputError(self, msg):\n        self._sendToOutput(DeviceMessage(self, MsgType.LogError, data=msg))\n\n    def _sendToOutputDev(self, msg):\n        self._sendToOutput(DeviceMessage(self, MsgType.DeviceSpecific, data=msg))\n\n    def _sendToOutputStop(self, msg):\n        self._sendToOutput(DeviceMessage(self, MsgType.Stop, data=msg))\n\n    def startDevice(self):\n        \"\"\"Create process if not existing, and send START signal via queue\"\"\"\n        if not self.is_alive():\n            self.start()\n            self._log(\"DEV: Starting device thread {}\".format(self.name))\n\n    def stopDevice(self, blocking=False):\n        \"\"\"Send stop signal if running, and wait for join (if blocking stop is demanded)\"\"\"\n        self._log(\"DEV: Stop msg to {}\".format(self.name))\n        if self.is_alive():\n            self.__input.push(DeviceMessage(self, MsgType.Stop))\n            if blocking:\n                self._log(\"DEV: Waiting for thread {}\".format(self.name))\n                self.join()  # wait for thread to stop\n                self._log(\"DEV: {} considered stopped\".format(self.name))\n\n    def msgToDevice(self, msg: DeviceMessage):\n        self.__input.push(msg)\n\n    @property\n    def peripheral(self) -> SafePeripheral:\n        return self._peripheral\n\n    @property\n    def is_connected(self):\n        return self._is_connected()\n\n    def _is_connected(self, _recursive_depth=10):\n        try:\n            if self._peripheral.getState() == \"conn\":\n                return True\n        except Exception as e:\n            # this is a workaround for when dbus is very very busy, and getState is blocked by waiting messages on the bus\n            if (\n                (len(e.args) >= 2)\n                and (\"rsp\" in e.args[1])\n                and (e.args[1][\"rsp\"][0] == \"ntfy\")\n            ):\n                while self._peripheral.waitForNotifications(0.001):\n                    pass\n                return self._is_connected(_recursive_depth - 1)\n            if _recursive_depth == 0:\n                self._log(\"Exception while checking connection state!\")\n        return False\n\n    def disconnect_ble(self):\n        \"\"\"Call is_connected first to clear dbus;\n        if it is congested, a bunch of messages might be missed or the\n        disconnect command will never be fired\"\"\"\n        self._log(\"Disconnect called on the device!\")\n        self._peripheral.disconnect()\n\n        \"\"\"additional check/workaround for bluepy hanging, where we definitely kill the connection\"\"\"\n        thread_sleep(0.5)  # wait for disconnect to do its thing\n        helper = self.peripheral._helper\n        if helper:\n            self._log(\"Terminate helper!\")\n            helper.terminate()\n            helper.wait(0.01)\n\n    def run(self):\n        # run indefinitely until device is connected\n        if not Device.has_method(self, \"state_machine_step\"):\n            self._log('Device Implementation has no \"state_machine_step\" function!')\n            return\n\n        # connect ble peripheral\n        try:\n            self.peripheral.connect(\n                self.__deviceMac, self.__device_addr_type, self.__deviceIface\n            )\n        except:\n            pass\n\n        state_machine = 0\n        sent_stop = False\n        while self.is_connected:\n            try:\n                state_machine = func_timeout(\n                    120, self.state_machine_step, args=[state_machine]\n                )\n            except FunctionTimedOut as e:\n                self._log(\n                    \"Exiting device thread! State machine took too long! \\n{}\".format(\n
                        str(e)\n                    )\n                )\n                self._sendToOutputStop(self.deviceMac)\n                sent_stop = True\n                break\n            except Exception as e:\n                self._log(\"Exception occurred while connected\\n{}\".format(str(e)))\n                self.disconnect_ble()\n                break\n        else:\n            if state_machine == 0:\n                self._log(\"Unsuccessful connection attempt! (state_machine == 0)\")\n            else:\n                self._log(\"is_connected() func returned False (or error happened)!\")\n\n        try:\n            func_timeout(\n                30, self.disconnect_ble\n            )  # if there was an error, wrap this in timeout just in case\n        except FunctionTimedOut:\n            pass  # there is nothing we can do at this point...\n        self._log(\"Exiting device thread!\")\n        if not sent_stop:\n            self._sendToOutputStop(self.deviceMac)\n\n    @property\n    def services(self):\n        return self._peripheral.services\n\n    def handleNotification(self, cHandle, data):\n        self._log(\n            \"Received notification from handle [ REIMPLEMENT THIS! ] {}\".format(cHandle)\n        )\n        pass\n\n    def state_machine_step(self, step_number):\n        self._log(\n            \"uHub state machine running [ REIMPLEMENT THIS! ] {}\".format(step_number)\n        )\n        thread_sleep(1)\n        return 0\n\n    def _getCharacteristic(self, uuid, description=None) -> Characteristic:\n        uuid = UUID(uuid)\n        chs = self.peripheral.getCharacteristics(uuid=uuid)\n        if len(chs) > 0:\n            return chs[0]\n        else:\n            return None\n","repo_name":"biasizzo/Saam-Rpi-Libra.Scale","sub_path":"RPi/Applications/saam-ble/COMMON_abst/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20270176505","text":"import numpy as np\nimport pandas as pd\nimport vectorbt as vbt\n\n# Preferences\nnum = 30\n# metric = \"positions.win_rate\"  # total_return | positions.win_rate\n# metric = (\"max_drawdown\", )  # Must be a tuple\nmetric = \"total_return\"\ndata_file = \"data_1m.csv\"\n\n# Read data from csv\nbtc_price = pd.read_csv(data_file)[[\"timestamp\", \"close\"]]\nbtc_price[\"date\"] = pd.to_datetime(btc_price[\"timestamp\"], unit=\"s\")\nbtc_price = btc_price.set_index(\"date\")[\"close\"]\n\n# VectorBT part\nrsi = vbt.RSI.run(btc_price, window=100, short_name=\"rsi\")\n\n# Make a grid\n# entry_points = np.linspace(32.3, 33.8, num=num)\n# exit_points = np.linspace(63, 64.2, num=num)\nentry_points = np.linspace(30, 50, num=num)\nexit_points = np.linspace(58, 72, num=num)\n\ngrid = np.array(np.meshgrid(entry_points, exit_points)).T.reshape(-1, 2)\nentries = rsi.rsi_crossed_below(list(grid[:, [0]]))\nexits = rsi.rsi_crossed_above(list(grid[:, [1]]))\npf = vbt.Portfolio.from_signals(btc_price, entries, exits)\n\n# print(pf.stats())\n\npf_perf = pf.deep_getattr(metric)\n\npf_perf_matrix = pf_perf.vbt.unstack_to_df(\n    index_levels=\"rsi_crossed_above\", column_levels=\"rsi_crossed_below\"\n)\n\nprint(pf_perf_matrix)\n\npf_perf_matrix.vbt.heatmap(xaxis_title=\"entry\", yaxis_title=\"exit\").show()\n\n# pf.plot().show()\n","repo_name":"RomanchenkoAS/binance-trading-bot","sub_path":"backtest/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"42954443482","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Module describing the work with the database\"\"\"\n\nimport datetime\nimport os\n\nfrom pymongo import MongoClient\nfrom sqlalchemy import create_engine, exists\nfrom sqlalchemy.orm import sessionmaker\n\nfrom client.settings import DATABASE\nfrom .models import Base, Contact, MessageHistory, ConnectedUser, User\n
\n\nclass Repository:\n    \"\"\"\n    Wrapper class for working with the client database.\n    Uses an SQLite database; implemented with the\n    SQLAlchemy ORM using a declarative approach.\n    \"\"\"\n    def __init__(self, name):\n        self.user_name = name\n        if not os.path.exists(DATABASE):\n            os.mkdir(DATABASE)\n        self.engine = create_engine(\n            f'sqlite:///{os.path.join(DATABASE, f\"client_{name}.db\")}',\n            echo=False,\n            pool_recycle=7200,\n            connect_args={'check_same_thread': False})\n\n        Base.metadata.create_all(self.engine)\n        Session = sessionmaker(bind=self.engine)\n\n        self.session = Session()\n\n    def add_contact(self, user_name: str):\n        \"\"\"\n        Method that adds a contact to the database.\n        :param user_name: Contact name\n        :return:\n        \"\"\"\n        if not self.session.query(\n                exists().where(Contact.name == user_name)).scalar():\n            contact = Contact(user_name)\n            self.session.add(contact)\n            self.session.commit()\n            return contact\n\n    def add_client(self, user_name: str):\n        \"\"\"\n        Method that adds a connected client to the database.\n        :param user_name: Client name\n        :return:\n        \"\"\"\n        if not self.session.query(\n                exists().where(ConnectedUser.name == user_name)).scalar():\n            contact = ConnectedUser(user_name)\n            self.session.add(contact)\n            self.session.commit()\n            return contact\n\n    def del_contact(self, user_name: str):\n        \"\"\"\n        Method that deletes a specific contact.\n        :param user_name: Contact name\n        :return:\n        \"\"\"\n        self.session.query(Contact).filter_by(name=user_name).delete()\n        self.session.commit()\n\n    def clear_contacts(self):\n        \"\"\"\n        Method that clears the local contact list.\n        :return:\n        \"\"\"\n        self.session.query(Contact).delete()\n        self.session.query(ConnectedUser).delete()\n        self.session.commit()\n\n    def save_message(self, contact: str, direction: str, message: str):\n        \"\"\"\n        Method that saves a message to the database.\n        :param contact: Sender name.\n        :param direction: Message direction.\n        :param message: Message text.\n        :return:\n        \"\"\"\n        message_row = MessageHistory(contact, direction, message)\n        self.session.add(message_row)\n        self.session.commit()\n\n    def get_user_by_name(self, name: str):\n        \"\"\"\n        Method that fetches a user object by name.\n        :param name: Client name\n        :return: Client object\n        \"\"\"\n        user = self.session.query(User).filter(User.name == name)\n        return user.first() if user.count() else None\n\n    def add_user(self):\n        \"\"\"\n        Method that adds a user.\n        Creates a record in the table of users who have logged in.\n        :return:\n        \"\"\"\n        user = self.get_user_by_name(self.user_name)\n        if not user:\n            user = User(self.user_name)\n            self.session.add(user)\n            self.session.commit()\n        return user\n\n    def save_avatar(self, img_path):\n        \"\"\"\n        Method that stores the user's avatar.\n        :param img_path: Path to the image\n        :return:\n        \"\"\"\n        user = self.get_user_by_name(self.user_name)\n        if user:\n            user.avatar = img_path\n            self.session.add(user)\n            self.session.commit()\n\n    def get_history(self, contact=None) -> list:\n        \"\"\"\n        Method that returns the message history with a given user.\n        :param contact: Contact name\n        :return:\n        \"\"\"\n        query = self.session.query(MessageHistory)\n        if contact:\n            query = query.filter_by(contact=contact)\n        return [(history_row.contact, history_row.direction,\n                 history_row.message, history_row.time)\n                for history_row in query.all()]\n\n    def get_contacts(self) -> list:\n        \"\"\"\n        Method that returns the list of all contacts.\n        :return:\n        \"\"\"\n        query = self.session.query(Contact.name).all()\n        return [value for (value, ) in query]\n
\n    def get_connected(self, search=None) -> list:\n        \"\"\"\n        Method that returns the list of connected users.\n        :param search: Filter string\n        :return:\n        \"\"\"\n        if search:\n            search = f'%{search}%'\n            query = self.session.query(ConnectedUser.name).filter(\n                ConnectedUser.name.like(search)).all()\n        else:\n            query = self.session.query(ConnectedUser.name).all()\n        return [value for (value, ) in query]\n\n    def check_contact(self, contact: str) -> bool:\n        \"\"\"\n        Method that checks whether a contact exists.\n        :param contact: Contact name\n        :return:\n        \"\"\"\n        if self.session.query(Contact).filter_by(name=contact).count():\n            return True\n        else:\n            return False\n\n\nclass MongoRepository:\n    def __init__(self, name):\n        self.client = MongoClient()\n        self.db = self.client[f'client_{name}']\n        self.users = self.db.users\n        self.connected_users = self.db.connected_users\n        self.contacts = self.db.contacts\n        self.history = self.db.history\n        self.user_name = name\n\n    def add_contact(self, user_name: str):\n        contact = self.contacts.find_one({'user_name': user_name})\n        if not contact:\n            contact = {\n                'user_name': user_name,\n            }\n            contact = self.contacts.insert_one(contact)\n        return contact\n\n    def add_client(self, user_name: str):\n        client = self.connected_users.find_one({'user_name': user_name})\n        if not client:\n            client = {\n                'user_name': user_name,\n            }\n            client = self.connected_users.insert_one(client)\n        return client\n\n    def del_contact(self, user_name: str):\n        contact = self.contacts.find_one({'user_name': user_name})\n        if contact:\n            self.contacts.delete_one({'user_name': user_name})\n\n    def clear_contacts(self):\n        self.db.drop_collection('contacts')\n        self.db.drop_collection('connected_users')\n\n    def save_message(self, contact: str, direction: str, message: str):\n        history = {\n            'contact': contact,\n            'direction': direction,\n            'message': message,\n            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),\n        }\n        self.history.insert_one(history)\n\n    def get_user_by_name(self, name: str):\n        user = self.users.find_one({'user_name': name})\n        return user if user else None\n\n    def add_user(self):\n        user = self.get_user_by_name(self.user_name)\n        if not user:\n            user = {\n                'user_name': self.user_name,\n            }\n            user = self.users.insert_one(user)\n        return user\n\n    def save_avatar(self, img_path):\n        user = self.get_user_by_name(self.user_name)\n        if user:\n            values = {\"$set\": {'avatar': img_path}}\n            self.users.update_one(user, values)\n\n    def get_history(self, contact=None) -> list:\n        query = self.history.find()\n        if contact:\n            query = self.history.find({'contact': contact})\n        return [(history.get('contact'), history.get('direction'),\n                 history.get('message'), history.get('time'))\n                for history in query]\n\n    def get_contacts(self) -> list:\n        query = self.contacts.find()\n        return [value.get('user_name') for value in query]\n\n    def get_connected(self, search=None) -> list:\n        if search:\n            query = self.connected_users.find(\n                {'user_name': {\n                    '$regex': search\n                }})\n        else:\n            query = self.connected_users.find()\n        return [value.get('user_name') for value in query]\n\n    def check_contact(self, contact: str) -> bool:\n        if self.contacts.find_one({'user_name': contact}):\n            return True\n        else:\n            return False\n","repo_name":"SPELLGIRL/Python_professional","sub_path":"spell_messenger/client/spell_messenger_client/client/db/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":9276,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6214727523","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport missingno as msno\nfrom datetime import date\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler, RobustScaler\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\npd.set_option('display.width', 500)\n\n# Loading Data\ndef load():\n data = pd.read_csv(\"titanic.csv\")\n return data\n\ndf = load()\ndf.head()\n\ndf.columns = [col.upper() for col in df.columns]\n\n############ FEATURE ENGINEERING ############\n\n# Cabin bool\ndf[\"NEW_CABIN_BOOL\"] = df[\"CABIN\"].notnull().astype('int')\n# Name count\ndf[\"NEW_NAME_COUNT\"] = df[\"NAME\"].str.len()\n# name word count\ndf[\"NEW_NAME_WORD_COUNT\"] = df[\"NAME\"].apply(lambda x: len(str(x).split(\" \")))\n# name dr\ndf[\"NEW_NAME_DR\"] = df[\"NAME\"].apply(lambda x: len([x for x in x.split() if x.startswith(\"Dr\")]))\n# name title\ndf['NEW_TITLE'] = df.NAME.str.extract(' ([A-Za-z]+)\\.', expand=False)\n# family size\ndf[\"NEW_FAMILY_SIZE\"] = df[\"SIBSP\"] + df[\"PARCH\"] + 1\n# age_pclass\ndf[\"NEW_AGE_PCLASS\"] = df[\"AGE\"] * df[\"PCLASS\"]\n# is alone\ndf.loc[((df[\"PARCH\"] + df[\"SIBSP\"]) > 0), \"NEW_IS_ALONE\"] = \"NO\"\ndf.loc[((df[\"PARCH\"] + df[\"SIBSP\"]) == 0), \"NEW_IS_ALONE\"] = \"YES\"\n# age level\ndf.loc[(df[\"AGE\"] < 18), \"NEW_AGE_CAT\"] = 'young'\ndf.loc[((df[\"AGE\"] >= 18) & (df[\"AGE\"] < 56)), \"NEW_AGE_CAT\"] = 'mature'\ndf.loc[(df[\"AGE\"] > 56), \"NEW_AGE_CAT\"] = 'senior'\n# sex x age\ndf.loc[((df[\"SEX\"] == \"male\") & (df[\"AGE\"] < 21)), \"NEW_SEX_CAT\"] = 'youngmale'\ndf.loc[((df[\"SEX\"] == \"male\") & (df[\"AGE\"] > 21) & (df[\"AGE\"] < 50)), \"NEW_SEX_CAT\"] = 'maturemale'\ndf.loc[((df[\"SEX\"] == \"male\") & (df[\"AGE\"] > 50)), \"NEW_SEX_CAT\"] = 'seniormale'\ndf.loc[((df[\"SEX\"] == \"female\") & (df[\"AGE\"] < 21)), \"NEW_SEX_CAT\"] = 'youngfemale'\ndf.loc[((df[\"SEX\"] == \"female\") & (df[\"AGE\"] > 21) & (df[\"AGE\"] < 50)), \"NEW_SEX_CAT\"] = 'maturefemale'\ndf.loc[((df[\"SEX\"] == \"female\") & (df[\"AGE\"] > 50)), \"NEW_SEX_CAT\"] = 'seniorfemale'\n\ndf.head(20)\ndf.info()\n\ndef grab_col_names(dataframe, cat_th=10, car_th=20):\n\n #cat_cols\n cat_cols=[col for col in dataframe.columns if dataframe[col].dtypes == \"O\"]\n num_but_cat=[col for col in dataframe.columns if dataframe[col].dtypes != 'O' and dataframe[col].nunique() < cat_th]\n cat_but_car = [col for col in dataframe.columns if dataframe[col].dtypes == 'O' and dataframe[col].nunique() > car_th]\n cat_cols = cat_cols + num_but_cat\n cat_cols = [col for col in cat_cols if col not in cat_but_car]\n\n #num_cols\n num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']\n num_cols = [col for col in num_cols if col not in cat_cols]\n\n return cat_cols, num_cols, cat_but_car\n\ncat_cols, num_cols, cat_but_car = grab_col_names(df)\n\nnum_cols = [col for col in num_cols if \"PASSENGERID\" not in col]\n\n############ OUTLIERS ############\n\ndef outlier_threshold(dataframe, col_name, q1=0.25, q3=0.75):\n quartile1 = dataframe[col_name].quantile(q1)\n quartile3 = dataframe[col_name].quantile(q3)\n interquartile_range = quartile3-quartile1\n up_limit = quartile3 + 1.5 * interquartile_range\n low_limit = quartile1 - 1.5 * 
interquartile_range\n return low_limit, up_limit\n\ndef check_outlier(dataframe, col_name):\n low_limit, up_limit = outlier_threshold(dataframe, col_name)\n if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False\n\ndef grab_outliers(dataframe, col_name, index=False):\n low, up = outlier_threshold(dataframe, col_name)\n\n if dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))].shape[0] > 10:\n print(dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))].head())\n else:\n print(dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))])\n\n if index:\n outlier_index = dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))].index\n return outlier_index\n\ndef replace_with_thresholds(dataframe, variable):\n low_limit, up_limit = outlier_threshold(dataframe, variable)\n dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit\n dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit\n\nfor col in num_cols:\n print(col, check_outlier(df, col))\n\n# Replacing thresholds for outliers\nfor col in num_cols:\n replace_with_thresholds(df, col)\n\n############ MISSING VALUES ############\n\ndef missing_values_table(dataframe, na_name=False):\n na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]\n\n n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)\n ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)\n missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])\n print(missing_df, end=\"\\n\")\n\n if na_name:\n return na_columns\n\nmissing_values_table(df)\n\n# dropped the columns we filled in\ndf.drop(\"CABIN\", inplace=True, axis=1)\nremove_cols = [\"TICKET\", \"NAME\"]\ndf.drop(remove_cols, inplace=True, axis=1)\n\n# filled na variables in age column by median\ndf[\"AGE\"] = df[\"AGE\"].fillna(df.groupby(\"NEW_TITLE\")[\"AGE\"].transform(\"median\"))\n\n### redefining the age columns\n# age_pclass\ndf[\"NEW_AGE_PCLASS\"] = df[\"AGE\"] * df[\"PCLASS\"]\n# age level\ndf.loc[(df['AGE'] < 18), 'NEW_AGE_CAT'] = 'young'\ndf.loc[(df['AGE'] >= 18) & (df['AGE'] < 56), 'NEW_AGE_CAT'] = 'mature'\ndf.loc[(df['AGE'] >= 56), 'NEW_AGE_CAT'] = 'senior'\n# sex x age\ndf.loc[(df['SEX'] == 'male') & (df['AGE'] <= 21), 'NEW_SEX_CAT'] = 'youngmale'\ndf.loc[(df['SEX'] == 'male') & (df['AGE'] > 21) & (df['AGE'] < 50), 'NEW_SEX_CAT'] = 'maturemale'\ndf.loc[(df['SEX'] == 'male') & (df['AGE'] >= 50), 'NEW_SEX_CAT'] = 'seniormale'\ndf.loc[(df['SEX'] == 'female') & (df['AGE'] <= 21), 'NEW_SEX_CAT'] = 'youngfemale'\ndf.loc[(df['SEX'] == 'female') & (df['AGE'] > 21) & (df['AGE'] < 50), 'NEW_SEX_CAT'] = 'maturefemale'\ndf.loc[(df['SEX'] == 'female') & (df['AGE'] >= 50), 'NEW_SEX_CAT'] = 'seniorfemale'\n\nmissing_values_table(df)\ndf = df.apply(lambda x: x.fillna(x.mode()[0]) if (x.dtype == \"O\" and len(x.unique()) <= 10) else x, axis=0)\n\n############ LABEL ENCODING ############\n\nbinary_cols = [col for col in df.columns if df[col].dtype not in [int, float, \"int64\"]\n and df[col].nunique()==2]\n\ndef label_encoder(dataframe, binary_col):\n labelencoder = LabelEncoder()\n dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])\n return dataframe\n\nfor col in binary_cols:\n label_encoder(df, col)\n\n\n############ RARE ENCODING ############\n\ndef rare_analyser(dataframe, target, cat_cols):\n for col in 
cat_cols:\n print(col, \":\", len(dataframe[col].value_counts()))\n print(pd.DataFrame({\"COUNT\": dataframe[col].value_counts(),\n \"RATIO\": dataframe[col].value_counts() / len(dataframe),\n \"TARGET_MEAN\": dataframe.groupby(col)[target].mean()}), end=\"\\n\\n\\n\")\ndef rare_encoder(dataframe, rare_perc):\n temp_df = dataframe.copy()\n\n rare_columns = [col for col in temp_df.columns if temp_df[col].dtypes == 'O'\n and (temp_df[col].value_counts() / len(temp_df) < rare_perc).any(axis=None)]\n\n for var in rare_columns:\n tmp = temp_df[var].value_counts() / len(temp_df)\n rare_labels = tmp[tmp < rare_perc].index\n temp_df[var] = np.where(temp_df[var].isin(rare_labels), 'Rare', temp_df[var])\n\n return temp_df\n\nrare_analyser(df, \"SURVIVED\", cat_cols)\ndf = rare_encoder(df, 0.01)\n\ndf[\"NEW_TITLE\"].value_counts()\n\n\n############ ONE-HOT ENCODING ############\n\ndef one_hot_encoder(dataframe, categorical_cols, drop_first=True):\n dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)\n return dataframe\n\nohe_cols = [col for col in df.columns if 10 >= df[col].nunique() > 2]\n\ndf = one_hot_encoder(df, ohe_cols)\n\ncat_cols, num_cols, cat_but_car = grab_col_names(df)\nnum_cols = [col for col in num_cols if \"PASSENGERID\" not in col]\n\nrare_analyser(df, \"SURVIVED\", cat_cols)\n\nuseless_cols = [col for col in df.columns if df[col].nunique() == 2 and\n (df[col].value_counts() / len(df) < 0.01).any(axis=None)]\n\n# df.drop(useless_cols, axis=1, inplace=True)\n\n############ STANDART SCALER ############\n\nscaler = StandardScaler()\ndf[num_cols] = scaler.fit_transform(df[num_cols])\n\ndf[num_cols].head()\n\n############ MODEL ############\n\ny = df[\"SURVIVED\"]\nX = df.drop([\"SURVIVED\", \"PASSENGERID\"], axis=1)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=17)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nrf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)\ny_pred = rf_model.predict(X_test)\naccuracy_score(y_pred, y_test)\n\n","repo_name":"MuhittinTanoba/ML-Bootcamp","sub_path":"Projects/Titanic-Feature Engineering & Data Preprocessing.py","file_name":"Titanic-Feature Engineering & Data Preprocessing.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"2840098242","text":"# Load digital dictionary file as a list of words\nfile = open(\"dictionary.txt\", \"r\")\n# Accept a word from a user\ninput_word = input(\"Enter a word to check for anagram: >>\")\n# Create an empty list to hold anagrams\nanagram_list = []\n# Sort the user-word\ninput_word = input_word.lower()\nuser_word = sorted(input_word)\n# Loop through each word in the word list:\nfor word in file:\n word = word.strip().lower()\n # Sort the word\n word = sorted(word)\n # if word sorted is equal to user-word sorted:\n if word == user_word:\n # Append word to anagrams list\n anagram_list.append(word)\n# Print anagrams list\nprint(anagram_list)","repo_name":"mikefreeman90/Impractical_Projects","sub_path":"Chapter3/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20546711341","text":"\nimport open3d as o3d\nimport numpy as np\n\nprint(\"Load a ply point cloud, print it, and render it\")\nply_point_cloud = o3d.data.PLYPointCloud()\npcd = 
o3d.io.read_point_cloud(ply_point_cloud.path)\nprint(pcd)\nprint(np.asarray(pcd.points))\n\no3d.visualization.draw_plotly([pcd],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])\n","repo_name":"davidscmx/camera_lidar_fusion","sub_path":"tests/test_open3d.py","file_name":"test_open3d.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10200834013","text":"import re\n\ns = input()\ns\nvw = 'aeiou'\nkev=0\nst =0\nfor i in range(len(s)):\n if s[i].lower() in vw:\n kev += (len(s) - i)\n else:\n st += (len(s)-i)\n\nif kev > st:\n print(\"Kevin\", kev)\nelif kev < st:\n print(\"Stuart\", st)\nelse:\n print('Draw')\n","repo_name":"sivatoms/PyReddy","sub_path":"HackerRankMinionGame.py","file_name":"HackerRankMinionGame.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71464443415","text":"# -*- coding: utf-8 -*\nfrom annoying.decorators import render_to\nfrom django.contrib.auth.decorators import login_required\nfrom store.views import get_global_data, handle_uploaded_file\nfrom accounts.models import Profile, Legal, Delivery\nfrom accounts.forms import LoginForm, ProfileForm, LegalForm, DeliveryForm\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom store.send_html_mail import send_html_mail\nimport md5\n\n@render_to('login.html')\ndef signlogup(request):\n data = get_global_data(request)\n data['message'] = False\n if request.user.is_authenticated():\n return HttpResponseRedirect('/profile/')\n elif request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid() and 'password' in request.POST:\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n request.session.set_expiry(60*60*24*365)\n login(request, user)\n return HttpResponseRedirect('/profile/')\n\n else:\n data['message'] = 'Эккаунт заблокирован'\n else:\n data['message'] = 'Неправильная пара логин/пароль'\n\n elif 'username' in request.POST:\n username = request.POST['username']\n try:\n user = User.objects.create_user(username, username, username)\n user.is_active = False\n user.is_staff = False\n activate_code = md5.new(username).hexdigest()\n send_html_mail('Activation letter', username, {'name': username, 'code': activate_code}, 'emails/activate_letter.html', sender='web@fastprint.info')\n user.save()\n profile = Profile(user = user, phone = '+7 ')\n profile.save()\n return { 'message': 'Спасибо за регистрацию. Вам выслано письмо с инструкциями по активации эккаунта.' }\n except:\n if User.objects.get(username=username) is not None:\n data['message'] = 'Пользователь с таким e-mail уже зарегистрирован'\n else:\n data['message'] = 'Неизвестная ошибка при добавлении пользователя. 
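In the Minion Game snippet just above, the whole trick is that len(s) - i equals the number of substrings starting at index i, so a single pass over the string scores every substring for Kevin (vowel starts) and Stuart (consonant starts); the bare s on its own line is a leftover no-op from an interactive session. A brute-force cross-check of that counting argument:

def minion_scores(s):
    # One-pass scoring, same logic as the snippet above
    kevin = stuart = 0
    for i, ch in enumerate(s.lower()):
        if ch in "aeiou":
            kevin += len(s) - i   # number of substrings starting at index i
        else:
            stuart += len(s) - i
    return kevin, stuart

def minion_scores_brute(s):
    # Enumerate every substring explicitly to verify the shortcut
    s = s.lower()
    kevin = stuart = 0
    for i in range(len(s)):
        for j in range(i + 1, len(s) + 1):
            if s[i] in "aeiou":
                kevin += 1
            else:
                stuart += 1
    return kevin, stuart

assert minion_scores("banana") == minion_scores_brute("banana") == (9, 12)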
Обратитесь в поддержку'\n\n else:\n form = LoginForm()\n\n data['login_form'] = form\n return data\n\ndef kickout(request):\n logout(request)\n return HttpResponseRedirect('/login/')\n\ndef confirm_email(request, username, code):\n try:\n user = User.objects.get(username = username)\n if user.username == username and md5.new(username).hexdigest() == code:\n user.is_active = True\n user.save()\n return HttpResponseRedirect('/activate/complite/')\n return HttpResponseRedirect('/activate/failed/')\n except:\n return HttpResponseRedirect('/activate/failed/')\n\n@login_required\ndef update(request):\n '''\n Данная функция отвечает за обноление всех данных о пользователе: Профиль, Юр. данные, Инф. о доставке.\n Она принимает на вход всего 1 форму и в зависимости от параметров расбрасывает эти данные в набор моделей.\n '''\n if request.method == 'POST': # Если пришли данные от формы\n form = ProfileForm(request.POST, request.FILES) # Создадним экземпляр формы\n if form.is_valid(): # Если все данные введены корректно...\n # Если произошли изменения в модели пользователя\n user = User.objects.get(id=request.user.id)\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.save()\n \n # Если произошли изменения в модели профиля\n profile = Profile.objects.get(user=user)\n profile.phone = form.cleaned_data['phone']\n is_legal = int(form.cleaned_data['is_legal'])\n # Подгрузка аватарки на сервер\n try:\n profile.img = handle_uploaded_file(request.FILES['img'], 'user_pic')\n except:\n pass\n # Проверка, является ли пользователь юр.лицом\n if is_legal == 0:\n profile.is_legal = False\n else:\n profile.is_legal = True\n profile.save()\n\n # Если пользователь становится юр.лицом, необходимо сохранить в базе его юридические данные\n form = LegalForm(request.POST)\n if form.is_valid():\n try:\n legal = Legal.objects.get(profile=profile)\n legal.title = form.cleaned_data['legal_name']\n legal.inn = form.cleaned_data['inn']\n legal.kpp = form.cleaned_data['kpp']\n legal.bik = form.cleaned_data['bik']\n legal.rs = int(form.cleaned_data['rs'])\n legal.ks = int(form.cleaned_data['ks'])\n legal.post = form.cleaned_data['post']\n except:\n legal = Legal(\n profile = profile,\n title = form.cleaned_data['legal_name'],\n inn = form.cleaned_data['inn'],\n kpp = form.cleaned_data['kpp'],\n bik = form.cleaned_data['bik'],\n rs = int(form.cleaned_data['rs']),\n ks = int(form.cleaned_data['ks']),\n post = form.cleaned_data['post'],\n )\n legal.save()\n\n # Если пользователь вводит свои данные по доставке\n form = DeliveryForm(request.POST)\n if form.is_valid():\n try:\n delivery = Delivery.objects.get(profile=profile)\n delivery.title = form.cleaned_data['delivery_name']\n delivery.address = form.cleaned_data['address']\n delivery.city = form.cleaned_data['city']\n except:\n delivery = Delivery(\n profile = profile,\n title = form.cleaned_data['delivery_name'],\n address = form.cleaned_data['address'],\n city = form.cleaned_data['city']\n )\n delivery.save()\n\n\n return HttpResponseRedirect('/profile/')\n\n@login_required\n@render_to('profile.html')\ndef profile(request, *args):\n data = get_global_data(request)\n # Получаем данные профиля\n user = {\n 'first_name':request.user.first_name,\n 'last_name':request.user.last_name,\n 'email':request.user.email,\n 'password':request.user.password,\n }\n profile = Profile.objects.get(user=request.user)\n if profile is not None:\n user['phone'] = profile.phone\n user['img'] = profile.img\n user['is_legal'] = 
profile.is_legal\n\n # Получаем данные ЮЛ\n try:\n legal = Legal.objects.get(profile=profile)\n if legal is not None:\n legal = {\n 'legal_name': legal.title,\n 'inn': legal.inn,\n 'kpp': legal.kpp,\n 'bik': legal.bik,\n 'rs': legal.rs,\n 'ks': legal.ks,\n 'post': legal.post,\n }\n except:\n legal = {}\n\n # Получаем данные доставки\n try:\n delivery = Delivery.objects.get(profile=profile)\n if delivery is not None:\n delivery = {\n 'delivery_name': delivery.title,\n 'city': delivery.city,\n 'address': delivery.address,\n }\n except:\n delivery = {}\n\n data['form_profile'] = ProfileForm(user)\n data['form_legal'] = LegalForm(legal)\n data['form_delivery'] = DeliveryForm(delivery)\n return data","repo_name":"fastprint/fastprint","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8741,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72625380054","text":"import re, os, sys\nimport pandas as pd\n\nclass HiddenPrints:\n def __enter__(self):\n self._original_stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sys.stdout.close()\n sys.stdout = self._original_stdout\n\n#所有标题识别正则表达式及其对应id\nPATTERN_TITLE_LIST = {\n '\\??[一二三四五六七八九十]+\\s*、': 0,\n '(\\s*[一二三四五六七八九十]+\\s*)|\\(\\s*[一二三四五六七八九十]+\\s*\\)': 1,\n '\\??\\d+\\s*[\\.\\.]': 2,\n '\\??\\d{1,2}\\s?[\\..]\\s?\\d{1,2}': 3,\n '\\(?\\d+\\)|(?\\d+)': 4,\n '\\??\\d+\\s*、': 5,\n '附\\s*[录表][一二三四五六七八九十0-9]?[:\\:\\d+]?': 10,\n '[①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳]':11,\n '模\\s*块?\\s*[一二三四五六七八九十0-9]\\s*[、:]?': 997,\n '项\\s*目\\s*[一二三四五六七八九十0-9]\\s*、?': 998,\n '表\\d+\\s': 999, # [\\u4e00-\\u9fa5_a-zA-Z0-9]{2,6}\\s*[\\::]\n}\nPATTERN_LABEL_NUMBER = r'[^一二三四五六七八九十0-9①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳]+'\n\ndef initWithLabels(text:str) -> str:\n \"\"\"\n :param text:待清洗的文本\n :return: 带有文章结构标签的文本\n \"\"\"\n\n text = re.sub(r'[\\s\\t\\u3000\\n\\?]{2,}', '\\n', text)\n for title_pattern in PATTERN_TITLE_LIST:\n # print('>> ', title_pattern)\n # 先全标\n pattern = r\"([\\n\\s\\t]+)(\" + title_pattern + r\")(\\s*[\\u4e00-\\u9fa5_a-zA-Z])\"\n text = re.sub(pattern,\n r'\\1\\2\\3', text)\n return text\n\n\ndef lastNumber(current:str)->str:\n \"\"\"\n :param current:当前文章标题编码\n :return: 当前所处标题的前一个同级标题编码\n \"\"\"\n print('in lastNumber:', current)\n try:\n current = re.findall(f'(.*?)', current)[0]\n except:\n pass\n current = re.sub('', '', current) # re.findall(f'(.*?)', current)\n label = re.sub(PATTERN_LABEL_NUMBER, '', current)\n if label in '一 二 三 四 五 六 七 八 九 十 十一 十二 十三 十四 十五'.split(' '):\n tmp = '一 二 三 四 五 六 七 八 九 十 十一 十二 十三 十四 十五'.split(' ')\n try:\n return tmp[tmp.index(label) - 1]\n except:\n pass\n elif label in '① ② ③ ④ ⑤ ⑥ ⑦ ⑧ ⑨ ⑩ ⑪ ⑫ ⑬ ⑭ ⑮ ⑯ ⑰ ⑱ ⑲ ⑳'.split(' '):\n tmp = '① ② ③ ④ ⑤ ⑥ ⑦ ⑧ ⑨ ⑩ ⑪ ⑫ ⑬ ⑭ ⑮ ⑯ ⑰ ⑱ ⑲ ⑳'.split(' ')\n try:\n return tmp[tmp.index(label) - 1]\n except:\n pass\n else:\n try:\n return str(int(label) - 1)\n except:\n pass # TODO: 没想到还有什么情况\n return str(-1)\n\n\ndef lastTitle(number, current):\n if len(re.findall('[0-9一二三四五六七八九十]+', current)) == 1:\n return re.sub('[0-9一二三四五六七八九十]+', number, current)\n\n\ndef getIndex(above, id, current, below):\n if len(above) > 0:\n print('en(above) > 0')\n if isFirst(current):\n print('>> 当前语句为标题1')\n result = re.findall(f'(.*?)', above)\n if len(result) > 0:\n print(result[-1])\n if int(result[-1][0]) == id:\n return int(result[-1][1])\n else:\n return int(result[-1][1]) + 1\n else:\n last_number = lastNumber(current)\n print('last number: ', last_number)\n if last_number != 
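One observation on the account views above: the activation code is md5.new(username).hexdigest(), so anyone who knows the e-mail address can derive the code, and the standalone md5 module is long deprecated in favor of hashlib. A hedged sketch of a keyed replacement; the secret and the function names are illustrative, not part of this project:

import hashlib
import hmac

SECRET = b"use-the-project-SECRET_KEY-here"  # hypothetical placeholder

def make_activation_code(username):
    # Keyed digest: cannot be recomputed from the e-mail address alone,
    # unlike the plain md5(username) used in signlogup/confirm_email above
    return hmac.new(SECRET, username.encode("utf-8"), hashlib.sha256).hexdigest()

def check_activation_code(username, code):
    # Constant-time comparison avoids timing leaks
    return hmac.compare_digest(make_activation_code(username), code)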
'-1':\n print('last number:::', last_number, current)\n label = re.findall(\n f'(.*?)[\\s\\t]*?(.*?)[\\s\\t\\n]',\n above)\n if len(label) > 0:\n print('>> 对应标签记录: ', label)\n zidx = [int(lab[0]) for lab in label]\n content = [lab[1] for lab in label]\n df = pd.DataFrame(columns=['z_index', 'content', 'title'])\n df['z_index'] = zidx\n df['content'] = content\n df['title'] = [lab[-1] for lab in label]\n print('>> all last label: ', df)\n df = df.drop_duplicates(['z_index'], keep='last')\n possible_index, possible_title = [], []\n for index, row in df.iterrows():\n if last_number == re.sub(PATTERN_LABEL_NUMBER, '', row['content']):\n print(row['content'])\n possible_index.append(row['z_index'])\n possible_title.append(row['title'])\n print('possible_index: ', possible_index)\n if len(possible_index) == 1:\n return possible_index[0]\n elif len(possible_index) > 1:\n coincidence, most_plossible_index = 0, -1\n current_title = re.sub(f'.*?|[\\s\\t\\n]', '', current)\n print('当前标题内容:', current_title, possible_index)\n for i in range(len(possible_index)): # [::-1], possible_title[::-1])\n idx, text = possible_index[::-1][i], possible_title[::-1][i]\n print(\"过往标题内容:\", idx, text)\n if i < len(possible_index) - 1:\n context = \\\n re.split(f'',\n above)[-1]\n context = f''.join(\n re.split(f'', context)[:-1])\n all_label = re.findall(r'', context)\n tmp_index = 999\n for lab in all_label[::-1]:\n past_idx = re.findall(r'', lab)[0]\n if tmp_index <= int(past_idx): continue\n past_class = re.findall(r'', lab)[0]\n print(past_idx, past_class,\n f'(.*?)')\n past_label = \\\n re.findall(f'(.*?)',\n context)[-1]\n next_label = re.findall(f'(.*?)',\n below)\n if len(next_label) > 0:\n if re.sub(PATTERN_LABEL_NUMBER, '', past_label) == lastNumber(\n re.sub(PATTERN_LABEL_NUMBER, '', next_label[0])): return idx\n tmp_index = int(past_idx)\n coinci = len(set(current_title).intersection(set(re.sub('[\\s\\n\\t]', '', text))))\n print('重合率: ', coinci)\n if coinci > coincidence:\n coincidence, most_plossible_index = coinci, idx\n if most_plossible_index == -1:\n return possible_index[-1]\n else:\n return most_plossible_index\n else:\n print('无相关过往纪录', id)\n return 0\n\n\ndef isFirst(current):\n current = re.sub('.*?$', '', current)\n current = re.sub('', '', current)\n current_tag = re.sub(PATTERN_LABEL_NUMBER, '', current)\n print('标题数: ', current_tag)\n try:\n (str(current_tag) in ['1', '一', '①'] and not ('.' in current or '.' in current)) or \\\n (list(str(current_tag))[-1] in ['1', '一'] and ('.' in current or '.' in current))\n except:\n print()\n return (str(current_tag) in ['1', '一', '①'] and not ('.' in current or '.' in current)) or \\\n (list(str(current_tag))[-1] in ['1', '一'] and ('.' in current or '.' 
in current))\n\n\ndef stuctureIdentifier(text:str)->str:\n \"\"\"\n :param text:待清洗的文本\n :return: 识别并重新定义个标题层级关系的文本\n \"\"\"\n above = ''\n below = initWithLabels(text)\n\n while len(below) > 0:\n # print('已完成: ',above)\n\n print('\\n************************** loop **********************************')\n # print(below)\n pos = re.search(r\"\\(.*?)\\[\\s\\t]*?(.*?)[\\s\\t\\n]\", below)\n if pos == None: return above + below\n print('>> 当前语句: ', below[pos.start():pos.end()])\n id = (re.findall(r\"\\.*?\\\", below[pos.start():pos.end()])[0])\n print('>> 当前标题id: ', id)\n z_index = getIndex(above + below[:pos.start()], id, below[pos.start():pos.end()], below[pos.end():])\n print('图层: ', str(z_index).zfill(3))\n if int(id) >= 0:\n above += below[:pos.start()]\n above += re.sub(r'_\\d+(>)', '_' + str(z_index).zfill(3) + '>', below[pos.start():pos.end()])\n print('add above')\n below = below[pos.end():]\n continue\n above += below[:pos.end()]\n below = below[pos.end():]\n return above\n\ndef addLable(text:str)->str:\n \"\"\"\n :param text: 待清洗的文本\n :return: 最终还有文章结构的文本\n console不输出任何log\n \"\"\"\n with HiddenPrints():\n text = stuctureIdentifier(re.sub('\\n+', '\\n', text))\n return text\n\nif __name__ == '__main__':\n pass","repo_name":"zzyLT/GeneralTextCleaner","sub_path":"filter/SentenceExtractor/text_structure.py","file_name":"text_structure.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43618225191","text":"\nrows = []\n\nwith open('input2.txt') as f:\n for l in f:\n row = [int(a) for a in l.strip().split(\"\\t\") ]\n rows.append(sorted(row))\n\nchecksum = 0\nfor r in rows:\n diff = r[-1] - r[0]\n checksum += diff\nprint(checksum)\n\nchecksum2 = 0\nfor r in rows:\n for i, num in enumerate(r):\n for j in range(len(r)):\n if (1.0 * num) % r[j] == 0 and i != j:\n checksum2 += (1.0 * num) / r[j]\n print(str(num) + \" and \" + str(r[j]) )\n \nprint (checksum2)\n","repo_name":"slashbreak/Advent-of-Code","sub_path":"2017/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39082043356","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n### Softmax Classification 실습 ##\r\n### 데이터 : 공유폴더내/iris.csv\r\nxy = np.loadtxt('iris.csv',delimiter=',',dtype=np.float32)\r\nx_data = xy[:,0:-1]\r\ny_data = xy[:,[-1]]\r\n\r\n### Softmax Classification ####\r\n# xy = np.loadtxt('data-04-zoo.csv', delimiter=\",\", dtype=np.float32)\r\n# x_data = xy[:,0:-1]\r\n# y_data = xy[:,[-1]]\r\n\r\n# print (x_data, y_data)\r\n\r\nX = tf.placeholder(tf.float32, [None,4])\r\nY = tf.placeholder(tf.int32, [None,1])\r\n\r\n# One Hot Encoding\r\nY_one_hot = tf.one_hot(Y,4)\r\nY_one_hot = tf.reshape(Y_one_hot,[-1,4])\r\n\r\nW = tf.Variable(tf.random_normal([4,4]), name='weight')\r\nb = tf.Variable(tf.random_normal([4]), name='bias')\r\n\r\nlogits = tf.matmul(X,W)+b\r\nhypothesis = tf.nn.softmax(logits)\r\n\r\n# Cross Entropy\r\ncost_i = tf.nn.softmax_cross_entropy_with_logits(\r\n\t\tlogits=logits, labels=Y_one_hot)\r\ncost = tf.reduce_mean(cost_i)\r\n\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)\r\n\r\n# accuracy 계산\r\nprediction = tf.argmax(hypothesis,1)\r\ncorrect_prediction = tf.equal(prediction, tf.argmax(Y_one_hot,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\nwith tf.Session() as 
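The second checksum loop in the Advent of Code snippet above compares every ordered pair, guards i != j by hand, and leans on float arithmetic (1.0 * num). The same evenly-divisible-pair search stays in integers with itertools.permutations, which never pairs a position with itself:

from itertools import permutations

def divisible_value(row):
    # Each puzzle row is promised to contain exactly one pair where a % b == 0
    for a, b in permutations(row, 2):
        if a % b == 0:
            return a // b
    return 0

assert divisible_value([5, 9, 2, 8]) == 4   # 8 / 2, the puzzle's own example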
sess:\r\n\tsess.run(tf.global_variables_initializer())\r\n\r\n\tfor step in range(2000):\r\n\t\tsess.run(optimizer, feed_dict={X:x_data, Y:y_data})\r\n\t\tif step % 100:\r\n\t\t\tloss, acc = sess.run([cost, accuracy], \r\n\t\t\t\t\t\t\tfeed_dict={X:x_data, Y:y_data})\r\n\t\t\tprint (\"Step=\",step,\"Loss=\",loss,\"Acc=\",acc,\"\\n\")\r\n\r\n\tpred = sess.run(prediction, \r\n\t\t\t\t\t\t\tfeed_dict={X:x_data})\r\n\r\n\t# 실제 값과 비교\r\n\tfor p, y in zip(pred, y_data.flatten()):\r\n\t\tprint (p,y)\r\n\r\n########## 복습 #################\r\n# Regression : x -> 분류, x -> y\r\n# 공부한 시간 -> 내 점수 예측\r\n# x_data = [1,2,3,4,5,6]\r\n# y_data = [3,6,9,12,15,18]\r\n\r\n# W = tf.Variable(tf.random_normal([1]), name='weight')\r\n# b = tf.Variable(tf.random_normal([1]), name='bias')\r\n\r\n# hypothesis = x_data * W + b\r\n# cost = tf.reduce_mean(tf.square(hypothesis-y_data))\r\n# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\r\n# train = optimizer.minimize(cost)\r\n\r\n# sess = tf.Session()\r\n# sess.run(tf.global_variables_initializer())\r\n\r\n# for step in range(2000):\r\n# \tsess.run(train)\r\n# \tif step % 20 == 0:\r\n# \t\tprint (step, sess.run(cost), sess.run(W), sess.run(b))\r\n\r\n####### Logistic Classification ##############\r\n# xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)\r\n# x_data = xy[:,0:-1]\r\n# y_data = xy[:,[-1]]\r\n\r\n# # x_data = [[1,2],[2,3],[4,1],[4,3],[5,3],[6,2]]\r\n# # y_data = [[0],[0],[0],[1],[1],[1]]\r\n\r\n# X = tf.placeholder(tf.float32, shape=[None,8])\r\n# Y = tf.placeholder(tf.float32, shape=[None,1])\r\n\r\n# W = tf.Variable(tf.random_normal([8,1]), name='weight')\r\n# b = tf.Variable(tf.random_normal([1]), name='bias')\r\n\r\n# hypothesis = tf.sigmoid(tf.matmul(X,W)+b)\r\n# # hypothesis = tf.div(1,(1+tf.exp(tf.matmul(X,W)+b)))\r\n# cost = -tf.reduce_mean(Y * tf.log(hypothesis)+ (1-Y) * tf.log(1-hypothesis))\r\n# train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)\r\n\r\n# predicted = tf.cast(hypothesis>0.5, dtype=tf.float32)\r\n# accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))\r\n\r\n# with tf.Session() as sess:\r\n# \tsess.run(tf.global_variables_initializer())\r\n\r\n# \tfor i in range(10000):\r\n# \t\tcost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})\r\n# \t\tif i % 200 == 0:\r\n# \t\t\tprint (i, cost_val)\r\n\r\n# \th,c,a = sess.run([hypothesis, predicted, accuracy],\r\n# \t\t\t\tfeed_dict={X:x_data, Y:y_data})\r\n# \tprint (\"\\nHypothesis=\",h,\"\\nCorrect=\",c,\"\\nAccuracy=\",a)\r\n\r\n\r\n# 주식정보 가져오는거\r\n# pip install googlefinance.client\r\n# from googlefinance.client import get_price_data\r\n\r\n# param = {\r\n# \t'q':\"GOOGL\", # Stock Symbol\r\n# \t'i':\"86400\", # Interval size(second)\r\n# \t'x':\"NASD\", # Stock exchange symbol\r\n# \t'p':\"1Y\" # Period\r\n# }\r\n\r\n# result = get_price_data(param)\r\n# print (result)","repo_name":"classHANA/python_practice","sub_path":"180103.py","file_name":"180103.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42405414550","text":"from logging import info\n\nfrom .test_scrum_common import TestScrumCommon\n\n\nclass TestScrumTask(TestScrumCommon):\n def setUp(self):\n super(TestScrumTask, self).setUp()\n\n def test_scrum_task(self):\n if not self.project_tasks:\n raise AssertionError('Error in data. 
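Two notes on the softmax classifier above: the guard if step % 100: is truthy on all but every 100th step, so it logs almost every iteration; if step % 100 == 0: was almost certainly intended. And the tf.reshape after tf.one_hot is needed because one-hot encoding a [None, 1] label tensor yields shape [None, 1, 4]. A minimal TF1-style check of that shape fix, assuming the same TensorFlow 1.x API as the snippet:

import numpy as np
import tensorflow as tf   # assumes TensorFlow 1.x, as above

Y = tf.placeholder(tf.int32, [None, 1])
Y_one_hot = tf.reshape(tf.one_hot(Y, 4), [-1, 4])   # [None,1,4] -> [None,4]

with tf.Session() as sess:
    out = sess.run(Y_one_hot, feed_dict={Y: np.array([[0], [2], [3]])})
    print(out)   # rows: [1,0,0,0], [0,0,1,0], [0,0,0,1]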
Please Check Project Tasks.')\n info('Details of tasks:')\n for task in self.project_tasks:\n if not task.name:\n raise AssertionError(\n 'Error in data. Please Check Project Tasks Name.')\n info('Details of : %s' % task.name)\n if not task.project_id:\n raise AssertionError(\n 'Error in data. Please Check Project Tasks Project.')\n info(' Project : %s' % task.project_id.name)\n if not task.sprint_id:\n raise AssertionError(\n 'Error in data. Please Check Project Tasks Sprint.')\n info(' Sprint : %s' % task.sprint_id.name)\n info(' Assigned to : %s' % task.user_id.name)\n info(' Company : %s' % task.company_id.name)\n if not task.start_date and task.end_date:\n raise AssertionError(\n 'Error in data. Please Check Project Tasks Date.')\n info(' Date : %s - %s' % (task.start_date, task.end_date))\n info(' Actual End Date : %s' % task.actual_end_date)\n info(' Deadline : %s' % task.date_deadline)\n info(' Reference : %s' % task.task_seq)\n info(' Story : %s' % task.story_id.name)\n info(' Velocity : %d' % task.velocity)\n info(' Release Planning : %s' % task.release_planning_id.name)\n info(' Priority : %s' % task.priority)\n info(' Description : %s' % task.description)\n","repo_name":"flectra-hq/flectra","sub_path":"addons/project_scrum/tests/test_scrum_task.py","file_name":"test_scrum_task.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"36869233152","text":"\n# -*- coding: utf-8 -*-\n# import module snippets\nimport hashlib\nimport json\nimport io\nimport os\nimport urllib\nimport boxsdk\nfrom .client import Client\n\n\nclass File(Client):\n\n def info(self, file_id: str):\n url = self.client.get_url(\"files\", file_id)\n try:\n response = self.client.make_request(\n method='GET',\n url=url\n ).json()\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def download(\n self, file_id: str, version_id: str = None,\n offset: int = None, length: int = None,\n output_path: str = None\n ):\n byte_range = None\n file_version = None\n\n if offset is not None and length is not None:\n byte_range = (offset, length)\n if version_id is not None:\n file_version = self.client.file_version(version_id)\n\n try:\n box_file = self.client.file(file_id=file_id)\n except boxsdk.exception.BoxAPIException as e:\n raise e\n try:\n if output_path is not None:\n box_file = box_file.get()\n file_path = os.path.join(output_path, box_file.name)\n writeable_stream = open(file_path, \"wb\")\n box_file.download_to(\n writeable_stream,\n file_version=file_version,\n byte_range=byte_range\n )\n writeable_stream.close()\n else:\n return box_file.content(\n file_version=file_version,\n byte_range=byte_range\n )\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def upload(\n self, folder_id: str, stream: io.BytesIO,\n name: str, overwrite: bool = False\n ):\n stream.seek(0)\n try:\n box_file = self.client.folder(folder_id).upload_stream(stream, name)\n return box_file\n except boxsdk.exception.BoxAPIException as e:\n if e.code == \"item_name_in_use\" and overwrite:\n file = self.client.file(e.context_info['conflicts']['id'])\n upload_file = file.update_contents_with_stream(stream)\n return upload_file\n else:\n raise e\n\n def preflight(self, name: str, parent_id: str, size: int):\n url = self.client.get_url(\"files\", \"content\")\n data = json.dumps({\n \"name\": name,\n \"parent\": {\n \"id\": parent_id\n },\n \"size\": size\n })\n try:\n response = self.client.make_request(\n 
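A precedence note on the scrum test above: if not task.start_date and task.end_date: parses as (not task.start_date) and task.end_date, so the assertion fires only when the start date is missing while the end date is set; guarding both fields needs not (task.start_date and task.end_date). A two-line illustration:

start_date, end_date = None, None
print(not start_date and end_date)     # None -> falsy, the guard does NOT fire
print(not (start_date and end_date))   # True -> fires whenever either is missing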
method='OPTIONS',\n url=url,\n data=data\n ).json()\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n\n def multipart_upload(self, name: str, parent_id: str, size: int, data: io.BytesIO):\n try:\n folder = self.client.folder(parent_id)\n response = folder.upload_stream(\n file_stream = data,\n file_name = name,\n preflight_check=True,\n preflight_expected_size=size,\n upload_using_accelerator=True\n )\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def copy(\n self, file_id: str, parent_id: str,\n name: str = None, version_id: str = None\n ):\n try:\n file_to_copy = self.client.file(file_id)\n destination_folder = self.client.folder(parent_id)\n file_copy = file_to_copy.copy(destination_folder, name=name)\n response = file_copy.response_object\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def lock(\n self, file_id: str,\n expires_at: str = None,\n is_download_prevented: bool = None\n ):\n url = self.client.get_url(\"files\", file_id)\n lock = {\n \"type\": \"lock\"\n }\n if expires_at is not None:\n lock[\"expires_at\"] = expires_at\n if is_download_prevented is not None:\n lock[\"is_download_prevented\"] = is_download_prevented\n data = json.dumps({\n \"lock\": lock\n })\n try:\n response = self.client.make_request(\n method='PUT',\n url=url,\n data=data\n ).json()\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def unlock(self, file_id: str):\n url = self.client.get_url(\"files\", file_id)\n data = json.dumps({\"lock\": None})\n try:\n response = self.client.make_request(\n method='PUT',\n url=url,\n data=data\n ).json()\n return response\n except boxsdk.exception.BoxAPIException as e:\n raise e\n\n def uploader(self, stream, length, name, folder_id, overwrite: bool = True):\n exists = False\n file_id = None\n try:\n preflight = self.preflight(\n name=name, parent_id=folder_id, size=length\n )\n except boxsdk.exception.BoxAPIException as e:\n if e.code == \"item_name_in_use\":\n if not overwrite:\n raise e\n exists = True\n file_id = e.context_info['conflicts']['id']\n else:\n raise e\n try:\n if length <= 20000000:\n uploaded_file = self.upload(\n folder_id=folder_id, stream=io.BytesIO(stream.read()),\n name=name, overwrite=overwrite\n )\n return uploaded_file.id, uploaded_file.name\n # Chunk upload\n session = None\n if exists:\n session = self.client.file(\n file_id=file_id\n ).create_upload_session(length)\n else:\n session = self.client.folder(\n folder_id=folder_id\n ).create_upload_session(file_size=length, file_name=name)\n parts = []\n sha1 = hashlib.sha1()\n for part_index in range(session.total_parts):\n copied_length = 0\n chunk = b''\n while copied_length < session.part_size:\n buffer = stream.read(session.part_size - copied_length)\n if buffer is None:\n continue\n if len(buffer) == 0:\n break\n chunk += buffer\n copied_length += len(buffer)\n uploaded_part = session.upload_part_bytes(\n chunk, part_index*session.part_size, length)\n parts.append(uploaded_part)\n updated_sha1 = sha1.update(chunk)\n content_sha1 = sha1.digest()\n uploaded_file = session.commit(\n content_sha1=content_sha1, parts=parts)\n return uploaded_file.id, uploaded_file.name\n except Exception as e:\n raise e\n","repo_name":"cloudnative-co/python-box-sdk","sub_path":"Box/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":7266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
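The uploader method above picks a single-request upload below 20 MB and a chunked session above it, feeding every chunk through one running SHA-1 for the final commit (note that sha1.update() returns None, so the updated_sha1 assignment is a no-op kept only for its side effect). A hedged usage sketch; how this module's Client is constructed and the folder id are illustrative assumptions:

import io

box = File(...)                      # however this module's Client is normally built
payload = b"x" * (30 * 1024 * 1024)  # 30 MB forces the chunked-session path

file_id, name = box.uploader(
    stream=io.BytesIO(payload),
    length=len(payload),
    name="big-report.bin",
    folder_id="0",                   # "0" is the Box root folder
    overwrite=True,
)
print(file_id, name)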
+{"seq_id":"19824338676","text":"import requests\nfrom datetime import datetime,timedelta\nimport os\n\nclass Task1():\n \"\"\"\n Задача №1\n Кто самый умный супергерой?\n Есть API по информации о супергероях с информацией по всем супергероям. Нужно определить кто самый умный(intelligence)\n из трех супергероев- Hulk, Captain America, Thanos.\n \"\"\"\n def __init__(self, url='https://akabab.github.io/superhero-api/api/all.json'):\n self.url = url\n self.full_statistic = None\n def refresh_hero_statistic(self):\n r = requests.get(self.url)\n if r.status_code==200:\n self.full_statistic = r.json()\n return r.status_code\n def get_most_intelligence(self, heroes=['Hulk', 'Captain America', 'Thanos']):\n out = []\n if self.full_statistic is None:\n self.refresh_hero_statistic()\n for hero in self.full_statistic:\n if hero['name'] in heroes:\n out.append((hero['name'],hero['powerstats'].get('intelligence')))\n return sorted(out, key=lambda hero_name: hero_name[1], reverse=True)[0][0]\nclass YaUploader:\n \"\"\"\n Задача №2\n У Яндекс.Диска есть очень удобное и простое API. Для описания всех его методов существует Полигон.\n Нужно написать программу, которая принимает на вход путь до файла на компьютере и сохраняет на Яндекс.Диск\n с таким же именем.\n\n Все ответы приходят в формате json;\n Загрузка файла по ссылке происходит с помощью метода put и передачи туда данных;\n Токен можно получить кликнув на полигоне на кнопку \"Получить OAuth-токен\".\n HOST: https://cloud-api.yandex.net:443\n\n Важно: Токен публиковать в github не нужно, переменную для токена нужно оставить пустой!\n\n Шаблон для программы\n class YaUploader:\n def __init__(self, token: str):\n self.token = token\n\n def upload(self, file_path: str):\n '''Метод загружает файлы по списку file_list на яндекс диск'''\n # Тут ваша логика\n # Функция может ничего не возвращать\n\n\n if __name__ == '__main__':\n # Получить путь к загружаемому файлу и токен от пользователя\n path_to_file = ...\n token = ...\n uploader = YaUploader(token)\n result = uploader.upload(path_to_file)\n \"\"\"\n\n base_url = 'https://cloud-api.yandex.net/v1/disk'\n def __init__(self, token: str):\n self.session = requests.session()\n self.session.headers = {'Accept': 'application/json',\n 'Authorization': 'OAuth '+ token}\n\n def get_files(self,path=''):\n r = self.session.get(f'{self.base_url}/resources/files')\n print(r.status_code)\n print(r.text)\n\n\n def upload(self, file_path: str):\n file_url = self.session.get(f'{self.base_url}/resources/upload?path={file_path}&overwrite=true').json()\n with open(file_path,'rb') as file:\n restp = self.session.post(file_url['href'], files={'file': file})\n return restp.status_code\n\nclass Task3:\n '''\n *Задача №3(необязательная)\n Самый важный сайт для программистов это stackoverflow. И у него тоже есть API Нужно написать программу,\n которая выводит все вопросы за последние два дня и содержит тэг 'Python'. 
Для этого задания токен не требуется.\n '''\n def __init__(self):\n pass\n def date_ofset(self,time_delta=2):\n dt = datetime(year=datetime.today().year,month=datetime.today().month, day=datetime.today().day,\n hour=3,minute=0,second=0) - timedelta(days=time_delta)\n return int(dt.timestamp())\n\n def get_questions(self,tags='python',time_delta=0):\n url = f\"https://api.stackexchange.com/2.3/search?fromdate={self.date_ofset(time_delta)}\" \\\n f\"&order=desc&sort=activity&tagged={tags}&site=stackoverflow\"\n resp = requests.get(url)\n if resp.status_code==200:\n return resp.json()['items']\n return []\n\n\nif __name__==\"__main__\":\n\n task1 = Task1()\n print(task1.get_most_intelligence())\n\n # Task#2\n path_to_file = \"homework.py\"\n token = os.getenv('token')\n uploader = YaUploader(token)\n uploader.upload(path_to_file)\n\n #Task3\n task3 = Task3()\n for question in task3.get_questions(tags='python', time_delta=3):\n print(question['question_id'], question['title'])\n\n\n\n\n\n\n","repo_name":"KostyaKovalenko2007/netology","sub_path":"9.http.requests/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35796353479","text":"import time\nimport sys\nimport freshroastsr700\nimport logging\nimport Pyro4\n\n\n@Pyro4.expose\nclass Roaster(object):\n def __init__(self):\n \"\"\"Creates a freshroastsr700 object passing in methods included in this\n class.\"\"\"\n self.roaster = freshroastsr700.freshroastsr700(\n self.update_data, self.next_state, thermostat=True)\n\n def update_data(self):\n \"\"\"This is a method that will be called every time a packet is opened\n from the roaster.\"\"\"\n cur_state = self.roaster.get_roaster_state()\n print(\"Current Temperature:\", self.roaster.current_temp, cur_state)\n\n def next_state(self):\n \"\"\"This is a method that will be called when the time remaining ends.\n The current state can be: roasting, cooling, idle, sleeping, connecting,\n or unkown.\"\"\"\n if(self.roaster.get_roaster_state() == 'roasting'):\n self.roaster.time_remaining = 20\n self.roaster.cool()\n elif(self.roaster.get_roaster_state() == 'cooling'):\n self.roaster.idle()\n\n def run_roast(self):\n if(self.roaster.get_roaster_state() == 'idle'):\n self.roaster.roast()\n\n def set_fan_speed(self, speed):\n new_speed = int(speed)\n self.roaster.fan_speed = new_speed\n\n def set_temperature(self, temperature):\n new_temperature = int(temperature)\n if new_temperature < 150:\n self.roaster.cool()\n else:\n self.roaster.target_temp = new_temperature\n\n def set_time(self, time):\n new_time = int(time)\n self.roaster.time_remaining = new_time\n\n def output_current_state(self):\n cur_state = self.roaster.get_roaster_state()\n cur_temp = str(self.roaster.current_temp)\n ret_state = cur_temp + cur_state\n return ret_state\n\n\nif __name__ == '__main__':\n # Create a roaster object.\n r = Roaster()\n \n # Set logging\n #logging.basicConfig(filename=\"RoastControl_debug_log.log\",level=logging.DEBUG)\n \n # Conenct to the roaster.\n r.roaster.auto_connect()\n \n # Wait for the roaster to be connected.\n while(r.roaster.connected is False):\n print(\"Please connect your roaster...\")\n time.sleep(1)\n \n daemon = Pyro4.Daemon() # make a Pyro daemon\n ns = Pyro4.locateNS()\n uri = daemon.register(r)\n \n print(\"Ready. 
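Task 3 above fetches a single page of results; the Stack Exchange API caps items per response and signals continuation with has_more. A hedged pagination sketch against the same public endpoint (no token needed, though the unauthenticated quota is small):

import requests

def all_questions(fromdate, tags="python"):
    # Follow has_more until the API stops returning pages (quota permitting)
    page, items = 1, []
    while True:
        resp = requests.get(
            "https://api.stackexchange.com/2.3/search",
            params={"fromdate": fromdate, "order": "desc", "sort": "activity",
                    "tagged": tags, "site": "stackoverflow", "page": page},
        )
        data = resp.json()
        items.extend(data.get("items", []))
        if not data.get("has_more"):
            return items
        page += 1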
Object uri =\", uri) # print the uri so we can use it in the client later\n ns.register(\"roaster.sr700\", uri)\n daemon.requestLoop() # start the event loop of the server to wait for calls\n","repo_name":"infinigrove/SR700-Artisan-PDServer","sub_path":"SAPDServer.py","file_name":"SAPDServer.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"23812037783","text":"import matplotlib.pyplot as plt\nimport wordcloud \n\n\ndef word_cloud(word_list,title=\"wordcloud\"):\n\n word_string = \" \".join(word_list)\n word_cloud = wordcloud.WordCloud(font_path='src/SourceHanSans-Regular.ttc').generate(word_string)\n\n plt.figure(figsize=(20,10))\n plt.imshow(word_cloud)\n plt.savefig(title)\n\n\n\n\n\n\n\n\n","repo_name":"sunnyyang1576/Impact-of-Government","sub_path":"data scrapping/财政部政策信息抓取/src/nlp_tools.py","file_name":"nlp_tools.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37827615596","text":"import os\nimport logging\nimport bb\nimport urllib\nfrom bb import data\nfrom bb.fetch2 import FetchMethod\nfrom bb.fetch2 import FetchError\nfrom bb.fetch2 import logger\nfrom bb.fetch2 import runfetchcmd\n\nclass Wget(FetchMethod):\n \"\"\"Class to fetch urls via 'wget'\"\"\"\n def supports(self, url, ud, d):\n \"\"\"\n Check to see if a given url can be fetched with wget.\n \"\"\"\n return ud.type in ['http', 'https', 'ftp']\n\n def recommends_checksum(self, urldata):\n return True\n\n def urldata_init(self, ud, d):\n if 'protocol' in ud.parm:\n if ud.parm['protocol'] == 'git':\n raise bb.fetch2.ParameterError(\"Invalid protocol - if you wish to fetch from a git repository using http, you need to instead use the git:// prefix with protocol=http\", ud.url)\n\n if 'downloadfilename' in ud.parm:\n ud.basename = ud.parm['downloadfilename']\n else:\n ud.basename = os.path.basename(ud.path)\n\n ud.localfile = data.expand(urllib.unquote(ud.basename), d)\n\n def download(self, uri, ud, d, checkonly = False):\n \"\"\"Fetch urls\"\"\"\n\n basecmd = d.getVar(\"FETCHCMD_wget\", True) or \"/usr/bin/env wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate\"\n\n if not checkonly and 'downloadfilename' in ud.parm:\n dldir = d.getVar(\"DL_DIR\", True)\n bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))\n basecmd += \" -O \" + dldir + os.sep + ud.localfile\n\n if checkonly:\n fetchcmd = d.getVar(\"CHECKCOMMAND_wget\", True) or d.expand(basecmd + \" --spider '${URI}'\")\n elif os.path.exists(ud.localpath):\n # file exists, but we didnt complete it.. 
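The roaster server above registers its object as "roaster.sr700" with the Pyro4 name server, so a client needs no URI, only the name. A minimal client sketch using the methods the exposed Roaster class provides:

import Pyro4

roaster = Pyro4.Proxy("PYRONAME:roaster.sr700")  # resolved via the name server

roaster.set_temperature(300)
roaster.set_fan_speed(5)
roaster.set_time(180)
roaster.run_roast()
print(roaster.output_current_state())            # e.g. "310roasting"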
trying again..\n fetchcmd = d.getVar(\"RESUMECOMMAND_wget\", True) or d.expand(basecmd + \" -c -P ${DL_DIR} '${URI}'\")\n else:\n fetchcmd = d.getVar(\"FETCHCOMMAND_wget\", True) or d.expand(basecmd + \" -P ${DL_DIR} '${URI}'\")\n\n uri = uri.split(\";\")[0]\n\n fetchcmd = fetchcmd.replace(\"${URI}\", uri.split(\";\")[0])\n fetchcmd = fetchcmd.replace(\"${FILE}\", ud.basename)\n if not checkonly:\n logger.info(\"fetch \" + uri)\n logger.debug(2, \"executing \" + fetchcmd)\n bb.fetch2.check_network_access(d, fetchcmd)\n runfetchcmd(fetchcmd, d, quiet=checkonly)\n\n # Sanity check since wget can pretend it succeed when it didn't\n # Also, this used to happen if sourceforge sent us to the mirror page\n if not os.path.exists(ud.localpath) and not checkonly:\n raise FetchError(\"The fetch command returned success for url %s but %s doesn't exist?!\" % (uri, ud.localpath), uri)\n\n return True\n\n def checkstatus(self, uri, ud, d):\n return self.download(uri, ud, d, True)\n","repo_name":"ipTronix/altera-opencv","sub_path":"bitbake/lib/bb/fetch2/wget.py","file_name":"wget.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"19497412211","text":"# -*- coding: utf-8 -*-\n\nimport codecs, sys, re\n\n#HTMLからページのタイトルを抜き出す\ndef getTitle(src):\n title=re.search('.*',src)\n return title\n\n#ニコニコ大百科の単語記事ページのタイトルから単語と読みを取るなければNone\ndef getTango(title):\n if u'とは' in title:\n tango = title.split(u'とは')\n tango[0]=tango[0].replace(u\"\",u\"\")\n tango[1]=tango[1].replace(u\" (\",u\"\")\n return tango\n else:\n return None\n\ndef makeDic(num):\n lines = [line.strip() for line in codecs.open('newtango/{}.html'.format(num), 'r', 'utf-8')]\n for line in lines:\n title=getTitle(line)\n if title:\n tango=getTango(title.group())\n break\n if tango == None:\n return \"\"\n else:\n return \"{}\\t{}\\n\".format(tango[0], tango[1])\n\nif __name__ == '__main__':\n\n num=1\n end_num=500\n dic=\"\"\n\n while num <= end_num:\n dic = dic + makeDic(num)\n num += 1\n\n fp = open(\"newWord.txt\", \"w\")\n fp.write(dic)\n fp.close()\n","repo_name":"pannda0330/addDictionary","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14651218574","text":"'''\n Author: Jordan Madden\n Usage: python train_recognizer.py\n'''\n\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\nfrom imutils import paths\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nimport os\n\n# Declare relevant constants(filepaths etc)\nDATASET = \"dataset\"\nDETECTOR = \"face_detection_model\"\nEMBEDDER = \"openface_nn4.small2.v1.t7\"\nRECOGNIZER = \"output/recognizer.pickle\"\nEMBEDDINGS = \"output/embeddings.pickle\"\nLE = \"output/le.pickle\"\nCONFIDENCE = 0.5\n\n# Load our serialized face detector from disk\nprint(\"[INFO] loading face detector...\")\nprotoPath = os.path.sep.join([DETECTOR, \"deploy.prototxt\"])\nmodelPath = os.path.sep.join([DETECTOR,\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\ndetector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n# Load our serialized face embedding model from disk\nprint(\"[INFO] loading face recognizer...\")\nembedder = cv2.dnn.readNetFromTorch(EMBEDDER)\n\n# Get the paths to the input 
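The Nicopedia helpers above split a page title on 'とは' and strip the '<title>' tag and the ' (' prefix of the reading; the exact title layout is assumed from the regex rather than from a live page. A quick check of the same parsing on a synthetic title line:

import re

html_line = "<title>初音ミクとは (ハツネミクとは) [単語記事]</title>"
title = re.search('<title>.*</title>', html_line).group()

parts = title.split('とは')             # mirrors getTango above
word = parts[0].replace('<title>', '')
reading = parts[1].replace(' (', '')
print("{}\t{}".format(word, reading))   # 初音ミク <TAB> ハツネミク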
images in our dataset\nprint(\"[INFO] quantifying faces...\")\nimagePaths = list(paths.list_images(DATASET))\n\n# Create the lists for the face embeddings and corresponding names\nknownEmbeddings = []\nknownNames = []\n\n# Initialize the total number of faces processed\ntotal = 0\n\nfor (i, imagePath) in enumerate(imagePaths):\n\t# Extract the person name from the image path\n\tprint(\"[INFO] processing image {}/{}\".format(i + 1,\n\t\tlen(imagePaths)))\n\tname = imagePath.split(os.path.sep)[-2]\n\n\t# Load and preprocess the image, then grab the image dimensions\n\timage = cv2.imread(imagePath)\n\timage = imutils.resize(image, width=600)\n\t(h, w) = image.shape[:2]\n\n\t# Construct a blob from the image and detect faces in the image\n\timageBlob = cv2.dnn.blobFromImage(\n\t\tcv2.resize(image, (300, 300)), 1.0, (300, 300),\n\t\t(104.0, 177.0, 123.0), swapRB=False, crop=False)\n\tdetector.setInput(imageBlob)\n\tdetections = detector.forward()\n\n\tif len(detections) > 0:\n\t\t# Find the confidence of each detection\n\t\ti = np.argmax(detections[0, 0, :, 2])\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# Filter out weak detections\n\t\tif confidence > CONFIDENCE:\n\t\t\t# Compute the bounding box coordinates of the face\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# Extract the face ROI and grab the ROI dimensions\n\t\t\tface = image[startY:endY, startX:endX]\n\t\t\t(fH, fW) = face.shape[:2]\n\n\t\t\tif fW < 20 or fH < 20:\n\t\t\t\tcontinue\n\n\t\t\t# Preprocess the face, then extract the face embeddings\n\t\t\tfaceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,\n\t\t\t\t(96, 96), (0, 0, 0), swapRB=True, crop=False)\n\t\t\tembedder.setInput(faceBlob)\n\t\t\tvec = embedder.forward()\n\n\t\t\t# Add the name of the person + corresponding face\n\t\t\t# embedding to their respective lists\n\t\t\tknownNames.append(name)\n\t\t\tknownEmbeddings.append(vec.flatten())\n\t\t\ttotal += 1\n\n# Dump the facial embeddings + names to disk\nprint(\"[INFO] serializing {} encodings...\".format(total))\ndata = {\"embeddings\": knownEmbeddings, \"names\": knownNames}\nf = open(EMBEDDINGS, \"wb\")\nf.write(pickle.dumps(data))\nf.close()\n\n# encode the labels\nprint(\"[INFO] encoding labels...\", end=\"\")\nle = LabelEncoder()\nlabels = le.fit_transform(data[\"names\"])\nprint(\"DONE\")\n\n# Split the data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(data[\"embeddings\"], labels, \n test_size=0.20,random_state=109)\n\n# Train the model used to actually recognize the faces\nprint(\"[INFO] training model...\", end=\"\")\nrecognizer = SVC(C=1.0, kernel=\"linear\", probability=True)\nrecognizer.fit(X_train, y_train)\nprint(\"DONE\")\n\n# Make prediction on test dataset and determine model metrics\ny_pred = recognizer.predict(X_test)\nacc = accuracy_score(y_test, y_pred)\nprec = precision_score(y_test, y_pred, average='weighted', zero_division=1)\nrecall = recall_score(y_test, y_pred, average='weighted')\nprint(\"Accuracy: {:.2f}\".format(acc))\nprint(\"Precision: {:.2f}\".format(prec))\nprint(\"Recall: {:.2f}\".format(recall))\n\n# Write the actual face recognition model to disk\nf = open(RECOGNIZER, \"wb\")\nf.write(pickle.dumps(recognizer))\nf.close()\n\n# Write the label encoder to disk\nf = open(LE, 
\"wb\")\nf.write(pickle.dumps(le))\nf.close()","repo_name":"neddamj/AINSB","sub_path":"src/face_recognition/custom-recognizer/train_recognizer.py","file_name":"train_recognizer.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71657106452","text":"'''\nFile : copyfileS3\nDescription : to copy files from one s3 bucket to another s3 bucket\nCreator : Imanpreet Singh \n\nVersion Date Author Description\n1 26-01-2022 Imanpreet Singh\n\n'''\n\nimport boto3 \nimport os \nimport sys\nimport time\nfrom logging import exception\nfrom datetime import datetime\n\ndef CopyFileS3(sourcebucket, targetbucket, sourcefilename, targetfilename, logfile, **s3params):\n\n #-----------------------------------------------------------\n #logging\n #-----------------------------------------------------------\n if logfile == '' :\n msg = \"logfile is mandatory, exiting\"\n print (msg)\n sys.exit(1)\n \n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj = open(logfile, \"a\")\n \n msg = \"CopyFileS3 process started\"\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print(msg)\n\n\n #-----------------------------------------------------------\n #input parameters check and variable declration\n #-----------------------------------------------------------\n \n msg = ''\n if len(s3params) == 0:\n msg = \"s3 params are not provided, exiting\"\n elif sourcebucket == '' :\n msg = \"sourcebucket is mandatory, exiting\"\n elif targetbucket == '' :\n msg = \"targetbucket is mandatory, exiting\" \n elif sourcefilename == '' :\n msg = \"sourcefilename is mandatory, exiting\" \n elif targetfilename == '' :\n msg = \"targetfilename is mandatory, exiting\"\n\n if len(msg) != 0:\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n sys.exit(1)\n\n\n #-----------------------------------------------------------\n #S3 Client\n #-----------------------------------------------------------\n s3client = boto3.client('s3',**s3params) \n try:\n s3client = boto3.client('s3',**s3params) \n except:\n msg = \"Issue in creating S3 client\"\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n sys.exit(1)\n\n\n #-----------------------------------------------------------\n #check bucket existence\n #-----------------------------------------------------------\n\n try:\n s3client.head_bucket(Bucket =sourcebucket)\n msg = \"{} bucket exist\".format(sourcebucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n #print (msg)\n except:\n msg = \"{} bucket doesn't exists, exiting\".format(sourcebucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n sys.exit(1)\n\n try:\n s3client.head_bucket(Bucket =targetbucket)\n msg = \"{} bucket exist\".format(targetbucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n #print (msg)\n except:\n msg = \"{} bucket doesn't exists, exiting\".format(targetbucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n sys.exit(1)\n\n #-----------------------------------------------------------\n #check file existence\n 
#-----------------------------------------------------------\n \n try:\n s3client.head_object(Bucket= sourcebucket, Key = sourcefilename)\n msg = \"{} file exists in bucket {}\".format(sourcebucket, sourcefilename)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n #print (msg)\n except:\n msg = \"{} file doesn't exist in bucket {}, nothing to copy, exiting\".format(sourcefilename,sourcebucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n return(0)\n\n try:\n s3client.head_object(Bucket= targetbucket, Key = targetfilename)\n msg = \"{} file already exists in bucket {}, nothing to copy, exiting\".format(targetfilename,targetbucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n return(0)\n\n except:\n msg = \"{} file doesn't exist in bucket {}, copying file\".format(targetfilename,targetbucket)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n\n #-----------------------------------------------------------\n #S3 resoruce\n #-----------------------------------------------------------\n \n awssession = boto3.session.Session(**s3params)\n s3resource = awssession.resource('s3')\n bucket = s3resource.Bucket(targetbucket)\n\n\n #-----------------------------------------------------------\n #S3 copy\n #-----------------------------------------------------------\n\n try:\n copysource = {'Bucket': sourcebucket, 'Key': sourcefilename}\n bucket.copy(copysource,targetfilename)\n time.sleep(3)\n msg = \"File {} copied from bucket - {} to bucket - {} as {}\".format(sourcefilename,sourcebucket,targetbucket, targetfilename)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n #print (msg)\n return(1)\n except:\n msg = \"File {} copy from bucket - {} to bucket - {} as {} failed\".format(sourcefilename,sourcebucket,targetbucket, targetfilename)\n currenttime = datetime.now().strftime('%d%m%Y_%H%M%S')\n logfileobj.write(\"\\n{}: {}\".format(currenttime,msg))\n print (msg)\n sys.exit(1)\n ","repo_name":"IMANPREETSINGH/Data_Engineering","sub_path":"Int/awsS3/copyfileS3.py","file_name":"copyfileS3.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42169827044","text":"import bittensor\n\nfrom config import Config\nfrom metagraph import Metagraph\nfrom dendrite import Dendrite\nfrom nucleus import Nucleus\nfrom neuron import Neuron\n\nfrom Crypto.Hash import SHA256\nfrom datetime import timedelta\nimport grpc\nfrom loguru import logger\nimport pickle\nimport numpy as np\nimport random\nimport time\nfrom timeloop import Timeloop\n\n\ndef set_timed_loops(tl, config, neuron, metagraph):\n\n # Test self.\n # @tl.job(interval=timedelta(seconds=1))\n # def test():\n # channel = grpc.insecure_channel(config.serve_address + \":\" + config.port)\n #\n # for _ in range(100):\n # # Inc message id.\n # message_id = random.randint(0, 1000000)\n #\n # # Make request.\n # spikes = np.array([['apples']])\n # stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)\n #\n # time_str = str(time.time())\n # # Build hash.\n # hash = SHA256.new()\n # hash.update(config.identity.encode())\n # hash.update(spikes.tobytes())\n # 
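Nearly every branch of CopyFileS3 above repeats the same three lines: format a timestamp, write to the log, maybe print and exit. A small helper would shrink the function considerably and keep the format in one place; a sketch with an illustrative name. Note also the unguarded s3client = boto3.client('s3', **s3params) immediately before its own try block, which makes that except clause unreachable for the first call:

import sys
from datetime import datetime

def log(logfileobj, msg, fatal=False, echo=False):
    # Single home for the '%d%m%Y_%H%M%S' stamp used throughout CopyFileS3
    stamp = datetime.now().strftime('%d%m%Y_%H%M%S')
    logfileobj.write("\n{}: {}".format(stamp, msg))
    if echo or fatal:
        print(msg)
    if fatal:
        sys.exit(1)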
hash.update(time_str.encode())\n # message_hash = hash.digest()\n #\n # # Build request.\n # request = bittensor.proto.bittensor_pb2.SpikeRequest()\n # request.parent_id = config.identity\n # request.message_id = message_hash\n # request.payload = pickle.dumps(spikes, protocol=0)\n #\n # # Send Spike.\n # try:\n # response = stub.Spike(request)\n # response = pickle.loads(response.payload).reshape(1, 128)\n #\n # except Exception as e:\n # logger.error(str(e))\n #\n # # Make grad request.\n # grad = np.zeros((1, 128))\n # stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)\n #\n # # Build hash.\n # hash = SHA256.new()\n # hash.update(config.identity.encode())\n # hash.update(spikes.tobytes())\n # hash.update(time_str.encode())\n # message_hash = hash.digest()\n #\n # request = bittensor.proto.bittensor_pb2.GradeRequest()\n # request.parent_id = config.identity\n # request.message_id = message_hash\n # request.payload = pickle.dumps(grad, protocol=0)\n #\n # # Send grade request.\n # try:\n # stub.Grade(request)\n # except Exception as e:\n # logger.error(str(e))\n\n # Pull the updated graph state (Vertices, Edges, Weights)\n @tl.job(interval=timedelta(seconds=7))\n def pull_metagraph():\n metagraph.pull_metagraph()\n\n # Reselect channels.\n @tl.job(interval=timedelta(seconds=10))\n def connect():\n neuron.connect()\n\n # Apply a gradient step.\n @tl.job(interval=timedelta(seconds=3))\n def learn():\n neuron.Learn()\n\n\ndef main():\n\n config = Config()\n\n metagraph = Metagraph(config)\n\n dendrite = Dendrite(config, metagraph)\n\n nucleus = Nucleus(config)\n\n neuron = Neuron(config, dendrite, nucleus, metagraph)\n\n neuron.serve()\n\n # Start timed calls.\n tl = Timeloop()\n set_timed_loops(tl, config, neuron, metagraph)\n tl.start(block=False)\n logger.info('Started Timers.')\n\n def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):\n logger.debug('tear down.')\n del _neuron\n del _dendrite\n del _nucleus\n del _metagraph\n del _config\n\n try:\n logger.info('Begin wait on main...')\n while True:\n logger.debug('heartbeat')\n time.sleep(100)\n\n except KeyboardInterrupt:\n logger.debug('Neuron stopped with keyboard interrupt.')\n tear_down(config, neuron, dendrite, nucleus, metagraph)\n\n except Exception as e:\n logger.error('Neuron stopped with interrupt on error: ' + str(e))\n tear_down(config, neuron, dendrite, nucleus, metagraph)\n\n\nif __name__ == '__main__':\n logger.debug(\"started neuron.\")\n main()\n","repo_name":"unconst/BitTensor","sub_path":"neurons/boltzmann/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"37826308180","text":"\n\n# Upload the file \"Othello.zip\" and then unzip it\ntry:\n from google.colab import files\n from zipfile import ZipFile\n uploaded = files.upload()\n with ZipFile(\"Othello.zip\", 'r') as zip_file:\n zip_file.extractall()\nexcept Exception as e:\n pass\n\n# Import packages. 
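set_timed_loops above drives the metagraph pull, reconnects, and learning steps through Timeloop's decorator API; stripped of the BitTensor wiring, the pattern is just this (the interval and job body are arbitrary here):

import time
from datetime import timedelta
from timeloop import Timeloop

tl = Timeloop()

@tl.job(interval=timedelta(seconds=2))
def heartbeat():
    print("tick")      # stands in for pull_metagraph / connect / learn

tl.start(block=False)  # non-blocking start, exactly as in main() above
time.sleep(6)
tl.stop()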
Run this cell.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import deque\nimport random\nimport torch.optim as optim\nfrom Arena import Arena\nfrom OthelloGame import OthelloGame\nfrom OthelloPlayers import *\nimport math\nfrom tqdm.notebook import tqdm\nfrom random import shuffle\n\nclass PolicyNet(nn.Module):\n \"\"\"\n This class implements the policy network \n \"\"\"\n def __init__(self, game):\n super().__init__()\n \n # parameters\n self.board_x, self.board_y = game.getBoardSize()\n self.action_size = game.getActionSize()\n self.num_channels = 256 # number of channels for the Conv2d layer\n self.dropout = 0.3 # Dropout probability\n \n # convolutional layers\n self.conv1 = nn.Conv2d(1, self.num_channels, 3, stride=1, padding=1)\n self.conv2 = nn.Conv2d(self.num_channels, self.num_channels, 3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(self.num_channels, self.num_channels, 3, stride=1)\n\n self.bn1 = nn.BatchNorm2d(self.num_channels)\n self.bn2 = nn.BatchNorm2d(self.num_channels)\n self.bn3 = nn.BatchNorm2d(self.num_channels)\n\n self.fc1 = nn.Linear(self.num_channels*(self.board_x-2)*(self.board_y-2), 512)\n self.fc_bn1 = nn.BatchNorm1d(512)\n\n self.fc2 = nn.Linear(512, self.action_size)\n\n self.fc3 = nn.Linear(512, 1)\n\n def forward(self, s):\n \"\"\"\n Args:\n s: board configurtion, torch.Tensor with shape (batch_size, board_x, board_y)\n Returns:\n pi: log probability of actions in state s, torch.Tensor with shape (batch_size, action_size)\n v: value of state s, torch.Tensor with shape (batch_size, 1)\n \"\"\"\n s = s.view(-1, 1, self.board_x, self.board_y) # batch_size x 1 x board_x x board_y\n s = F.relu(self.bn1(self.conv1(s))) # batch_size x num_channels x board_x x board_y\n s = F.relu(self.bn2(self.conv2(s))) # batch_size x num_channels x board_x x board_y\n s = F.relu(self.bn3(self.conv3(s))) # batch_size x num_channels x (board_x-2) x (board_y-2)\n s = s.view(-1, self.num_channels*(self.board_x-2)*(self.board_y-2))\n\n s = F.dropout(F.relu(self.fc_bn1(self.fc1(s))), p=self.dropout, training=self.training) # batch_size x 512\n\n # log probability of actions in state s\n pi = F.log_softmax(self.fc2(s), dim=1) # batch_size x action_size\n # value of state s\n v = torch.tanh(self.fc3(s)) # batch_size x 1\n\n return pi, v\n\n# The following is a class to implement MCTS.\n# You can also write your own codes to implement MCTS,\n# but completing the following codes will be easier than starting from scratch.\nclass MCTS:\n \"\"\"\n This class handles the MCTS tree.\n \"\"\"\n def __init__(self, game, policy_net):\n self.game = game\n self.policy_net = policy_net\n \n self.num_MCTS_sims = 50 # number of simulations for MCTS for each action\n self.bonus_term_factor = 1.0\n \n self.Qsa = {} # stores Q values for s,a\n self.Nsa = {} # stores number of times edge s,a was visited\n self.Ns = {} # stores number of times board s was visited\n self.Ps = {} # stores initial policy (returned by policy network)\n\n self.Es = {} # stores game.getGameEnded for board s\n self.Vs = {} # stores game.getValidMoves for board s\n\n def getActionProb(self, canonicalBoard):\n \"\"\"\n This function performs num_MCTS_sims simulations of MCTS starting from\n canonicalBoard.\n \n Args:\n canonicalBoard: canonical board configuration, a 2D numpy array:\n 1=current player, -1=the opponent, 0=empty\n first dim is row , second is column\n Returns:\n probs: a list with len=action_size, which is a policy vector \n where the probability of the 
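The convolution stack above keeps the board size through two padded layers, then the unpadded third conv shrinks each side by 2, which is where the (board_x-2)*(board_y-2) flatten comes from. A shape check, assuming OthelloGame(8) constructs the 8x8 game as is usual in this repo (so the action size is 8*8+1 = 65):

import torch

net = PolicyNet(OthelloGame(8))   # assumption: n=8 board
net.eval()                        # BatchNorm needs eval mode for tiny batches
pi, v = net(torch.zeros(2, 8, 8))
print(pi.shape, v.shape)          # torch.Size([2, 65]) torch.Size([2, 1])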
ith action is proportional to Nsa[(s,a)]\n \"\"\"\n # Doing self.num_MCTS_sims times of simulations starting from the state 'canonicalBoard'\n for i in range(self.num_MCTS_sims):\n self.search(canonicalBoard)\n\n # Use string representation for the state\n s = self.game.stringRepresentation(canonicalBoard)\n \"\"\"\n Please complete the codes for calculating the updated policy vector 'probs' using 'self.Nsa'\n Some information you may need:\n self.Nsa[(s, a)] stores number of times edge s,a was visited.\n If (s,a) is not in self.Nsa, then s has not been visited.\n self.game.getActionSize() returns the number of actions, i.e., n*n+1.\n \"\"\"\n # You can uncomment the following codes and fill in the blanks\n ### BEGIN SOLUTION\n # YOUR CODE HERE\n counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.getActionSize())]\n counts = [x ** (1. / 1) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n ### END SOLUTION\n return probs\n\n def search(self, canonicalBoard):\n \"\"\"\n This function performs one simulation of MCTS. It is recursively called\n till a leaf node is found. The action chosen at each node is one that\n has the maximum upper confidence bound as in the paper.\n\n Once a leaf node is found, the neural network is called to return an\n initial policy P and a value v for the state. This value is propagated\n up the search path. In case the leaf node is a terminal state, the\n outcome is propagated up the search path. The values of Ns, Nsa, Qsa are\n updated.\n\n NOTE: the return values are the negative of the value of the current\n state. This is done since v is in [-1,1] and if v is the value of a\n state for the current player, then its value is -v for the other player.\n \n This is a recursive function.\n \n Args:\n canonicalBoard: canonical board configuration, a 2D numpy array:\n 1=current player, -1=the opponent, 0=empty\n first dim is row , second is column\n Returns:\n v: the negative of the value of the current canonicalBoard\n \"\"\"\n \n # Use string representation for the state\n s = self.game.stringRepresentation(canonicalBoard)\n \n # Update self.Es\n if s not in self.Es:\n self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)\n \n \n if self.Es[s] != 0: # The game ended, which means that s is a terminal node\n # If the current player won, then return -1 (The value for the other player).\n # Otherwise, return 1 (The value for the other player).\n return -self.Es[s]\n\n if s not in self.Ps: # There is no policy for the current state s, which means that s is a leaf node (a new state)\n \n # Set Q(s,a)=0 and N(s,a)=0 for all a\n for a in range(self.game.getActionSize()):\n self.Qsa[(s, a)] = 0\n self.Nsa[(s, a)] = 0\n \n # Calculate the output of the policy network, which are the policy and the value for state s\n board = torch.FloatTensor(canonicalBoard.astype(np.float64)).view(1, self.policy_net.board_x,\n self.policy_net.board_y)\n self.policy_net.eval()\n with torch.no_grad():\n pi, v = self.policy_net(board)\n self.Ps[s] = torch.exp(pi).data.cpu().numpy()[0] # The policy for state s\n v = v.data.cpu().numpy()[0][0] # The value of state s\n \n # Masking invalid moves\n valids = self.game.getValidMoves(canonicalBoard, 1)\n self.Ps[s] = self.Ps[s] * valids \n sum_Ps_s = np.sum(self.Ps[s])\n if sum_Ps_s > 0:\n self.Ps[s] /= sum_Ps_s # renormalize\n else:\n # if all valid moves were masked make all valid moves equally probable\n self.Ps[s] = self.Ps[s] + valids\n self.Ps[s] /= np.sum(self.Ps[s])\n \n 
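The selection and backup rules buried in search below are worth stating on their own: actions are scored with the PUCT bound u = Q(s,a) + h * P(s,a) * sqrt(N(s)) / (1 + N(s,a)), and after a simulation returns a value v the edge estimate is updated as a running mean. Extracted as pure functions:

import math

def puct(q, p, n_s, n_sa, h=1.0, eps=1e-8):
    # Upper confidence bound used to pick best_act in search();
    # eps keeps the exploration bonus non-zero before any visit
    if n_sa > 0:
        return q + h * p * math.sqrt(n_s) / (1 + n_sa)
    return h * p * math.sqrt(n_s + eps)

def backup_q(q, n_sa, v):
    # Running mean of Q(s,a) over n_sa + 1 simulated outcomes
    return (n_sa * q + v) / (n_sa + 1)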
self.Vs[s] = valids # Stores the valid moves\n self.Ns[s] = 0\n return -v\n \n # pick the action with the highest upper confidence bound (ucb) and assign it to best_act\n best_act = -1\n valids = self.Vs[s]\n cur_best = -float('inf')\n for a in range(self.game.getActionSize()):\n if valids[a]:\n \"\"\"\n Please complete the codes for picking the action with the highest UCB\n Some information you may need:\n self.Qsa[(s, a)] stores the Q value for s,a\n self.bonus_term_factor=1.0 is the factor \"h\" in the UCB (See Eq.(1) in the reference guide)\n self.Ps stores the policy returned by policy network\n self.Ps[s][a] is the probability corresponding to state s and action a\n self.Ns[s] stores the number of times board s was visited\n self.Nsa[(s, a)] stores number of times edge s,a was visited\n \"\"\"\n # You can uncomment the following codes and fill in the blanks\n ### BEGIN SOLUTION\n # YOUR CODE HERE\n if (s, a) in self.Qsa:\n u = self.Qsa[(s, a)] + self.bonus_term_factor * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (1 + self.Nsa[(s, a)])\n else:\n EPS = 1e-8\n u = self.bonus_term_factor * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS) # Q = 0 ?\n\n if u > cur_best:\n cur_best = u\n best_act = a\n ### END SOLUTION\n \n # Continue the simulation: take action best_act in the simulation\n a = best_act\n next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)\n next_s = self.game.getCanonicalForm(next_s, next_player)\n\n v = self.search(next_s) # This returns the value for the current player\n \n \"\"\"\n Please complete the codes for updating the Q function ('self.Qsa')\n and the number of times that (s,a) has been visited ('self.Nsa')\n Some information you may need:\n self.Qsa[(s, a)] stores the Q value for s,a\n self.Ns[s] stores the number of times board s was visited\n self.Nsa[(s, a)] stores number of times edge s,a was visited\n v is the value for the current player\n \"\"\"\n # You can uncomment the following codes and fill in the blanks\n ### BEGIN SOLUTION\n # YOUR CODE HERE\n if (s, a) in self.Qsa:\n self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)\n self.Nsa[(s, a)] += 1\n\n else:\n self.Qsa[(s, a)] = v\n self.Nsa[(s, a)] = 1\n ### END SOLUTION\n \n # Update the number of times that s has been visited\n self.Ns[s] += 1\n \n return -v\n\n# The following is a class to implement the whole learning process.\n# You can also write your own codes,\n# but completing the following codes will be easier than starting from scratch.\nclass Coach():\n \"\"\"\n This class executes the self-play + learning.\n \"\"\"\n def __init__(self, game):\n self.game = game\n self.nnet = PolicyNet(game)\n self.pnet = PolicyNet(game) # the competitor network\n self.mcts = MCTS(game, self.nnet)\n self.epochs = 10 # number of training epochs for each iteration\n self.learning_rate = 0.001\n self.batch_size = 64 # batch size\n self.trainExamples = [] # historical examples for training\n self.numIters = 2 # number of iterations\n self.numEps = 20 # number of complete self-play games for one iteration.\n self.arenaCompare = 40 # number of games to play during arena play to determine if new net will be accepted.\n self.updateThreshold = 0.6 # During arena playoff, new neural net will be accepted if threshold or more of games are won.\n\n def train(self):\n \"\"\"\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. 
After every iteration, it retrains neural network with\n examples in trainExamples (which has a maximum length of maxlenofQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n \"\"\"\n for i in range(1, self.numIters + 1):\n print(f'Starting Iter #{i} ...')\n\n for _ in tqdm(range(self.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet) # reset search tree\n self.trainExamples.extend(self.executeEpisode()) # save the iteration examples to the history\n \n # shuffle examples before training \n shuffle(self.trainExamples)\n\n # training new network, keeping a copy of the old one\n self.pnet.load_state_dict(self.nnet.state_dict())\n\n optimizer = optim.Adam(self.nnet.parameters(), lr=self.learning_rate)\n\n for epoch in range(self.epochs):\n print('EPOCH ::: ' + str(epoch + 1))\n self.nnet.train()\n \n \"\"\"\n Please complete the training codes for self.nnet\n Some information you may need:\n self.trainExamples is a list that stores historical examples for training\n self.trainExamples[i] has the form (canonicalBoard, pi, v)\n The output of self.nnet include pi and v, where\n pi are the log probabilities of actions in state s;\n v is the value of state s.\n \"\"\"\n # You can uncomment the following codes and fill in the blanks\n ### BEGIN SOLUTION\n # YOUR CODE HERE\n batch_count = int(len(self.trainExamples) / self.batch_size)\n t = tqdm(range(batch_count), desc='Training Net')\n\n for _ in t:\n sample_ids = np.random.randint(len(self.trainExamples), size=self.batch_size)\n boards, pis, vs = list(zip(*[self.trainExamples[i] for i in sample_ids]))\n boards = torch.FloatTensor(np.array(boards).astype(np.float64))\n target_pis = torch.FloatTensor(np.array(pis))\n target_vs = torch.FloatTensor(np.array(vs).astype(np.float64))\n\n out_pi, out_v = self.nnet(boards)\n l_pi = -torch.sum(target_pis* out_pi)/ target_pis.size()[0] \n l_v = torch.sum((target_vs - out_v.view(-1)) ** 2) / target_vs.size()[0]\n total_loss = l_pi + l_v\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n \n ### END SOLUTION\n \n pmcts = MCTS(self.game, self.pnet)\n nmcts = MCTS(self.game, self.nnet)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x)),\n lambda x: np.argmax(nmcts.getActionProb(x)), self.game)\n pwins, nwins, draws = arena.playGames(self.arenaCompare)\n\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.updateThreshold:\n print('REJECTING NEW MODEL')\n self.nnet.load_state_dict(self.pnet.state_dict())\n else:\n print('ACCEPTING NEW MODEL')\n self.pnet.load_state_dict(self.nnet.state_dict())\n self.trainExamples = []\n \n def play(self, canonicalBoard):\n \"\"\"\n Args:\n canonicalBoard: canonical board configuration, a 2D numpy array:\n 1=current player, -1=the opponent, 0=empty\n first dim is row , second is column\n Returns:\n action: Putting a disc on row x and column y of the board corresponds to action=x*n+y. action=n*n means passing.\n (Row and column are counting from 0 to n-1.) \n \"\"\"\n mcts = MCTS(self.game, self.nnet)\n action = np.argmax(mcts.getActionProb(canonicalBoard))\n return action\n \n def executeEpisode(self):\n \"\"\"\n This function executes one episode of self-play, starting with player 1 (Black player).\n As the game is played, each turn is added as a training example to\n trainExamples. 
The game is played till the game ends. After the game\n ends, the outcome of the game is used to assign values to each example\n in trainExamples.\n\n Returns:\n trainExamples: a list of examples of the form (canonicalBoard, pi, v)\n pi is the MCTS informed policy vector, v is +1 if\n the player eventually won the game, -1 if the player lost the game, and otherwise 0.000001\n \"\"\"\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n \n # After 10 steps, we use the greedy action rather than a random action\n if episodeStep < 10:\n pi = self.mcts.getActionProb(canonicalBoard)\n else:\n pi = list(np.zeros((self.game.getActionSize(),)))\n pi[np.argmax(self.mcts.getActionProb(canonicalBoard))] = 1\n \n # Add symmetric samples\n sym = self.game.getSymmetries(canonicalBoard, pi)\n \n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n \n # Take action according to the policy pi\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0: # if the current episode of game ended\n trainExamples = [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]\n return trainExamples","repo_name":"jonathanNi98/Othello-Using-AlphaZero-Algorithm","sub_path":"Othello-Using-AlphaZero-Algorithm.py","file_name":"Othello-Using-AlphaZero-Algorithm.py","file_ext":"py","file_size_in_byte":19218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25972347050","text":"import torch\nfrom torch.utils.data import DataLoader, SequentialSampler\nimport torch.optim as optim\nfrom time import time\n\nfrom util.parser import parse_args\nfrom util.load_data import Data\nfrom util.eval_model import test_model\n\nfrom NGCF import NGCF\n\nif __name__ == '__main__':\n args = parse_args()\n args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(\"Using \" + str(args.device) + \" for computations\")\n\n train_file = args.data_path + '/' + args.dataset + '/' + args.train_file\n test_file = args.data_path + '/' + args.dataset + '/' + args.test_file\n file_path = args.data_path + '/' + args.dataset\n \n data = Data(file_path, train_file, test_file, args.batch_size)\n\n train_loader = DataLoader(\n data,\n batch_size = args.batch_size,\n sampler = SequentialSampler(data),\n num_workers = 8\n )\n\n test_loader = DataLoader(\n data,\n batch_size = args.batch_size,\n sampler = SequentialSampler(data),\n num_workers = 8\n )\n\n args.node_dropout = eval(args.node_dropout)\n args.message_dropout = eval(args.message_dropout)\n\n norm_adj = data.get_adj_mat()\n model = NGCF(data.n_users, data.n_items, norm_adj, args).to('cuda')\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n \n start_epoch = 0\n total_time = 0\n\n for epoch in range(start_epoch, args.epoch):\n t0_start = time()\n loss = 0\n\n for idx, (users, pos_items, neg_items) in enumerate(train_loader):\n u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings = model(users, pos_items, neg_items,\n drop_flag=args.node_dropout)\n\n batch_loss = model.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings)\n\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n\n loss += batch_loss\n\n t0_end = time()\n print('epoch {} : loss {} , time {}s'.format(epoch + 1, 
loss.item(), t0_end - t0_start))\n        total_time += t0_end-t0_start\n\n        if (epoch + 1) % 20 == 0:\n            data.set_mode(2)\n            ret = test_model(test_loader, data, model, args.batch_size ,eval(args.ks) ,drop_flag=False)\n            data.set_mode(1)\n            print(ret)\n\n    print(\"Total run time :\" + str(total_time))\n\n\n\n\n","repo_name":"Yujaeseo/NGCF_Pytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"8440207490","text":"\"\"\"\njacobian.py: Use Finite Difference and analytical methods to calculate Jacobian for evalf.py.\n\"\"\"\n\nimport numpy as np\nfrom evalf import evalf, P_max\n\n\ndef jacobian_fd(x, p, u, variable='x'):\n    epsilon = 0.0001\n    num_nodes = x.shape[0]\n\n    Jf = np.zeros((num_nodes, num_nodes))\n    for k in range(num_nodes):\n        e_k = np.zeros(num_nodes)\n        e_k[k] = epsilon\n        if variable == 'x':\n            Jf[:, k] = (evalf(x + e_k, p, u) - evalf(x, p, u))/epsilon\n        elif variable == 'u':\n            e_k = np.column_stack((e_k, np.zeros(num_nodes)))\n            Jf[:, k] = (evalf(x, p, u + e_k) - evalf(x, p, u)) / epsilon\n    return Jf\n\n\ndef jacobian_analytical(x, p, u):\n    num_nodes = x.shape[0]\n\n    J_f = np.zeros((num_nodes, num_nodes))\n\n    for k in range(num_nodes):\n        J_f[k, k] = -1/(p[k] * np.cosh(2*x[k]/(P_max * p[k])))\n\n        if k != 0:\n            J_f[k, k-1] = 1/(p[k-1] * np.cosh(2*x[k-1]/(P_max * p[k-1])))\n    return J_f\n","repo_name":"thungyx/trainsimulator","sub_path":"jacobian.py","file_name":"jacobian.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"3675929657","text":"\"\"\"\nSum of Probabilities\n--------------------\nDiagram of a sum of probabilities\n\"\"\"\n# Author: Jake VanderPlas <vanderplas@astro.washington.edu>\n# License: BSD\n#   The figure produced by this code is published in the textbook\n#   \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n#   For more information, see http://astroML.github.com\nfrom matplotlib import pyplot as plt\n\n# create plot\nfig = plt.figure(figsize=(8, 6), facecolor='w')\nax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)\n\n# draw intersecting circles\nax.add_patch(plt.Circle((1.5, 0.2), 2.2, fc='gray', ec='black', alpha=0.5))\nax.add_patch(plt.Circle((-1.5, 0.2), 2.2, fc='gray', ec='black', alpha=0.5))\n\n# add text (raw strings avoid invalid escape warnings for \\c in the TeX labels)\ntext_kwargs = dict(ha='center', va='center', fontsize=20)\nax.text(-1.6, 0.2, \"$p(A)$\", **text_kwargs)\nax.text(1.6, 0.2, \"$p(B)$\", **text_kwargs)\nax.text(0.0, 0.2, r\"$p(A \cap B)$\", **text_kwargs)\nax.text(0, -2.3, r\"$p(A \cup B) = p(A) + p(B) - p(A \cap B)$\", **text_kwargs)\n\nax.set_xlim(-4, 4)\nax.set_ylim(-3, 3)\n\nplt.show()\n","repo_name":"ryanmaas/astroML","sub_path":"book_figures/chapter3/fig_prob_sum.py","file_name":"fig_prob_sum.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"20755137918","text":"import json\n\ndef compute_frequency(infile):\n    tweet_dict = {}\n    frequency_dict = {}\n    total_count = 0 \n    for each_tweet in infile:\n        each_tweet_json = json.loads(each_tweet)\n        \n        tweet_token = each_tweet_json['text'].split()\n        for each_token in tweet_token:\n            tweet_dict.setdefault(each_token,0)\n            tweet_dict[each_token] = tweet_dict[each_token]+1\n            total_count = total_count +1\n
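    # [Added note: illustrative, not part of the original script.]\n    # The loop below converts raw counts into relative frequencies, e.g. if a\n    # token appears 5 times among 200 tokens in total, its frequency is\n    # 5 / 200 = 0.025, and all frequencies sum to 1.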
    for each_token in tweet_dict:\n        frequency_dict[each_token] = float(1.0*tweet_dict[each_token]/float(total_count))\n    return frequency_dict\n\nif __name__=='__main__':\n    import sys\n    infile = open(sys.argv[1],'r')\n    frequency_dict = compute_frequency(infile)\n    #print frequency_dict\n    for each in frequency_dict:\n        fq = frequency_dict[each]\n        #fq = '{4:f}'.format(fq)\n        print(\"%s %f\" %(each, fq))\n","repo_name":"vineetyadav/data_sciene_twitter_sentiment","sub_path":"frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"46784629566","text":"#Calculate the cost of a trip given the distance, the fuel consumption per 100 km and the price of a litre of petrol.\n# Assume the consumption per 100 km is 6.4 l, the route is 120 km, and a litre of petrol costs 5.04 zł.\n#Modify the script so that it takes the values from the user.\n\nprint(\"trip cost\")\nspalanie_100=float(input(\"how much fuel does your car burn per 100 km\"))\ntrasa=float(input(\"how many km did you drive ??\"))\ncena=float(input(\"enter the price of petrol per 1 litre\"))\nwynik=((trasa*spalanie_100)/100)*cena\nresult=cena*trasa/100*spalanie_100\nprint(\"Total cost of the 120 km route\",round(wynik,2))\nprint(\"Total cost of the 120 km route\",round(result,2))","repo_name":"Mateusz45412/CODE-ME","sub_path":"02_Variable/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37896226798","text":"# Definitions\nEXECUTING = \"executing\"\nQUEUED = \"queued\"\nPLEDGED = \"pledged\"\nIGNORE = \"ignore\"\n\n\nclass Node(object):\n    def __init__(self):\n        self.children = []\n\n    def add_child(self, node):\n        self.children.append(node)\n\n    def get_leaves(self, leaves=None):\n        # Use None as the default to avoid sharing one mutable list across calls\n        if leaves is None:\n            leaves = []\n\n        # If the node has no leaves, return the node in a list\n        if not self.children:\n            leaves.append(self)\n            return leaves\n\n        # Recursively get to the bottom\n        for child in self.children:\n            child.get_leaves(leaves)\n\n        return leaves\n\n\nclass Share(Node):\n    \"\"\"\n    Implement the share node\n    \"\"\"\n\n    _attributes = (\n        \"name\",\n        \"value\",\n        \"parent\",\n        \"prodsourcelabel\",\n        \"workinggroup\",\n        \"campaign\",\n        \"processingtype\",\n        \"transpath\",\n        \"vo\",\n        \"rtype\",\n        \"queue_id\",\n        \"throttled\",\n    )\n\n    def __str__(self, level=0):\n        \"\"\"\n        Print the tree structure\n        \"\"\"\n        ret = \"{0} name: {1}, value: {2}\\n\".format(\"\\t\" * level, self.name, self.value)\n        for child in self.children:\n            ret += child.__str__(level + 1)\n        return ret\n\n    def __repr__(self):\n        return self.__str__()\n\n    def __mul__(self, other):\n        \"\"\"\n        If I multiply a share object by a number, multiply the value field\n        \"\"\"\n        self.value *= other\n        return self.value\n\n    def __rmul__(self, other):\n        return self.__mul__(other)\n\n    def __imul__(self, other):\n        return self.__mul__(other)\n\n    def __init__(\n        self,\n        name,\n        value,\n        parent,\n        prodsourcelabel,\n        workinggroup,\n        campaign,\n        processingtype,\n        transpath,\n        rtype,\n        vo,\n        queue_id,\n        throttled,\n    ):\n        # Create default attributes\n        for attr in self._attributes:\n            setattr(self, attr, None)\n\n        Node.__init__(self)\n        self.name = name\n        self.value = value\n        self.parent = parent\n        self.prodsourcelabel = prodsourcelabel\n        self.workinggroup = workinggroup\n        self.campaign = campaign\n        self.processingtype = processingtype\n        self.transpath = transpath\n        self.rtype = rtype\n        self.vo = vo\n        self.queue_id = queue_id\n        self.throttled = throttled\n\n    def pretty_print_hs_distribution(self, hs_distribution, level=0):\n        try:\n            executing = 
hs_distribution[self.name][EXECUTING] / 1000.0\n except Exception:\n executing = 0\n\n try:\n target = hs_distribution[self.name][PLEDGED] / 1000.0\n except Exception:\n target = 0\n\n try:\n queued = hs_distribution[self.name][QUEUED] / 1000.0\n except Exception:\n queued = 0\n\n ret = \"{0} name: {1}, values: {2:.1f}k|{3:.1f}k|{4:.1f}k\\n\".format(\"\\t\" * level, self.name, executing, target, queued)\n for child in self.children:\n ret += child.pretty_print_hs_distribution(hs_distribution, level + 1)\n return ret\n\n def normalize(self, multiplier=100, divider=100):\n \"\"\"\n Will run down the branch and normalize values beneath\n \"\"\"\n self.value *= multiplier * 1.0 / divider\n if not self.children:\n return\n\n divider = 0\n for child in self.children:\n divider += child.value\n\n multiplier = self.value\n\n for child in self.children:\n child.normalize(multiplier=multiplier, divider=divider)\n\n return\n\n def sort_branch_by_current_hs_distribution(self, hs_distribution):\n \"\"\"\n Runs down the branch in order of under-pledging. It returns a list of sorted leave shares\n \"\"\"\n sorted_shares = []\n\n # If the node has no leaves, return the node in a list\n if not self.children:\n sorted_shares = [self]\n return sorted_shares\n\n # If the node has leaves, sort the children\n children_sorted = []\n for child1 in self.children:\n loop_index = 0\n insert_index = len(children_sorted) # insert at the end, if not deemed otherwise\n\n # Calculate under-pledging\n try:\n child1_under_pledge = hs_distribution[child1.name][EXECUTING] * 1.0 / hs_distribution[child1.name][PLEDGED]\n except ZeroDivisionError:\n child1_under_pledge = 10**6 # Initialize to a large default number\n\n for child2 in children_sorted:\n try:\n # Calculate under-pledging\n child2_under_pledge = hs_distribution[child2.name][EXECUTING] * 1.0 / hs_distribution[child2.name][PLEDGED]\n except ZeroDivisionError:\n child2_under_pledge = 10**6 # Initialize to a large default number\n except KeyError:\n continue # Does not exist\n\n if child1_under_pledge < child2_under_pledge:\n insert_index = loop_index\n break\n\n loop_index += 1\n\n # Insert the child into the list\n children_sorted.insert(insert_index, child1)\n\n # Go recursively and sort the grand* children\n for child in children_sorted:\n sorted_shares.extend(child.sort_branch_by_current_hs_distribution(hs_distribution))\n\n return sorted_shares\n\n def aggregate_hs_distribution(self, hs_distribution):\n \"\"\"\n We have the current HS distribution values for the leaves, but want to propagate it updwards to the parents.\n We will traverse the tree from top to bottom and bring up the aggregated values.\n \"\"\"\n executing, queued, pledged = 0, 0, 0\n\n # If the node has no children, it's a leave and should have an entry in the hs_distribution\n if not self.children:\n try:\n executing = hs_distribution[self.name][EXECUTING]\n queued = hs_distribution[self.name][QUEUED]\n pledged = hs_distribution[self.name][PLEDGED]\n except KeyError:\n pass\n\n return executing, queued, pledged\n\n # If the node has children, sum up the values of the children\n executing = 0\n queued = 0\n pledged = 0\n\n for child in self.children:\n (\n executing_child,\n queued_child,\n pledged_child,\n ) = child.aggregate_hs_distribution(hs_distribution)\n executing += executing_child\n queued += queued_child\n pledged += pledged_child\n\n # Add the aggregated value to the map\n hs_distribution[self.name] = {\n EXECUTING: executing,\n QUEUED: queued,\n PLEDGED: pledged,\n }\n\n # Return the 
aggregated values\n return executing, queued, pledged\n\n # return column names\n def column_names(cls):\n ret = \"\"\n for attr in cls._attributes:\n if ret != \"\":\n ret += \",\"\n ret += attr\n return ret\n\n column_names = classmethod(column_names)\n","repo_name":"PanDAWMS/panda-server","sub_path":"pandaserver/taskbuffer/GlobalShares.py","file_name":"GlobalShares.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"340531483","text":"from math import pow\r\nnum=int(input(\"enter the no. here: \"))\r\norg=num\r\nsum=0\r\nfor i in range(1,501):\r\n\tnew=num%10\r\n\tt=pow(new,3)\r\n\tsum=sum+t\r\n\tnum=num//10\r\n\ti+=1\r\nprint(\"the sum of the given no. is\",int(sum))\r\ny=int(sum)\r\nif org==y:\r\n\tprint(\"the given no. is armstrong\")\r\nelse:\r\n\tprint(\"not armstrong\")","repo_name":"YOGESH-TECH/python-basic-problems","sub_path":"cubesum.py","file_name":"cubesum.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32607724314","text":"import ast\nfrom typing import List\n\nfrom peval.tools import replace_fields, ast_transformer, ast_inspector\nfrom peval.core.expression import try_peval_expression\nfrom peval.tools import ast_equal\nfrom peval.typing import ConstsDictT, PassOutputT\n\n\ndef prune_cfg(node: ast.AST, bindings: ConstsDictT) -> PassOutputT:\n while True:\n new_node = node\n\n for func in (\n remove_unreachable_statements,\n simplify_loops,\n remove_unreachable_branches,\n ):\n new_node = func(new_node, ctx=dict(bindings=bindings))\n\n if ast_equal(new_node, node):\n break\n\n node = new_node\n\n return new_node, bindings\n\n\n@ast_transformer\ndef remove_unreachable_statements(node, walk_field, **kwds):\n for attr in (\"body\", \"orelse\"):\n if hasattr(node, attr):\n old_list = getattr(node, attr)\n not_list = isinstance(old_list, ast.AST)\n if not_list:\n old_list = [old_list]\n new_list = filter_block(old_list)\n if new_list is not old_list:\n new_list = walk_field(new_list, block_context=True)\n if not_list:\n new_list = new_list[0]\n kwds = {attr: new_list}\n node = replace_fields(node, **kwds)\n return node\n\n\ndef filter_block(node_list: List[ast.AST]) -> List[ast.AST]:\n \"\"\"\n Remove no-op code (``pass``), or any code after\n an unconditional jump (``return``, ``break``, ``continue``, ``raise``).\n \"\"\"\n if len(node_list) == 1:\n return node_list\n\n new_list = []\n for node in node_list:\n if type(node) == ast.Pass:\n continue\n new_list.append(node)\n if type(node) in (ast.Return, ast.Break, ast.Continue, ast.Raise):\n break\n if len(new_list) == len(node_list):\n return node_list\n else:\n return new_list\n\n\n@ast_inspector\nclass _find_jumps:\n @staticmethod\n def handle_FunctionDef(skip_fields, **_):\n skip_fields()\n\n @staticmethod\n def handle_ClassDef(skip_fields, **_):\n skip_fields()\n\n @staticmethod\n def handle_Break(state, **_):\n return state.with_(jumps_counter=state.jumps_counter + 1)\n\n @staticmethod\n def handle_Raise(state, **_):\n return state.with_(jumps_counter=state.jumps_counter + 1)\n\n @staticmethod\n def handle_Return(state, **_):\n return state.with_(jumps_counter=state.jumps_counter + 1)\n\n\ndef find_jumps(node: List[ast.AST]) -> int:\n return _find_jumps(dict(jumps_counter=0), node).jumps_counter\n\n\n@ast_transformer\nclass simplify_loops:\n @staticmethod\n def handle_While(node, **_):\n last_node = node.body[-1]\n 
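# [Added note: illustrative example, not from the original source.]\n        # This handler rewrites a while-loop whose body always ends in a single\n        # unconditional jump into a plain conditional, e.g. (hypothetical input):\n        #\n        #     while cond():          if cond():\n        #         do_work()    -->       do_work()\n        #         break\n        #\n        # The rewrite is only safe when the body contains exactly one such jump,\n        # which is what the find_jumps(node.body) == 1 guard below checks.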
\n        unconditional_jump = type(last_node) in (ast.Break, ast.Raise, ast.Return)\n        if unconditional_jump and find_jumps(node.body) == 1:\n            if type(last_node) == ast.Break:\n                new_body = node.body[:-1]\n            else:\n                new_body = node.body\n            return ast.If(test=node.test, body=new_body, orelse=node.orelse)\n        else:\n            return node\n\n\n@ast_transformer\nclass remove_unreachable_branches:\n    @staticmethod\n    def handle_If(node, ctx, walk_field, **_):\n        evaluated, test = try_peval_expression(node.test, ctx.bindings)\n        if evaluated:\n            taken_node = node.body if test else node.orelse\n            new_node = walk_field(taken_node, block_context=True)\n            return new_node\n        else:\n            return node\n","repo_name":"fjarri/peval","sub_path":"peval/components/prune_cfg.py","file_name":"prune_cfg.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"}
{"seq_id":"36422919179","text":"from pathlib import Path\nfrom typing import TypeVar, Type, Optional\n\nfrom jcx.db.record import Record, RecordFilter\nfrom jcx.sys.fs import StrPath\nfrom jcx.text.txt_json import load_json\n\nR = TypeVar(\"R\", bound=Record)\n\n\ndef load_list(record_type: Type[R], folder: StrPath, filter_: Optional[RecordFilter] = None) -> list[R]:\n    \"\"\"Load records into a list\"\"\"\n    records: list[R] = []\n\n    folder = Path(folder)\n    if not folder.is_dir():\n        return records\n\n    for f in folder.glob('*.json'):\n        r = load_json(f, record_type).unwrap()\n        if filter_ and not filter_(r):\n            continue\n        records.append(r)\n    return records\n\n\ndef load_dict(record_type: Type[R], folder: StrPath, filter_: Optional[RecordFilter] = None) -> dict[int, R]:\n    \"\"\"Load records into a dict\"\"\"\n    rs = load_list(record_type, folder, filter_)\n    return {r.id: r for r in rs}\n","repo_name":"dayn9t/jcx","sub_path":"jcx/db/jdb/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71446278294","text":"import tensorflow as tf\nfrom tensorflow.contrib import rnn\n\n\ndef variable_summaries(var):\n    # Add summaries\n    print(var.op.name,\" \",var.get_shape().as_list())\n\nclass LSTM(object):\n\n    def __init__(self,config):\n\n        self.initial_learning_rate = config.initial_learning_rate\n        self.min_learning_rate = config.min_learning_rate\n        self.decay_step = config.decay_step\n        self.decay_rate = config.decay_rate\n        self.num_step = config.num_step\n        self.num_classes = config.num_classes\n        self.hidden_neural_size = config.hidden_neural_size\n        self.vocabulary_size = config.vocabulary_size\n        self.embedding_dim = config.embedding_dim\n        self.hidden_layer_num = config.hidden_layer_num\n        self.w2v = config.w2v\n        self.input_x = tf.placeholder(tf.int32,[None,self.num_step],name=\"input_x\")\n        self.input_y = tf.placeholder(tf.int32,[None,self.num_classes],name=\"input_y\")\n\n\n        self.dropout_keep_prob = tf.placeholder(tf.float32,name='dropout_keep_prob')\n        variable_summaries(self.input_x)\n\n        #embedding layer\n        with tf.device('/cpu:0'),tf.name_scope(\"embedding_layer\"):\n            # W holds the word vectors learned during training\n            W = tf.Variable(self.w2v,name=\"W\")\n            # tf.nn.embedding_lookup performs the actual embedding: it looks up all the ids in input_x and fetches their word vectors. Every word of every sentence in the batch is looked up.\n            # The result is therefore a 3-D tensor of shape [None, sequence_length, embedding_size]\n            inputs = tf.nn.embedding_lookup(W,self.input_x)\n\n            inputs = tf.nn.dropout(inputs,self.dropout_keep_prob,name=\"dropout\")\n\n            variable_summaries(inputs)\n
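\n        # [Added note: illustrative sketch, not part of the original source.]\n        # Rough shape flow through this model, assuming batch size B:\n        #   self.input_x : [B, num_step]                      (token ids)\n        #   inputs       : [B, num_step, embedding_dim]       (after embedding_lookup)\n        #   outputs      : [B, num_step, hidden_neural_size]  (after dynamic_rnn below)\n        # When hidden_layer_num > 1, MultiRNNCell stacks the cells so that each\n        # layer's output sequence becomes the next layer's input sequence.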
\n        if self.hidden_layer_num >1 :\n            lstmCells = rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.hidden_layer_num)],state_is_tuple=True)\n        else:\n            lstmCells = self.lstm_cell()\n        # Get the LSTM cell outputs\n        outputs,states = tf.nn.dynamic_rnn(lstmCells,inputs,dtype=tf.float32)\n\n        with tf.name_scope(\"mean_pooling_layer\"):\n            # Option 1: average all the outputs; converges more easily, is less sensitive to word order, and word-frequency effects are more pronounced\n            # output = tf.reduce_sum(outputs, 1) / tf.cast(outputs.get_shape()[1], tf.float32)\n            # Option 2: take the last output; emphasizes the order of the sequence more, but convergence is a bit harder\n            output = outputs[:, self.num_step - 1, :]\n            variable_summaries(output)\n\n        with tf.name_scope(\"softmax_layer\"):\n            softmax_w = tf.get_variable('softmax_w',[self.hidden_neural_size,self.num_classes],dtype=tf.float32)\n            softmax_b = tf.get_variable(\"softmax_b\",[self.num_classes],dtype=tf.float32)\n\n            # Get the scores for all classes\n            self.logits = tf.add(tf.matmul(output,softmax_w),softmax_b,name='logits')\n            variable_summaries(self.logits)\n\n\n        # Output layer\n        with tf.name_scope(\"output\"):\n            self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,logits=self.logits + 1e-10)\n            self.loss = tf.reduce_mean(self.cross_entropy,name='loss')\n            tf.summary.scalar(\"loss\",self.loss)\n            # Compute the predicted class, i.e. the class with the highest score\n            self.prediction = tf.argmax(self.logits,1,name='prediction')\n            # tf.equal(x, y) returns a bool tensor: true where the corresponding elements of x and y are equal, false otherwise. The resulting tensor has shape [batch, 1].\n            correct_prediction = tf.equal(self.prediction, tf.argmax(self.input_y, 1))\n            self.correct_num = tf.reduce_sum(tf.cast(correct_prediction,tf.float32))\n            # tf.cast(x, dtype) converts the bool tensor into a float tensor, which is easier to compute with\n            # tf.reduce_mean() then receives a float vector (each element is either 0.0 or 1.0),\n            # so taking its mean directly yields the accuracy; there is no need to specify reduction_indices\n            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32),name=\"accuracy\")\n            tf.summary.scalar('accuracy',self.accuracy)\n\n        self.global_step = tf.Variable(0,name=\"global_step\",trainable=False)\n\n        self.learning_rate = tf.maximum(tf.train.exponential_decay(self.initial_learning_rate,self.global_step,\n                                                                   self.decay_step,self.decay_rate,staircase=True),self.min_learning_rate)\n\n        tf.summary.scalar('learning_rate',self.learning_rate)\n\n        tvars = tf.trainable_variables()\n        grads,_ = tf.clip_by_global_norm(tf.gradients(self.loss,tvars),config.max_grad_norm)\n\n        optimizer = tf.train.AdamOptimizer(self.learning_rate)\n        optimizer.apply_gradients(zip(grads,tvars))\n\n        self.train_op = optimizer.apply_gradients(zip(grads,tvars),global_step=self.global_step)\n        self.summary = tf.summary.merge_all()\n\n\n    def lstm_cell(self):\n        lstm_cell = rnn.LSTMCell(self.hidden_neural_size,forget_bias=2.0)\n        lstm_cell = rnn.DropoutWrapper(lstm_cell,output_keep_prob=self.dropout_keep_prob)\n        return lstm_cell\n\n\n","repo_name":"fanfanfeng/deeplearning","sub_path":"src/sogou_classfication/lstm_model.py","file_name":"lstm_model.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"}
{"seq_id":"43230254699","text":"# coding:utf-8\nfrom django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponseNotAllowed\nfrom django.utils import timezone\n# from django.contrib.auth.models import User\nfrom ..account.models import Profile\nfrom ..article.models import Article\nfrom .models import Comment, SubComment\n\n\n# Create your views here.\ndef add_reply(request, article):\n    if request.method == 'POST':\n        post_data = eval(request.body)\n\n        content = post_data.get('content', None)\n\n        if request.user.username:\n\n            if content == '':\n                response = {'status': False, 'data': {'error': 'Reply cannot be empty!'}}\n                return JsonResponse(response)\n\n            else:\n
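                # [Added note: hypothetical example, not from the original source.]\n                # timezone.now().strftime('%Y%m%d%H%M%S') produces a compact,\n                # sortable timestamp id, e.g. 2016-05-01 09:30:12 -> '20160501093012'.\n                # Using it as the comment url assumes at most one comment per second.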
\n                url = timezone.now().strftime('%Y%m%d%H%M%S')\n                reply_time = timezone.now()\n                user = Profile.objects.get(username=request.user.username)\n                article = Article.objects.get(url=article)\n\n                try:\n                    comment = Comment(url=url,\n                                      article=article,\n                                      reply_user=user,\n                                      content=content,\n                                      reply_time=reply_time)\n                except Exception as err:\n                    print(err)\n\n                comment.save()\n                response = {'status': True, 'data': {'error': 'Success!'}}\n                return JsonResponse(response)\n        else:\n            response = {'status': False, 'data': {'error': 'Please log in first!', 'not_login': True}}\n            return JsonResponse(response)\n\n\ndef add_sub_reply(request, head):\n    if request.method == 'POST':\n        post_data = eval(request.body)\n\n        reply_object_str = post_data.get('reply_object', None)\n        content = post_data.get('content', None)\n\n        if request.user.username:\n\n            if content == '' or reply_object_str == '':\n                response = {'status': False, 'data': {'error': 'Reply or reply target cannot be empty!'}}\n                return JsonResponse(response)\n            else:\n                reply_time = timezone.now()\n                user = Profile.objects.get(username=request.user)\n\n                reply_object = Profile.objects.get(username=reply_object_str)\n                head = Comment.objects.get(url=head)\n\n                try:\n                    sub_comment = SubComment(head=head,\n                                             reply_user=user,\n                                             reply_object=reply_object,\n                                             content=content,\n                                             reply_time=reply_time)\n                except Exception as err:\n                    print(err)\n                sub_comment.save()\n                response = {'status': True, 'data': {'error': 'Success!'}}\n                return JsonResponse(response)\n\n        else:\n            response = {'status': False, 'data': {'error': 'Please log in first!', 'not_login': True}}\n            return JsonResponse(response)","repo_name":"doxiaodong/mysite","sub_path":"apps/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"74846185814","text":"import numpy as np\r\nfrom roug_ml.utl.etl import read_data\r\nfrom roug_ml.utl.etl import extract_features\r\nimport tensorflow as tf\r\nfrom models.nn_models import MLPModel\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import RobustScaler, StandardScaler\r\nfrom models.pipelines.pipelines import NNTensorFlow\r\nfrom models.scalers.scalers3d import NDStandardScaler\r\nimport matplotlib.pyplot as plt\r\nnp.random.seed(1)\r\ntf.random.set_seed(2)\r\nimport os\r\nimport scipy.io as sio\r\n\r\n\r\nif __name__ == '__main__':\r\n    # file_name = 'sp623_pat1'\r\n    main_data_path = 'C:/Users/kenso/Documents/MATLAB/data_from_cea'\r\n    fol, _path, list_of_patients = next(os.walk((main_data_path)))\r\n\r\n    for patient_file in list_of_patients:\r\n        print(patient_file[:-4])\r\n        if patient_file[:-4] in ['sp623_pat_24', 'sp623_pat_27']:\r\n            continue\r\n        path_to_bd1 = os.path.join(main_data_path, patient_file)\r\n        list_of_pat = ['patient' + str(i + 1) for i in range(20)]\r\n        dataset = {}\r\n        mat_contents = sio.loadmat(path_to_bd1)\r\n        # try:\r\n        dataset['clinical_study'] = patient_file[:3]\r\n        try:\r\n            dataset['name'] = mat_contents['data_patient_i']['patientInfo'][0][0]['name'][0][0][0]\r\n        except:\r\n            dataset['name'] ='not name'\r\n\r\n        # in_dataset['sex'] = mat_contents['data_patient_i']['patientInfo'][0][0]['sex'][0][0][0]\r\n        try:\r\n            dataset['weight'] = float(mat_contents['data_patient_i']['patientInfo'][0][0]['weight'][0][0][0])\r\n        except:\r\n            dataset['weight'] = 0\r\n        dataset['age'] = int(mat_contents['data_patient_i']['patientInfo'][0][0]['age'][0][0][0])\r\n        # in_dataset['height'] = float(mat_contents['data_patient_i']['patientInfo'][0][0]['height'][0][0][0])\r\n        dataset['cgm'] = 
mat_contents['data_patient_i']['glycemia'][0, 0]['CGM1'][0, 0]['value'][0, 0]\r\n dataset['bolus'] = mat_contents['data_patient_i']['insulin'][0, 0]['bolus'][0, 0]['value'][0, 0]\r\n dataset['basal'] = mat_contents['data_patient_i']['insulin'][0, 0]['basal'][0, 0]['value'][0, 0]\r\n dataset['meal'] = mat_contents['data_patient_i']['meal'][0, 0]['value'][0, 0]\r\n dataset['AGcount_m'] = mat_contents['data_patient_i']['sport'][0, 0]['AGcount_m'][0][0]\r\n dataset['activity_type'] = mat_contents['data_patient_i']['sport'][0, 0]['activity_type'][0, 0]\r\n dataset['EE_wmlm'] = mat_contents['data_patient_i']['sport'][0, 0]['EE_wmlm'][0, 0]\r\n dataset['BPM'] = mat_contents['data_patient_i']['sport'][0, 0]['BPM'][0, 0]\r\n dataset['intensity'] = mat_contents['data_patient_i']['sport'][0, 0]['value'][0, 0]\r\n\r\n # in_dataset = pd.read_csv(os.path.join(path_to_bd1, pat + 'gly.csv'), names=colnames, header=None)\r\n\r\n dataset['CGM'] = dataset['cgm'] * 18\r\n fig, axes = plt.subplots(nrows=5, ncols=2, sharex=True)\r\n axes[0, 0].set_title(dataset['clinical_study'] + '_' + dataset['name'])\r\n axes[0, 0].plot(dataset['CGM'], 'ro')\r\n axes[0, 0].set_ylabel(\"CGM\")\r\n\r\n axes[3, 0].plot(dataset['basal'], 'bo')\r\n axes[3, 0].set_ylabel(\"basal\")\r\n\r\n axes[2, 0].plot(dataset['bolus'], 'bo')\r\n axes[2, 0].set_ylabel(\"bolus\")\r\n\r\n axes[1, 0].plot(dataset['meal'], 'bo')\r\n axes[1, 0].set_ylabel(\"meal\")\r\n\r\n axes[0, 1].plot(dataset['EE_wmlm'], 'bo')\r\n axes[0, 1].set_ylabel(\"EE_wmlm\")\r\n\r\n axes[3, 1].plot(dataset['intensity'], 'bo')\r\n axes[3, 1].set_ylabel(\"sport\")\r\n\r\n axes[1, 1].plot(dataset['AGcount_m'], 'bo')\r\n axes[2, 1].set_ylabel(\"AGcount_m\")\r\n\r\n axes[3, 1].plot(dataset['BPM'], 'bo')\r\n axes[3, 1].set_ylabel(\"BPM\")\r\n\r\n axes[4, 1].plot(dataset['activity_type'], 'bo')\r\n axes[4, 1].set_ylabel(\"activity_type\")\r\n # plt.title(pat)\r\n plt.savefig(os.path.join(main_data_path, 'figs', dataset['clinical_study'] + '_' + dataset['name'] + '.png'))\r\n plt.close()\r\n # except:\r\n # continue\r\n # plt.show()","repo_name":"roug84/ml_roug_project","sub_path":"sandbox/mainreadcea_structure.py","file_name":"mainreadcea_structure.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18798916049","text":"from nltk import *\nfrom nltk.corpus import *\nfrom nltk.wsd import lesk\nimport nltk.data\nimport pprint\nfrom Tkinter import *\nimport ScrolledText\nimport random\n\n\nclass Complexinator(Frame):\n\n def __init__(self, parent):\n Frame.__init__(self, parent, background='gray')\n\n self.parent = parent\n self.parent.title('Complexinator')\n self.pack(fill=BOTH, expand=1)\n\n self.centerWindow()\n self.initWidgets()\n\n def centerWindow(self):\n w = 1000\n h = 700\n\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n\n x = (sw - w)/2\n y = (sh - h)/2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n\n def complexify(self, mode='c'):\n self.l33ttext.delete(1.0, END)\n\n n00btext = self.n00btext.get(1.0,'end-1c')\n l33ttext = n00btext\n complexity = self.complexity_slider.get()\n use_nouns = self.nouns.get()\n use_verbs = self.verbs.get()\n use_adjectives = self.adjectives.get()\n\n word_syns = {}\n\n sentence_finder = nltk.data.load('tokenizers/punkt/english.pickle')\n sentences = sentence_finder.tokenize(n00btext.strip())\n for sentence in sentences:\n tokens = word_tokenize(sentence)\n tagged_words = pos_tag(tokens)\n for 
(word, tag) in tagged_words:\n if random.randrange(1,100) > complexity:\n continue\n\n if (use_nouns and 'NN' in tag) \\\n or (use_verbs and 'VB' in tag) \\\n or (use_adjectives and 'JJ' in tag) \\\n :\n if 'NN' in tag:\n pos = wordnet.NOUN\n elif 'VB' in tag:\n pos = wordnet.VERB\n elif 'JJ' in tag:\n pos = wordnet.ADJ\n\n\n wsd = lesk(tokens, word, pos)\n all_synsets = wordnet.synsets(word, pos=pos)\n\n wsd_syns = ['None']\n guess_syns = ['None']\n\n # Starts off with the synonyms from the Word-Sense Disambiguation, but if that returns nothing useful, it blindly guesses through all possible synonym sets\n if wsd:\n wsd_syns = wsd.lemma_names()\n\n if all_synsets:\n guess_syns = all_synsets[0].lemma_names()\n for ss in all_synsets:\n if len(ss.lemma_names()) > 1:\n guess_syns = ss.lemma_names()\n break\n\n\n if mode == 's':\n l33ttext += word + '\\nWSD Synonyms: ' + ', '.join(wsd_syns) + '\\nGuessed Synonyms: ' + ', '.join(guess_syns) + '\\n\\n'\n\n if mode == 'c':\n # It's hack-y to use a random number as the cursor, but w/e, a web app will replace this soon\n new_syns = wsd_syns + guess_syns\n new_syns = filter(lambda w: w != word and w != 'None', new_syns)\n if len(new_syns) > 0:\n for i in range(len(new_syns)):\n syn = new_syns[i]\n syn = syn.replace('_', ' ')\n if word[len(word)-1] == 's':\n syn += 's'\n new_syns[i] = syn\n\n word_syns[word] = (0, new_syns)\n\n\n if mode == 'c':\n l33ttext = self.replace_words_with_syns(l33ttext, word_syns)\n\n self.l33ttext.insert(END, l33ttext)\n\n\n def replace_words_with_syns(self, l33ttext, word_syns):\n l33ttext = l33ttext.split(' ')\n for (word, (cursor, syns)) in word_syns.iteritems():\n l = len(syns)\n word_inds = (l33ttext.index(w) for w in l33ttext if word in w)\n for i in word_inds:\n if cursor == l-1:\n cursor = 0\n else:\n cursor += 1\n\n replacement_syn = syns[cursor]\n\n\n old_word = l33ttext[i]\n l33ttext[i] = old_word.replace(word, replacement_syn)\n\n l33ttext = ' '.join(l33ttext)\n\n return l33ttext\n\n\n def initWidgets(self):\n self.textboxes_frame = Frame(self.parent)\n self.textboxes_frame.pack(fill=BOTH, expand=1, side=LEFT)\n\n # The input textbox\n self.n00btext = ScrolledText.ScrolledText(self.textboxes_frame, wrap=WORD)\n self.n00btext.pack(fill=BOTH)\n\n # The output textbox\n self.l33ttext = ScrolledText.ScrolledText(self.textboxes_frame, wrap=WORD)\n self.l33ttext.pack(fill=BOTH)\n\n # The button and parameters\n self.options_frame = Frame(self.parent)\n self.options_frame.pack(fill=BOTH, expand=1, side=TOP)\n\n self.complexify_button = Button(self.options_frame, text='COMPLEXIFY', command=self.complexify)\n self.complexify_button.pack(side=TOP, fill=X)\n\n self.stats_button = Button(self.options_frame, text='STATS', command=lambda: self.complexify('s'))\n self.stats_button.pack(side=TOP, fill=X)\n\n\n self.complexity_slider = Scale(self.options_frame, orient=HORIZONTAL, from_=0, to=100)\n self.complexity_slider.set(100)\n self.complexity_slider.pack()\n\n\n self.nouns = IntVar()\n nouns_check = Checkbutton(self.options_frame, text='Nouns?', variable=self.nouns)\n nouns_check.select()\n nouns_check.pack(anchor=W)\n\n self.verbs = IntVar()\n verbs_check = Checkbutton(self.options_frame, text='Verbs?', variable=self.verbs)\n verbs_check.pack(anchor=W)\n\n self.adjectives = IntVar()\n adjectives_check = Checkbutton(self.options_frame, text='Adjectives?', variable=self.adjectives)\n adjectives_check.pack(anchor=W)\n\n\n\ndef main():\n root = Tk()\n root.resizable(0,0)\n app = Complexinator(root)\n root.mainloop()\n\n\nif 
__name__ == '__main__':\n main()\n","repo_name":"MartinPetkov/complexinator","sub_path":"complexinator_gui.py","file_name":"complexinator_gui.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25672326044","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 17 22:25:22 2021\n\n@author: Eliza\n\"\"\"\n# Import the required libraries.\nimport random\n\n# Create \"NewHawk\" class to move the hawks, make them find and eat bats and die if they eat an infected bat.\nclass NewHawk():\n # Set up class constructor.\n def __init__(self, habitat, habitat_size, bats):\n self.habitat = habitat # Save a link to the \"habitat\" variable within the class.\n self.habitat_size = habitat_size # Save a link to the \"habitat_size\" variable within the class.\n self.bats = bats # Save a link to the \"bats\" variable within the class.\n self.y = 0 # Set the starting y coordinate to 0 (this is changed in the \"find_habitat\" function).\n self.x = 0 # Set the starting x coordinate to 0 (this is changed in the \"find_habitat\" function).\n self.alive = True # Set the default state of the hawks as alive.\n self.hunting_ground = 0 # Set the habitat type that the hawks are spawned in and stay in.\n self.find_habitat(self.hunting_ground) # Run the find habitat function to find initial habitat of new hawk.\n \n # Create update function to run the \"fly\" and \"find_and_eat_bats\" functions. \n def update(self):\n self.fly() \n self.find_and_eat_bats()\n \n # Create function to move hawks depending on a random number, as long as they're alive, within the bounds of the map and within the chosen hunting ground.\n def fly(self):\n if self.alive == True:\n validMove = False\n # If the pre-selected tile is not valid, select another tile.\n while (validMove == False):\n # Based on a random number, select one tile up or down.\n if random.random() < 0.5:\n newY = (self.y + 1)\n else:\n newY = (self.y - 1)\n # Based on a random number, select one tile left or right. \n if random.random() < 0.5:\n newX = (self.x + 1)\n else:\n newX = (self.x - 1)\n # If the newly selected tile is within the bounds of the map\n if 0 <= newY <= self.habitat_size - 1 and 0 <= newX <= self.habitat_size - 1:\n # And within the chosen hunting ground\n if self.habitat[newY][newX] == self.hunting_ground:\n # Move there.\n self.y = newY\n self.x = newX\n validMove = True\n \n # Create a function to find and eat bats. If the bat is infected, the hawk dies. \n def find_and_eat_bats(self):\n # Iterate over each bat, and catch and eat it with a 70% success rate.\n for bat in self.bats:\n # Testing that the hawk and bat are at the same location. 
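\n            # [Added note: illustrative sketch, not part of the original model.]\n            # The catch check below draws one uniform random sample per encounter,\n            # so across many encounters roughly 70% of catches succeed, e.g.:\n            #   hits = sum(random.random() < 0.7 for _ in range(10000))\n            #   hits / 10000.0  # typically close to 0.7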
\n # print(bat.x, bat.y, self.x, self.y)\n \n # If the hawk and bat are at the same location, it can be eaten.\n if self.x == bat.x and self.y == bat.y:\n if random.random() < 0.7: # This is where the 'success rate' is set.\n bat.alive = False\n # Testing that the hawks are 'eating' the bats.\n # print(bat.alive,\"yummy bat\")\n\n # If the bat is infected, the hawk dies.\n if bat.infected == True:\n self.alive = False\n # Testing that the hawks are 'dying'.\n #print(self.alive, \"oh no, I died\")\n \n # Choose new coordinates and the habitat at that location, if not the chosen habitat type in self.hunting_ground, choose another set until it is, then move there\n def find_habitat(self, habitat_type):\n validMove = False \n \n # Select another tile randomly.\n while (validMove == False):\n # Randomly select a new tile.\n newX = random.randint(0, self.habitat_size - 1)\n newY = random.randint(0, self.habitat_size - 1)\n \n # If the new tile is in the chosen habitat type, move there.\n if self.habitat[newY][newX] == habitat_type:\n self.x = newX\n self.y = newY\n validMove = True\n \n\n ","repo_name":"plaidlemming/Assignment2","sub_path":"Code/hawk.py","file_name":"hawk.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38699248291","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport ast\nimport torch\nimport pickle\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom chi_square import to_one_list\nfrom aspect_matching import decode_pickle\npd.set_option('display.max_columns', 50)\n\n\ndef calculate_tf_idf(df):\n vectorizer = TfidfVectorizer()\n words = [' '.join(w) for w in df['trigramSentence']]\n vectors = vectorizer.fit_transform(words)\n #print('Done vectorizing for tf-idf')\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n result = pd.DataFrame(denselist, columns=feature_names)\n result.index = list(df['company'])\n return result\n\n\ndef get_most_common_trigrams(words, n=100):\n keywords = Counter(w for w in words if len(w.split('_')) >= 3).most_common(n)\n return [word for word, freq in keywords]\n\n\ndef get_most_frequent_words(all_companies_words):\n frequency = Counter(all_companies_words)\n d_descending = {k: v for k, v in sorted(frequency.items(),\n key=lambda item: item[1],\n reverse=True)}\n drop_words = list(d_descending.keys())[:10]\n print(drop_words)\n return drop_words\n\n\ndef drop_most_frequent_words(words, drop_words):\n for drop in drop_words:\n words = list(filter(lambda a: a != drop, words))\n print(words.count(drop))\n return words\n \n\n\nif __name__ == \"__main__\":\n\n # Parameters\n texttype = 'cons'\n path = '../sample_data/abae/'+texttype\n \n # Bring data\n with open(path + '/aspect_size_12/cluster_map.txt', 'r') as f:\n cluster_map = f.readlines()\n cluster_map = ''.join([i.replace('\\n', '') for i in cluster_map])\n cluster_map = ast.literal_eval(cluster_map)\n \n testpath = path + '/aspect_size_12/tests_results/'\n df = []\n for file in tqdm(os.listdir(testpath)):\n with open(testpath+file, 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n df += decode_pickle(data)\n \n with open(path + '/indices.txt', 'r') as f:\n indices = f.readlines()\n indices = [i.replace('\\n','') for i in indices]\n \n for d, idx in zip(df, indices):\n d['sentenceId'] = int(idx)\n \n # Match sentence with company\n origin = 
torch.load('../sample_data/master/review_metadata.pt')\n origin = origin[['reviewId','company']]\n sentence = torch.load('../sample_data/master/sentence_match.pt')\n df = pd.DataFrame(df)\n final = pd.merge(sentence, origin, on='reviewId')\n del origin, sentence\n \n master = pd.merge(df, final[['sentenceId','trigramSentence','company']], on='sentenceId')\n del final, df\n \n # Make master tf-idf file\n master2 = master.groupby(['company','aspect_1'])['trigramSentence'].apply(list).reset_index(name='trigramSentence')\n del master\n master2['trigramSentence'] = master2['trigramSentence'].apply(lambda s: to_one_list(s))\n torch.save(master2, path+'/aspect_size_12/master_tf_idf.pt')\n \n # Extract keywords with tf-idf per industry per aspect\n text_type = 'pros'\n group ='ggroup' #'gind'\n group_number = 2030\n company_of_interest = 'American_Airlines_Group_Inc'\n \n #aspect = 'SeniorLeadership'\n aspect_of_interest = 'Leadership'\n master = torch.load(f'../sample_data/abae/{text_type}/aspect_size_12/master_tf_idf.pt')\n company_gics = torch.load('../sample_data/master/company_gics.pt')\n master = pd.merge(master, company_gics, on='company')\n \n tfidf = master[(master['aspect_1']==aspect_of_interest) & (master[group]==group_number)]\n most_frequent_words = get_most_frequent_words(tfidf['trigramSentence'].sum())\n \n tfidf = calculate_tf_idf(tfidf)\n tfidf['conml'] = tfidf.index\n \n tfidf = tfidf.drop(columns=most_frequent_words)\n# drop_most_frequent_words(words, drop_words)\n \n tfidf[tfidf['conml']==company_of_interest].transpose().drop(['conml']).sort_values(by=company_of_interest).tail(30)\n #tfidf['yes_man']\n \n final = tfidf.T[:-1]\n final = final[(final != 0).sum(1) <= 1]\n test = final.T\n test['conml'] = test.index\n \n test[test['conml']==company_of_interest].transpose().drop(['conml']).sort_values(by=company_of_interest).tail(30)\n \n\n \n ","repo_name":"elainespak/glassdoor_aspect_based_sentiment_analysis","sub_path":"utils/tf_idf_keyword_extraction.py","file_name":"tf_idf_keyword_extraction.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"10715197123","text":"from Bio import SeqIO\nfrom collections import defaultdict\nfrom itertools import permutations\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('fasta', type=str, nargs='+',\n\thelp='fasta files to be concatenated')\nparser.add_argument('-d','--hmmdir', type=str,\n\thelp='Directory containing hmm output')\nargs = parser.parse_args()\n\ndef parse_fasta(infile):\n return {rec.id:rec for rec in SeqIO.parse(infile, 'fasta')}\n\ndef get_domains(infile):\n domains = defaultdict(dict)\n count = defaultdict(int)\n for line in open(infile):\n if not line.startswith('#'):\n line = line.strip().split()\n recid = line[0]\n domain = line[3]\n tstart = line[17]\n tend = line[18]\n score = line[7]\n if domain in domains[recid]:\n count[recid] += 1\n domain = domain + '_{}'.format(count[recid])\n domains[recid][domain] = {'tstart':int(tstart), 'tend':int(tend), 'score':float(score), 'length':int(tend)-int(tstart)}\n return domains\n\ndef resolve_overlaps(d, b):\n if (d['tend'] >= b['tstart'] and d['tend'] <= b['tend']):\n if d['score'] > b['score']:\n b['tstart'] = d['tend'] + 1\n elif b['score'] > d['score']:\n d['tend'] = b['tstart'] - 1\n elif (d['tstart'] <= b['tend'] and d['tstart'] >= b['tstart']):\n if d['score'] > b['score']:\n b['tend'] = d['tstart'] - 1\n elif b['score'] > 
d['score']:\n                d['tstart'] = b['tend'] + 1\n\ndef resolve_incompatible_domains(domains):\n    for k,v in domains.items():\n        for (a,b),(c,d) in permutations(v.items(),2):\n            resolve_overlaps(b,d)\n    for k,v in domains.items():\n        for a,b in list(v.items()):\n            if b['tend'] - b['tstart'] <= .3 * b['length']:\n                v.pop(a)\n\ndef extract_subseq(seq, domain, meta):\n    subseq = seq[meta['tstart']:meta['tend']]\n    subseq.id = seq.id + '_' + domain\n    subseq.description = ''\n    return subseq\n\ndef main(fasta, hmmdir):\n    seqdict = parse_fasta(fasta)\n    base = os.path.basename(fasta).replace('.fasta', '')\n    domains = get_domains('{}/{}.out'.format(hmmdir,base))\n    resolve_incompatible_domains(domains)\n\n    for split_file in set([k.split('_')[0] for i in domains.values() for k in i.keys()]):\n        subseqs = []\n        for seq, doms in domains.items():\n            for dom, meta in doms.items():\n                if dom.split('_')[0] == split_file:\n                    subseqs.append(extract_subseq(seqdict[seq], dom, meta))\n        if len(subseqs) >= 0.0 * len(seqdict.values()):\n            with open(\"clusters_splitted/{}_{}.fasta\".format(base, split_file), 'w') as out:\n                SeqIO.write(subseqs, out, 'fasta')\n\nif __name__ == '__main__':\n    for fasta in args.fasta:\n        main(fasta, args.hmmdir)\n","repo_name":"maxemil/haloarchaea-evolution","sub_path":"scripts/split_clusters_domain.py","file_name":"split_clusters_domain.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"33057984727","text":"\nimport os\nimport logging\nimport time\nimport warnings\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\n\ndef preprocess(img, boxes, style=0):\n    \n    if style == 0:\n        img = img/255.\n    elif style == 1:#tensorflow\n        img = img / 127.5 - 1.0\n    else:#caffe,bgr\n        img -= [123.68,116.779,103.939]\n    \n    boxes[..., 0:4] /= np.tile(img[0].shape[0:2][::-1], [2])\n    return img.copy(), boxes.copy()\n\ndef resize_img_aug(img,dst_size):\n    img_wh = img.shape[0:2][::-1]\n    dst_size = np.array(dst_size)\n    scale = dst_size/img_wh\n    min_scale = np.min(scale)\n    random_resize_style = np.random.randint(0, 5)\n    resize_list = [cv2.INTER_AREA,cv2.INTER_CUBIC,cv2.INTER_LINEAR,cv2.INTER_NEAREST,cv2.INTER_LANCZOS4]\n    img = cv2.resize(img, None, fx=min_scale, fy=min_scale, interpolation=resize_list[random_resize_style])\n    img_wh = img.shape[0:2][::-1]\n    pad_size = dst_size - img_wh\n    half_pad_size = pad_size//2\n    img = np.pad(img,[(half_pad_size[1],pad_size[1]-half_pad_size[1]),(half_pad_size[0],pad_size[0]-half_pad_size[0]),(0,0)], constant_values=np.random.randint(0, 255))\n    return img, min_scale, pad_size\ndef resize_img(img,dst_size):\n    img_wh = img.shape[0:2][::-1]\n    dst_size = np.array(dst_size)\n    scale = dst_size/img_wh\n    min_scale = np.min(scale)\n    img = cv2.resize(img, None, fx=min_scale, fy=min_scale)\n    img_wh = img.shape[0:2][::-1]\n    pad_size = dst_size - img_wh\n    half_pad_size = pad_size//2\n    img = np.pad(img,[(half_pad_size[1],pad_size[1]-half_pad_size[1]),(half_pad_size[0],pad_size[0]-half_pad_size[0]),(0,0)])\n    return img, min_scale, pad_size\n","repo_name":"wangermeng2021/Scaled-YOLOv4-tensorflow2","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"}
{"seq_id":"15355038174","text":"import config\nimport random\nimport itertools\nfrom utils import red, green, blue, yellow\nif config.OUTPUT_TO_FILE:\n    import sys\n    sys.stdout = open(config.LOG_FILE, 'a')\n# Set the 
random seed, if applicable\nif config.RANDOM_SEED is not None:\n random.seed(config.RANDOM_SEED)\n\n\nclass Rule:\n \"\"\"A simulation rule defined using Klein's (1971) rule language.\"\"\"\n\n def __init__(self, action_list, subrules, raw_definition):\n \"\"\"Initialize a Rule object.\"\"\"\n self.action_list = action_list\n self.subrules = subrules\n self.raw_definition = raw_definition\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return self.raw_definition\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return \", \".join(str(action) for action in self.action_list)\n\n def test(self, universe):\n \"\"\"Test this rule, given the current state of the given universe.\"\"\"\n if config.VERBOSITY >= 2:\n print(f\"Testing rule: {self.action_list[0]}...\")\n # Collect candidate bindings for action subjects and objects\n binding_candidates = {}\n y_restriction = float(\"inf\")\n for action in self.action_list:\n # Collect candidate subjects\n if isinstance(action.subject, Variable):\n class_name = action.subject.class_name\n binding_candidates[action.subject.name] = universe.classes[class_name]\n if action.subject.y_restriction_part:\n y_restriction = min(y_restriction, action.subject.y_restriction_part)\n else:\n binding_candidates[action.subject] = [action.subject] # Ex: candidate_bindings['GEORGE'] = ['GEORGE']\n if action.object:\n # Collect candidate objects\n if isinstance(action.object, Variable):\n class_name = action.object.class_name\n binding_candidates[action.object.name] = universe.classes[class_name]\n if action.object.y_restriction_part:\n y_restriction = min(y_restriction, action.object.y_restriction_part)\n else:\n binding_candidates[action.object] = [action.object]\n # Test all bindings, unless we reach a maximum specified by a Y-restriction part\n rule_executions = 0\n variable_ordering = list(binding_candidates.keys())\n candidate_bindings = list(itertools.product(*binding_candidates.values()))\n for ordered_candidates_list in candidate_bindings:\n if len(set(ordered_candidates_list)) != len(ordered_candidates_list):\n continue\n candidate_binding = {}\n for i, variable_name in enumerate(variable_ordering):\n candidate_binding[variable_name] = ordered_candidates_list[i]\n if self._triggered(universe=universe, partial_bindings=candidate_binding):\n self.fire(universe=universe, bindings=candidate_binding)\n rule_executions += 1\n if rule_executions == y_restriction:\n return\n\n def _triggered(self, universe, partial_bindings):\n \"\"\"Return whether this rule fires with the given variable binding.\"\"\"\n if config.VERBOSITY >= 3:\n print(f\" Bindings: {partial_bindings}\")\n probability = 0.0\n if config.VERBOSITY >= 3:\n print(f\" Probability is {probability}\")\n for subrule in self.subrules:\n if subrule.holds(universe=universe, partial_bindings=partial_bindings):\n increment = subrule.true_value\n else:\n increment = subrule.false_value\n # Potentially short-circuit\n if abs(increment) >= config.SHORT_CIRCUIT_PROBABILITY_INCREMENT_ABSOLUTE_THRESHOLD:\n if increment > 0:\n if config.VERBOSITY >= 3:\n print(\" Short-circuit trigger!\")\n return True\n if config.VERBOSITY >= 3:\n print(\" Short-circuit abandon!\")\n return False\n # Otherwise, increment the running probability\n probability += increment\n if config.VERBOSITY >= 3:\n print(f\" Probability is now {probability}\")\n if random.random() < probability:\n if config.VERBOSITY >= 3:\n print(green(f\" Triggered!\"))\n return True\n if config.VERBOSITY >= 3:\n print(f\" 
Did not trigger\")\n return False\n\n def fire(self, universe, bindings):\n \"\"\"Execute all the actions in the action list for this rule.\"\"\"\n triples_to_add_next_time_frame = []\n for action in self.action_list:\n triple = action.execute(bindings=bindings)\n triples_to_add_next_time_frame.append(triple)\n universe.queue_triples(triples=triples_to_add_next_time_frame)\n\n\nclass Action:\n \"\"\"An action to be executed upon a rule firing in Klein's (1971) simulation engine.\"\"\"\n\n def __init__(self, action_subject, action_relation, action_object, raw_definition):\n \"\"\"Initialize an Action object.\"\"\"\n self.subject = action_subject\n self.relation = action_relation\n self.object = action_object # None if this modifies an attribute\n self.raw_definition = raw_definition\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return self.raw_definition\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n\n def execute(self, bindings):\n \"\"\"Return a triple to be added to the network next time frame.\"\"\"\n if isinstance(self.subject, Variable):\n ground_subject = bindings[self.subject.name]\n else:\n ground_subject = bindings[self.subject]\n if self.object is None:\n ground_object = None\n elif isinstance(self.object, Variable):\n ground_object = bindings[self.object.name]\n else:\n ground_object = bindings[self.object]\n triple_to_add = (ground_subject, self.relation, ground_object)\n if config.VERBOSITY >= 2:\n if ground_object:\n print(green(f\" {ground_subject} {self.relation} {ground_object}\"))\n else:\n print(green(f\" {ground_subject} {self.relation}\"))\n return triple_to_add\n\n\nclass Subrule:\n \"\"\"A subrule in a rule defined using Klein's (1971) rule language.\"\"\"\n\n def __init__(self, true_value, false_value, sentence_list, raw_definition):\n \"\"\"Initialize a Subrule object.\"\"\"\n self.true_value = true_value\n self.false_value = false_value\n self.sentence_list = sentence_list\n self.raw_definition = raw_definition\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return self.raw_definition\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n\n def holds(self, universe, partial_bindings):\n \"\"\"Return whether the condition expressed in this subrule holds, given the variable binding.\n\n Note that variables (besides any X or Y introduced in the rule header) cannot be passed\n across subrule boundaries, meaning the bindings are local to the subrule at hand (1971:13).\n \"\"\"\n if config.VERBOSITY >= 3:\n print(f\" Testing subrule: {self.__str__()}\")\n partial_bindings = {variable: [ground] for variable, ground in partial_bindings.items()}\n # Collect candidate bindings for all variables referenced in the sentence list\n flattened_sentence_list = []\n for item in self.sentence_list:\n if type(item) is list:\n flattened_sentence_list += item\n else:\n flattened_sentence_list.append(item)\n new_binding_candidates = {}\n for sentence in flattened_sentence_list:\n if not isinstance(sentence, Sentence):\n continue\n if isinstance(sentence.subject, Variable):\n if sentence.subject.name in partial_bindings:\n continue\n class_name = sentence.subject.class_name\n new_binding_candidates[sentence.subject.name] = universe.classes[class_name]\n else:\n if sentence.subject in partial_bindings:\n continue\n # Ex: candidate_bindings['GEORGE'] = ['GEORGE']\n new_binding_candidates[sentence.subject] = [sentence.subject]\n for sentence in 
flattened_sentence_list:\n if not isinstance(sentence, Sentence):\n continue\n if not sentence.object:\n continue\n if isinstance(sentence.object, Variable):\n if sentence.object.name in partial_bindings:\n continue\n class_name = sentence.object.class_name\n new_binding_candidates[sentence.object.name] = universe.classes[class_name]\n else:\n if sentence.object in partial_bindings:\n continue\n # Ex: candidate_bindings['GEORGE'] = ['GEORGE']\n new_binding_candidates[sentence.object] = [sentence.object]\n # Test all bindings\n binding_candidates = {**partial_bindings, **new_binding_candidates} # Merge the two dictionaries\n variable_ordering = list(binding_candidates.keys())\n candidate_bindings = list(itertools.product(*binding_candidates.values()))\n for ordered_candidates_list in candidate_bindings:\n candidate_binding = {}\n for i, variable_name in enumerate(variable_ordering):\n candidate_binding[variable_name] = ordered_candidates_list[i]\n if config.VERBOSITY >= 3:\n print(f\" Binding: {candidate_binding}\")\n this_rule_holds = self._evaluate_sentences(\n universe=universe,\n binding=candidate_binding,\n sentence_list=self.sentence_list\n )\n if this_rule_holds:\n return True\n return False\n\n def _evaluate_sentences(self, universe, binding, sentence_list):\n \"\"\"Evaluate the given sentence(s) or list of sentence(s).\"\"\"\n sentence_list = list(sentence_list) # Make a copy, to be safe\n while any(component for component in sentence_list if not isinstance(component, str)):\n for i, component in enumerate(sentence_list):\n if isinstance(component, list):\n sentence_list[i] = self._evaluate_sentences(\n universe=universe,\n binding=binding,\n sentence_list=component\n )\n continue\n if component == '&':\n sentence_list[i] = 'and'\n continue\n if component == '/':\n sentence_list[i] = 'or'\n continue\n if isinstance(component, str):\n continue\n if isinstance(component, bool):\n sentence_list[i] = str(component)\n continue\n # If we get to here, it's a Sentence or a TimeSentence object\n if isinstance(component, Sentence):\n sentence_list[i] = str(component.evaluate(bindings=binding, universe=universe))\n else:\n sentence_list[i] = str(component.evaluate(universe=universe))\n return eval(' '.join(str(component) for component in sentence_list))\n\n\nclass Sentence:\n \"\"\"A precondition (\"sentence\") on a subrule.\"\"\"\n\n def __init__(self, sentence_subject, sentence_relation, sentence_object):\n \"\"\"Initialize a Sentence object.\"\"\"\n self.subject = sentence_subject\n self.relation = sentence_relation\n self.object = sentence_object\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return f\"{self.subject} {self.relation} {self.object}\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n\n def evaluate(self, bindings, universe):\n \"\"\"Return whether this sentence holds, given the binding and the current state of the modelled universe.\"\"\"\n if isinstance(self.subject, Variable):\n ground_subject = bindings[self.subject.name]\n else:\n ground_subject = bindings[self.subject]\n if self.object is None:\n ground_object = None\n elif isinstance(self.object, Variable):\n ground_object = bindings[self.object.name]\n else:\n ground_object = bindings[self.object]\n evaluation = universe.match(\n triple_subject=ground_subject,\n triple_relation=self.relation,\n triple_object=ground_object\n )\n if config.VERBOSITY >= 3:\n print(f\" Evaluated sentence to {evaluation}: ({ground_subject} {self.relation} 
{ground_object})\")\n return evaluation\n\n\nclass TimeSentence:\n \"\"\"A precondition (\"sentence\") on a subrule pertaining to the current plot time.\"\"\"\n\n def __init__(self, operator, time_value):\n \"\"\"Initialize a TimeSentence object.\"\"\"\n self.operator = operator\n self.time_value = int(time_value)\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n return f\"T {self.operator} {self.time_value}\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n\n def evaluate(self, universe):\n \"\"\"Return whether this sentence holds, given the current time frame of the modelled universe.\"\"\"\n if self.operator == '==':\n return universe.time == self.time_value\n if self.operator == '!=':\n return universe.time != self.time_value\n if self.operator == '<':\n return universe.time < self.time_value\n if self.operator == '>':\n return universe.time > self.time_value\n\n\nclass Relation:\n \"\"\"A relation between nodes in a semantic network.\"\"\"\n\n def __init__(self, name, negate_field, duration_modifier_operator, duration_modifier_time_value):\n \"\"\"Initialize a Relation object.\"\"\"\n self.name = name # Name of the relation\n self.negate_field = negate_field # True if this relation has a delete field or negate field\n if duration_modifier_operator:\n assert duration_modifier_time_value, (\n f\"Relation '{name}' has duration_modifier_operator but no duration_modifier_time_value.\"\n )\n self.duration_modifier_operator = duration_modifier_operator\n self.duration_modifier_time_value = int(duration_modifier_time_value) if duration_modifier_time_value else None\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n negation = \"!=\" if self.negate_field else ''\n if self.duration_modifier_operator:\n duration_modifier = f\"{self.duration_modifier_operator}{self.duration_modifier_time_value}\"\n else:\n duration_modifier = ''\n return f\"{negation}{self.name}{duration_modifier}\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n\n\nclass Variable:\n \"\"\"A variable in a rule.\"\"\"\n\n def __init__(self, name, class_name=None, y_restriction_part=None):\n \"\"\"Initialize a Variable object.\"\"\"\n self.name = name\n self.class_name = class_name\n self.y_restriction_part = y_restriction_part\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n name = self.name if self.name else '#'\n class_name_component = f\".{self.class_name}\" if self.class_name else ''\n y_restriction_part_component = f\":{self.y_restriction_part}\" if self.y_restriction_part else ''\n return f\"{name}{class_name_component}{y_restriction_part_component}\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.__str__()\n","repo_name":"james-owen-ryan/messy-71","sub_path":"rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":16144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12019451551","text":"import pygame\r\n\r\n# Set up colours.\r\nCLR_WHITE = (255, 255, 255)\r\nCLR_BLACK = (0, 0, 0)\r\nCLR_BLUE = (0, 0, 255)\r\nCLR_RED = (255, 0, 0)\r\nCLR_GREEN = (0, 255, 0)\r\nCLR_YELLOW = (255, 255, 0)\r\n\r\n\r\ndef map_coords_to_pixels(map_x, map_y):\r\n \"\"\"\r\n Simple conversion function to transform map coordinates into screen coordinates for drawing sprites.\r\n :param map_x: (int) The map coordinate x axis (tiles)\r\n :param map_y: (int) The map coordinate y axis (tiles)\r\n 
:return: (int, int) - the equivalent screen coordinates in pixels\r\n    \"\"\"\r\n    return int(map_x * 16), int(map_y * 16)\r\n\r\n\r\ndef render_all(screen_surface, screen_width, screen_height, view_port_width, view_port_height, view_port_x_offset,\r\n                view_port_y_offset, game_map, player, entities, visible_map_chunk, sprites):\r\n    \"\"\"\r\n    Draw the map, entities and HUD for the current frame, then refresh the display.\r\n\r\n    :param screen_surface: obj - the main pygame drawing surface.\r\n    :param screen_width: int - screen width in pixels\r\n    :param screen_height: int - screen height in pixels\r\n    :param view_port_width: int - width in pixels of the screen segment which displays the map\r\n    :param view_port_height: int - height in pixels of the screen segment which displays the map\r\n    :param view_port_x_offset: int - horizontal offset of the view port from the left edge, in pixels.\r\n    :param view_port_y_offset: int - vertical offset of the view port from the top edge, in pixels.\r\n    :param game_map: game map object\r\n    :param player: player object\r\n    :param entities: list - tracking all entities in game.\r\n    :param visible_map_chunk: obj - the section of the map currently shown in the view port.\r\n    :param sprites: dict - loaded sprite surfaces keyed by name.\r\n    :return: None\r\n    \"\"\"\r\n\r\n    # Set the background colour of the window to black.\r\n    screen_surface.fill(CLR_BLACK)\r\n\r\n    # Invoke individual draw functions.\r\n    render_map(screen_surface, view_port_x_offset, view_port_y_offset, game_map, visible_map_chunk, sprites)\r\n    render_entities(screen_surface, view_port_x_offset, view_port_y_offset, entities, visible_map_chunk)\r\n    render_bottom_hud(screen_surface, screen_width, screen_height, view_port_width, view_port_height, view_port_x_offset, view_port_y_offset, player)\r\n    render_top_hud(screen_surface, screen_width, screen_height, view_port_width, view_port_height, view_port_x_offset, view_port_y_offset, player)\r\n\r\n    # Refresh the display.\r\n    pygame.display.flip()\r\n\r\n    # Clear the entities\r\n    clear_entities(screen_surface, view_port_x_offset, view_port_y_offset, entities, visible_map_chunk)\r\n\r\n\r\ndef render_top_hud(screen_surface, screen_width, screen_height, view_port_width, view_port_height, view_port_x_offset, view_port_y_offset, player):\r\n    hud_screen_x1 = view_port_x_offset\r\n    hud_screen_x2 = view_port_x_offset + view_port_width\r\n    hud_screen_y1 = 0\r\n    hud_screen_y2 = view_port_y_offset\r\n\r\n    hud_width = hud_screen_x2 - hud_screen_x1\r\n    hud_height = hud_screen_y2 - hud_screen_y1\r\n\r\n    draw_element(screen_surface, hud_screen_x1, hud_screen_y1, hud_width, hud_height, CLR_BLUE)\r\n\r\n\r\ndef render_bottom_hud(screen_surface, screen_width, screen_height, view_port_width, view_port_height, view_port_x_offset, view_port_y_offset, player):\r\n    hud_screen_x1 = view_port_x_offset\r\n    hud_screen_x2 = view_port_x_offset + view_port_width\r\n    hud_screen_y1 = view_port_y_offset + view_port_height\r\n    hud_screen_y2 = screen_height\r\n\r\n    hud_width = hud_screen_x2 - hud_screen_x1\r\n    hud_height = hud_screen_y2 - hud_screen_y1\r\n\r\n    draw_element(screen_surface, hud_screen_x1, hud_screen_y1, hud_width, hud_height, CLR_BLUE)\r\n\r\n\r\ndef render_map(screen_surface, view_port_x_offset, view_port_y_offset, game_map, visible_map_chunk, sprites):\r\n    map_chunk_x1 = visible_map_chunk.x1\r\n    map_chunk_y1 = visible_map_chunk.y1\r\n\r\n    floor_colour = CLR_BLACK\r\n    wall_colour = CLR_WHITE\r\n\r\n    # Load sprites\r\n    SPR_TREE = sprites.get(\"tree\")\r\n\r\n    # Draw walls (blocked tiles).\r\n    for x, y in visible_map_chunk:\r\n        if game_map.blocked[x, y]: # If it's a wall, work out its screen position, create filled surface and blit.\r\n            tile_colour = wall_colour\r\n            tile_sprite = SPR_TREE\r\n        else:\r\n            tile_colour = floor_colour\r\n            tile_sprite = None\r\n\r\n        # Calculate screen position for tile. 
Draw it!\r\n tile_screen_x, tile_screen_y = map_coords_to_pixels(x - map_chunk_x1, y - map_chunk_y1)\r\n draw_element(screen_surface, tile_screen_x + view_port_x_offset, tile_screen_y + view_port_y_offset, 16, 16, tile_colour, tile_sprite)\r\n\r\n\r\ndef render_entities(screen_surface, view_port_x_offset, view_port_y_offset, entities, visible_map_chunk):\r\n map_chunk_x1 = visible_map_chunk.x1\r\n map_chunk_y1 = visible_map_chunk.y1\r\n\r\n # Iterate through entities and blit it's surface to the screen.\r\n for entity in entities:\r\n entity_screen_x, entity_screen_y = map_coords_to_pixels(entity.map_x - map_chunk_x1, entity.map_y - map_chunk_y1)\r\n screen_surface.blit(entity.surf, (entity_screen_x + view_port_x_offset, entity_screen_y + view_port_y_offset))\r\n\r\n\r\ndef draw_element(screen_surface, screen_x, screen_y, element_width, element_height, colour, sprite=None):\r\n element_surface = pygame.Surface((element_width, element_height))\r\n\r\n # If there is a sprite, blit it to the element's surface - if not just fill with block colour.\r\n if sprite:\r\n element_surface.blit(sprite, (0, 0))\r\n else:\r\n element_surface.fill(colour)\r\n\r\n screen_surface.blit(element_surface, (screen_x, screen_y))\r\n\r\n\r\ndef clear_entities(screen_surface, view_port_x_offset, view_port_y_offset, entities, visible_map_chunk):\r\n map_chunk_x1 = visible_map_chunk.x1\r\n map_chunk_y1 = visible_map_chunk.y1\r\n\r\n # Iterate through entities and clear it.\r\n for entity in entities:\r\n entity_screen_x, entity_screen_y = map_coords_to_pixels(entity.map_x - map_chunk_x1, entity.map_y - map_chunk_y1)\r\n clear_element(screen_surface, entity_screen_x, entity_screen_y, 16, 16)\r\n\r\n\r\ndef clear_element(screen_surface, screen_x, screen_y, element_width, element_height, colour=CLR_BLACK):\r\n element_surface = pygame.Surface((element_width, element_height))\r\n element_surface.fill(colour)\r\n screen_surface.blit(element_surface, (screen_x, screen_y))\r\n","repo_name":"Gazhole/PyGameRL","sub_path":"render_functions.py","file_name":"render_functions.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19173563113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTests for twisted_event_dispatcher module\n\"\"\"\nimport logging\nimport mock\n\nfrom twisted.trial import unittest\nfrom twisted.internet import defer\n\nfrom oni.twisted_event_dispatcher import EventDispatcher\n\nDEV_LOGGER = logging.getLogger(__name__)\n\n\nclass TestEventDispatcher(unittest.TestCase):\n '''\n Test EventDispatcher\n '''\n factory = EventDispatcher\n\n def setUp(self):\n '''setUp test'''\n self.inst = self.factory(('username', 'role'))\n self.inst.start()\n\n def tearDown(self):\n '''tearDown test'''\n return self.inst.stop()\n\n @staticmethod\n def listen_fn_mock():\n '''Mock for listen fn'''\n return mock.Mock(spec='__call__')\n\n @defer.inlineCallbacks\n def test_all_match(self):\n '''\n Test event handler is fired when all details match\n '''\n listen_fn = self.listen_fn_mock()\n\n yield self.inst.add_event_handler(listen_fn, 'during', username='bob', role='admin')\n event = 'some_event'\n\n yield self.inst.fire_event(event, username='bob', role='admin')\n\n listen_fn.assert_called_once_with(event)\n\n @defer.inlineCallbacks\n def test_no_match(self):\n '''\n Test handler doesn't fire when no details match\n '''\n listen_fn = self.listen_fn_mock()\n\n yield self.inst.add_event_handler(listen_fn, 'during', username='bob', 
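map_coords_to_pixels in the render_functions record above hard-codes a 16-pixel tile, which makes the inverse conversion (screen pixel back to map tile, e.g. for mouse clicks) a one-liner. A sketch, not part of that module:

TILE_SIZE = 16  # matches the hard-coded factor in map_coords_to_pixels

def pixels_to_map_coords(pixel_x, pixel_y):
    # Integer division recovers the tile that contains the pixel.
    return pixel_x // TILE_SIZE, pixel_y // TILE_SIZE

assert pixels_to_map_coords(*map_coords_to_pixels(10, 7)) == (10, 7)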
role='admin')\n event = 'some_event'\n\n yield self.inst.fire_event(event, username='susan', role='admin')\n\n self.assertFalse(listen_fn.called, 'function should not have been called')\n\n @defer.inlineCallbacks\n def test_partial_match(self):\n '''\n Test handler fires when one detail matches, and remaining details are unspecifed\n '''\n listen_fn = self.listen_fn_mock()\n\n yield self.inst.add_event_handler(listen_fn, 'during', role='admin')\n event = 'some_event'\n\n yield self.inst.fire_event(event, username='susan', role='admin')\n\n listen_fn.assert_called_once_with(event)\n\n @defer.inlineCallbacks\n def test_remove(self):\n '''\n Test a removed handler no longer fires\n '''\n listen_fn = self.listen_fn_mock()\n\n handle = yield self.inst.add_event_handler(\n listen_fn, 'during', username='bob', role='admin')\n event = 'some_event'\n\n yield self.inst.fire_event(event, username='bob', role='admin')\n\n yield self.inst.remove_event_handler(handle)\n\n listen_fn.assert_called_once_with(event)\n\n @defer.inlineCallbacks\n def test_auto_remove(self):\n '''\n Test that a function that is garbage collected no longer fires indicating it has\n been automatically removed from dispatcher\n '''\n listen_fn = self.listen_fn_mock()\n event = 'some_event'\n\n # Create lambda that can be deleted\n def listen_fn_wrapper(event):\n '''Just pass through to mock'''\n return listen_fn(event)\n\n yield self.inst.add_event_handler(\n listen_fn_wrapper, 'during', username='bob', role='admin')\n\n # Fire test\n yield self.inst.fire_event(event, username='bob', role='admin')\n listen_fn.assert_called_once_with(event)\n listen_fn.reset_mock()\n\n # Delete and fire again\n del listen_fn_wrapper\n\n yield self.inst.fire_event(event, username='bob', role='admin')\n\n self.assertFalse(listen_fn.called, 'function should not have been called')\n\n @defer.inlineCallbacks\n def test_auto_remove_with_instance_method(self):\n '''\n Test that a instance method that is garbage collected no longer fires indicating it has\n been automatically removed from dispatcher\n '''\n listen_fn = self.listen_fn_mock()\n event = 'some_event'\n\n # Create instance that can be deleted\n class TestClass(object):\n def listen_fn_wrapper(self, event):\n '''Just pass through to mock'''\n return listen_fn(event)\n\n inst = TestClass()\n\n yield self.inst.add_event_handler(\n inst.listen_fn_wrapper, 'during', username='bob', role='admin')\n\n # Fire test\n yield self.inst.fire_event(event, username='bob', role='admin')\n listen_fn.assert_called_once_with(event)\n listen_fn.reset_mock()\n\n # Delete and fire again\n del inst\n\n yield self.inst.fire_event(event, username='bob', role='admin')\n\n self.assertFalse(listen_fn.called, 'function should not have been called')\n\n @defer.inlineCallbacks\n def test_stop_start(self):\n '''\n Test that stopping and starting dispatcher prevent and allow handling as expected.\n '''\n listen_fn = self.listen_fn_mock()\n\n self.inst.add_event_handler(listen_fn, 'during', role='admin')\n event = 'some_event'\n self.inst.fire_event(event, username='susan', role='admin')\n\n yield self.inst.stop()\n\n listen_fn.assert_called_once_with(event)\n\n self.inst.fire_event(event, username='susan', role='admin')\n\n listen_fn.assert_called_once_with(event)\n\n self.inst.start()\n\n listen_fn.assert_called_once_with(event)\n\n yield self.inst.fire_event(event, username='susan', role='admin')\n\n self.assertEqual(listen_fn.call_count, 2)\n\n def test_invalid_spec(self):\n '''\n Test that specify a match spec that isn't 
present during setup raises an exception.\n        '''\n        listen_fn = self.listen_fn_mock()\n\n        self.assertRaises(\n            ValueError,\n            self.inst.add_event_handler,\n            listen_fn,\n            'during',\n            password='admin')\n","repo_name":"cscutcher/twisted_event_dispatcher","sub_path":"oni/twisted_event_dispatcher/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11201753493","text":"import pyautogui as pg\nimport webbrowser as web\nimport datetime\nfrom time import sleep\n\n# phone number used in the WhatsApp Web link\nphone_number = \"55_41992220452\"\n\n# scheduled times mapped to the message that should be sent at each one\nschedule = {\n    \"10:00\": \"Ola\",\n    \"11:00\": \"Oi\",\n    \"13:00\": \"Tudo bem?\",\n    \"15:00\": \"Lembre de tomar água\",\n}\n\n\ndef send_message(message):\n    # open WhatsApp Web with the message pre-filled, send it and close the tab\n    web.open(\"https://web.whatsapp.com/send?phone=\" + phone_number + \"&text=\" + message)\n    sleep(4)\n    width, height = pg.size()\n    pg.click(width / 2, height / 2)\n    sleep(7)\n    pg.press(\"esc\")\n    sleep(5)\n    pg.press(\"enter\")\n    sleep(10)\n    pg.hotkey(\"ctrl\", \"w\")\n\n\n# comparing formatted strings avoids the original equality test against a\n# datetime.time carrying seconds and microseconds, which almost never matched\nwhile True:\n    now_hour = datetime.datetime.now().strftime(\"%H:%M\")\n    if now_hour in schedule:\n        send_message(schedule[now_hour])\n        sleep(60)  # wait out the rest of the minute so each message is sent once\n    sleep(1)\n","repo_name":"danoliver1792/send_messages_whatsapp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15008030932","text":"import picamera\nfrom gpiozero import DistanceSensor\nfrom time import sleep\nsensor = DistanceSensor(23, 24)\ncamera = picamera.PiCamera()\ncamera.resolution = (640, 480)\ni = 0\nwhile True:\n    if sensor.distance < 1.0:\n        camera.capture(\"foo{i}.jpeg\".format(i=i))\n        i += 1\n    sleep(1)\n","repo_name":"kbenedek/XAMK_IoT_2021","sub_path":"4/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2170473918","text":"from flask import Blueprint, request, abort\nfrom models.user import User\nfrom models.friendship import Friendship\nfrom schemas.friendship_schema import FriendshipSchema\nfrom init import db, bcrypt\nfrom datetime import date, timedelta, datetime\nfrom sqlalchemy import select, or_\nfrom flask_jwt_extended 
import create_access_token, get_jwt_identity, jwt_required\nfrom utils import retrieve_resource_by_id, add_resource_to_db, confirm_authorisation, check_authentication, is_child\n\n\nfriendships_bp = Blueprint('friendships', __name__, url_prefix='/users/<int:user_id>/friendships')\n\n\n@friendships_bp.route('/', methods=['GET'])\n@jwt_required()\ndef read_friendships(user_id):\n check_authentication()\n # Sends a query to the db asking it to retrieve a user instance that has an id that matches the user_id provided as a parameter in the route.\n # If no user has that id, a customised error message will be sent with an appropriate status code.\n retrieve_resource_by_id(user_id, model=User, resource_type='user')\n # Sends a query to the db asking it to retrieve all friendship instances that have a user1_id or user2_id that matches the user_id provided as a parameter in the route.\n stmt = select(Friendship).where(or_(Friendship.user1_id == user_id, Friendship.user2_id == user_id))\n friendships = db.session.scalars(stmt)\n return FriendshipSchema(many=True).dump(friendships)\n\n\n@friendships_bp.route('/<int:friendship_id>', methods=['GET'])\n@jwt_required()\ndef read_friendship(user_id, friendship_id):\n check_authentication()\n # Sends a query to the db asking it to retrieve a user instance that has an id that matches the user_id provided as a parameter in the route.\n # If no user has that id, a customised error message will be sent with an appropriate status code.\n user = retrieve_resource_by_id(user_id, model=User, resource_type='user')\n # Sends a query to the db asking it to retrieve a friendship instance that has an id that matches the friendship_id provided as a parameter in the route.\n # If no friendship has that id, a customised error message will be sent with an appropriate status code.\n friendship = retrieve_resource_by_id(friendship_id, model=Friendship, resource_type='friendship')\n is_child(parent=user, child=friendship, id_str=['user1_id', 'user2_id'])\n return FriendshipSchema().dump(friendship)\n\n\n@friendships_bp.route('/', methods=['POST'])\n@jwt_required()\ndef create_friendship(user_id):\n check_authentication()\n # Sends a query to the db asking it to retrieve a user instance that has an id that matches the user_id provided as a parameter in the route.\n # If no user has that id, a customised error message will be sent with an appropriate status code.\n retrieve_resource_by_id(user_id, model=User, resource_type='user')\n \n jwt_id = int(get_jwt_identity())\n if jwt_id < user_id:\n smaller_user_id = jwt_id\n larger_user_id = user_id\n else:\n smaller_user_id = user_id\n larger_user_id = jwt_id\n\n friendship = Friendship(\n user1_id=smaller_user_id,\n user2_id=larger_user_id,\n requester=1 if jwt_id == smaller_user_id else 2,\n date_time=datetime.now()\n )\n # Sends a query to the db asking it to create a new row in the friendships table which maps to the friendship instance defined above.\n # During this process, if the constraints listed below are violated, an appropriate error message and status code will be sent in response.\n add_resource_to_db(friendship, constraint_errors_config=[\n ('friendship_users_uc', 409, 'These two users already have an existing friendship.'),\n ('friendship_user_ids_sorted_cc', 400, 'user1_id must be smaller than user2_id.')\n ])\n return FriendshipSchema().dump(friendship), 201\n\n\n\n@friendships_bp.route('/<int:friendship_id>', methods=['PUT', 'PATCH'])\n@jwt_required()\ndef update_friendship(user_id, friendship_id):\n 
check_authentication()\n friendship_data = FriendshipSchema().load(request.json, partial=True)\n # Sends a query to the db asking it to retrieve a user instance that has an id that matches the user_id provided as a parameter in the route.\n # If no user has that id, a customised error message will be sent with an appropriate status code.\n user = retrieve_resource_by_id(user_id, model=User, resource_type='user')\n # Sends a query to the db asking it to retrieve a friendship instance that has an id that matches the friendship_id provided as a parameter in the route.\n # If no friendship has that id, a customised error message will be sent with an appropriate status code.\n friendship = retrieve_resource_by_id(friendship_id, model=Friendship, resource_type='friendship')\n is_child(parent=user, child=friendship, id_str=['user1_id', 'user2_id'])\n confirm_authorisation(friendship, action='update', resource_type='friendship')\n friendship.status = friendship_data.get('status') if friendship_data.get('status') != None else friendship.status\n # Sends a query to the db asking it to update the row in the friendships table that maps to the friendship instance above and apply the same changes to it that were made to the model instance.\n db.session.commit()\n return FriendshipSchema().dump(friendship)\n\n\n@friendships_bp.route('/<int:friendship_id>', methods=['DELETE'])\n@jwt_required()\ndef delete_friendship(user_id, friendship_id):\n check_authentication()\n # Sends a query to the db asking it to retrieve a user instance that has an id that matches the user_id provided as a parameter in the route.\n # If no user has that id, a customised error message will be sent with an appropriate status code.\n user = retrieve_resource_by_id(user_id, model=User, resource_type='user')\n # Sends a query to the db asking it to retrieve a friendship instance that has an id that matches the friendship_id provided as a parameter in the route.\n # If no friendship has that id, a customised error message will be sent with an appropriate status code.\n friendship = retrieve_resource_by_id(friendship_id, model=Friendship, resource_type='friendship')\n is_child(parent=user, child=friendship, id_str=['user1_id', 'user2_id'])\n confirm_authorisation(friendship, action='delete', resource_type='friendship')\n # Sends a query to the db asking it to delete the row in the friendships table that maps to the provided friendship instance.\n db.session.delete(friendship)\n db.session.commit()\n return {'message': f'Friendship {friendship.id} deleted successfully'}","repo_name":"jfhaines/social-media-api","sub_path":"controllers/friendship_controller.py","file_name":"friendship_controller.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33952221432","text":"#!/usr/bin/env python\n\nimport botocore\nimport croniter\nimport datetime\nimport boto3\nimport string\n\n\n# return true if the cron schedule falls between now and now+seconds\ndef time_to_action(sched, now, seconds):\n try:\n cron = croniter.croniter(sched, now)\n d1 = now + datetime.timedelta(0, seconds)\n if (seconds > 0):\n d2 = cron.get_next(datetime.datetime)\n ret = (now < d2 and d2 < d1)\n else:\n d2 = cron.get_prev(datetime.datetime)\n ret = (d1 < d2 and d2 < now)\n\n except:\n ret = False\n\n return ret\n\nnow = datetime.datetime.now()\n\n\nfor region in boto3.session.Session().get_available_regions('rds'):\n\n \n try:\n client = boto3.client('rds', region)\n 
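create_friendship in the controller above always stores the smaller user id as user1_id so that the friendship_users_uc unique constraint catches both orderings of a pair. The same convention as a standalone helper (hypothetical, not part of the codebase; the first argument is the requesting user):

def canonical_pair(requester_id: int, other_id: int):
    # Mirrors the friendship_user_ids_sorted_cc check constraint:
    # returns (user1_id, user2_id, requester) with user1_id < user2_id.
    if requester_id == other_id:
        raise ValueError("a user cannot befriend themselves")
    if requester_id < other_id:
        return requester_id, other_id, 1
    return other_id, requester_id, 2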
response = client.describe_db_instances()\n start_list = []\n stop_list = []\n\n for resp in response['DBInstances']:\n db_instance_arn = resp['DBInstanceArn']\n tag_list = client.list_tags_for_resource(ResourceName=db_instance_arn)\n \n for tags in tag_list['TagList']: \n \n if tags['Key'] == 'auto:stop':\n tag_value = tags['Value']\n stop_sched = str.replace(tag_value,\"@\",\"*\",6)\n \n #check status is running\n if resp['DBInstanceStatus'] == 'available' and time_to_action(stop_sched, now, 11 * -60):\n instanceid = resp['DBInstanceIdentifier']\n client.stop_db_instance(DBInstanceIdentifier=instanceid)\n\n elif tags['Key'] == 'auto:start':\n tag_value = tags['Value']\n start_sched = str.replace(tag_value,\"@\",\"*\",6)\n\n #check status is running\n if resp['DBInstanceStatus'] == 'stopped' and time_to_action(start_sched, now, 11 * 60):\n instanceid = resp['DBInstanceIdentifier']\n client.start_db_instance(DBInstanceIdentifier=instanceid)\n\n #check for manual stopped instance that have been auto restarted and stop them\n db_events = client.describe_events(SourceType='db-instance')\n\n for db_event in db_events['Events']:\n\n\n sourcearn = db_event['SourceArn']\n while sourcearn.find(':') != -1:\n sourcearn = sourcearn.split(':', 1)[-1]\n\n response = client.describe_db_instances(DBInstanceIdentifier=sourcearn)\n\n if db_event['Message'] == 'DB instance is being started due to it exceeding the maximum allowed time being stopped.' and response['DBInstances'][0]['DBInstanceStatus'] == 'available':\n client.stop_db_instance(DBInstanceIdentifier=sourcearn)\n\n \n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'AuthFailure':\n continue\n elif e.response['Error']['Code'] == 'InvalidClientTokenId':\n continue\n else:\n print ('Exception error in %s: %s' % (region, e))\n\n except Exception as e:\n print ('Exception error in %s: %s' % (region, e))\n\n","repo_name":"aahzwww/aws_boto","sub_path":"rds_operator.py","file_name":"rds_operator.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33182136301","text":"def check(player_name,list):\r\n if len(list)==3:\r\n if list[0]+list[1]+list[2]==15:\r\n print(player_name,\" is the winner\\n the game is over.\")\r\n exit()\r\n elif len(list)==4:\r\n if list[0]+list[1]+list[3]==15 or list[0]+list[2]+list[3]==15 or list[1]+list[2]+list[3]==15 :\r\n print(player_name,\" is the winner\\n the game is over.\")\r\n exit()\r\n else:\r\n if list[0]+list[1]+list[4]==15 or list[0]+list[2]+list[4]==15 or list[0]+list[3]+list[4]==15 or list[2]+list[3]+list[4]==15 or list[1]+list[3]+list[4]==15 or list[1]+list[2]+list[4]==15 :\r\n print(player_name,\" is the winner\\nthe game is over.\")\r\n exit()\r\nnumbers_list=[1,2,3,4,5,6,7,8,9]\r\nplayer_number_1_list=[]\r\nplayer_number_2_list=[]\r\nplayer_number_1=input(\"please enter your name \")\r\nplayer_number_2=input(\"please enter your name \")\r\nwhile True:\r\n print(\"please \",player_number_1,\" pick up your number from this list \",numbers_list)\r\n a1=int(input())\r\n while a1 not in numbers_list:\r\n print(\"please \",player_number_1,\" try to pick up again \",numbers_list)\r\n a1=int(input())\r\n else:\r\n numbers_list.remove(a1)\r\n player_number_1_list.append(a1)\r\n if len(player_number_1_list)>=3:\r\n check(player_number_1,player_number_1_list) \r\n print(\"please \",player_number_2,\" pick up your number from this list \",numbers_list)\r\n b1=int(input())\r\n while b1 not 
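time_to_action in the rds_operator record above treats a positive seconds value as a look-ahead window and a negative one as a look-behind window around now. A worked check of the look-ahead case, with invented schedule and clock values:

import datetime
import croniter

sched = "0 8 * * *"  # fire daily at 08:00
now = datetime.datetime(2024, 1, 1, 7, 55)
next_fire = croniter.croniter(sched, now).get_next(datetime.datetime)
# True: the next 08:00 falls inside the 11-minute window the script uses
print(now < next_fire < now + datetime.timedelta(seconds=11 * 60))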
in numbers_list:\r\n        print(\"please \",player_number_2,\" try to pick up again \",numbers_list)\r\n        b1=int(input())\r\n    else:\r\n        numbers_list.remove(b1)\r\n        player_number_2_list.append(b1)\r\n        if len(player_number_2_list)>=3:\r\n            check(player_number_2,player_number_2_list)\r\n","repo_name":"AliiHassann/Number-scrabble-Game","sub_path":"20210243_3_Number scrabble.py","file_name":"20210243_3_Number scrabble.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28621787232","text":"def rabatt(betr, proz):\r\n    #print(betr, proz)\r\n    rabattBetrag=betr*proz/100\r\n    #print(rabattBetrag)\r\n    return rabattBetrag\r\n\r\ndef main():\r\n    betrag=float(input(\"Please enter the amount: \"))\r\n    rabattProzent=float(input(\"Please enter the discount percentage: \"))\r\n    # pass the amount first and the rate second, matching rabatt(betr, proz)\r\n    ergebnis=rabatt(betrag,rabattProzent)\r\n    print(\"Original amount:\", betrag, \"€,\")\r\n    print(\"Discount:\", ergebnis, \"€,\")\r\n    print(\"Final amount:\", betrag-ergebnis, \"€.\")\r\n    \r\nmain()","repo_name":"bymyselfstudio/PythonPlayground","sub_path":"RabattRechner.py","file_name":"RabattRechner.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22996838660","text":"class TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n\n    def isSibling(self, root, x, y):\n        if not root:\n            return False\n\n        return ((root.left and root.left.val == x and root.right and root.right.val == y) or\n                (root.left and root.left.val == y and root.right and root.right.val == x) or\n                self.isSibling(root.left, x, y) or\n                self.isSibling(root.right, x, y))\n\n    def level(self, root, p, lvl):\n        if not root:\n            return 0\n        if root.val == p:\n            return lvl\n\n        # Return level if Node is present in left subtree\n        l = self.level(root.left, p, lvl + 1)\n        if l != 0:\n            return l\n\n        # Else search in right subtree\n        return self.level(root.right, p, lvl + 1)\n\n    def isCousins(self, root, x: int, y: int) -> bool:\n        if ((self.level(root, x, 0) == self.level(root, y, 0)) and\n                not (self.isSibling(root, x, y))):\n            return True\n        else:\n            return False\n\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\nroot.right.left = TreeNode(6)\nroot.right.right = TreeNode(7)\n\nX = Solution()\nprint(X.isCousins(root,4,6))\n\n# Time Complexity : O(N) , N = Number of nodes in binary tree.\n# Space Complexity : O(N) , N = Number of nodes in binary tree.","repo_name":"anugrah18/Leetcode_solutions","sub_path":"Tree/993-CousinsInBinaryTree.py","file_name":"993-CousinsInBinaryTree.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38182528879","text":"import json\nimport logging\nimport os\n\n# See comment at our point of usage of subprocess on why this usage is ok\nimport subprocess # nosec\nimport sys\nfrom vscode_datascience_helpers.daemon.daemon_python import (\n    error_decorator,\n    PythonDaemon as BasePythonDaemon,\n    change_exec_context,\n)\n\n\nclass PythonDaemon(BasePythonDaemon):\n    def __init__(self, rx, tx):\n        super().__init__(rx, tx)\n        self.log.info(\"DataScience Daemon init\")\n\n    def __getitem__(self, item):\n        \"\"\"Override getitem to ensure we use these methods.\"\"\"\n        
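The LeetCode record above decides the cousins question with two recursive passes (a depth lookup plus a sibling scan). For comparison, the same O(N) check can be done in one breadth-first pass; this sketch reuses the record's TreeNode and is not the repo's solution:

from collections import deque

def is_cousins_bfs(root, x, y):
    if not root:
        return False
    queue = deque([root])
    while queue:
        found_x = found_y = False
        for _ in range(len(queue)):  # consume exactly one tree level
            node = queue.popleft()
            if node.left and node.right and {node.left.val, node.right.val} == {x, y}:
                return False  # siblings are never cousins
            found_x = found_x or node.val == x
            found_y = found_y or node.val == y
            queue.extend(child for child in (node.left, node.right) if child)
        if found_x or found_y:
            return found_x and found_y  # cousins iff both sit on this level
    return False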
self.log.info(\"Execute rpc method %s in DS Daemon\", item)\n return super().__getitem__(item)\n\n @error_decorator\n def m_exec_module(self, module_name, args=[], cwd=None, env=None):\n self.log.info(\"Exec in DS Daemon %s with args %s\", module_name, args)\n args = [] if args is None else args\n\n if module_name == \"jupyter\":\n if args[0] == \"kernelspec\" and self._is_module_installed(\n \"jupyter_client.kernelspec\"\n ):\n if args == [\"kernelspec\", \"list\", \"--json\"]:\n return self._execute_and_capture_output(\n self._print_kernel_list_json\n )\n elif args == [\"kernelspec\", \"list\"]:\n return self._execute_and_capture_output(self._print_kernel_list)\n elif args == [\"kernelspec\", \"--version\"]:\n return self._execute_and_capture_output(\n self._print_kernelspec_version\n )\n if (\n args[0] == \"nbconvert\"\n and self._is_module_installed(\"nbconvert\")\n and args[-1] != \"--version\"\n ):\n return self._execute_and_capture_output(lambda: self._convert(args))\n if args[0] == \"notebook\" and args[1] == \"--version\":\n try:\n from notebook import notebookapp as app\n\n return {\"stdout\": \".\".join(list(str(v) for v in app.version_info))}\n \"\"\" We specifically don't want to bubble up an error from --version so pass exception here \"\"\"\n except Exception: # nosec\n pass\n # kernelspec, nbconvert are subcommands of jupyter.\n # python -m jupyter kernelspec, python -m jupyter nbconvert,\n # In such cases, even if the modules kernelspec or nbconvert are not installed in the current\n # environment, jupyter will find them in current path.\n # So if we cannot find the corresponding subcommands, lets revert to subprocess.\n self.log.info(\n \"Exec in DS Daemon with as subprocess, %s with args %s\",\n module_name,\n args,\n )\n return self._exec_with_subprocess(module_name, args, cwd, env)\n else:\n self.log.info(\"check base class stuff\")\n return super().m_exec_module(module_name, args, cwd, env)\n\n def _exec_with_subprocess(self, module_name, args=[], cwd=None, env=None):\n # The usage here traced up to our execModule function on the daemon and only goes down this path for jupyter subcommands\n # in m_exec_module in this class if module_name == jupyter and we are in control of the args we know that we are safe here\n result = subprocess.run( # nosec\n [sys.executable, \"-m\", module_name] + args, capture_output=True\n )\n encoding = os.getenv(\"PYTHONIOENCODING\", \"utf-8\")\n stdout = result.stdout.decode(encoding)\n stderr = result.stderr.decode(encoding)\n self.log.info(\n \"subprocess output for, %s with args %s, \\nstdout is %s, \\nstderr is %s\",\n module_name,\n args,\n stdout,\n stderr,\n )\n return {\"stdout\": stdout, \"stderr\": stderr}\n\n @error_decorator\n def m_exec_module_observable(self, module_name, args=None, cwd=None, env=None):\n self.log.info(\n \"Exec in DS Daemon (observable) %s with args %s\", module_name, args\n )\n args = [] if args is None else args\n\n # Assumption is that `python -m jupyter notebook` or `python -m notebook` with observable output\n # will only ever be used to start a notebook and nothing else.\n # E.g. 
`python -m jupyter notebook --version` wouldn't require the use of exec_module_observable,\n # In such cases, we can get the output immediately.\n if (module_name == \"jupyter\" and args[0] == \"notebook\") or (\n module_name == \"notebook\"\n ):\n self._start_notebook(args, cwd, env)\n else:\n return super().m_exec_module_observable(module_name, args, cwd, env)\n\n def _print_kernelspec_version(self):\n import jupyter_client\n\n # Check whether kernelspec module exists.\n import jupyter_client.kernelspec\n\n sys.stdout.write(jupyter_client.__version__)\n sys.stdout.flush()\n\n def _print_kernel_list(self):\n self.log.info(\"listing kernels\")\n # Get kernel specs.\n import jupyter_client.kernelspec\n\n specs = jupyter_client.kernelspec.find_kernel_specs()\n sys.stdout.write(\n os.linesep.join(list(\"{0} {1}\".format(k, v) for k, v in specs.items()))\n )\n sys.stdout.flush()\n\n def _print_kernel_list_json(self):\n self.log.info(\"listing kernels as json\")\n # Get kernel specs.\n import jupyter_client.kernelspec\n\n specs = jupyter_client.kernelspec.KernelSpecManager().get_all_specs()\n all_specs = {\"kernelspecs\": specs}\n sys.stdout.write(json.dumps(all_specs))\n sys.stdout.flush()\n\n def _convert(self, args):\n self.log.info(\"Starting nbconvert wirth args %s\", args)\n from nbconvert import nbconvertapp as app\n\n try:\n sys.argv = [\"\"] + args\n app.main()\n except Exception as e:\n self.log.info(\"Nbconvert error: %s\", e)\n raise\n\n def _start_notebook(self, args, cwd, env):\n from notebook import notebookapp as app\n\n # Args must not have ['notebook'] in the begining. Drop the `notebook` subcommand when using `jupyter`\n args = args[1:] if args[0] == \"notebook\" else args\n self.log.info(\"Starting notebook with args %s\", args)\n\n # When launching notebook always ensure the first argument is `notebook`.\n with change_exec_context(args, cwd, env):\n app.launch_new_instance()\n","repo_name":"komeilkma/LHC-monitoring-control-system","sub_path":"src/jupyter_daemon.py","file_name":"jupyter_daemon.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"67"} +{"seq_id":"39850675825","text":"from torchvision.transforms import Normalize, ToTensor, Compose, Resize\n\n\nclass TransformDefault:\n\n @staticmethod\n def mnist(normalize=Normalize(mean=(0.1307,), std=(0.3081,))):\n transforms = [ToTensor()]\n if normalize:\n transforms.append(normalize)\n return Compose(transforms)\n\n @staticmethod\n def cifar10():\n normalize = Normalize(\n mean=(0.4914, 0.4822, 0.4465),\n std=(0.247, 0.243, 0.261)\n )\n return Compose([ToTensor(), normalize])\n\n @staticmethod\n def imagenet():\n normalize = Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n return Compose([Resize(size=(224, 224)), ToTensor(), normalize])\n","repo_name":"dizcza/pytorch-mighty","sub_path":"mighty/utils/data/transforms_default.py","file_name":"transforms_default.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"31082913961","text":"#EXCEPTION HANDLING\r\n\r\ntry:\r\n length = 10\r\n width = 0\r\n length/width\r\nexcept:\r\n print(\"Division by zero is invalid kindly change your input\")\r\n\r\ntry:\r\n length = 10\r\n width = 0\r\n length/width\r\nexcept ZeroDivisionError:\r\n print(\"Division by zero is invalid kindly change your input\")\r\n length = 10\r\n area=length/width\r\nprint(area)\r\n\r\ntry:\r\n 
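The TransformDefault helpers in the pytorch-mighty record above package the conventional per-dataset normalization constants. Hypothetical usage with a torchvision dataset (the ./data download root is an assumption):

from torchvision.datasets import MNIST

train_set = MNIST(root="./data", train=True, download=True,
                  transform=TransformDefault.mnist())
image, label = train_set[0]  # image is a normalized 1x28x28 float tensor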
length = 10\r\n length/width\r\nexcept NameError:\r\n print(\"variable has been used before defining it\")\r\nexcept ZeroDivisionError:\r\n print(\"Division by Zero is Invalid kindly change ur input\")\r\nexcept Exception:\r\n print(\"New error has occured\")\r\nfinally:\r\n print(\"i will be executed atleast one\")","repo_name":"SURYArolex/surya-project","sub_path":"exception handle.py","file_name":"exception handle.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34021822417","text":"from flask import Flask,session\nfrom flask_session import Session\nfrom application.models import db\nfrom os import urandom\nfrom application.graphs import Draw_eng,Draw_chem,Draw_maths,Draw_phy\n#---------------------------- Imports -----------------\n\napp = None\n\ndef create_app():\n app = Flask(__name__,template_folder=\"templates\")\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Student_DB.db'\n app.config[\"SESSION_PERMANENT\"] = False\n app.config[\"SESSION_TYPE\"] = \"filesystem\"\n Session(app)\n db.init_app(app)\n app.app_context().push()\n Draw_phy()\n Draw_maths()\n Draw_chem()\n Draw_eng()\n return app\n\napp = create_app()\n\nfrom application.views import *\n\nif __name__ == '__main__':\n app.debug = True\n app.secret_key = urandom(24)\n app.run(threaded=True, host='0.0.0.0', port=5000)\n","repo_name":"Combatant003/Flask_app_with_Terraform","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38427383297","text":"import paho.mqtt.publish as publish\nimport RPi.GPIO as GPIO\nimport time\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--litros-pulso\", help=\"litros por pulso\",\n type=int)\nparser.add_argument(\"--pin\", help=\"litros por pulso\",\n type=int, default=24)\nargs = parser.parse_args()\n\nprint(\"Usando litros pulso: \" + str(args.litros_pulso))\n\nlitros_pulso = 0\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nwhile True:\n Pulso_cont_1 = GPIO.input(args.pin)\n P_C_1 = not Pulso_cont_1\n \n if P_C_1:\n litros_pulso = litros_pulso + args.litros_pulso\n print(time.strftime(\"%d/%m/%Y-%H:%M\"))\n print(litros_pulso, 'litros')\n \n minuto = time.strftime(\"%M:%S\")\n if minuto in('00:00', '15:00', '30:00', '45:00'):\n print('Datos enviados')\n publish.single(\"GPIO\", litros_pulso, hostname=\"192.168.168.152\")\n litros_pulso = 0\n \n time.sleep(1)\n \n\n\n","repo_name":"marcosperezsouto/marcos","sub_path":"leer_completo.py","file_name":"leer_completo.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20019463132","text":"from rest_framework import serializers\n\nfrom runner.models.execution import Execution\n\n\nclass ExecutionSerializer(serializers.ModelSerializer):\n execution_steps = serializers.PrimaryKeyRelatedField(\n many=True, read_only=True, allow_empty=True\n )\n class Meta:\n model = Execution\n fields = (\n \"id\",\n \"pytest_id\",\n \"pytest_node\",\n \"machine\",\n \"execution_steps\",\n )\n read_only_fields = (\"id\", 
\"execution_steps\")","repo_name":"marcelotrevisani/nnatest_api","sub_path":"nnatest_api/runner/serializers/execution_serializer.py","file_name":"execution_serializer.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20965458261","text":"import re\n\nfrom .. import reporting\nreport = reporting.get_report(\"preprocessor.common\")\n\nclass PreprocessorError(Exception):\n \"\"\" Custom Exception type that allows preprocessing errors to be reported \"\"\"\n\n def __init__(self, message, path=None):\n \"\"\" Initialise the validation error.\n\n Args:\n message: The exception message\n path : Path to the file being processed\n \"\"\"\n super().__init__(message)\n self.path = path\n\n# Define regular expressions used to extract different macros\npreprocessor_regex = {\n 'include': re.compile(r\"^[ ]{0,}#(include)[ ]{0,}(.*?)$\"),\n 'define' : re.compile(r\"^[ ]{0,}#(define)[ ]{0,}(.*?)$\"),\n # IF Blocks\n 'if' : re.compile(r\"^[ ]{0,}#(if|ifdef|ifndef)[\\s]+(.*?)(?:# (.*?))?$\"),\n 'elif' : re.compile(r\"^[ ]{0,}#(elif|elseif)[\\s]{0,}(.*?)$\"),\n 'endif' : re.compile(r\"^[ ]{0,}#(endif)[ ]{0,}\"),\n 'else' : re.compile(r\"^[ ]{0,}#(else)[ ]{0,}$\"),\n # FOR Blocks\n 'for' : re.compile(r\"^[ ]{0,}#(for)[\\s]+([^\\s]+[\\s]+in[\\s]+.*?)[\\s]{0,}(?:[:])?[\\s]{0,}$\"),\n 'endfor' : re.compile(r\"^[ ]{0,}#(endfor)\"),\n}\n\ndef evaluate_expression(expression, file):\n \"\"\"\n Evaluate an expression with access to the preprocessor scope of #define'd\n constants.\n\n Args:\n expression: The expression to evaluate\n file : The PreprocessorFile instance (to get values!)\n \"\"\"\n # Check this isn't already a number\n if not isinstance(expression, str):\n return expression\n elif expression.replace('.','').isdigit():\n return float(expression) if '.' 
in expression else int(expression)\n # Try to find and replace all constants encased in brackets '<MYCONST>'\n matches = re.findall(r\"[<]([A-Za-z]{1}[A-Za-z0-9_]+)[>]\", expression)\n sanitised = expression\n for match in matches:\n value = file.resolve_value(match)\n if not value:\n raise ValueError(report.error(\n f\"Could not resolve value for '{match}' in expression \"\n f\"'{expression}' in file: {file.path}\"\n ))\n # NOTE: Only replace the first occurrence to avoid partial replacement!\n sanitised = sanitised.replace(f\"<{match}>\", str(value), 1)\n # Now try to find and replace any constants not enclosed in brackets 'MYCONST'\n matches = re.findall(r\"[^A-Za-z0-9<]{0,1}([A-Za-z]{1}[A-Za-z0-9_]+)[^A-Za-z0-9>]{0,1}\", expression)\n for match in matches:\n value = file.resolve_value(match)\n # NOTE: Only replace the first occurrence to avoid partial replacement!\n if value:\n sanitised = sanitised.replace(str(match), str(value), 1)\n # Evaluate the sanitised expression\n try:\n result = eval(sanitised)\n except:\n raise ValueError(report.error(\n f\"Failed to resolve expression '{expression}' in file: {file.path}\"\n ))\n return result\n","repo_name":"bluwireless/blade","sub_path":"blade/preprocessor/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"5069575620","text":"from functools import reduce\nimport socket\ndef str_upper(str):\n value = str.lower()\n value = value.title()\n return value\n\n\nl2 = map(str_upper, ['adam', 'LISA', 'barT'])\n\ns= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ns.connect(('www.sina.com.cn', 80))\n\ns.send(b'GET / HTTP/1.1\\r\\nHost: www.sina.com.cn\\r\\nConnection: close\\r\\n\\r\\n')\n\n# receive date:\nbuffer = []\nwhile True:\n # receive max date each 1kb\n d = s.recv(1024)\n if d:\n buffer.append(d)\n else:\n break\ndata = b''.join(buffer)\ns.close()\nheader, html = data.split(b'\\r\\n\\r\\n', 1)\nprint(header.decode('utf-8'))\n\nwith open('sina.html', 'wb') as f:\n f.write(html)","repo_name":"zigzodiac/web-project","sub_path":"Ftptest/test/pythonstudy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23587726748","text":"#!/usr/bin/python3.8\n\nimport json\nfrom os import path\nimport random\nimport string\nimport yaml\nimport argparse\nfrom kubernetes.client.rest import ApiException\nfrom kubernetes import client, config\nimport logging\n\nlogging.DEBUG = 1\n\ndef select_pod(ns, label, k8s_api_v1):\n return k8s_api_v1.list_namespaced_pod(ns, label_selector=label)\n\ndef get_logs(label, ns):\n \n logs = {}\n\n try:\n k8s_api_v1 = client.CoreV1Api()\n \n ret = select_pod(ns, label, k8s_api_v1)\n #print(ret)\n for pod in ret.items:\n #print(pod)\n pod_name = pod.metadata.name\n log_resp = k8s_api_v1.read_namespaced_pod_log(name=pod_name, namespace=ns, container=label.split('=', 1)[1]) # assuming container name is same as label name.\n describe_resp = k8s_api_v1.list_namespaced_event(ns, field_selector=f'involvedObject.name={pod_name}')\n logging.debug('Retrived logs for pod {0}'.format(pod_name))\n logs[pod_name] = (log_resp, describe_resp)\n \n return logs\n except ApiException as e:\n logging.error('Found exception in reading the logs \\n {0}'.format(e))\n\n\ndef write_logs(logs):\n\n for name, (log, describe) in logs.items():\n\n with open(path.join(path.dirname(__file__), 
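evaluate_expression in the blade preprocessor record above only needs a path attribute and a resolve_value method from its file argument, so a stub suffices to exercise the <CONST> substitution; the constant names and values below are invented:

class StubFile:
    path = "example.txt"

    def resolve_value(self, name):
        # Pretend these constants were #define'd earlier in the file.
        return {"WIDTH": 8, "DEPTH": 4}.get(name)

print(evaluate_expression("<WIDTH> * <DEPTH> + 1", StubFile()))  # -> 33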
'{0}_log'.format(name),), 'w') as log_file:\n log_file.write(log)\n logging.debug('Wrote logfile {0}_log'.format(name))\n\n with open(path.join(path.dirname(__file__), '{0}_describe'.format(name),), 'w') as describe_file:\n describe_file.write(str(describe.items))\n logging.debug('Wrote describefile {0}_describe'.format(name)) \n \n\ndef load_experiment(exp):\n\n with open(path.join(path.dirname(__file__), exp)) as exp_file:\n return yaml.safe_load(exp_file)\n\ndef kill_pod(label, ns, exp):\n\n write_logs(get_logs(label, ns))\n\n exp_yaml = load_experiment(exp)\n\n name = exp_yaml['metadata']['name']\n exp_yaml['metadata']['name'] = name+'-'+''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))\n exp_yaml['metadata']['namespace'] = ns\n exp_yaml['spec']['selector']['namespaces'] = [ns]\n exp_yaml['spec']['selector']['labelSelectors'] = {label.split('=', 1)[0]: label.split('=', 1)[1]}\n\n crd_api = client.CustomObjectsApi()\n\n group = exp_yaml['apiVersion'].split('/', 1)[0]\n version = exp_yaml['apiVersion'].split('/', 1)[1]\n plural = exp_yaml['kind'].lower()\n \n\n \n try:\n\n crd_chaos = crd_api.create_namespaced_custom_object(\n group, version, ns, plural, exp_yaml, _preload_content=False\n )\n return json.loads(crd_chaos.data)\n except ApiException as e:\n if e.status == 409:\n logging.debug(\"Custom resource object {0}/{1} already exists\".format(group, version))\n return json.loads(e.body)\n else:\n raise Exception(\n \"Failed to create custom resource object: '{0}' {1}\".format(e.reason, e.body)\n )\n\ndef main():\n\n config.load_kube_config()\n\n parser = argparse.ArgumentParser(prog = 'init-chaos-test',\n description = 'Launches a preconfigured chaos-mesh experiment.')\n parser.add_argument('-e', '--experiment', type=str, dest='exp', required=True,\n help='The yaml/json experiment to launch.')\n parser.add_argument('-ns', '--namespace', type=str, dest='ns', default='default',\n help='The target namespace')\n parser.add_argument('-l', '--label', type=str, dest='label',\n help='The label of the pod to be killed. 
Example app=checkout.')\n\n args = parser.parse_args()\n \n resp = kill_pod(args.label, args.ns, args.exp)\n\n print('Created chaos pod with name \"{0}\" in namespace \"{1}\" with labelSelectors: \"{2}\"'\n .format(resp['metadata']['name'], resp['metadata']['namespace'], resp['spec']['selector']['labelSelectors']))\n\nif __name__ == '__main__':\n main()","repo_name":"Sam97ish/py-chaosmesh-ci-scirpt","sub_path":"init-chaos-test.py","file_name":"init-chaos-test.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37881985797","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport random\nimport numpy as np\nimport os\nimport cv2\nimport tensorflow as tf\n\nfrom art.attacks.evasion import DeepFool, UniversalPerturbation, SquareAttack, BoundaryAttack\nfrom art.estimators.classification import KerasClassifier, SklearnClassifier\nfrom sklearn.svm import SVC\nfrom numpy import save\nfrom PIL import Image\n\n# read in images and class labels for training data\ndef create_training_data():\n for categories in CATEGORIES:\n path = os.path.join(DATADIR_TRAIN, categories)\n class_num = CATEGORIES.index(categories)\n for img in os.listdir(path):\n try:\n # img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n img_array = np.array(Image.open(os.path.join(path, img)).convert('RGB')) # remove the a channel\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([new_array, class_num])\n except Exception as e:\n pass\n\n#read in images and class labels for test data\ndef create_testing_data():\n path = DATADIR_TEST\n for img in os.listdir(path):\n try:\n class_name = img.split('_')[0]\n class_num = CATEGORIES.index(class_name)\n # img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n img_array = np.array(Image.open(os.path.join(path, img)).convert('RGB')) # remove the a channel\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n testing_data.append([new_array, class_num])\n except Exception as e:\n pass\n\n\ndef transform2Grey(input_image):\n \"\"\"perform the transformation and return an array\"\"\"\n return np.array([create_features(img) for img in input_image])\n\n\ndef create_features(img):\n color_features = img.flatten()\n flat_features = np.hstack(color_features)\n return flat_features\n\n\ndef convertLabel(labels):\n outputLabels = []\n zeroListTemplate = [0] * (max(labels) + 1)\n for label in labels:\n newList = zeroListTemplate.copy()\n newList[label] = 1\n outputLabels.append(newList)\n\n return outputLabels\n\n# Set training dataset directory and limiting the numbers to 2 category\nDATADIR_TRAIN = os.path.dirname(os.path.split(os.getcwd())[0]) + r\"/Final_Data/Train\"\nDATADIR_TEST = os.path.dirname(os.path.split(os.getcwd())[0]) + r\"/Final_Data/Test\"\nCATEGORIES = ['i2', 'i4', 'i5', 'io', 'p11', 'p26', 'pl5', 'pl30', 'pl40', 'pl50']\nIMG_SIZE = 50\n\ntf.compat.v1.disable_eager_execution()\n\ntraining_data = []\ntesting_data = []\ncreate_training_data()\ncreate_testing_data()\nrandom.shuffle(training_data)\n\nX = []\ny = []\nX_testing = []\ny_testing = []\n\nfor features, label in training_data:\n X.append(features)\n y.append(label)\n\nfor features, label in testing_data:\n X_testing.append(features)\n y_testing.append(label)\n\n\nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\nX = X / 255\nX_testing = np.array(X_testing).reshape(-1, IMG_SIZE, IMG_SIZE, 
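kill_pod in the chaos-test record above uniquifies each experiment name with a random lowercase/digit suffix before submitting the custom resource. The same idiom extracted as a helper (a sketch; unique_name is not part of the script):

import random
import string

def unique_name(base, length=6):
    # Random suffix drawn from the same alphabet kill_pod uses.
    alphabet = string.ascii_lowercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(length))
    return f"{base}-{suffix}"

print(unique_name("pod-kill-chaos"))  # e.g. pod-kill-chaos-a3k9x2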
3)\nX_testing = X_testing / 255\n\nx_train = X\ny_train = y\nx_test = X_testing\ny_test = y_testing\n","repo_name":"jemzhang98/adversarial-attack","sub_path":"traffic-sign-classification/3-attack/CreateTrainTestFromFinalData.py","file_name":"CreateTrainTestFromFinalData.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25064151942","text":"import itertools\n\nimport numpy as np\nfrom collections import defaultdict\nimport lightgbm as lgbm\n\n\nclass IlmartDistill:\n def __init__(self, model: lgbm.Booster, distill_mode=\"full\", n_sample=None):\n self.model = model\n self.feat_name_to_index = {feat: i for i, feat in enumerate(self.model.dump_model()['feature_names'])}\n self.feat_min = {}\n self.feat_max = {}\n for feat_name, feat_info in self.model.dump_model()[\"feature_infos\"].items():\n feat_index = self.feat_name_to_index[feat_name]\n feat_range = feat_info[\"max_value\"] - feat_info[\"min_value\"]\n self.feat_min[feat_index] = feat_info[\"min_value\"] - feat_range * 0.5\n self.feat_max[feat_index] = feat_info[\"max_value\"] + feat_range * 0.5\n self.n_sample = n_sample\n self.distill_mode = distill_mode\n\n # To be computed later\n self.hist = None\n self.splitting_values = None\n self.__create_hist_dict()\n\n def __compute_hist(self, tree_structure: dict, feat_used: tuple, feat_min_max=None):\n if feat_min_max is None:\n feat_min_max = np.array([[self.feat_min[feat], self.feat_max[feat]] for feat in feat_used], dtype='f')\n if \"leaf_index\" in tree_structure:\n limits = []\n for i, feat in enumerate(feat_used):\n start = np.nonzero(np.isclose(self.splitting_values[feat], feat_min_max[i][0]))[0][0]\n try:\n end = np.nonzero(np.isclose(self.splitting_values[feat], feat_min_max[i][1]))[0][0]\n except Exception as e:\n end = len(self.splitting_values[feat]) - 1\n limits.append((start, end))\n\n selection = self.hist[feat_used]\n slicing = tuple([slice(start, end) for (start, end) in limits])\n selection[slicing] += tree_structure[\"leaf_value\"]\n return\n\n split_index = feat_used.index(tree_structure[\"split_feature\"])\n if \"left_child\" in tree_structure:\n new_min_max = np.copy(feat_min_max)\n new_min_max[split_index][1] = min(new_min_max[split_index][1], tree_structure[\"threshold\"])\n self.__compute_hist(tree_structure[\"left_child\"], feat_used, feat_min_max=new_min_max)\n\n if \"right_child\" in tree_structure:\n new_min_max = np.copy(feat_min_max)\n new_min_max[split_index][0] = max(new_min_max[split_index][0], tree_structure[\"threshold\"])\n self.__compute_hist(tree_structure[\"right_child\"], feat_used, feat_min_max=new_min_max)\n\n return\n\n @staticmethod\n def __splitting_values(tree_structure, splitting_values_forest, feat_used=None):\n split_feat = tree_structure.get(\"split_feature\", None)\n if split_feat is None:\n return feat_used\n if feat_used is None:\n feat_used = set()\n feat_used.add(split_feat)\n splitting_values_forest[split_feat].add(tree_structure[\"threshold\"])\n IlmartDistill.__splitting_values(tree_structure[\"left_child\"], splitting_values_forest, feat_used)\n IlmartDistill.__splitting_values(tree_structure[\"right_child\"], splitting_values_forest, feat_used)\n return feat_used\n\n def __create_hist_dict(self):\n self.hist = {}\n feats_used = []\n tree_infos = self.model.dump_model()[\"tree_info\"]\n\n splitting_values_set = defaultdict(set)\n self.splitting_values = {}\n\n # Retrive all the splitting values\n for tree_info in tree_infos:\n 
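# NOTE (illustrative sketch): the loop below visits every tree once,
            # recording the set of thresholds used per feature. For a hypothetical
            # two-tree model that splits feature 3 at 0.5 and again at 1.5:
            #   splitting_values_set == {3: {0.5, 1.5}}
            #   feats_used == [{3}, {3}]   # one set of split features per tree
            # In "full" mode the padded feature min/max are then appended, so the
            # sorted edges span the whole (extended) feature range before the
            # histogram bins are allocated.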
tree_structure = tree_info[\"tree_structure\"]\n feats_used.append(IlmartDistill.__splitting_values(tree_structure, splitting_values_set))\n\n if self.distill_mode == \"full\":\n # Add maximum and minimum to have the complete range\n for feat in splitting_values_set.keys():\n splitting_values_set[feat].add(self.feat_max[feat])\n splitting_values_set[feat].add(self.feat_min[feat])\n\n # From the set created to a numpy array with all the values and saved on the current object\n for feat, values in splitting_values_set.items():\n self.splitting_values[feat] = np.array(sorted(list(splitting_values_set[feat])))\n else:\n feat_infos = self.model.dump_model()[\"feature_infos\"]\n for feat, infos in feat_infos.items():\n feat_i = self.feat_name_to_index[feat]\n # self.n_sample + 1 because we want exactly self.n_sample bins\n step = (self.feat_max[feat_i] - self.feat_min[feat_i]) / (self.n_sample + 1)\n self.splitting_values[feat_i] = np.arange(self.feat_min[feat_i], self.feat_max[feat_i], step)\n\n # Create a numpy array with shape corresponding to the feature dimension\n for feat_used in feats_used:\n feats_key = tuple(sorted(feat_used))\n if feats_key not in self.hist:\n shape = tuple([len(self.splitting_values[feat]) - 1 for feat in feats_key])\n self.hist[feats_key] = np.zeros(shape)\n\n # Compute hist for each tree\n if self.distill_mode == \"full\":\n for tree_info, feats in zip(tree_infos, feats_used):\n tree_structure = tree_info[\"tree_structure\"]\n feats_key = tuple(sorted(feats))\n self.__compute_hist(tree_structure, feats_key)\n else:\n for feats_used in self.hist.keys():\n mid_points = [(self.splitting_values[feat_used][1:] + self.splitting_values[feat_used][:-1]) / 2\n for feat_used in feats_used]\n for coord, value in enumerate(itertools.product(*mid_points)):\n sample = np.zeros(self.model.num_feature())\n for i, feat_i in enumerate(feats_used):\n sample[feat_i] = value[i]\n\n sample = sample.reshape((1, -1))\n if len(feats_used) == 1:\n self.hist[feats_used][coord] = self.model.predict(sample)\n else:\n self.hist[feats_used][coord // self.n_sample, coord % self.n_sample] = self.model.predict(\n sample)\n\n @staticmethod\n def __predict(row, model, interactions_limit=-1):\n res = 0\n interaction_to_exclude = []\n if interactions_limit != -1:\n inter_contrib = [(feats, value)for feats, value in model.expected_contribution().items() if len(feats) > 1]\n inter_contrib.sort(key=lambda x: x[1], reverse=True)\n interaction_to_exclude = [feats for feats, value in inter_contrib[interactions_limit:]]\n for feats_hist, hist in model.hist.items():\n if feats_hist in interaction_to_exclude:\n continue\n indices = []\n for feat in feats_hist:\n index_to_add = np.searchsorted(model.splitting_values[feat], row[feat])\n index_to_add -= 1\n index_to_add = max(0, index_to_add)\n index_to_add = min(len(model.splitting_values[feat]) - 2, index_to_add)\n indices.append(index_to_add)\n res += hist[tuple(indices)]\n return res\n\n def predict(self, X, interactions_limit=-1):\n res = np.apply_along_axis(IlmartDistill.__predict, 1, X, self, interactions_limit=interactions_limit)\n return res\n\n def expected_contribution(self):\n return {feats: np.abs(hist).mean() for feats, hist in self.hist.items()}\n","repo_name":"veneres/ilmart","sub_path":"src/ilmart/ilmart_distill.py","file_name":"ilmart_distill.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"14241049919","text":"import random\n\ndef 
check(numGuessed,theNum):\n\tcows=int(0)\n\tbulls=int(0)\n\tfor i in range(4):\n\t\tif(numGuessed[i]==theNum[i]):\n\t\t\tcows+=1\n\t\telse:\n\t\t\tbulls+=1\n\n\tprint(\"The Num : \",theNum,\"\\nGuessed num : \",numGuessed)\n\tprint(cows,\" :cows\",bulls,\" :bulls\")\n\n\n\n\nnum=int(input(\"Enter a number\\n\"))\nRnum=int(random.randint(0000,9999))\ncheck(str(num),str(Rnum))","repo_name":"DADDY-DOUBLESHOT/Python","sub_path":"cows_bulls.py","file_name":"cows_bulls.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12927063067","text":"\"\"\" Formal grammar of Tiny Basic (https://en.wikipedia.org/wiki/Tiny_BASIC)\n line ::= number statement CR | statement CR\n statement ::= PRINT expr-list\n IF expression operator expression THEN statement\n GOTO number\n INPUT var-list\n LET var = expression\n GOSUB number\n RETURN\n CLEAR\n LIST\n RUN\n END\n expr-list ::= (string|expression) (, (string|expression) )*\n var-list ::= var (, var)*\n expression ::= term ((+|-) term)*\n term ::= factor ((*|/) factor)*\n factor ::= var | number | (expression)\n var ::= A | B | C ... | Y | Z\n number ::= digit digit*\n digit ::= 0 | 1 | 2 | 3 | ... | 8 | 9\n operator ::= < (>|=|ε) | > (<|=|ε) | =\n string ::= \" (a|b|c ... |x|y|z|A|B|C ... |X|Y|Z|digit)* \" \"\"\"\nfrom enum import Enum\nfrom collections import defaultdict\nfrom collections import namedtuple\nimport sys\nimport bastors.lex as lex\n\n\nclass ConditionEnum(Enum):\n \"\"\"Represents the types of a condition, used in if statements or loops\"\"\"\n\n INITIAL = 0\n AND = 1\n OR = 2\n\n\nVariableCondition = namedtuple(\"VariableCondition\", [\"var\", \"type\"])\nNotVariableCondition = namedtuple(\"NotVariableCondition\", [\"var\", \"type\"])\nTrueFalseCondition = namedtuple(\"TrueFalseCondition\", [\"value\", \"type\"])\nCondition = namedtuple(\"Condition\", [\"left\", \"operator\", \"right\", \"type\"])\n\n\ndef invert_conditions(conditions):\n \"\"\"\n Use De Morgan's law to perform not(conditions):\n not(A and B) = not A or not B\n not(A or B) = not A and not B\n \"\"\"\n table = {\"<>\": \"=\", \"<=\": \">\", \">=\": \"<\"}\n inv_conds = []\n for cond in conditions:\n if isinstance(cond, VariableCondition):\n inv_conds.append(NotVariableCondition(cond.var, cond.type))\n continue\n\n if isinstance(cond, NotVariableCondition):\n inv_conds.append(VariableCondition(cond.var, cond.type))\n continue\n\n if isinstance(cond, TrueFalseCondition):\n if cond.value == \"true\":\n inv_conds.append(TrueFalseCondition(\"false\", cond.type))\n else:\n inv_conds.append(TrueFalseCondition(\"true\", cond.type))\n continue\n\n # invert the relation\n for op1, op2 in table.items():\n if cond.operator == op1:\n new_op = op2\n break\n if cond.operator == op2:\n new_op = op1\n break\n\n # Turn AND to OR and OR to AND\n new_type = cond.type\n if cond.type == ConditionEnum.AND:\n new_type = ConditionEnum.OR\n elif cond.type == ConditionEnum.OR:\n new_type = ConditionEnum.AND\n\n inv_conds.append(Condition(cond.left, new_op, cond.right, new_type))\n\n return inv_conds\n\n\n#\n# These are the statements that we currently construct from TinyBasic\n#\nProgram = namedtuple(\"Program\", \"statements\")\nLet = namedtuple(\"Let\", [\"label\", \"lval\", \"rval\"])\nIf = namedtuple(\"If\", [\"label\", \"conditions\", \"statements\"])\nGoto = namedtuple(\"Goto\", [\"label\", \"target_label\"])\nPrint = namedtuple(\"Print\", [\"label\", \"exp_list\"])\nGosub = namedtuple(\"Gosub\", 
[\"label\", \"target_label\"])\nReturn = namedtuple(\"Return\", [\"label\"])\nInput = namedtuple(\"Input\", [\"label\", \"variables\"])\nFor = namedtuple(\"For\", [\"var\", \"start\", \"stop\", \"step\", \"statements\", \"label\"])\nNext = namedtuple(\"Next\", [\"label\"])\nEnd = namedtuple(\"End\", [\"label\"])\n\nArithmeticExpression = namedtuple(\"ArithmeticExpression\", [\"left\", \"operator\", \"right\"])\nBooleanExpression = namedtuple(\"BooleanExpression\", [\"conditions\"])\nVariableExpression = namedtuple(\"VariableExpression\", [\"var\"])\nNotExpression = namedtuple(\"NotExpression\", [\"exp\"])\nParenExpression = namedtuple(\"ParenExpression\", [\"exp\"])\n\n\nclass ParseError(Exception):\n \"\"\" An error while parsing TinyBasic \"\"\"\n\n def __init__(self, message, line, col):\n super(ParseError, self).__init__(message)\n self.line = line\n self.col = col\n\n\nclass Parser: # pylint: disable=too-few-public-methods\n \"\"\" The parse() method consumes tokens from the lexer (lex.py) and attempts\n to parse TinyBasic from that and will generate a SyntaxTreeish\n structure that can be used to generate Rust later on. \"\"\"\n\n def __init__(self, code):\n self._code = code\n self._statements = defaultdict(list)\n self._context = \"main\"\n self._current_token = None\n self._token_iter = None\n self.functions = dict()\n\n def __parse_error(self, msg):\n token = self._current_token\n raise ParseError(\n \"error: %s [%d:%d]\" % (msg, token.line, token.col), token.line, token.col\n )\n\n def __eat(self, token_type):\n if self._current_token.type == token_type:\n try:\n self._current_token = next(self._token_iter)\n except StopIteration:\n self._current_token = lex.Token(\"EOF\", lex.TokenEnum.EOF, -1, -1)\n else:\n self.__parse_error(\n \"expected token %s was %s\" % (token_type, self._current_token.type)\n )\n\n def __parse_factor(self):\n \"\"\"factor ::= var | number | (expression)\"\"\"\n token = self._current_token\n if token.type == lex.TokenEnum.VARIABLE:\n self.__eat(lex.TokenEnum.VARIABLE)\n return VariableExpression(\n token.value.lower()\n ) # Rust does not like CAPS variables\n if token.type == lex.TokenEnum.NUMBER:\n self.__eat(lex.TokenEnum.NUMBER)\n return token.value\n if token.type == lex.TokenEnum.LPAREN:\n self.__eat(lex.TokenEnum.LPAREN)\n node = self.__parse_exp()\n self.__eat(lex.TokenEnum.RPAREN)\n return ParenExpression(node)\n return None\n\n def __parse_term(self):\n \"\"\"term ::= factor ((*|/) factor)*\"\"\"\n node = self.__parse_factor()\n\n while self._current_token.value in (\"*\", \"/\"):\n token = self._current_token\n self.__eat(lex.TokenEnum.ARITHMETIC_OP)\n factor = self.__parse_factor()\n node = ArithmeticExpression(node, token.value, factor)\n\n return node\n\n def __parse_exp(self):\n \"\"\"\n expression ::= term ((+|-) term)*\n term ::= factor ((*|/) factor)*\n factor ::= var | number | (expression)\n \"\"\"\n node = self.__parse_term()\n\n while self._current_token.value in (\"-\", \"+\"):\n token = self._current_token\n self.__eat(lex.TokenEnum.ARITHMETIC_OP)\n node = ArithmeticExpression(node, token.value, self.__parse_term())\n\n return node\n\n def __parse_let(self, label):\n \"\"\"\n LET var = expression\n \"\"\"\n lval = VariableExpression(self._current_token.value.lower())\n self.__eat(lex.TokenEnum.VARIABLE)\n\n if not self._current_token.value == \"=\":\n self.__parse_error(\"expected assign operator (=)\")\n\n self.__eat(lex.TokenEnum.RELATION_OP)\n\n rval = self.__parse_exp()\n\n return Let(label, lval, rval)\n\n def 
__parse_print(self, label):\n \"\"\"\n PRINT expr-list\n expr-list ::= (string|expression) (, (string|expression) )*\n \"\"\"\n exp_list = []\n\n while True:\n if self._current_token.type == lex.TokenEnum.STRING:\n exp_list.append(self._current_token.value)\n self.__eat(lex.TokenEnum.STRING)\n else:\n exp_list.append(self.__parse_exp())\n\n if self._current_token.type == lex.TokenEnum.COMMA:\n self.__eat(lex.TokenEnum.COMMA)\n else:\n break\n\n return Print(label, exp_list)\n\n def __parse_if(self, conditions, label):\n \"\"\"\n IF expression operator expression THEN statement\n operator ::= < (>|=|ε) | > (<|=|ε) | =\n \"\"\"\n left = self.__parse_exp()\n relop = self._current_token.value\n self.__eat(lex.TokenEnum.RELATION_OP)\n right = self.__parse_exp()\n\n if conditions is None:\n conditions = [Condition(left, relop, right, ConditionEnum.INITIAL)]\n else:\n cond_type = ConditionEnum.AND\n conditions.append(Condition(left, relop, right, cond_type))\n\n if self._current_token.value != \"THEN\":\n line = self._current_token.line\n col = self._current_token.col\n raise ParseError(\"expected THEN at %d:%d\" % (line, col), line, col)\n self.__eat(lex.TokenEnum.STATEMENT) # THEN\n\n if self._current_token.value == \"GOTO\":\n self.__eat(lex.TokenEnum.STATEMENT)\n goto = self.__parse_goto(label)\n return If(label, conditions, [goto])\n\n if self._current_token.type == lex.TokenEnum.NUMBER:\n target = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n goto = Goto(label, target)\n return If(label, conditions, [goto])\n\n if self._current_token.value == \"IF\":\n self.__eat(lex.TokenEnum.STATEMENT)\n return self.__parse_if(conditions, None)\n\n return If(label, conditions, [self.__parse_statement(None)])\n\n def __parse_goto(self, label):\n \"\"\"GOTO number\"\"\"\n try:\n target_label = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n except ValueError:\n line = self._current_token.line\n col = self._current_token.col\n raise ParseError(\"expected number [%d:%d]\" % (line, col), line, col)\n\n return Goto(label, target_label)\n\n def __parse_gosub(self, label):\n \"\"\"\n GOSUB number\n\n We turn a GOSUB statement into a function call.\n \"\"\"\n try:\n target_label = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n except ValueError:\n line = self._current_token.line\n col = self._current_token.col\n raise ParseError(\"expected number [%d:%d]\" % (line, col), line, col)\n\n fn = Gosub(label, target_label)\n self.functions[target_label] = fn\n return fn\n\n def __parse_input(self, label):\n \"\"\"INPUT var-list\"\"\"\n variables = list()\n while True:\n token = self._current_token\n self.__eat(lex.TokenEnum.VARIABLE)\n variables.append(VariableExpression(token.value.lower()))\n\n if self._current_token.type == lex.TokenEnum.COMMA:\n self.__eat(lex.TokenEnum.COMMA)\n else:\n break\n\n return Input(label, variables)\n\n def __parse_for(self, label):\n var = VariableExpression(self._current_token.value.lower())\n self.__eat(lex.TokenEnum.VARIABLE)\n self.__eat(lex.TokenEnum.RELATION_OP)\n\n start = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n\n if (self._current_token.type != lex.TokenEnum.STATEMENT or\n self._current_token.value != \"TO\"):\n # parse error: missing TO\n line = self._current_token.line\n col = self._current_token.col\n raise ParseError(\"expected TO keyword [%d:%d]\" % (line, col), line, col)\n self.__eat(lex.TokenEnum.STATEMENT)\n\n stop = int(self._current_token.value)\n 
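# NOTE: by this point the parser has read `FOR <var> = <start> TO <stop>`
        # (the stop NUMBER token is eaten just below); what remains is an optional
        # STEP clause and the loop body, read line by line until a NEXT closes it.
        # For example (illustrative), the TinyBasic fragment
        #   10 FOR I = 1 TO 3
        #   20 PRINT I
        #   30 NEXT I
        # parses to For(var=VariableExpression('i'), start=1, stop=3, step=1,
        # statements=[Print(label=20, exp_list=[VariableExpression('i')])], label=10).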
self.__eat(lex.TokenEnum.NUMBER)\n\n if (self._current_token.type == lex.TokenEnum.STATEMENT and\n self._current_token.value == \"STEP\"):\n self.__eat(lex.TokenEnum.STATEMENT)\n step = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n else:\n step = 1\n\n statements = list()\n while True:\n statement = self.__process_line()\n if statement is None:\n line = self._current_token.line\n col = self._current_token.col\n raise ParseError(\"missing NEXT [%d:%d]\" % (line, col), line, col)\n \n if isinstance(statement, Next):\n break;\n else:\n statements.append(statement)\n\n return For(var, start, stop, step, statements, label)\n\n def __parse_statement(self, label):\n token = self._current_token\n if token.type == lex.TokenEnum.EOF:\n return None\n\n self.__eat(lex.TokenEnum.STATEMENT)\n if token.value == \"RETURN\":\n return Return(label)\n if token.value == \"LET\":\n return self.__parse_let(label)\n if token.value == \"PRINT\":\n return self.__parse_print(label)\n if token.value == \"IF\":\n return self.__parse_if(None, label)\n if token.value == \"GOTO\":\n return self.__parse_goto(label)\n if token.value == \"GOSUB\":\n return self.__parse_gosub(label)\n if token.value == \"INPUT\":\n return self.__parse_input(label)\n if token.value == \"FOR\":\n return self.__parse_for(label)\n if token.value == \"NEXT\":\n self.__eat(lex.TokenEnum.VARIABLE)\n return Next(label)\n if token.value == \"END\":\n return End(label)\n\n raise ParseError(\n \"Unknown statement: %s\" % token.value,\n self._current_token.line,\n self._current_token.col,\n )\n\n def __process_line(self, fwd_label=None):\n while self._current_token.type == lex.TokenEnum.COMMENT:\n self.__eat(lex.TokenEnum.COMMENT)\n\n if self._current_token.type == lex.TokenEnum.NUMBER:\n label = int(self._current_token.value)\n self.__eat(lex.TokenEnum.NUMBER)\n else:\n label = None\n\n if fwd_label is not None:\n label = fwd_label\n\n if self._current_token.type == lex.TokenEnum.VARIABLE:\n return self.__parse_let(label)\n\n if self._current_token.type == lex.TokenEnum.COMMENT:\n self.__eat(lex.TokenEnum.COMMENT)\n return self.__process_line(label)\n\n return self.__parse_statement(label)\n\n def __parse_program(self):\n while True:\n statement = self.__process_line()\n if statement is None:\n break\n\n # If the label matches a GOSUB target (stored in the\n # functions list) we are now in that functions context and store\n # the statements there.\n if statement.label is not None:\n if int(statement.label) in self.functions:\n self._context = statement.label\n\n self._statements[self._context].append(statement)\n\n return Program(self._statements)\n\n def parse(self):\n \"\"\" Attempts to parse a TineBasic program based on the tokens received\n from the lexer (lex.py). See the namedtuples above for what\n statements are generated to a list on the program node. 
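For example (illustrative), parsing the two-line program
                10 LET X = 1
                20 PRINT X
            yields roughly Program(statements={'main': [Let(label=10,
            lval=VariableExpression('x'), rval=...), Print(label=20,
            exp_list=[VariableExpression('x')])]}).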
\"\"\"\n lexer = lex.Lexer(self._code)\n self._token_iter = iter(lexer.get_tokens())\n self._current_token = next(self._token_iter)\n\n return self.__parse_program()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n sys.exit()\n\n try:\n FP = open(sys.argv[1], \"r\")\n PROGRAM = FP.read()\n except IOError:\n print(\"could not read file: %s\" % sys.argv[1])\n sys.exit()\n\n try:\n TREE = Parser(PROGRAM).parse()\n except ParseError as err:\n print(err)\n sys.exit(1)\n","repo_name":"jonasdn/bastors","sub_path":"bastors/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":15541,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"7568599207","text":"######################################################\n#\n# PyRAI2MD 2 module for computing surface hopping\n#\n# Author Jingbai Li\n# Sep 7 2021\n#\n######################################################\n\nimport numpy as np\nfrom PyRAI2MD.Dynamics.Propagators.fssh import FSSH\nfrom PyRAI2MD.Dynamics.Propagators.gsh import GSH\n\ndef SurfaceHopping(traj):\n \"\"\" Computing surface hopping \n\n Parameters: Type:\n traj class\t trajectory class\n\n Attribute: Type:\n sfhp str surface hopping method\n\n Return: Type:\n traj class\t molecule class\n\n \"\"\"\n\n sfhp = traj.sfhp\n if sfhp.lower() == 'fssh':\n traj_dict = {key: getattr(traj, key) for key in traj.attr}\n At, Ht, Dt, V, hoped, old_state, state, info = FSSH(traj_dict)\n elif sfhp.lower() == 'gsh':\n At, Ht, Dt, V, hoped, old_state, state, info = GSH(traj)\n elif sfhp.lower() == 'nosh':\n traj.shinfo = 'no surface hopping is performed'\n return traj\n\n traj.A = np.copy(At)\n traj.H = np.copy(Ht)\n traj.D = np.copy(Dt)\n traj.velo = np.copy(V)\n traj.hoped = hoped\n traj.last_state = old_state\n traj.state = state\n traj.shinfo = info\n\n return traj\n","repo_name":"lopez-lab/PyRAI2MD","sub_path":"PyRAI2MD/Dynamics/Propagators/surface_hopping.py","file_name":"surface_hopping.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"23148457817","text":"import traceback\n\nfrom pyfileconfgui.dash_ext.python import PythonBlockComponent\n\nDEFAULT_HEIGHT = '300px'\n\n\nclass TracebackComponent(PythonBlockComponent):\n\n def __init__(self, id: str, **kwargs):\n if 'style' not in kwargs:\n kwargs['style'] = {'max-height': DEFAULT_HEIGHT}\n elif 'max-height' not in kwargs['style']:\n kwargs['style']['max-height'] = DEFAULT_HEIGHT\n content = traceback.format_exc()\n super().__init__(id, content, **kwargs)\n\n def refresh(self):\n self.content = traceback.format_exc()\n","repo_name":"nickderobertis/py-file-conf-gui","sub_path":"pyfileconfgui/dash_ext/tb.py","file_name":"tb.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40614170523","text":"import numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\ndef F1(x,y):\r\n result = np.sin(x/2) + np.cos(2*y)\r\n return result\r\ndef F2(x,y):\r\n result = -abs(x-2)-abs((.5*y)+1)+3\r\n return result\r\nx = np.arange(0,10,.01)\r\ny = np.arange(0,10,.01)\r\nprint(x,y)\r\nvalue=F2(x,y)\r\n\r\nstep = .01\r\nnet_steps = []\r\n\r\nfor i in range(0,100):\r\n x_start = random.randint(0,len(x)-1)\r\n x_start = x[x_start]\r\n y_start = random.randint(0,len(y)-1)\r\n y_start = y[y_start]\r\n print(x_start,y_start)\r\n 
done=False\r\n number_steps = 0\r\n while done==False:\r\n \r\n x_only_higher = round(F2(x_start+step,y_start),2)\r\n x_only_lower = round(F2(x_start-step,y_start),2)\r\n y_only_higher = round(F2(x_start,y_start+step),2)\r\n y_only_lower = round(F2(x_start,y_start-step),2)\r\n both_higher = round(F2(x_start+step,y_start+step),2)\r\n both_lower = round(F2(x_start-step,y_start-step),2)\r\n stagger_1 = round(F2(x_start-step,y_start-step),2)\r\n stagger_2 = round(F2(x_start+step,y_start+step),2)\r\n value = max([x_only_higher,x_only_lower,y_only_higher,y_only_lower,\r\n both_higher,both_lower,stagger_1,stagger_2])\r\n if round(F2(x_start,y_start),2)>=value:\r\n done = True\r\n else:\r\n number_steps += 1\r\n if value == x_only_higher:\r\n x_start = x_start+step\r\n elif value == x_only_lower:\r\n x_start = x_start-step\r\n elif value == y_only_higher:\r\n y_start = y_start+step\r\n elif value == y_only_lower:\r\n y_start = y_start-step\r\n elif value == both_higher:\r\n y_start = y_start+step\r\n x_start = x_start+step\r\n elif value == both_lower:\r\n y_start = y_start-step\r\n x_start = x_start-step\r\n elif value == stagger_1:\r\n y_start = y_start+step\r\n x_start = x_start-step\r\n elif value == stagger_2:\r\n y_start = y_start-step\r\n x_start = x_start+step\r\n net_steps.append(number_steps)\r\nmean = np.mean(net_steps)\r\nprint(\"mean \", mean)\r\nstd = np.std(net_steps)\r\nprint(\"std \",std)\r\nprint(value)\r\n","repo_name":"Fjenks/portfolio","sub_path":"python/hill_climbing.py","file_name":"hill_climbing.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6598856833","text":"\"\"\"\nWrite a function that will get document a number as an argument\n(in this format \"FV/BI/2023-1/10\") and will return a dictionary\ncontaining elements of this number.\n\nNumber consists of:\ndocument type/department/year-quarter/number\n\nExample:\nsplit_document_number_to_parts(\"FV/BI/2023-1/10\")\n\nreturns\n{\n \"document_type\": \"FV\",\n \"department\": \"BI\",\n \"year\": 2023,\n \"quarter\": 1,\n \"number\": 10\n}\n\"\"\"\n\n\"\"\"\nType hinting is a technique of marking what data type arguments should have\nand what data type will be returned from the function.\n\"\"\"\n\n\ndef split_document_number_to_parts(document_number: str) -> dict:\n \"\"\"\n Splitting document number string into a dictionary structure.\n For example document number \"FV/BI/2023-1/10\" will be parsed into:\n {\"document_type\": \"FV\", \"department\": \"BI\", \"year\": 2023, \"quarter\": 1, \"number\": 10 }.\n\n :param document_number: For example \"FV/BI/2023-1/10\"\n :return: Dictionary with document_type, department, year, quarter, number keys.\n \"\"\"\n if document_number.count('/') != 3:\n # approach with returning a special value that indicates that something is wrong\n return None\n\n segments = document_number.split('/')\n\n return {\n 'document_type': segments[0],\n 'department': segments[1],\n 'year': int(segments[2].split('-')[0]),\n 'quarter': int(segments[2].split('-')[1]),\n 'number': int(segments[3])\n }\n\n\n# splitted_document_number = split_document_number_to_parts(\"FV/BI/2023-1/10\")\nsplitted_document_number = split_document_number_to_parts(\"asdqweasd\")\nif splitted_document_number is None:\n print('The number is wrong')\nelse:\n print(splitted_document_number)\n\n for name, value in splitted_document_number.items():\n print(name, 
value)\n","repo_name":"piotrgradzinski/python_20230422","sub_path":"functions_pgg/ex_2_v2.py","file_name":"ex_2_v2.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18485278117","text":"#!/usr/bin/env python\n\"\"\"\nrough draft of a gui for rapidly switching between possible versions while codesigning.\nCurrently imports the classes it uses from computer_speech.py and angle_distance.py.\n\"\"\"\nimport rospy\nimport Tkinter as tk\nimport math\nimport subprocess\nfrom tango_tracker import Tango_tracker\nfrom height_communication_testing import Absolute_height, Angle_height, Body_mapping\n\nclass Controller(tk.Frame):\n def __init__(self, module_list, mote=None, master=None):\n tk.Frame.__init__(self, master)\n self.m = module_list\n self.grid()\n self.createWidgets()\n\n def createWidgets(self):\n dc = 1.00\n self.on_m1 = tk.Button(self, text=\"angle on\", command=self.m[0].turn_on)\n self.on_m1.grid()\n self.off_m1 = tk.Button(self, text=\"angle off\", command=self.m[0].turn_off)\n self.off_m1.grid()\n self.on_m2 = tk.Button(self, text='inches on', command=self.m[1].turn_on)\n self.on_m2.grid()\n self.off_m2 = tk.Button(self, text='inches off', command=self.m[1].turn_off)\n self.off_m2.grid()\n self.off_m3= tk.Button(self, text='body parts on', command=self.m[2].turn_on)\n self.off_m3.grid()\n self.off_m3 = tk.Button(self, text='body parts off', command=self.m[2].turn_off)\n self.off_m3.grid()\n self.quit_button = tk.Button(self, text=\"Quit\",command=self.quit)\n self.quit_button.grid()\n\n def call_all(self):\n # self.m[0].delay_coefficient = self.m1_dc.get()\n\n for i in self.m:\n i.call()\n\n self.after(10, self.call_all)\n\n\n\nif __name__ == \"__main__\":\n\n tt = Tango_tracker()\n v1 = Angle_height(tt)\n v2 = Absolute_height(tt)\n v3 = Body_mapping(tt)\n\n control = Controller([v1, v2, v3])\n control.master.title(\"Testing GUI\")\n control.after(100, control.call_all)\n control.mainloop()\n","repo_name":"occamLab/eye-helper-cv","sub_path":"eye_helper/scripts/height_test_gui.py","file_name":"height_test_gui.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"73376587093","text":"\"\"\"\n@File : 03_detect_from_camera.py \n@Contact : 156618056@qq.com\n@License : Free\n@Modified: 2020/8/11 23:20\n@Author : Karol Wu\n@Version : 1.0 \n@Des : None\n\"\"\"\nfrom yolov5.Detection import ObjectDetection\n\n\npath_to_weight = \"pre_trained_model/yolov5s.pt\"\n\ndetector = ObjectDetection(weight_path=path_to_weight)\ndetector.detectObjectsFromCamera(out_to_file=\"videos/detected/camera_detection.mp4\", save_video=True,\n confidence=0.4, custom_class=None)\n","repo_name":"ZuyongWu/yolov5s","sub_path":"03_detect_from_camera.py","file_name":"03_detect_from_camera.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30644008175","text":"from typing import Optional, Dict, Any, Mapping, Union\nimport json\n\nimport aiohttp\nimport asyncio\n\nfrom API.jikanpy.jikanpy.abstractjikan import AbstractJikan\nfrom API.jikanpy.jikanpy.exceptions import APIException\n\n\nclass AioJikan(AbstractJikan):\n \"\"\"Asynchronous Jikan wrapper\"\"\"\n\n def __init__(\n self,\n selected_base: Optional[str] = None,\n use_ssl: bool = True,\n session: Optional[Any] = None,\n loop: Optional[Any] = None,\n ) -> 
None:\n super().__init__(selected_base=selected_base, use_ssl=use_ssl)\n self.loop = asyncio.get_event_loop() if loop is None else loop\n self.session = (\n aiohttp.ClientSession(loop=self.loop) if session is None else session\n )\n\n async def _check_response( # type: ignore\n self, response: Any, **kwargs: Union[int, Optional[str]]\n ) -> None:\n \"\"\"Overrides _check_response in AbstractJikan\"\"\"\n if response.status >= 400:\n try:\n json_resp = await response.json()\n error_msg = json_resp.get(\"error\")\n except json.decoder.JSONDecodeError:\n error_msg = \"\"\n err_str: str = \"{} {}: error for \".format(response.status, error_msg)\n err_str += \", \".join(\"=\".join((str(k), str(v))) for k, v in kwargs.items())\n raise APIException(err_str)\n\n async def _get( # type: ignore\n self,\n endpoint: str,\n id: int,\n extension: Optional[str],\n page: Optional[int] = None,\n ) -> Dict:\n url: str = self._get_url(endpoint, id, extension, page)\n response = await self.session.get(url)\n await self._check_response(response, id=id, endpoint=endpoint)\n json = await response.json()\n return json\n\n async def _get_creator( # type: ignore\n self, creator_type: str, creator_id: int, page: Optional[int] = None\n ) -> Dict:\n url: str = self._get_creator_url(creator_type, creator_id, page)\n response = await self.session.get(url)\n await self._check_response(response, id=creator_id, endpoint=creator_type)\n json = await response.json()\n return json\n\n async def search( # type: ignore\n self,\n search_type: str,\n query: str,\n page: Optional[int] = None,\n parameters: Optional[Mapping[str, Optional[Union[int, str, float]]]] = None,\n ) -> Dict:\n url: str = self._get_search_url(search_type, query, page, parameters)\n response = await self.session.get(url)\n kwargs: Dict[str, str] = {\"search type\": search_type, \"query\": query}\n await self._check_response(response, **kwargs)\n json = await response.json()\n return json\n\n async def season(self, year: int, season: str) -> Dict: # type: ignore\n url: str = self._get_season_url(year, season)\n response = await self.session.get(url)\n await self._check_response(response, year=year, season=season)\n json = await response.json()\n return json\n\n async def season_archive(self) -> Dict: # type: ignore\n response = await self.session.get(self.season_archive_url)\n await self._check_response(response)\n json = await response.json()\n return json\n\n async def season_later(self) -> Dict: # type: ignore\n response = await self.session.get(self.season_later_url)\n await self._check_response(response)\n json = await response.json()\n return json\n\n async def schedule(self, day: Optional[str] = None) -> Dict: # type: ignore\n url: str = self._get_schedule_url(day)\n response = await self.session.get(url)\n await self._check_response(response, day=day)\n json = await response.json()\n return json\n\n async def top( # type: ignore\n self, type: str, page: Optional[int] = None, subtype: Optional[str] = None\n ) -> Dict:\n url: str = self._get_top_url(type, page, subtype)\n response = await self.session.get(url)\n await self._check_response(response, type=type)\n json = await response.json()\n return json\n\n async def genre( # type: ignore\n self, type: str, genre_id: int, page: Optional[int] = None\n ) -> Dict:\n url: str = self._get_genre_url(type, genre_id, page)\n response = await self.session.get(url)\n await self._check_response(response, id=genre_id, type=type)\n json = await response.json()\n return json\n\n async def user( # type: ignore\n 
self,\n username: str,\n request: Optional[str] = None,\n argument: Optional[Union[int, str]] = None,\n page: Optional[int] = None,\n parameters: Optional[Mapping] = None,\n ) -> Dict:\n url: str = self._get_user_url(username, request, argument, page, parameters)\n response = await self.session.get(url)\n await self._check_response(response, username=username, request=request)\n json = await response.json()\n return json\n\n async def meta( # type: ignore\n self,\n request: str,\n type: Optional[str] = None,\n period: Optional[str] = None,\n offset: Optional[int] = None,\n ) -> Dict:\n url: str = self._get_meta_url(request, type, period, offset)\n response = await self.session.get(url)\n await self._check_response(response, request=request, type=type, period=period)\n json = await response.json()\n return json\n\n async def close(self) -> None:\n await self.session.close()\n","repo_name":"HoodyH/Pavlov-Old","sub_path":"API/jikanpy/jikanpy/aiojikan.py","file_name":"aiojikan.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1151324448","text":"import re\r\n#test = \"x + 4x + 5 + 3x + 2x + 5x - x - 6x - 7x - 8 = 2x - 1\"\r\n#test = \"5x = 8\"\r\n#test = \"x + 3x = 4 + 4\"\r\n\r\n#creates two lists for each side of the equation containing all the terms and operands\r\ndef createList(splitPart):\r\n for term in splitPart:\r\n if term == \"\":\r\n splitPart.remove(term)\r\n\r\n for i in range(len(splitPart)):\r\n if splitPart[i] == \"x\":\r\n splitPart[i] = \"1x\" \r\n\r\n #if the first term is not preceded by a minus it is automatically a positive term\r\n newList = [[splitPart[0], \"+\"]]\r\n\r\n #adds each term to newList and appends the operator preceding the term\r\n count = 0\r\n for term in splitPart:\r\n if count > 0 and not term in [\"+\",\"-\"]:\r\n newList.append([term, splitPart[count - 1]])\r\n count += 1\r\n return newList\r\n\r\n#Create four lists that seperate the terms into positive terms containing x, negative terms containing x, \r\n#positive integer terms, and negative integer terms\r\ndef populateLists(termSide):\r\n pList = []\r\n mList = []\r\n pNumList = []\r\n mNumList = []\r\n for term in termSide:\r\n if term[0].find(\"x\") > -1:\r\n if term[1] == \"+\":\r\n pList.append(term[0])\r\n elif term[1] == \"-\":\r\n mList.append(term[0])\r\n elif term[1] == \"+\":\r\n pNumList.append(term[0])\r\n elif term[1] == \"-\":\r\n mNumList.append(term[0])\r\n return pList, mList, pNumList, mNumList\r\n\r\ndef calcTotalX(terms, operator):\r\n total = 0\r\n for term in terms:\r\n if term.find(\"x\") > -1:\r\n xterm = term.replace(\"x\", \"\")\r\n if operator == \"+\":\r\n total += int(xterm)\r\n else:\r\n total -= int(xterm)\r\n elif operator == \"+\":\r\n total += int(term)\r\n else:\r\n total -= int(term)\r\n return total\r\n\r\n#This is made into a function simply so that one can rerun the program from the shell by calling this function.\r\ndef findx():\r\n expression = input(\"Enter an equation:\")\r\n pattern = '[0-9]*x[0-9]*'\r\n numPat = '[0-9]+'\r\n total = 0\r\n\r\n p1split = expression.split(\"=\")[0].split(\" \")\r\n p2split = expression.split(\"=\")[1].split(\" \")\r\n\r\n leftTerms = createList(p1split)\r\n rightTerms = createList(p2split) \r\n\r\n seperatedLeft = populateLists(leftTerms)\r\n seperatedRight = populateLists(rightTerms)\r\n\r\n leftXPosi = calcTotalX(seperatedLeft[0], \"+\")\r\n rightXPosi = calcTotalX(seperatedRight[0], \"+\")\r\n leftXNeg = 
calcTotalX(seperatedLeft[1], \"-\")\r\n rightXNeg = calcTotalX(seperatedRight[1], \"-\")\r\n\r\n totalXL = (leftXPosi - rightXPosi) + (leftXNeg - rightXNeg)\r\n\r\n leftNumPosi = calcTotalX(seperatedLeft[2], \"+\")\r\n rightNumPosi = calcTotalX(seperatedRight[2], \"+\")\r\n leftNumNeg = calcTotalX(seperatedLeft[3], \"-\")\r\n rightNumNeg = calcTotalX(seperatedRight[3], \"-\")\r\n\r\n totalNumR = (rightNumPosi - leftNumPosi) + (rightNumNeg - leftNumNeg)\r\n\r\n xVal = totalNumR/totalXL\r\n\r\n print(\"x = \", str(xVal))\r\n\r\nfindx()\r\n","repo_name":"raluijk/Algebra_Helper","sub_path":"algebra_helper.py","file_name":"algebra_helper.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13977645193","text":"from typing import List, Tuple\nfrom lexer_token import Token\nfrom variables import Variables\n\nclass Lexer(Variables):\n def __init__(self) -> None:\n self.tokens = []\n self.current_literal = \"\"\n self.pos = 0\n self.input = \"\"\n\n def next_char(self):\n \"\"\"\n Move to next character in input str\n \"\"\"\n self.pos += 1\n if self.pos == len(self.input):\n self.current_char = None\n else:\n self.current_char = self.input[self.pos]\n\n def tokenize(self, input : str) -> List[Token]:\n \"\"\"\n Convert input string into list of tuples. \n Tuple contasn token type and values.\n Params: \n input, str : User's input\n Return:\n tokens, list[tuple] : List of tokens.\n \"\"\"\n self.input = input\n self.current_char = input[self.pos]\n self.variable_complete = None\n while self.current_char != None:\n if self.current_char == self.T_PLUS:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.T_PLUS, self.current_char)\n elif self.current_char == self.T_MINUS:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.T_MINUS, self.current_char)\n elif self.current_char == self.T_DIVISION:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.T_DIVISION, self.current_char)\n elif self.current_char == self.T_MULTIPLY:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.T_MULTIPLY, self.current_char)\n elif self.current_char == self.LPAREN:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.LPAREN, self.current_char)\n elif self.current_char == self.RPAREN:\n self.add_tokens(self.T_STRING, self.current_literal)\n self.add_tokens(self.RPAREN, self.current_char)\n elif self.current_char == self.T_EQUALS:\n self.add_tokens(self.T_VARIABLE, self.current_literal)\n else: \n self.current_literal += self.current_char\n self.next_char()\n \n self.pos = 0\n if len(self.current_literal) > 0:\n self.add_tokens(self.T_STRING, self.current_literal)\n\n if len(self.tokens) == 0:\n print(f\"Current token '{self.current_literal}' is not defined.\")\n return []\n else:\n to_return_tokens = self.tokens\n self.tokens = []\n return to_return_tokens\n\n def add_tokens(self, _type : str, value : str) -> None:\n \"\"\"\n 1. Create an instance of Token class with right type.\n 2. 
Add an instance of Token class to self.tokens.\n \"\"\"\n if _type == self.T_VARIABLE:\n self.tokens.append(Token(_type, None, value))\n self.variable_complete = False\n elif self.variable_complete == False:\n print(\"asd\")\n is_number, new_value = self.transfrom_input(value)\n if is_number:\n print(\"asdas\")\n self.tokens[-1].value = new_value\n else:\n self.tokens[-1].value = value\n self.variable_complete = True\n elif _type == self.T_STRING and value != \"\":\n is_number, new_value = self.transfrom_input(value)\n if is_number:\n self.tokens.append(Token(self.T_NUMBER, new_value))\n else:\n self.tokens.append(Token(self.T_NUMBER, new_value))\n elif value != \"\":\n self.tokens.append(Token(_type, value))\n self.current_literal = \"\"\n\n def transfrom_input(self, value):\n \"\"\"\n Check if input is int or float and transfrom it.\n \"\"\"\n is_number = False\n new_value = 0\n if value.isdigit():\n is_number == True\n new_value = int(value)\n else:\n try:\n new_value = float(value)\n is_number = True\n except:\n pass\n return is_number, new_value","repo_name":"Samiimov/basic_lang_with_python","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43545648991","text":"import json\nimport os\nimport glob\nimport re\nimport shutil\nfrom pickle import BUILD\nimport pathlib\nfrom fnmatch import fnmatch\n\nBUILD_FOLDER = \"build\"\nCOMPONENTS_FOLDER = \"components\"\nSRC_FILES = glob.glob(\"dist/**/*\", recursive=True)\nTRANSLATION_FILE = \"translations.json\"\nIGNORE_FILE = \".buildignore\"\n\nif os.path.exists(IGNORE_FILE):\n with open(IGNORE_FILE, \"r\", encoding=\"utf-8\") as file:\n IGNORED_FILES = file.read().splitlines()\nelse:\n IGNORED_FILES = []\n\n\ndef build_path(path, locale=\"\"):\n return os.path.join(BUILD_FOLDER + (\"\" if BUILD_FOLDER[-1] == \"/\" else \"/\"),\n (locale + \"/\" if locale != \"\" else \"\"),\n path)\n\n\nif not os.path.exists(BUILD_FOLDER):\n os.mkdir(BUILD_FOLDER)\nelse:\n for filename in os.listdir(BUILD_FOLDER):\n file_path = build_path(filename)\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n\nprint(\"🗑️ Deleted old build structure in \" + BUILD_FOLDER + \".\")\n\ntranslation_map = {}\n\nwith open(TRANSLATION_FILE, \"r\", encoding=\"utf-8\") as f:\n raw_translation_map = f.read()\n translation_map = json.loads(raw_translation_map)\n\nprint(\"📖 Loaded translation files.\")\n\n\ndef ordinal(n): return \"%d%s\" % (\n n, \"tsnrhtdd\"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])\n\n\ndef matches_ignore(filename):\n for ignore in IGNORED_FILES:\n if fnmatch(filename, ignore):\n return True\n return False\n\n\nlocales = translation_map[\"locales\"]\n\nlocale_idx_map = {}\nfor i in range(len(locales)):\n locale_idx_map[i] = locales[i]\n\n\nfor locale_idx, locale in enumerate(locales):\n print(\"🌐 Building locale\", locale,\n \"(\" + str(locale_idx + 1) + \"/\" + str(len(locales)) + \")\")\n\n os.mkdir(build_path(\"\", locale))\n\n files_processed = 0\n for idx, file in enumerate(SRC_FILES):\n filename, file_extension = os.path.splitext(file)\n\n src_path = pathlib.Path(*pathlib.Path(file).parts[1:])\n\n is_ignored = matches_ignore(src_path)\n\n output_path = build_path(\n src_path, \"\" if is_ignored else locale)\n\n if is_ignored and locale_idx > 0:\n continue\n\n if os.path.isdir(file):\n dir_name = output_path\n 
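# NOTE: SRC_FILES comes from glob with recursive=True, which yields a
            # parent directory before its contents, so a plain os.mkdir suffices
            # here. A more defensive variant (assumption: re-runs with a partially
            # cleared build folder should not crash) would be:
            #   os.makedirs(dir_name, exist_ok=True)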
os.mkdir(dir_name)\n print(\"📂 Made directory \" + dir_name + \".\")\n continue\n\n if file_extension != '.html':\n shutil.copyfile(file, output_path)\n continue\n\n with open(file, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n new_lines = []\n\n for line in lines:\n def translate_line(tline):\n translation_matches = re.findall(\"~{~.+~}~\", tline)\n if (len(translation_matches) != 0):\n translation_match = translation_matches[0]\n translation_key = re.findall(\n \"(?<=~{~).+(?=~}~)\", translation_match)[0]\n translation_value = translation_map[translation_key][locale_idx]\n return tline.replace(\n translation_match, translation_value)\n return tline\n\n new_line = translate_line(line)\n\n component_matches = re.findall(\"={=.+=}=\", line)\n if len(component_matches) != 0:\n component_match = component_matches[0]\n component_path = re.findall(\n \"(?<=={=).+(?=\\(.*\\)=}=)\", component_match)[0]\n component_props_string = re.findall(\n \"(?<=\\().+(?=\\))\", component_match)\n\n with open(component_path, encoding=\"utf-8\") as f:\n component_code = f.read()\n\n if len(component_props_string):\n component_props = component_props_string[0].split(\n ';')\n\n for component_prop in component_props:\n prop_key, prop_value = component_prop.split(\n '=')\n component_code = re.sub(\n \"-{-\" + prop_key + \"-}-\", prop_value, component_code)\n\n for prop in re.findall(\"-{-.+-}-\", component_code):\n component_code = re.sub(prop, \"\", component_code)\n\n before_component = re.findall(\"[ \\t]+(?=={=)\", line)\n if len(before_component):\n component_code = re.sub(\n \"\\n\", \"\\n\" + before_component[0], component_code)\n\n new_component_lines = []\n for component_line in component_code.split(\"\\n\"):\n new_component_lines.append(\n translate_line(component_line))\n component_code = \"\".join(new_component_lines)\n\n new_line = line.replace(\n component_match, component_code)\n\n new_line = re.sub(\"\\%locale\\%\", locale, new_line)\n\n new_lines.append(new_line)\n\n with open(output_path, \"w\", encoding=\"utf-8\") as buildf:\n buildf.writelines(new_lines)\n\n files_processed += 1\n print(\"✅ [\", locale, \"] Done with \" +\n str(ordinal(files_processed)) + \" file.\")\n","repo_name":"SSRSM/ssrsm.mk","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32099026561","text":"from __future__ import unicode_literals\nimport frappe\nimport random\nfrom frappe.utils import random_string\nfrom erpnext.projects.doctype.timesheet.test_timesheet import make_timesheet\nfrom erpnext.projects.doctype.timesheet.timesheet import make_salary_slip, make_sales_invoice\nfrom frappe.utils.make_random import how_many, get_random\n\ndef work():\n\tfrappe.set_user(frappe.db.get_global('demo_hr_user'))\n\tyear, month = frappe.flags.current_date.strftime(\"%Y-%m\").split(\"-\")\n\n\t# process payroll\n\tif not frappe.db.get_value(\"Salary Slip\", {\"month\": month, \"fiscal_year\": year}):\n\t\tprocess_payroll = frappe.get_doc(\"Process Payroll\", \"Process Payroll\")\n\t\tprocess_payroll.company = frappe.flags.company\n\t\tprocess_payroll.month = month\n\t\tprocess_payroll.fiscal_year = year\n\t\tprocess_payroll.create_sal_slip()\n\t\tprocess_payroll.submit_salary_slip()\n\t\tr = process_payroll.make_journal_entry(frappe.get_value('Account',\n\t\t\t{'account_name': 'Salary'}))\n\n\t\tjournal_entry = frappe.get_doc(r)\n\t\tjournal_entry.cheque_no = 
random_string(10)\n\t\tjournal_entry.cheque_date = frappe.flags.current_date\n\t\tjournal_entry.posting_date = frappe.flags.current_date\n\t\tjournal_entry.insert()\n\t\tjournal_entry.submit()\n\t\n\tif frappe.db.get_global('demo_hr_user'):\n\t\tmake_timesheet_records()\n\ndef get_timesheet_based_salary_slip_employee():\n\treturn frappe.get_all('Salary Structure', fields = [\"distinct employee as name\"],\n\t\tfilters = {'salary_slip_based_on_timesheet': 1})\n\t\ndef make_timesheet_records():\n\temployees = get_timesheet_based_salary_slip_employee()\n\tfor employee in employees:\n\t\tts = make_timesheet(employee.name, simulate = True, billable = 1, activity_type=get_random(\"Activity Type\"))\n\n\t\trand = random.random()\n\t\tif rand >= 0.3:\n\t\t\tmake_salary_slip_for_timesheet(ts.name)\n\n\t\trand = random.random()\n\t\tif rand >= 0.2:\n\t\t\tmake_sales_invoice_for_timesheet(ts.name)\n\ndef make_salary_slip_for_timesheet(name):\n\tsalary_slip = make_salary_slip(name)\n\tsalary_slip.insert()\n\tsalary_slip.submit()\n\ndef make_sales_invoice_for_timesheet(name):\n\tsales_invoice = make_sales_invoice(name)\n\tsales_invoice.customer = get_random(\"Customer\")\n\tsales_invoice.append('items', {\n\t\t'item_code': get_random_item(),\n\t\t'qty': 1,\n\t\t'rate': 1000\n\t})\n\tsales_invoice.set_missing_values()\n\tsales_invoice.calculate_taxes_and_totals()\n\tsales_invoice.insert()\n\tsales_invoice.submit()\n\ndef get_random_item():\n\treturn frappe.db.sql_list(\"\"\" select name from `tabItem` where\n\t\thas_variants = 0 order by rand() limit 1\"\"\")[0]\n","repo_name":"srinivasragav/erp","sub_path":"erpnext/demo/user/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"4301182641","text":"# -*- coding: utf-8 -*-\n\n# Django Framework\nfrom django.conf import settings\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom django.core.files.images import ImageFile\n\n# Gallerie\nfrom gallery.models import Photograph, Photo\nfrom gallery.forms import UploadForm\n\n# Other Modules\nimport shutil\nimport os\nimport zipfile\n\nfrom PIL import Image\n\n# Create your views here.\n\n\ndef view_content(request):\n return render(request, 'gallery/index.html')\n\n\ndef handle_uploaded_file(f, upload_auth):\n with open(settings.MEDIA_ROOT + str(f), 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n arc = zipfile.ZipFile(\n settings.MEDIA_ROOT + \"77_toutes_photos.zip\", \"a\",\n zipfile.ZIP_DEFLATED)\n before = os.listdir(settings.MEDIA_ROOT)\n try:\n shutil.unpack_archive(\n settings.MEDIA_ROOT + str(f),\n extract_dir=settings.MEDIA_ROOT)\n except Exception as e:\n os.remove(settings.MEDIA_ROOT + str(f))\n raise Exception(e)\n after = os.listdir(settings.MEDIA_ROOT)\n diff = [fpath for fpath in after if fpath not in before]\n #print(diff)\n\n for i in diff:\n #print(i)\n path = settings.MEDIA_ROOT + i\n im_name = upload_auth.firstname + \"_\" + upload_auth.lastname + \"_\" + i\n mid_thumb = \"m_\" + im_name\n small_thumb = \"s_\" + im_name\n # Create Thumbnails\n tmp_pict = Image.open(path)\n ratio = max(tmp_pict.size[0] / 950, tmp_pict.size[1] / 712)\n tmp_pict.thumbnail(\n tuple([int(x / ratio) for x in tmp_pict.size]), Image.ANTIALIAS)\n tmp_pict.save(mid_thumb, tmp_pict.format)\n tmp_pict.close()\n\n tmp_pict = Image.open(path)\n ratio = max(tmp_pict.size[0] / 130, tmp_pict.size[1] / 98)\n 
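# NOTE: same bounding-box pattern as the 950x712 thumbnail above, now for
        # a 130x98 preview. The ratio is the factor by which the image exceeds the
        # box, so dividing both dimensions by it fits the image inside the box
        # while preserving aspect ratio. A small helper would remove the
        # duplication, e.g. (sketch; the name make_thumb is hypothetical):
        #   def make_thumb(path, box, out_name):
        #       im = Image.open(path)
        #       r = max(im.size[0] / box[0], im.size[1] / box[1])
        #       im.thumbnail(tuple(int(x / r) for x in im.size), Image.ANTIALIAS)
        #       im.save(out_name, im.format)
        #       im.close()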
tmp_pict.thumbnail(\n tuple([int(x / ratio) for x in tmp_pict.size]), Image.ANTIALIAS)\n tmp_pict.save(small_thumb, tmp_pict.format)\n tmp_pict.close()\n\n # Create an Photo instance\n photo = Photo(author=upload_auth)\n photo.med_thumb.save(mid_thumb, ImageFile(open(mid_thumb, 'rb')))\n photo.small_thumb.save(small_thumb, ImageFile(open(small_thumb, 'rb')))\n photo.save()\n\n os.rename(path, settings.MEDIA_ROOT + im_name)\n arc.write(settings.MEDIA_ROOT + im_name)\n os.remove(settings.MEDIA_ROOT + im_name)\n os.remove(mid_thumb)\n os.remove(small_thumb)\n os.remove(settings.MEDIA_ROOT + str(f))\n arc.close()\n\n\ndef view_upload(request):\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n lastname = form.cleaned_data['lastname']\n firstname = form.cleaned_data['firstname']\n try:\n upload_auth = Photograph.objects.get(\n lastname=lastname, firstname=firstname)\n except Exception as e:\n upload_auth = Photograph(\n lastname=lastname, firstname=firstname)\n upload_auth.save()\n try:\n handle_uploaded_file(request.FILES['archive'], upload_auth)\n except Exception as e:\n print(e)\n else:\n form = UploadForm()\n return render(request, 'gallery/upload.html', locals())\n\n\ndef view_gallery(request, pgraph_id):\n desired_aut = get_object_or_404(Photograph, id=pgraph_id)\n photos = Photo.objects.filter(author=desired_aut)\n if len(photos):\n main_pic = photos[0]\n return render(request, 'gallery/gallery.html', locals())\n\n\ndef view_photo(request, pgraph_id, photo_id):\n desired_aut = get_object_or_404(Photograph, id=pgraph_id)\n photos = Photo.objects.filter(author=desired_aut)\n if photos:\n main_pic = photos[0]\n for i in photos:\n if (str(i.id) == str(photo_id)):\n main_pic = i\n\n return render(request, 'gallery/gallery.html', locals())\n\n\ndef view_list_pgraphs(request):\n return render(request, 'gallery/all_pgraphs.html', locals())\n","repo_name":"MFreeze/photos","sub_path":"gallery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17805574374","text":"# Securonix backend for sigmac \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport re\nimport sigma\nfrom sigma.parser.condition import ConditionOR\nfrom .base import SingleTextQueryBackend\nfrom ..parser.modifiers.base import SigmaTypeModifier\n\n\nclass SecuronixBackend(SingleTextQueryBackend):\n \"\"\"Converts Sigma rule into Securonix Spotter search\"\"\"\n reEscape = re.compile('([\"\\\\\\()])')\n identifier = \"securonix\"\n active = True\n andToken = \" AND \"\n orToken = \" OR \"\n notToken = \" NOT \"\n subExpression = \"(%s)\"\n listExpression = \"(%s)\"\n listSeparator = \" OR \"\n valueExpression = '\"%s\"'\n containsExpression = \"%s CONTAINS %s\"\n startsWithExpression = \"%s STARTS WITH %s\"\n endsWithExpression = \"%s ENDS WITH %s\"\n nullExpression = \"%s NULL\"\n notNullExpression = \"%s NOT NULL\"\n mapExpression = \"%s = %s\"\n mapListsSpecialHandling = True\n mapListValueExpression = \"%s = %s\"\n functionalityCount = 0\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n aFL = [\"rg_functionality\"]\n self.default_field = \"rawevent\"\n\n for item in self.sigmaconfig.fieldmappings.values():\n if item.target_type is list:\n aFL.extend(item.target)\n else:\n aFL.append(item.target)\n self.allowedFieldsList = list(set(aFL))\n\n def generateNode(self, node):\n # print(\"node is {}\".format(node))\n if type(node) == sigma.parser.condition.ConditionAND:\n return self.generateANDNode(node)\n elif type(node) == sigma.parser.condition.ConditionOR:\n return self.generateORNode(node)\n elif type(node) == sigma.parser.condition.ConditionNOT:\n return self.generateNOTNode(node)\n elif type(node) == sigma.parser.condition.ConditionNULLValue:\n return self.generateNULLValueNode(node)\n elif type(node) == sigma.parser.condition.ConditionNotNULLValue:\n return self.generateNotNULLValueNode(node)\n elif type(node) == sigma.parser.condition.NodeSubexpression:\n return self.generateSubexpressionNode(node)\n elif type(node) == tuple:\n return self.generateMapItemNode(node)\n elif type(node) in (str, int):\n return self.generateValueNode(node, False)\n elif type(node) == list:\n return self.generateListNode(node)\n elif isinstance(node, SigmaTypeModifier):\n return self.generateTypedValueNode(node)\n else:\n raise TypeError(\"Node type %s was not expected in Sigma parse tree\" % (str(type(node))))\n\n # Skip logsource value from sigma document for separate path.\n def generateCleanValueNodeLogsource(self, value):\n return self.valueExpression % (self.cleanValue(str(value)))\n\n def generate(self, sigmaparser):\n \"\"\"Method is called for each sigma rule and receives the parsed rule (SigmaParser)\"\"\"\n\n # Retrieve the value of fields from the rule and generate spotter query\n fields, mappedAttribute = self.generateQueryforFields(sigmaparser)\n\n result = \"\"\n try:\n timeframe = sigmaparser.parsedyaml['detection']['timeframe']\n except:\n timeframe = None\n for parsed in sigmaparser.condparsed:\n result = self.generateQuery(parsed, timeframe)\n\n if result is None or result.find(\"rg_functionality\") == -1:\n print(\"Logsource mapping not found in configuration file.\\n\")\n return\n\n if mappedAttribute is not None:\n result += fields\n\n # appending index value\n index = self.appendIndexinQuery(result.find(\"rawevent\"))\n result = index + result\n\n # replace escape characters due to \"\\\"\n result = result.replace(\"\\\\\\\\\", \"\\\\\").replace(\"\\\\\\\"\", \"\\\"\").replace(\"\\\"\\\\\\\\\", \"\\\"\\\\\").replace(\"\\?\",\"?\").replace(\"\\*\",\"*\")\n\n # replace escape 
characters due to \"(\" and \")\"\n result = result.replace(\"\\(\", \"(\").replace(\"\\)\", \")\")\n\n return result\n\n def sigma_to_spotter(self, line):\n key_words=[\"CONTAINS \", \"IN \", \"BETWEEN \", \"ENDS WITH \", \"STARTS WITH \", \"EQUALS \", \"NULL \"]\n if(\"NOT (\" in line):\n sigma = line\n query = \"\"\n fetched = 0\n while True:\n if(\"NOT (\" in sigma):\n query += sigma[fetched:sigma.find(\"NOT\")]\n fetched = sigma.find(\"NOT\")\n s_half = sigma[fetched+3:]\n within_brackets = s_half[:s_half.find(\")\")+1]\n within_brackets_len = len(within_brackets)\n for i in key_words:\n within_brackets = within_brackets.replace(i,\"NOT \"+i)\n query += within_brackets\n fetched+= within_brackets_len\n sigma = sigma.replace(\"NOT\",\"\",1)\n else:\n query+=sigma[fetched:]\n break\n return query\n else:\n return line\n\n def generateQuery(self, parsed, timeframe):\n self.functionalityCount = 0\n result = self.generateNode(parsed.parsedSearch)\n if result and parsed.parsedAgg:\n result += self.generateAggregation(parsed.parsedAgg, timeframe)\n query = self.sigma_to_spotter(str(result))\n return query\n\n # Generate Query for Fields\n def generateQueryforFields(self, sigmaparser):\n columns = list()\n notMapped = list()\n mappedAttr = None\n fields = \"\"\n try:\n for field in sigmaparser.parsedyaml[\"fields\"]:\n mappedAttr = sigmaparser.config.get_fieldmapping(field).resolve_fieldname(field, sigmaparser)\n if mappedAttr == field:\n notMapped.append(field)\n if type(mappedAttr) == str:\n columns.append(mappedAttr)\n elif type(mappedAttr) == list:\n columns.extend(mappedAttr)\n else:\n raise TypeError(\"Field mapping must return string or list\")\n\n fields = \",\".join(str(x) for x in columns)\n fields = \" | TABLE \" + fields\n\n if len(notMapped) > 0:\n consoleOutput = \"No attribute mapping found for \"\n consoleOutput += \", \".join(notMapped)\n consoleOutput += \" in configuration file.\"\n print(consoleOutput, \"\\n\")\n\n except KeyError: # no 'fields' attribute\n mappedAttr = None\n pass\n\n return fields, mappedAttr\n\n # Appending index value in the output Query\n def appendIndexinQuery(self, findRawevent):\n if findRawevent != -1:\n index = \"index = archive AND \"\n else:\n index = \"index = activity AND \"\n return index\n\n # Clearing values from special characters.\n def generateMapItemNode(self, node):\n key, value = node\n if key in self.allowedFieldsList:\n if key == \"rg_functionality\":\n if self.functionalityCount > 0:\n return\n self.functionalityCount += 1\n if self.mapListsSpecialHandling == False and type(value) in (\n str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):\n return self.generateQueryForWildcardRule(key, value)\n elif type(value) is list:\n return self.generateMapItemListNode(key, value)\n elif value is None:\n return self.nullExpression % (key,)\n else:\n raise TypeError(\"Backend does not support map values of type \" + str(type(value)))\n else:\n # Rawevent change begins\n key = self.default_field\n if self.mapListsSpecialHandling == False and type(value) in (\n str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):\n return self.generateRaweventQueryforStringInteger(key, value)\n elif isinstance(value, list):\n return self.generateMapItemListNode(key, value)\n elif value is None:\n return\n else:\n raise TypeError(\"Backend does not support map values of type \" + str(type(value)))\n\n # Function to generate Spotter query for CONTAINS, ENDS WITH, STARTS WITH\n def 
generateQueryForWildcardRule(self, key, value):\n # contains expression, checking length in case only a wildcard is provided as field value\n if isinstance(value, str) and value.startswith(\"*\") and value.endswith(\"*\") and len(value) > 1:\n outputValue = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", value))\n if self.isWildCardPresent(outputValue):\n return self.mapExpression % (key, self.generateValueNode(\"*{}*\".format(outputValue), True))\n return self.containsExpression % (key, self.generateValueNode(outputValue, True))\n # endswith expression, checking length in case only a wildcard is provided as field value\n elif isinstance(value, str) and value.startswith(\"*\") and len(value) > 1:\n outputValue = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", value))\n if self.isWildCardPresent(outputValue):\n return self.mapExpression % (key, self.generateValueNode(\"*{}\".format(outputValue), True))\n return self.endsWithExpression % (key, self.generateValueNode(outputValue, True))\n # startswith expression, checking length in case only a wildcard is provided as field value\n elif isinstance(value, str) and value.endswith(\"*\") and len(value) > 1:\n # Issue related to * in between the string\n outputValue = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", value))\n if self.isWildCardPresent(outputValue):\n return self.mapExpression % (key, self.generateValueNode(\"{}*\".format(outputValue), True))\n return self.startsWithExpression % (key, self.generateValueNode(outputValue, True))\n else:\n return self.mapExpression % (key, self.generateCleanValueNodeLogsource(value))\n\n # Function to generate Spotter Query for rawevent in case of mapListsSpecialHandling is True and value is\n # instance of string and integer\n def generateRaweventQueryforStringInteger(self, key, value):\n if isinstance(value, str):\n output_value = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", value))\n if output_value:\n if self.isWildCardPresent(output_value):\n output_value = self.generateValueNode(\"*{}*\".format(output_value), True)\n return self.mapExpression % (key, output_value)\n output_value = self.generateValueNode(output_value, True)\n return self.containsExpression % (key, output_value)\n else:\n return\n elif isinstance(value, int):\n output_value = self.generateValueNode(value, True)\n return self.containsExpression % (key, output_value)\n\n # for keywords values with space\n # keyword change starts (added keyword present in the method arguments)\n def generateValueNode(self, node, attrPresent):\n if attrPresent:\n if type(node) is int:\n return self.cleanValue(str(node))\n return self.valueExpression % (str(node).strip())\n else:\n output_value = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", node))\n if self.isWildCardPresent(output_value):\n return 'rawevent = \"*{}*\"'.format(output_value.strip())\n return 'rawevent CONTAINS \"{}\"'.format(output_value.strip())\n\n # collect elements of Securonix search using OR\n def generateMapItemListNode(self, key, value):\n itemslist = list()\n result = \"(\"\n if key == self.default_field:\n for item in value:\n output_value = self.cleanValue(re.sub(\"^\\*|\\*$\", \"\", item))\n if output_value:\n if self.isWildCardPresent(output_value):\n output_value = self.generateValueNode(\"*{}*\".format(output_value), True)\n itemslist.append(self.mapExpression % (key, output_value))\n else:\n output_value = self.generateValueNode(output_value, True)\n itemslist.append(self.containsExpression % (key, output_value))\n else:\n return\n else:\n for item in value:\n # used generateValueNode 
method instead of generateCleanValueNodeLogsource previously\n itemslist.append((self.generateQueryForWildcardRule(key, item)))\n result += \" OR \".join(itemslist)\n result += \")\"\n return result\n\n # Function to determine the presence of wildcard for rawevent attribute\n def isWildCardPresent(self, value):\n if value.find(\"*\") != -1:\n return True\n return False\n\n def generateAggregation(self, agg, timeframe):\n if agg == None:\n return \"\"\n if agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_NEAR:\n raise NotImplementedError(\"'Near' aggregation operator is not yet implemented for this backend\")\n if agg.groupfield == None:\n if agg.aggfunc_notrans == 'count':\n if agg.aggfield == None and timeframe != None:\n return \" AND eventtime AFTER -%s | WHERE count %s %s \" % (timeframe, agg.cond_op, agg.condition)\n elif agg.aggfield == None and timeframe == None:\n return \"| WHERE count %s %s\" % (agg.cond_op, agg.condition)\n return \"\"\n else:\n if agg.aggfunc_notrans == 'count':\n if agg.aggfield == None and timeframe != None:\n return \" AND eventtime AFTER -%s | STATS %s | WHERE count %s %s \" % (\n timeframe, agg.groupfield, agg.cond_op, agg.condition)\n elif agg.aggfield == None and timeframe == None:\n return \" | STATS %s | WHERE count %s %s \" % (agg.groupfield, agg.cond_op, agg.condition)\n else:\n agg.aggfunc_notrans = 'DISTINCT'\n if timeframe != None:\n return \" AND eventtime AFTER -%s | STATS %s(%s) %s | WHERE DISTINCT(%s) %s %s\" % (\n timeframe, agg.aggfunc_notrans, agg.aggfield, agg.groupfield, agg.aggfield, agg.cond_op,\n agg.condition)\n return \"| STATS %s(%s) %s | WHERE DISTINCT(%s) %s %s\" % (\n agg.aggfunc_notrans, agg.aggfield or \"\", agg.groupfield or \"\", agg.aggfield or \"\", agg.cond_op,\n agg.condition)\n","repo_name":"Securonix/sigma2snypr","sub_path":"securonix.py","file_name":"securonix.py","file_ext":"py","file_size_in_byte":15433,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"}
{"seq_id":"18338917911","text":"# Reed Solomon\r\nimport random\r\nfrom itertools import combinations\r\n\r\nk = 3\r\ns = 1\r\nn = k + 2 * s\r\n\r\ndef encoding(m, p):\r\n def input_vector_representation_inefficient(m):\r\n m_vector = []\r\n # P - 161 bits; 20 ch at a time, binary repr of ascii < 161\r\n\r\n # representation in base P\r\n int_m = int(m)\r\n r = int_m % p\r\n while r:\r\n m_vector.append(r)\r\n int_m = int_m // p\r\n r = int_m % p\r\n return list(reversed(m_vector))\r\n\r\n def input_vector_representation_efficient():\r\n p = 9\r\n vector = [int.to_bytes(ord(ch),1,'big') for ch in m]\r\n print(vector)\r\n\r\n input_vector_representation_efficient()\r\n print(input_vector_representation_inefficient(m))\r\n\r\n m_vector = input_vector_representation_inefficient(m)\r\n\r\n y = []\r\n\r\n for i in range(1, n + 1):\r\n polynomial = 0\r\n for poz in range(0, k - 2):\r\n polynomial += ((m_vector[poz] * i + m_vector[poz + 1]) * i) % p\r\n y.append(polynomial % p) # reduce by the modulus parameter p rather than a hard-coded 11\r\n return y # d\r\n\r\n\r\ndef decoding(z):\r\n\r\n def compute_A():\r\n # compute the combinations of k elements from 1...n\r\n possible_A = [list(c) for c in list(combinations(range(1, n + 1), k))]\r\n for A in possible_A: # compute the free coefficient until fc == 0\r\n sum_products = 0\r\n for i in A:\r\n product = 1\r\n j_product = 1\r\n for j in A:\r\n if i != j:\r\n j_product *= j\r\n product *= i-j\r\n sum_products += z[i-1] * pow(product, p-2, p) * j_product\r\n fc = sum_products % p\r\n if fc == 0:\r\n return A\r\n A = compute_A()\r\n 
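# Illustrative note, not part of the original source: compute_A() brute-forces\r\n # every k-subset of evaluation points and keeps the first whose interpolated\r\n # free coefficient vanishes mod p; pow(product, p-2, p) is the modular inverse\r\n # of product by Fermat's little theorem, e.g. pow(3, 9, 11) == 4 since (3*4) % 11 == 1.\r\n 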
print(\"A: \", A)\r\n\r\n # def compute_polynomial():\r\n\r\n\r\ndef simulate_error(encoded_vector):\r\n random_poz = random.randint(0, len(encoded_vector) - 1)\r\n encoded_vector[random_poz] = (encoded_vector[random_poz] + random.randint(10, 100)) % p\r\n return encoded_vector\r\n\r\n\r\nif __name__ == '__main__':\r\n input_m = \"29\"\r\n p = 11\r\n y_encoding = encoding(input_m, p)\r\n print(y_encoding)\r\n print(simulate_error(y_encoding))\r\n z = [9, 2, 6, 5, 8] # one error\r\n m_decoded = decoding(z)\r\n print(m_decoded)\r\n","repo_name":"dariastarica/Cryptography-Algorithms","sub_path":"Reed-Solomon encoding decoding/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33543244588","text":"\"\"\"\nUnit tests\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections import namedtuple\n\nimport pytest\n\nimport pepotron\n\n\n@pytest.mark.parametrize(\n \"search, expected_url\",\n [\n (\"8\", \"https://peps.python.org/pep-0008/\"),\n (\"12\", \"https://peps.python.org/pep-0012/\"),\n (\"2.7\", \"https://peps.python.org/pep-0373/\"),\n (None, \"https://peps.python.org\"),\n (\"dead batteries\", \"https://peps.python.org/pep-0594/\"),\n (\"release\", \"https://peps.python.org/topic/release/\"),\n (\"typing\", \"https://peps.python.org/topic/typing/\"),\n (\"topics\", \"https://peps.python.org/topic/\"),\n (\"topic\", \"https://peps.python.org/topic/\"),\n ],\n)\ndef test_url(search: str, expected_url: str) -> None:\n # Act\n pep_url = pepotron.pep_url(search)\n # Assert\n assert pep_url == expected_url\n\n\ndef test_next() -> None:\n # Arrange\n Pull = namedtuple(\"Pull\", [\"title\"])\n prs = [\n Pull(title=\"PEP 716: Seven One Six\"),\n Pull(title=\"PEP 717: Seven One Seven\"),\n ]\n # mock _get_github_prs:\n pepotron._get_github_prs = lambda: prs\n\n # Act\n next_pep = pepotron.pep_url(\"next\")\n\n # Assert\n assert next_pep.startswith(\"Next available PEP: \")\n assert next_pep.split()[-1].isdigit()\n\n\n@pytest.mark.parametrize(\n \"search, base_url, expected_url\",\n [\n (\n \"8\",\n \"https://hugovk.github.io/peps\",\n \"https://hugovk.github.io/peps/pep-0008/\",\n ),\n (\n \"3.11\",\n \"https://hugovk.github.io/peps\",\n \"https://hugovk.github.io/peps/pep-0664/\",\n ),\n (\n None,\n \"https://hugovk.github.io/peps\",\n \"https://hugovk.github.io/peps\",\n ),\n (\n \"dead batteries\",\n \"https://hugovk.github.io/peps\",\n \"https://hugovk.github.io/peps/pep-0594/\",\n ),\n ],\n)\ndef test_url_base_url(search: str, base_url: str, expected_url: str) -> None:\n # Act\n pep_url = pepotron.pep_url(search, base_url)\n # Assert\n assert pep_url == expected_url\n\n\n@pytest.mark.parametrize(\n \"search, expected_url\",\n [\n (\"594\", \"https://pep-previews--2440.org.readthedocs.build/pep-0594/\"),\n (None, \"https://pep-previews--2440.org.readthedocs.build\"),\n ],\n)\ndef test_url_pr(search: str | None, expected_url: str) -> None:\n # Arrange\n pr = 2440\n # Act\n pep_url = pepotron.pep_url(search, pr=pr)\n # Assert\n assert pep_url == expected_url\n\n\ndef test__download_peps_json_ok() -> None:\n # Arrange\n pepotron._cache.clear(clear_all=True)\n # Act\n filename = pepotron._download_peps_json()\n # Assert\n assert filename.suffix == \".json\"\n\n\ndef test__download_peps_json_error() -> None:\n with pytest.raises(RuntimeError):\n pepotron._download_peps_json(\"https://httpstat.us/404\")\n\n\ndef test_pep() -> None:\n url = pepotron.open_pep(\"8\", 
dry_run=True)\n assert url == \"https://peps.python.org/pep-0008/\"\n\n\ndef test_open_bpo() -> None:\n url = pepotron.open_bpo(38374, dry_run=True)\n assert url == \"https://bugs.python.org/issue?@action=redirect&bpo=38374\"\n","repo_name":"hugovk/pepotron","sub_path":"tests/test_pepotron.py","file_name":"test_pepotron.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"67"}
{"seq_id":"4436769258","text":"import numpy as np\n\n#First, verify for f(x) = exp(x)\n\n#x = 42 is easily verifiable since it was used in class\n\nx = 42\neps = 2**(-52)\ndx = eps**(1/3) \n#dx = (eps*f/f''')**(1/3) = (eps*e**x/e**x)**(1/3)= eps**(1/3)\n\nx1 = x + dx\nx2 = x - dx\nx3 = x + 2*dx\nx4 = x - 2*dx\n\nf1 = np.exp(x1)\nf2 = np.exp(x2)\nf3 = np.exp(x3)\nf4 = np.exp(x4)\n\nderiv1 = (f1 + f3 - f2 - f4)/(6*dx)\n\nprint(\"Derivative is \",deriv1,\" with fractional error \",1-deriv1/np.exp(x))\n\n#Now we verify for f(x) = exp(0.01x)\n\ndx2 = eps**(1/3)/0.01 \n#dx = (eps*f/f''')**(1/3) = (eps*exp(0.01x)/(0.01**3*exp(0.01x)))**(1/3) = eps**(1/3)/0.01\n\n#the sample points must be rebuilt with the step size dx2 suited to this function\nx1 = x + dx2\nx2 = x - dx2\nx3 = x + 2*dx2\nx4 = x - 2*dx2\n\nf5 = np.exp(0.01*x1)\nf6 = np.exp(0.01*x2)\nf7 = np.exp(0.01*x3)\nf8 = np.exp(0.01*x4)\n\nderiv2 = (f5 + f7 - f6 - f8)/(6*dx2)\n\nprint(\"Derivative is \",deriv2,\" with fractional error \",1-deriv2/np.exp(0.01*x))\n\n\n","repo_name":"VegaHitti/PHYS-512","sub_path":"problem_sets/ps1/Q1/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"18051886983","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[41]:\n\n\nfrom qiskit import QuantumRegister, ClassicalRegister, transpile\nfrom qiskit.circuit.library import RYGate\nfrom qiskit import QuantumCircuit, execute\nfrom qiskit import Aer\nimport random\nfrom math import pi\nimport numpy as np\n\n\n# In[42]:\n\n\nmapping_dict={'00':'1','01':'2','10':'3','11':'4'}\nglobal calls\ncalls=0\n\n\n# In[43]:\n\n\ndef initial_circuit():\n backend = Aer.get_backend('unitary_simulator')\n \n circuit = QuantumCircuit(10,10)\n\n for i in range(10):\n rand_no=round(random.random()*1000)%2 #choose 0,1 randomly\n if rand_no==1:\n circuit.x(i)\n circuit.h(range(0,10))\n\n for i in range(9):\n circuit.cx(i,i+1)\n return(circuit)\n\n\n# In[44]:\n\n\ndef angle_randomiser():\n theta_d=round(random.randrange(2,100))\n phi_d=round(random.randrange(2,100))\n lambda_d=round(random.randrange(2,100))\n theta,phi,lambd = [pi/theta_d,pi/phi_d,pi/lambda_d]\n return(theta,phi,lambd)\n\n\n# In[45]:\n\n\ndef unitary_and_measure(circuit,theta,phi,lambd,iterations=1):\n backend = Aer.get_backend('unitary_simulator')\n m=circuit.copy()\n for repeats in range(iterations):\n m.u(theta,phi,lambd,range(0,10))\n job = backend.run(transpile(m, backend))\n res=job.result().get_unitary(m, decimals=3)\n return res\n\n\n# In[46]:\n\n\ndef sorting_and_indexing(state_vec_rep):\n srt=sorted(set((state_vec_rep)))\n dict_indx={} #for indexing the probability amplitudes (between 0-7)\n index=0\n for num in srt:\n if index>=8:\n index=index%8\n dict_indx[num]=index\n index+=1\n dict_freq={} #for capturing the probability distribution of each index OR probability amplitude\n for num in state_vec_rep:\n if dict_indx[num] not in dict_freq.keys():\n dict_freq[dict_indx[num]]=1/1024\n else:\n dict_freq[dict_indx[num]]+=1/1024\n return(dict_freq)\n\n\n# In[47]:\n\n\ndef dec_bin(n,dict_freq):\n try:\n val=dict_freq[n]\n except:\n val=0\n return val\n\n\n# 
In[48]:\n\n\ndef circuit_bin4(theta0, theta1, theta2):\n \"\"\"create |ψ2> state\"\"\"\n qc = QuantumCircuit(2, 2)\n # --- creating |ψ1> ----\n qc.ry(theta0, 0)\n # --- creating |ψ1> ----\n\n # === expanding |ψ1> into |ψ2> ===\n # apply RY(theta1) to the|0> state\n qc.x(0)\n qc.cry(theta1, 0, 1)\n qc.x(0)\n # apply RY(theta2) to the|1> state\n qc.cry(theta2, 0, 1)\n # === expanding |ψ1> into |ψ2> ===\n qc.measure([0, 1], [1, 0])\n return qc\n\n\n# In[49]:\n\n\ndef food(dict_freq):\n backend = Aer.get_backend(\"qasm_simulator\")\n shots = 1\n \n p000,p001,p010,p011,p100,p101,p110,p111 = [dec_bin(i,dict_freq) for i in range(8)]\n \n #initializing for foods\n f_p0=sum([p001,p011])/sum([p001,p011,p101,p111])\n f_p1=sum([p101,p111])/sum([p001,p011,p101,p111])\n f_p00=p001/sum([p001,p011,p101,p111])\n f_p01=p011/sum([p001,p011,p101,p111])\n f_p10=p101/sum([p001,p011,p101,p111])\n f_p11=p111/sum([p001,p011,p101,p111])\n\n # aggregate the distribution into two bins\n theta0 = 2 * np.arccos(np.sqrt(f_p0))\n try:\n theta1 = 2 * np.arccos(np.sqrt(f_p00 / f_p0))\n except:\n theta1=0\n try:\n theta2 = 2 * np.arccos(np.sqrt(f_p10 / f_p1))\n except:\n theta2=0\n\n # construct circuit and measure\n qc = circuit_bin4(theta0, theta1, theta2)\n counts = execute(qc, backend, shots=shots).result().get_counts()\n for state in counts.keys():\n return(state)\n\n\n# In[50]:\n\n\ndef poison(dict_freq):\n backend = Aer.get_backend(\"qasm_simulator\")\n shots = 1\n \n p000,p001,p010,p011,p100,p101,p110,p111 = [dec_bin(i,dict_freq) for i in range(8)]\n \n #initializing for poison\n p_p0=sum([p000,p010])/sum([p000,p010,p100,p110])\n p_p1=sum([p100,p110])/sum([p000,p010,p100,p110])\n p_p00=p000/sum([p000,p010,p100,p110])\n p_p01=p010/sum([p000,p010,p100,p110])\n p_p10=p100/sum([p000,p010,p100,p110])\n p_p11=p110/sum([p000,p010,p100,p110])\n \n # aggregate the distribution into two bins\n theta0 = 2 * np.arccos(np.sqrt(p_p0))\n try:\n theta1 = 2 * np.arccos(np.sqrt(p_p00 / p_p0))\n except:\n theta1=0\n try:\n theta2 = 2 * np.arccos(np.sqrt(p_p10 / p_p1))\n except:\n theta2=0\n\n # construct circuit and measure\n qc = circuit_bin4(theta0, theta1, theta2)\n counts = execute(qc, backend, shots=shots).result().get_counts()\n for state in counts.keys():\n return(state)\n\n\n# In[51]:\n\n\ndef outgoing(dict_freq):\n li=[]\n for i in range(3):\n if i==1:\n li.append('food_'+mapping_dict[food(dict_freq)])\n else:\n li.append('poison_'+mapping_dict[poison(dict_freq)])\n return(li)\n\n\n# In[52]:\n\n\ndef placement(snake_list,outgoing):\n li=[]\n li2=snake_list.copy()\n while len(li)<3:\n x=round(random.randrange(0,600))\n y=round(random.randrange(0,600))\n if [x,y] in li2:\n continue\n else:\n li.append([x,y])\n li2.append([x,y])\n li_out=[]\n for i in range(3):\n li_out.append({outgoing[i]:li[i]})\n return(li_out)\n\n\n# In[53]:\n\n\ndef send_items(snake_list):\n global calls\n global circuit\n global theta\n global phi\n global lambd\n if calls==0:\n circuit =initial_circuit()\n theta,phi,lambd = angle_randomiser()\n unitary=unitary_and_measure(circuit,theta,phi,lambd).data\n state_vec_norm=[(x.real)**2+(x.imag)**2 for x in unitary[0]]\n dict_freq=sorting_and_indexing(state_vec_norm)\n \n fin_placed=placement(snake_list,outgoing(dict_freq))\n calls+=1\n else:\n unitary=unitary_and_measure(circuit,theta,phi,lambd,calls).data\n state_vec_norm=[(x.real)**2+(x.imag)**2 for x in unitary[0]]\n dict_freq=sorting_and_indexing(state_vec_norm)\n \n fin_placed=placement(snake_list,outgoing(dict_freq))\n calls+=1\n if calls>15:\n calls=0\n 
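# Illustrative note, not part of the original source: calls counts how many turns\n # the same randomly drawn circuit has been reused; unitary_and_measure(..., calls)\n # re-applies the same u(theta,phi,lambd) layer calls times, so the item\n # distribution drifts a little each turn, and the counter wraps past 15 so a\n # fresh circuit and fresh angles are drawn on the next call.\n 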
return(fin_placed)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Rohit-IISc/QSnake","sub_path":"SnakeQCAModule.py","file_name":"SnakeQCAModule.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"840780087","text":"# AUTOMATION, GAME FLOW!\r\n\r\nfrom time import sleep\r\nimport Descricao as d\r\nimport Questoes as p\r\nimport Gerador as g\r\n\r\ndef automacao_do_jogo(nome,n,respondidas,quant1,quant2,quant3,quant4,quant5,quant6,quant7):\r\n jogando=True\r\n while jogando==True:\r\n lista=d.premiacao(n)\r\n print('')\r\n print(f'Vamos para a Pergunta {n} que vale R$ {lista[3]}')\r\n sleep(3)\r\n print('-'*50)\r\n elimina=0\r\n pergunta=0\r\n while pergunta in respondidas:\r\n pergunta=g.gerarnumero()\r\n # print(pergunta) // CHECKING WHICH QUESTION THE RANDOM ALGORITHM CHOSE TO ANSWER\r\n resposta=p.questao(nome,n,respondidas,pergunta,lista,elimina,quant1,quant2,quant3,quant4,quant5,quant6,quant7)\r\n if resposta==[False,True]: # DID NOT STOP, KEEP GOING\r\n respondidas.append(pergunta)\r\n # print(respondidas) // CHECKING THE LIST OF QUESTIONS ALREADY ANSWERED\r\n n+=1\r\n\r\n if n==2:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Pular] X 3')\r\n input('Pressione ENTER para continuar...')\r\n quant1+=3\r\n\r\n elif n==3:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Dica] X 3')\r\n input('Pressione ENTER para continuar...')\r\n quant2+=3\r\n\r\n elif n==4 or n==13:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Cartas]')\r\n input('Pressione ENTER para continuar...')\r\n quant3+=1\r\n\r\n elif n==7:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Blindar]')\r\n input('Pressione ENTER para continuar...')\r\n quant4+=1\r\n\r\n elif n==10:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Clonar]')\r\n input('Pressione ENTER para continuar...')\r\n quant5+=1\r\n\r\n elif n==19:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Descobrir]')\r\n input('Pressione ENTER para continuar...')\r\n quant6+=1\r\n\r\n elif n==19: # note: duplicates the n==19 branch above, so this branch is unreachable as written\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Dica]')\r\n input('Pressione ENTER para continuar...')\r\n quant2+=1\r\n\r\n elif n==20:\r\n print('')\r\n print('===> Parabéns você desbloqueou uma nova ajuda: [Pular]')\r\n input('Pressione ENTER para continuar...')\r\n quant1+=1\r\n\r\n elif n==27:\r\n print('')\r\n print('===> FORCA: Se você tiver alguma ajuda deve escolher uma para ficar, o resto será descartado!')\r\n input('Pressione ENTER para continuar...')\r\n forca=p.forca(quant1,quant2,quant3,quant4,quant5,quant6,quant7)\r\n quant1,quant2,quant3,quant4,quant5,quant6,quant7=forca[0],forca[1],forca[2],forca[3],forca[4],forca[5],forca[6]\r\n\r\n elif n==30:\r\n quant1,quant2,quant3,quant4,quant5,quant6,quant7=0,0,0,0,0,0,0\r\n print('')\r\n print('PERGUNTA FINAL: Parabéns por chegar na pergunta de 1 Bilhão de Reais!')\r\n sleep(2)\r\n print('Mas a partir de agora, as coisas mudam...')\r\n sleep(2)\r\n print('Todas as suas ajudas serão anuladas!')\r\n sleep(2)\r\n print('Agora é Tudo ou Nada...')\r\n sleep(2)\r\n print('E você quem decidirá o Final')\r\n sleep(2)\r\n input('Pressione ENTER para continuar...')\r\n\r\n if n==31:\r\n jogando=False\r\n resultado_final=lista[3]\r\n print('')\r\n print('PARABÉNS VOCÊ ACABA DE GANHAR 1 BILHÃO DE REAIS!!!')\r\n\r\n elif resposta==[False,False]: # 
DID NOT STOP, BUT GOT IT WRONG\r\n jogando=False\r\n resultado_final=lista[1]\r\n elif resposta==[True,False]: # STOP THE GAME, DO NOT CONTINUE\r\n jogando=False\r\n resultado_final=lista[2]\r\n \r\n return resultado_final","repo_name":"BrunnoHenrique20/Jogo-do-Bilhao","sub_path":"Show do Bilhao/Automacao.py","file_name":"Automacao.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39019164543","text":"class Multifloat:\n \"\"\"\n Multiple floating point precision number\n \"\"\"\n def __init__(self, std, tiny, factor):\n \"\"\"\n `std`: standard component of this number\n `tiny`: differential element of this number\n `factor`: the approximate factor of the differential element\n \"\"\"\n self.std = std\n self.tiny = tiny\n self.factor = factor\n\n def get_std(self):\n if isinstance(self.std, Multifloat):\n return self.std.flatten()\n else:\n return self.std\n\n def get_tiny(self):\n if isinstance(self.tiny, Multifloat):\n return self.tiny.flatten()\n else:\n return self.tiny\n\n def flatten(self):\n flat = self.get_std() + self.get_tiny() * self.factor\n if isinstance(flat, Multifloat):\n return flat.flatten()\n else:\n return flat\n\n def __add__(self, other):\n \"\"\" Add another multifloat or a tensor to this multifloat \"\"\"\n if isinstance(other, Multifloat):\n if self.factor > other.factor:\n fact = other.factor / self.factor\n return Multifloat(\n std=self.std + other.std,\n tiny=self.tiny + other.tiny * fact,\n factor=self.factor,\n )\n else:\n fact = self.factor / other.factor\n return Multifloat(\n std=self.std + other.std,\n tiny=self.tiny * fact + other.tiny,\n factor=other.factor,\n )\n else:\n return Multifloat(std=self.std + other, tiny=self.tiny, factor=self.factor)\n\n def __neg__(self):\n return Multifloat(std=-self.std, tiny=-self.tiny, factor=self.factor)\n\n def __sub__(self, other):\n \"\"\" Subtract another multifloat or a tensor from this multifloat \"\"\"\n if not isinstance(self, Multifloat):\n return -other - self\n if isinstance(other, Multifloat):\n if self.factor > other.factor:\n fact = other.factor / self.factor\n return Multifloat(\n std=self.std - other.std,\n tiny=self.tiny - other.tiny * fact,\n factor=self.factor,\n )\n else:\n fact = self.factor / other.factor\n return Multifloat(\n std=self.std - other.std,\n tiny=self.tiny * fact - other.tiny,\n factor=other.factor,\n )\n else:\n return Multifloat(std=self.std - other, tiny=self.tiny, factor=self.factor)\n\n def __mul__(self, other):\n \"\"\" Multiply this multifloat by a tensor or multifloat \"\"\"\n if isinstance(other, Multifloat):\n return Multifloat(\n std=self.std * other.std,\n tiny=self.tiny * other.tiny,\n factor=self.factor * other.factor,\n )\n else:\n return Multifloat(\n std=self.std * other, tiny=self.tiny * other, factor=self.factor\n )\n \n __rmul__ = __mul__\n\n def __rsub__(self, other):\n \"\"\"\n other - self\n \"\"\"\n return other + (-self)\n\n def __truediv__(self, other):\n \"\"\" Divide this multifloat by a tensor \"\"\"\n if isinstance(other, Multifloat):\n raise NotImplementedError()\n else:\n return Multifloat(\n std=self.std / other, tiny=self.tiny / other, factor=self.factor\n )\n\n","repo_name":"CharlieMcVicker/quantum-stuff","sub_path":"simulations/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"10024558448","text":"import matplotlib.pyplot 
as plt\r\nimport numpy as np\r\nimport scipy.stats as stats\r\nimport math\r\n\r\n# useful plotting functions\r\ndef lin_interp(x, y, i, half):\r\n '''Linear interpolation to find half-maximum crossing coordinate.'''\r\n return x[i] + (x[i+1] - x[i]) * ((half - y[i]) / (y[i+1] - y[i]))\r\n\r\ndef half_max_x(x, y):\r\n '''Returns the x-coordinate of the two half-maximum crossings.'''\r\n half = max(y)/2.0\r\n signs = np.sign(np.add(y, -half))\r\n zero_crossings = (signs[0:-2] != signs[1:-1])\r\n zero_crossings_i = np.where(zero_crossings)[0]\r\n return [lin_interp(x, y, zero_crossings_i[0], half),\r\n lin_interp(x, y, zero_crossings_i[1], half)]\r\n\r\n# constants \r\nmax_K = 1000 # maximum K for plot of theoretical mean and variance\r\nK_th = np.linspace(1, max_K, max_K) # K's used to plot theoretical mean and variance\r\nw = 1 # synaptic weight\r\nrate = 10 # rate of input poisson neurons\r\ndelta_t = 10**(-4) # length of timestep \r\ntau = 20*10**(-3) # time constant\r\nsec = 15 # run for over 10 seconds (to compute mean and variance)\r\nsteps = int((1/delta_t)*sec) # number of steps in interval\r\n\r\n# THEORETICAL EXPRESSIONS\r\n# define theoretical expressions for mean and variance\r\nmean_th = tau*w*rate\r\nvariance_th = (w**2 * rate * tau**2)*(1 - delta_t*rate)/((2*tau - delta_t)*K_th)\r\n\r\n# EMPIRICAL OBSERVATIONS\r\n# range of K's to evaluate mean and variance experimentally\r\nK = np.array([1, 10, 100, 1000])\r\n# evaluate variance at these values of K\r\nvariance_th_plot = (w**2 * rate * tau**2)*(1 - delta_t*rate)/((2*tau - delta_t)*K)\r\n# initial conditions for voltage\r\nvoltage = np.zeros([len(K), steps])\r\n# placeholder for the several runs of the computation\r\n# of the mean and variance for each value of K\r\nmean1 = []\r\nmean10 = []\r\nmean100 = []\r\nmean1000 = []\r\nvar1= []\r\nvar10 = []\r\nvar100 = []\r\nvar1000 = []\r\n\r\n# perform 50 simulation trials \r\nfor p in range(50):\r\n n=0\r\n for k in K:\r\n # initialise input_spike_train to all 0's\r\n input_spike_train = np.zeros([k, steps])\r\n # populate the spike train matrix with k spike trains,\r\n # these are modelled by sampling 150000 times (15s) from a\r\n # bernoulli distribution with p = r_x*delta_t = 0.001\r\n for i in range(k):\r\n input_spike_train[i] = np.random.binomial(1, rate*delta_t, steps)\r\n np.random.seed()\r\n # implement 15s of the LIF neuron with k poisson inputs\r\n for i in range(1, steps):\r\n voltage[n, i] = voltage[n, i-1] + delta_t*(-voltage[n, i-1]/tau) + (w/k)*np.sum(input_spike_train[:,i-1], axis = 0)\r\n # increase counting variable\r\n n += 1\r\n\r\n # append the means and variances of the voltage\r\n # (removing the 100ms transient) for the different values of K \r\n mean1.append(np.mean(voltage[0,1000:]))\r\n mean10.append(np.mean(voltage[1,1000:]))\r\n mean100.append(np.mean(voltage[2,1000:]))\r\n mean1000.append(np.mean(voltage[3,1000:]))\r\n var1.append(np.var(voltage[0,1000:]))\r\n var10.append(np.var(voltage[1,1000:]))\r\n var100.append(np.var(voltage[2,1000:]))\r\n var1000.append(np.var(voltage[3,1000:]))\r\n\r\n# plot the theoretical variance vs. 
the empirical variance as a function of K\r\nplt.figure(1)\r\nplt.loglog(K_th, variance_th, label='Theoretical Prediction')\r\nplt.errorbar(1, np.mean(var1), yerr = 2*np.std(var1), solid_capstyle='projecting', capsize=5, fmt='.g', label = 'K=1')\r\nplt.errorbar(10, np.mean(var10), yerr = 2*np.std(var10), solid_capstyle='projecting', capsize=5, fmt='.b', label = 'K=10')\r\nplt.errorbar(100, np.mean(var100), yerr = 2*np.std(var100), solid_capstyle='projecting', capsize=5, fmt='.m', label = 'K=100')\r\nplt.errorbar(1000, np.mean(var1000), yerr = 2*np.std(var1000), solid_capstyle='projecting', capsize=5, fmt='.r', label = 'K=1000')\r\nplt.xlabel('K (number of input neurons)', size = 15, labelpad = 10)\r\nplt.ylabel('Variance (a.u.)', size = 15, labelpad = 10)\r\nplt.legend()\r\n\r\n# plot the theoretical mean vs. the empirical mean as a function of K\r\nplt.figure(2)\r\nplt.semilogx(K_th, [mean_th]*max_K, label='Theoretical Prediction')\r\nplt.errorbar(1, np.mean(mean1), yerr = 2*np.std(mean1), solid_capstyle='projecting', capsize=5, fmt='.g', label = 'K=1')\r\nplt.errorbar(10, np.mean(mean10), yerr = 2*np.std(mean10), solid_capstyle='projecting', capsize=5, fmt='.b', label = 'K=10')\r\nplt.errorbar(100, np.mean(mean100), yerr = 2*np.std(mean100), solid_capstyle='projecting', capsize=5, fmt='.m', label = 'K=100')\r\nplt.errorbar(1000, np.mean(mean1000), yerr = 2*np.std(mean1000), solid_capstyle='projecting', capsize=5, fmt='.r', label = 'K=1000')\r\nplt.xlabel('K (number of input neurons)', size = 15, labelpad = 10)\r\nplt.ylabel('Mean (a.u.)', size = 15, labelpad = 10)\r\nplt.legend()\r\n\r\n# plot the voltage dynamics with the theoretical and empirical mean \r\n# and standard deviation lines\r\nfig, axs = plt.subplots(2, 2)\r\n\r\n# K=1\r\n# voltage plot\r\naxs[0, 0].plot(voltage[0, :], color = 'k')\r\n# theoretical mean and variance lines\r\naxs[0, 0].plot([mean_th]*len(voltage[0, :]), color = 'g')\r\naxs[0, 0].plot([mean_th + 2*np.sqrt(variance_th_plot[0])]*len(voltage[0, :]), color = 'm')\r\n# experimental mean and variance lines\r\naxs[0, 0].plot([mean1[-1]]*len(voltage[0, :]), '--', color = 'g')\r\naxs[0, 0].plot([mean1[-1] + 2*np.sqrt(var1[-1])]*len(voltage[0, :]), '--', color = 'm')\r\naxs[0, 0].set_title('K = 1', size=15)\r\n\r\n# K = 10\r\n# voltage plot\r\naxs[0, 1].plot(voltage[1, :], color = 'k')\r\n# theoretical mean and variance lines\r\naxs[0, 1].plot([mean_th]*len(voltage[1, :]), color = 'g')\r\naxs[0, 1].plot([mean_th + 2*np.sqrt(variance_th_plot[1])]*len(voltage[1, :]), color = 'm')\r\naxs[0, 1].plot([mean_th - 2*np.sqrt(variance_th_plot[1])]*len(voltage[1, :]), color = 'm')\r\n# experimental mean and variance lines\r\naxs[0, 1].plot([mean10[-1]]*len(voltage[1, :]), '--', color = 'g')\r\naxs[0, 1].plot([mean10[-1] + 2*np.sqrt(var10[-1])]*len(voltage[1, :]),'--', color = 'm')\r\naxs[0, 1].plot([mean10[-1] - 2*np.sqrt(var10[-1])]*len(voltage[1, :]), '--', color = 'm')\r\naxs[0, 1].set_title('K = 10', size=15)\r\n\r\n# K = 100\r\n# voltage plot\r\naxs[1, 0].plot(voltage[2, :], color = 'k')\r\n# theoretical mean and variance lines\r\naxs[1, 0].plot([mean_th]*len(voltage[2, :]), color = 'g')\r\naxs[1, 0].plot([mean_th + 2*np.sqrt(variance_th_plot[2])]*len(voltage[2, :]), color = 'm')\r\naxs[1, 0].plot([mean_th - 2*np.sqrt(variance_th_plot[2])]*len(voltage[2, :]), color = 'm')\r\n# experimental mean and variance lines\r\naxs[1, 0].plot([mean100[-1]]*len(voltage[2, :]), '--', color = 'g')\r\naxs[1, 0].plot([mean100[-1] + 2*np.sqrt(var100[-1])]*len(voltage[2, :]), '--', color = 
'm')\r\naxs[1, 0].plot([mean100[-1] - 2*np.sqrt(var100[-1])]*len(voltage[2, :]), '--', color = 'm')\r\naxs[1, 0].set_title('K = 100', size = 15)\r\naxs[1, 0].set_xticks(np.linspace(0,steps, 16), minor = False)\r\naxs[1, 0].set_xticklabels(np.linspace(0,steps/10000, 16).astype(int), fontdict=None, minor=False)\r\n\r\n# K = 1000\r\n# voltage plot\r\naxs[1, 1].plot(voltage[3, :], 'k')\r\n# theoretical mean and variance plots\r\naxs[1, 1].plot([mean_th]*len(voltage[3, :]), color = 'g', label='Theoretical Mean')\r\naxs[1, 1].plot([mean_th + 2*np.sqrt(variance_th_plot[3])]*len(voltage[3, :]), color = 'm', label='Theoretical +/- 2*std')\r\naxs[1, 1].plot([mean_th - 2*np.sqrt(variance_th_plot[3])]*len(voltage[3, :]), color = 'm')\r\n# experimental mean and variance lines\r\naxs[1, 1].plot([mean1000[-1]]*len(voltage[3, :]), '--' ,color = 'g', label ='Simulation Mean')\r\naxs[1, 1].plot([mean1000[-1] + 2*np.sqrt(var1000[-1])]*len(voltage[3, :]),'--', color = 'm', label= 'Simulation +/- 2*std')\r\naxs[1, 1].plot([mean1000[-1] - 2*np.sqrt(var1000[-1])]*len(voltage[3, :]),'--', color = 'm')\r\naxs[1, 1].set_title('K = 1000', size = 15)\r\naxs[1, 1].set_xticks(np.linspace(0,steps, 16), minor = False)\r\naxs[1, 1].set_xticklabels(np.linspace(0,steps/10000, 16).astype(int), fontdict=None, minor=False)\r\naxs[1, 1].legend()\r\n\r\n# set the y-label\r\nfor ax in [axs.flat[0], axs.flat[2]]:\r\n ax.set_ylabel('Voltage (a.u.)', size = 12, labelpad =10)\r\n# set the x-ticks and x-label\r\nfor ax in [axs.flat[0], axs.flat[1]]:\r\n ax.set_xticks([])\r\n ax.set_xticklabels([])\r\nfor ax in [axs.flat[2], axs.flat[3]]:\r\n ax.set_xlabel('Time (s)', size =12, labelpad = 10)\r\n\r\n# plot theoretical vs. empirical voltage distributions\r\nfig, axs = plt.subplots(2, 2)\r\n\r\n# theoretical values of sigma\r\nsigma_th = np.sqrt(variance_th_plot)\r\n\r\n# K = 1\r\n# theoretical mean and variance\r\ntheor_gauss = np.linspace(mean_th - 3*sigma_th[0], mean_th + 3*sigma_th[0], 100)\r\ntheor_gauss_pdf = stats.norm.pdf(theor_gauss, mean_th, sigma_th[0])\r\naxs[0, 0].plot(theor_gauss, theor_gauss_pdf, label = 'Theoretical Prediction')\r\n# experimental normalised histogram\r\naxs[0, 0].hist(voltage[0, :], density = True, bins= 100, label = 'Simulation Values')\r\n# plot mean position\r\naxs[0, 0].stem([mean_th], [theor_gauss_pdf[50]], markerfmt = '')\r\n# plot full width at half maximum\r\ntheor_hmx = half_max_x(theor_gauss,theor_gauss_pdf)\r\naxs[0, 0].hlines(theor_gauss_pdf[50]/2, theor_hmx[0], theor_hmx[1], colors = 'tab:blue')\r\naxs[0, 0].set_title('K = 1', size = 15)\r\naxs[0, 0].legend()\r\n\r\n# K = 10\r\n# theoretical mean and variance\r\ntheor_gauss = np.linspace(mean_th - 3*sigma_th[1], mean_th + 3*sigma_th[1], 100)\r\ntheor_gauss_pdf = stats.norm.pdf(theor_gauss, mean_th, sigma_th[1])\r\naxs[0, 1].plot(theor_gauss, theor_gauss_pdf, label = 'Theoretical Prediction')\r\n# experimental normalised histogram\r\naxs[0, 1].hist(voltage[1, :], density = True, bins= 100, label = 'Simulation Values')\r\n# plot mean position\r\naxs[0, 1].stem([mean_th], [theor_gauss_pdf[50]], markerfmt = '')\r\n# plot full width at half maximum\r\ntheor_hmx = half_max_x(theor_gauss,theor_gauss_pdf)\r\naxs[0, 1].hlines(theor_gauss_pdf[50]/2, theor_hmx[0], theor_hmx[1], colors = 'tab:blue')\r\naxs[0, 1].set_title('K = 10', size = 15)\r\n\r\n\r\n# K = 100\r\n# theoretical mean and variance\r\ntheor_gauss = np.linspace(mean_th - 3*sigma_th[2], mean_th + 3*sigma_th[2], 100)\r\ntheor_gauss_pdf = stats.norm.pdf(theor_gauss, mean_th, 
sigma_th[2])\r\naxs[1, 0].plot(theor_gauss, theor_gauss_pdf, label = 'Theoretical Prediction')\r\n# experimental normalised histogram\r\naxs[1, 0].hist(voltage[2, :], density = True, bins= 100, label = 'Simulation Values')\r\n# plot mean position\r\naxs[1, 0].stem([mean_th], [theor_gauss_pdf[50]], markerfmt = '')\r\n# plot full width at half maximum\r\ntheor_hmx = half_max_x(theor_gauss,theor_gauss_pdf)\r\naxs[1, 0].hlines(theor_gauss_pdf[50]/2, theor_hmx[0], theor_hmx[1], colors = 'tab:blue')\r\naxs[1, 0].set_title('K = 100', size = 15)\r\n\r\n\r\n# K = 1000\r\n# theoretical mean and variance\r\ntheor_gauss = np.linspace(mean_th - 3*sigma_th[3], mean_th + 3*sigma_th[3], 100)\r\ntheor_gauss_pdf = stats.norm.pdf(theor_gauss, mean_th, sigma_th[3])\r\naxs[1, 1].plot(theor_gauss, theor_gauss_pdf, label = 'Theoretical Prediction')\r\n# experimental normalised histogram\r\naxs[1, 1].hist(voltage[3, :], density = True, bins= 100, label = 'Simulation Values')\r\n# plot mean position\r\naxs[1, 1].stem([mean_th], [theor_gauss_pdf[50]], markerfmt = '')\r\n# plot full width at half maximum\r\ntheor_hmx = half_max_x(theor_gauss,theor_gauss_pdf)\r\naxs[1, 1].hlines(theor_gauss_pdf[50]/2, theor_hmx[0], theor_hmx[1], colors = 'tab:blue')\r\naxs[1, 1].set_title('K = 1000', size = 15)\r\n\r\n\r\nplt.show()\r\n\r\n\r\n","repo_name":"SamuelLiebana/4G3_Coursework_1","sub_path":"code/exercise_3/exercise_3c.py","file_name":"exercise_3c.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40225398269","text":"'''\r\nfunction to test pretrained models on the test set and show the graph\r\nof actual values and predictions\r\n'''\r\nimport pandas as pd\r\nfrom furiosanet import test, get_layers_from_file\r\n\r\ndef main():\r\n '''\r\n test models saved in the csv\r\n '''\r\n models = pd.read_csv(\"model-evaluation.csv\", index_col=0).index.tolist()\r\n for model in models:\r\n weights_file = \"weights/automated/\" + model\r\n layers = get_layers_from_file(weights_file)\r\n test(weights_file, layers, show_fig=False)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n","repo_name":"jklewis99/furiosa","sub_path":"automate_testing.py","file_name":"automate_testing.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26141766011","text":"\"\"\"Simulation of stems on a 2D plane.\n\nTODO: more information here.\n\"\"\"\n\nimport json\nimport math\nfrom os import sys\nimport random\n\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy.spatial\nimport numpy as np\n\nimport plane_v1\n\n\n__author__ = \"Jeremey Chizewer, Joseph Rubin\"\n\n\nDISK = 0\nSQUARE = 1\n\n\ndef _main():\n if len(sys.argv) < 3:\n alert_bad_usage_and_abort()\n \n filename = sys.argv[1]\n output_filename = sys.argv[2]\n public_main(filename, output_filename)\n\n\ndef public_main(filename, output_filename):\n with open(filename, 'r') as data_file:\n data = json.loads(data_file.read())\n settings = data['settings']\n\n # Compatability.\n if 'state' in data:\n points = data['state']['points']\n stems = data['state']['stems']\n else:\n stems = data['stems']\n \n # Filter the stems if we would like to.\n heights = []\n for point in stems:\n height = point['height']\n heights.append(height)\n max_height = max(heights)\n\n stem_coords = []\n for point in stems:\n height = point['height']\n if 
height > max_height * 0.5:\n stem_coords.append(point['coord'])\n\n # Remove boundary simplices.\n tri = scipy.spatial.Delaunay(stem_coords)\n #boundary_simplices = [i for i, _ in enumerate(tri.simplices) if -1 in tri.neighbors[i]]\n #simplices_left = [s for i, s in enumerate(tri.simplices) if i not in boundary_simplices]\n #tri.simplices = simplices_left\n\n # Generate random points for comparison.\n random_coords = []\n for i in range(len(stem_coords)):\n random_coords.append(plane_v1.random_coord(settings['PLANE_SHAPE']))\n\n random_tri = scipy.spatial.Delaunay(random_coords)\n #boundary_simplices = [i for i, _ in enumerate(random_tri.simplices) if -1 in random_tri.neighbors[i]]\n #simplices_left = [s for i, s in enumerate(random_tri.simplices) if i not in boundary_simplices]\n #random_tri.simplices = simplices_left\n\n tri_angle_std = calculate_angle_stddev(tri, stem_coords)\n random_tri_angle_std = calculate_angle_stddev(random_tri, random_coords)\n tri_side_length_std = calculate_side_length_stddev(tri, stem_coords)\n random_tri_side_length_std = calculate_side_length_stddev(random_tri, random_coords)\n\n with open(output_filename, 'w') as output_file:\n output_file.write(json.dumps({\n 'STEM_ANGLE_STD_DEV': tri_angle_std,\n 'RANDOM_ANGLE_STD_DEV': random_tri_angle_std,\n 'STEM_SIDE_STD_DEV': tri_side_length_std,\n 'RANDOM_SIDE_STD_DEV': random_tri_side_length_std,\n 'STEM_COUNT': len(stem_coords)\n }))\n\n #print('stem angle stddev is', tri_angle_std)\n #print('{} point random angle stddev is'.format(len(stem_coords)), random_tri_angle_std)\n #print('stem side length stddev is', tri_side_length_std)\n #print('{} point random side length stddev is'.format(len(stem_coords)), random_tri_side_length_std)\n\n #plt.triplot([p[0] for p in tri.points], [p[1] for p in tri.points], tri.simplices)\n #plt.triplot([p[0] for p in random_tri.points], [p[1] for p in random_tri.points], random_tri.simplices)\n #plt.show()\n\n\ndef calculate_angle_stddev(tri, from_coords):\n angles = []\n for simplex in tri.simplices:\n a = from_coords[simplex[0]]\n b = from_coords[simplex[1]]\n c = from_coords[simplex[2]]\n ang1 = calculate_angle(a, b, c)\n ang2 = calculate_angle(c, a, b)\n ang3 = calculate_angle(b, c, a)\n angles.append(ang1)\n angles.append(ang2)\n angles.append(ang3)\n return np.std(angles)\n\n\ndef calculate_side_length_stddev(tri, from_coords):\n side_lengths = []\n for simplex in tri.simplices:\n a = from_coords[simplex[0]]\n b = from_coords[simplex[1]]\n c = from_coords[simplex[2]]\n side_length1 = euclidean_distance(a, b)\n side_length2 = euclidean_distance(b, c)\n side_length3 = euclidean_distance(c, a)\n side_lengths.append(side_length1)\n side_lengths.append(side_length2)\n side_lengths.append(side_length3)\n return np.std(side_lengths)\n\n\ndef euclidean_distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n return math.sqrt(dx * dx + dy * dy)\n\n\ndef calculate_angle(a, b, c):\n angle = abs(math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) - math.atan2(a[1]-b[1], a[0]-b[0])))\n if angle > 180:\n angle = 360 - angle\n return angle\n\n\ndef alert_bad_usage_and_abort():\n print('usage: {}: <filename.json> <output_filename.json>'.format(sys.argv[0]), file=sys.stderr)\n exit(1)\n\n\ndef visualize_init(settings):\n matplotlib.use('TkAgg')\n plt.clf()\n fig = plt.gcf()\n ax = plt.gca()\n #ax = Axes3D(fig)\n fig.set_size_inches(12, 12)\n ax.axis('equal', adjustable='datalim')\n ax.set(xlim=(-1.5, 1.5), ylim=(-1.5, 1.5))\n #plt.axis(xmin=0, xmax=1, ymin=0, ymax=1)\n #plt.axis('equal')\n #fname = 
sys.argv[1] + shape + str(count) + '-' + str(pstick1) + '-' + str(pmelt) + '-' + str(pstick2) + '-' + str(delta) + '-' + str(width) + '-' + str(numdrops) + '-' + str(minStemLen) + '-' + str(hexa) + '.png'\n #ax.axis('equal')\n if settings['PLANE_SHAPE'] == DISK: ax.add_artist(plt.Circle((0,0), radius=1, fill=False))\n\n\ndef visualize_state_points(state, settings):\n stems = state['stems']\n points = state['points']\n\n fig = plt.gcf()\n ax = plt.gca()\n \n for point_id in points:\n point = points[str(point_id)]\n coord = point['coord']\n assert point['stem']\n drop_artist = plt.Circle((coord[0], coord[1]), radius=settings['DROP_RADIUS'], fill=True, color=(0, 1, 0, 0.1))\n ax.add_artist(drop_artist)\n\n plt.draw()\n\n\n#temp\ndef plot(coords):\n visualize_init({'PLANE_SHAPE': 0})\n fig = plt.gcf()\n ax = plt.gca()\n for coord in coords:\n drop_artist = plt.Circle((coord[0], coord[1]), radius=0.05, fill=True, color=(0.5, 0, 0, 1))\n ax.add_artist(drop_artist)\n plt.show()\n\n\ndef visualize_state_stems(state, settings):\n stems = state['stems']\n points = state['points']\n\n fig = plt.gcf()\n ax = plt.gca()\n\n height_max = None\n for point_id in points:\n point = points[str(point_id)]\n height = point['height']\n if height_max is None or height > height_max:\n height_max = height\n\n for point_id in stems:\n point = points[str(point_id)]\n coord = point['coord']\n height = point['height']\n #point_id_bottom = stem[0]\n #point_bottom = points[str(point_id_bottom)]\n #coord_bottom = point_bottom['coord']\n drop_artist = plt.Circle((coord[0], coord[1]), radius=settings['DROP_RADIUS'], fill=True, color=(height / height_max, 0, 0, 1))\n #drop_artist_bottom = plt.Circle((coord_bottom[0], coord_bottom[1]), radius=settings['DROP_RADIUS'], fill=True, color=(0, 0, 1, 0.1))\n ax.add_artist(drop_artist)\n #ax.add_artist(drop_artist_bottom)\n bounce_artist_outer = plt.Circle((coord[0], coord[1]), radius=settings['BOUNCE_DISTANCE'] + settings['DROP_RADIUS']*1, fill=False, color=(height / height_max, 0, 0, 1))\n bounce_artist_inner = plt.Circle((coord[0], coord[1]), radius=settings['BOUNCE_DISTANCE'] - settings['DROP_RADIUS']*1, fill=False, color=(height / height_max, 0, 0, 1))\n #ax.add_artist(bounce_artist_outer)\n #ax.add_artist(bounce_artist_inner)\n\n plt.draw()\n\n\nif __name__ == '__main__':\n _main()\n","repo_name":"josephrubin/order-from-chaos","sub_path":"stat_v1.py","file_name":"stat_v1.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41667558307","text":"import os.path\nimport re\n\n\nclass Template:\n def __init__(self, template_path: str, var_prefix: str = \"CRUPEST\"):\n if len(var_prefix) != 0 and re.fullmatch(r\"^[a-zA-Z_][a-zA-Z0-9_]*$\", var_prefix) is None:\n raise ValueError(\"Invalid var prefix.\")\n self.template_path = template_path\n self.template_name = os.path.basename(\n template_path)[:-len(\".template\")]\n with open(template_path, \"r\") as f:\n self.template = f.read()\n self.var_prefix = var_prefix\n self.__var_regex = re.compile(r\"\\$(\" + var_prefix + r\"_[a-zA-Z0-9_]+)\")\n self.__var_brace_regex = re.compile(\n r\"\\$\\{\\s*(\" + var_prefix + r\"_[a-zA-Z0-9_]+)\\s*\\}\")\n var_set = set()\n for match in self.__var_regex.finditer(self.template):\n var_set.add(match.group(1))\n for match in self.__var_brace_regex.finditer(self.template):\n var_set.add(match.group(1))\n self.var_set = var_set\n\n def generate(self, config: dict) -> str:\n result = self.template\n 
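# Illustrative note, not part of the original file: both accepted spellings of a\n # variable are substituted below, e.g. with the default prefix, template text\n # \"$CRUPEST_PORT/${ CRUPEST_PORT }\" with config {\"CRUPEST_PORT\": \"80\"} renders\n # as \"80/80\" (the PORT variable name here is hypothetical).\n 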
for var in self.var_set:\n if var not in config:\n raise ValueError(f\"Missing config var {var}.\")\n result = result.replace(\"$\" + var, config[var])\n result = re.sub(r\"\\$\\{\\s*\" + var + r\"\\s*\\}\", config[var], result)\n return result\n","repo_name":"crupest/crupest","sub_path":"tool/modules/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44810016307","text":"import random\n\n# Constants\nAI_AGENT_SYMBOL = 'AI'\nCOMPUTER_SYMBOL = 'C'\nEMPTY_SYMBOL = ' '\nROWS = 6\nCOLUMNS = 7\n\n# Create an empty game board\ndef create_board(rows, columns):\n return [[EMPTY_SYMBOL for _ in range(columns)] for _ in range(rows)]\n\n# Check if the game is over\ndef game_over(board):\n return check_winner(board) or check_draw(board)\n\n# Check if there is a winner\ndef check_winner(board):\n # Check rows\n for row in board:\n for col in range(COLUMNS - 3):\n if row[col] != EMPTY_SYMBOL and row[col] == row[col + 1] == row[col + 2] == row[col + 3]:\n return row[col]\n\n # Check columns\n for col in range(COLUMNS):\n for row in range(ROWS - 3):\n if board[row][col] != EMPTY_SYMBOL and board[row][col] == board[row + 1][col] == board[row + 2][col] == board[row + 3][col]:\n return board[row][col]\n\n # Check diagonals (top-left to bottom-right)\n for row in range(ROWS - 3):\n for col in range(COLUMNS - 3):\n if board[row][col] != EMPTY_SYMBOL and board[row][col] == board[row + 1][col + 1] == board[row + 2][col + 2] == board[row + 3][col + 3]:\n return board[row][col]\n\n # Check diagonals (bottom-left to top-right)\n for row in range(3, ROWS):\n for col in range(COLUMNS - 3):\n if board[row][col] != EMPTY_SYMBOL and board[row][col] == board[row - 1][col + 1] == board[row - 2][col + 2] == board[row - 3][col + 3]:\n return board[row][col]\n\n return None\n\n# Check if the game is a draw\ndef check_draw(board):\n for row in board:\n if EMPTY_SYMBOL in row:\n return False\n return True\n\n# Generate all possible moves\ndef generate_moves(board):\n moves = []\n for col in range(COLUMNS):\n if board[0][col] == EMPTY_SYMBOL:\n moves.append(col)\n return moves\n\n# Make a move on the game board\ndef make_move(board, column, symbol):\n for row in range(ROWS - 1, -1, -1): # Start from the bottom row and move up\n if board[row][column] == EMPTY_SYMBOL:\n board[row][column] = symbol\n break\n\n# Undo a move on the game board\ndef undo_move(board, column):\n for row in range(ROWS):\n if board[row][column] != EMPTY_SYMBOL:\n board[row][column] = EMPTY_SYMBOL\n break\n\n# Evaluate the current game state and assign a score\ndef evaluate_state(board):\n if check_winner(board) == AI_AGENT_SYMBOL:\n return 1\n elif check_winner(board) == COMPUTER_SYMBOL:\n return -1\n else:\n return 0\n","repo_name":"AhmedEssammm/connect_four","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33547483885","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 19 14:34:38 2021\r\n\r\n@author: nbrow\r\n\"\"\"\r\nimport numpy as np\r\ndef INP_writer(Elements, Nodes,E_X,E_Y,UX,UY):\r\n\r\n with open('Test_Abaqus.inp','w') as f:\r\n # Writing the Job Info:\r\n f.write('''*Heading\r\nPython Written Abaqus INP File\r\n**Job name: Run_Job Model name: Model-1\r\n** Generated by: Abaqus/CAE 2020\r\n*Preprint, echo=NO, model=NO, history=NO, 
contact=No\r\n**\r\n''')#Incorporate the Parts \r\n f.write('''PARTS\r\n**\r\n*Part, name=Body_part\r\n*Node\r\n''')\r\n for i in range(0,len(Nodes)):\r\n f.write(' {}, {}, {}\\n'.format(int(Nodes[i,2]),Nodes[i,0],Nodes[i,1]))\r\n f.write('''*Element, type=CPS4R\\n''')\r\n for i in range(0,len(Elements)):\r\n f.write(' {}, {}, {}, {}, {}\\n'.format(int(Elements[i,4]),int(Elements[i,0]),int(Elements[i,1]),int(Elements[i,2]),int(Elements[i,3])))\r\n f.write('''*Nset, nset=Set-1, internal\\n''')\r\n for i in range(0,len(Nodes)):\r\n if i!=len(Nodes)-1:\r\n f.write('{}, '.format(int(Nodes[i,2])))\r\n else:\r\n f.write('{}\\n'.format(int(Nodes[i,2])))\r\n if i%15==0 and i!=0 and i!=len(Elements)-1:\r\n f.write('\\n')\r\n f.write('*Elset, elset=Set-1, internal\\n')\r\n for i in range(0,len(Elements)):\r\n if i!=len(Elements)-1:\r\n f.write('{}, '.format(int(Elements[i,4])))\r\n else:\r\n f.write('{}\\n'.format(int(Elements[i,4])))\r\n if i%15==0 and i!=0 and i!=len(Elements)-1:\r\n f.write('\\n')\r\n f.write('** Section: Section-1\\n')\r\n f.write('''*Solid Section, elset=Set-1, material=Material-1\r\n*End Part\r\n**\r\n**\r\n** ASSEMBLY\r\n**\r\n*Assembly, name=Assembly\r\n**\r\n*Instance, name=Part-1-1, part=Body_part\r\n*End Instance\r\n**\r\n''') \r\n El_Len=[]\r\n Node_Len=[]\r\n f.write('*Nset, nset=Lower_Edge, instance=Part-1-1\\n')\r\n for i in range(0,(E_X*2*UX)+1):\r\n if i==0:\r\n f.write('{}'.format(int(Nodes[i,2])))\r\n Node_Len=np.append(Node_Len,int(Nodes[i,2]))\r\n if i!=0 and int(Nodes[i,2])<=(E_X*UX*2)+1:\r\n f.write(', {}'.format(int(Nodes[i,2])))\r\n Node_Len=np.append(Node_Len,int(Nodes[i,2]))\r\n if i%10==0 and i!=0:\r\n f.write('\\n')\r\n f.write('\\n*Elset, elset=Lower_Edge, instance=Part-1-1\\n')\r\n for i in range(0,(E_X*2*UX)):\r\n if i==0:\r\n f.write('{}'.format(int(Elements[i,4])))\r\n El_Len=np.append(El_Len,int(Elements[i,4]))\r\n if i!=0 and int(Elements[i,4])<=(E_X*2*UX):\r\n f.write(', {}'.format(int(Elements[i,4])))\r\n El_Len=np.append(El_Len,int(Elements[i,4]))\r\n if i%10==0 and i!=0:\r\n f.write('\\n')\r\n f.write('\\n*Nset, nset=Upper_Edge, internal, instance=Part-1-1\\n')\r\n for i in range(0,len(Node_Len)+1):\r\n if i==0:\r\n f.write('{}'.format(int(Nodes[-len(Node_Len)+i,2])))\r\n if i!=0:\r\n f.write(', {}'.format(int(Nodes[-1-len(Node_Len)+i,2])))\r\n if i%10==0 and i!=0:\r\n f.write('\\n')\r\n \r\n f.write('''\\n*End Assembly\r\n**\r\n** MATERIALS\r\n**\r\n*Material, name=Material-1\r\n*Density\r\n1e-05,\r\n*Elastic\r\n 1e+06., 0.3\\n''')\r\n f.write('''**\r\n** INTERACTION PROPERTIES\r\n**\r\n*Surface Interaction, name=IntProp-1\r\n1.,\r\n*Friction, slip tolerance=0.005\r\n0.5,\r\n*Surface Behavior, pressure-overclosure=HARD\r\n**\r\n** INTERACTIONS\r\n**\r\n** ----------------------------------------------------------------\r\n**\r\n** STEP: Force_Applied_Step\r\n**\r\n*Step, name=Force_Applied_Step, nlgeom=YES\r\n*Static\r\n0.25, 1., 1e-05, 0.25\r\n**\r\n** BOUNDARY CONDITIONS\r\n**\r\n** Name: BC_1 Type: Symmetry/Antisymmetry/Encastre\r\n*Boundary\r\nLower_Edge, PINNED\r\n**\r\n** LOADS\r\n**\r\n** Name:Load-1 Type:Concentrated force\r\n*Cload\r\nUpper_Edge, 2, -{}'''.format(1e6/len(Node_Len)))\r\n f.write('''\\n**\r\n** OUTPUT REQUESTS\r\n**\r\n*Restart, write, frequency=0\r\n**\r\n** FIELD OUTPUT: F-Output-1\r\n**\r\n*Output, field, variable=PRESELECT\r\n**\r\n** HISTORY OUTPUT: H-Output-1\r\n**\r\n*Output, history, variable=PRESELECT\r\n*End Step''')\r\n\r\n
","repo_name":"CJJP-git/metamaterial-the-first-example","sub_path":"Unit_Cell_Designing/Abaqus_INP_writer.py","file_name":"Abaqus_INP_writer.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"17057235268","text":"salida = False\nwhile salida == False:\n print(\"Métodos para el cómputo.\")\n print(\"1. For\")\n print(\"2. While\")\n metodo = int(input(\"Escribe el método a usar: \"))\n lim = int(input(\"Escribe el número al que quieras contar: \"))\n if metodo == 1:\n # For each i in the range from 0 to lim + 1, count up in steps of 2\n for i in range(0, lim + 1, 2):\n print(i)\n\n if metodo == 2:\n x = 1\n # While the condition is true (value of x <= lim), run the following\n while x <= lim:\n print(x)\n x = x + 1\n\n iSalida = int(input(\"Desea continuar con otra operación ? (1. Si | 2. No ): \"))\n\n if iSalida == 2:\n print(\"Gracias por utilizarme... como mi ex.\")\n salida = True\n","repo_name":"GMartinUTEQ/T217PythonLabs","sub_path":"App05.py","file_name":"App05.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"4407074426","text":"import pandas as pd\nfrom HousePriceProject.helpers import *\n\ntrain = get_train_data()\ntest = get_test_data()\n\ndf = train.append(test).reset_index()\n\ndef data_preprocessing(dataframe):\n cat_cols, num_cols, cat_but_car = grab_col_names(dataframe, cat_th=17, car_th=26)\n\n # assign \"missing\" to the null values in train and test because they carry meaning.\n for col in cat_cols:\n dataframe[col].fillna(\"missing\", inplace=True) # missing values in the categorical variables are meaningful.\n\n # fill the numeric null values in test and train with the neighborhood mean.\n for col in num_cols:\n dataframe[col].fillna(dataframe.groupby(\"Neighborhood\")[col].transform(\"mean\"), inplace=True)\n\n # Outliers\n for col in num_cols:\n if col != \"SalePrice\":\n replace_with_thresholds(dataframe, col)\n\n\n","repo_name":"anilozcan35/DSMLBC8-","sub_path":"HousePriceProject/HousePrediction/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"35609603058","text":"class Solution(object):\n def pivotIndex(self, nums):\n left, right = 0, sum(nums)\n for index, num in enumerate(nums):\n right -= num\n if left == right:\n return index\n left += num\n return -1\n\ndef main():\n\tnums = [-1,-1,-1,-1,-1,0]\n\tsolution = Solution()\n\tpivotIndex = solution.pivotIndex(nums)\n\tprint(pivotIndex)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"smkatash/-leetcode-daily-challenges","sub_path":"level1/724/pivotIndex.py","file_name":"pivotIndex.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"23254787995","text":"from deep_space_trader.store import Store\nfrom deep_space_trader import constants as const\nfrom deep_space_trader import config\nfrom deep_space_trader.utils import errorDialog, yesNoDialog, infoDialog\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\n\nclass ButtonBar(QtWidgets.QWidget):\n def __init__(self, parent):\n super(ButtonBar, self).__init__(parent)\n\n self.parent = parent\n self.mainLayout = QtWidgets.QHBoxLayout(self)\n\n self.resetButton = 
QtWidgets.QPushButton(\"Reset\")\n self.resetButton.clicked.connect(self.resetButtonClicked)\n self.mainLayout.addWidget(self.resetButton)\n\n self.storeButton = QtWidgets.QPushButton(\"Go to store\")\n self.storeButton.clicked.connect(self.storeButtonClicked)\n self.mainLayout.addWidget(self.storeButton)\n\n self.dayButton = QtWidgets.QPushButton(\"Go to next day\")\n self.dayButton.clicked.connect(self.dayButtonClicked)\n self.mainLayout.addWidget(self.dayButton)\n\n def resetButtonClicked(self):\n proceed = yesNoDialog(self, \"Are you sure?\",\n message=\"Are you sure you want to reset the game and \"\n \"lose your progress?\")\n if not proceed:\n return\n\n self.parent.reset()\n\n def storeButtonClicked(self):\n dialog = Store(self.parent)\n dialog.setWindowModality(QtCore.Qt.ApplicationModal)\n dialog.exec_()\n\n def dayButtonClicked(self):\n self.parent.advanceDay()\n\n def useButtonClicked(self):\n dialog = StoreItemSelector(self.parent)\n dialog.setWindowModality(QtCore.Qt.ApplicationModal)\n dialog.exec_()\n","repo_name":"eriknyquist/deep_space_trader","sub_path":"deep_space_trader/top_button_bar.py","file_name":"top_button_bar.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"72452861974","text":"import json\n\nimport requests\nimport gistats\n\nconfig = json.load(open('config.json', 'r'))\ngist = gistats.Gist(config['username'], config['token'], config['gist'], 'Chess Statistics')\nresponse = requests.get(\n f'https://api.chess.com/pub/player/{config[\"chessname\"]}/stats'\n).json()\n\ngist.update({\n 'Bullets 🚅': response['chess_bullet']['last']['rating'],\n 'Rapid ⏲': response['chess_rapid']['last']['rating']\n})","repo_name":"ZSendokame/ChessPy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38949695220","text":"# Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.\n\n# For example,\n# \"A man, a plan, a canal: Panama\" is a palindrome.\n# \"race a car\" is not a palindrome.\n\n# Note:\n# Have you consider that the string might be empty? This is a good question to ask during an interview.\n\n# For the purpose of this problem, we define empty string as valid palindrome.\n\n# learn Python str.isalnum() method\n# Two Pointer\nclass Solution(object):\n def isPalindrome(self, s):\n \"\"\"\n O(n)\n O(1)\n :type s: str\n :rtype: bool\n \"\"\"\n s = s.lower()\n l, r = 0, len(s)-1\n while l < r:\n if not s[l].isalnum():\n l += 1\n elif not s[r].isalnum():\n r -= 1\n elif s[l] == s[r]:\n l += 1\n r -= 1\n else:\n return False\n return True","repo_name":"youhusky/Facebook_Prepare","sub_path":"125. Valid Palindrome.py","file_name":"125. 
Valid Palindrome.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"73521964052","text":"import sys\n\ndef makes_sense(bites):\n for i, bite in enumerate(bites):\n if bite != 'mumble' and i + 1 != int(bite):\n return False\n return True\n\nif __name__ == '__main__':\n sys.stdin.readline()\n bites = sys.stdin.readline().strip().split()\n print('makes sense' if makes_sense(bites) else 'something is fishy')\n","repo_name":"kscharlund/kattis","sub_path":"babybites/babybites.py","file_name":"babybites.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72560231574","text":"import os\nimport signal\nimport time\nimport json\nimport logging\nfrom datetime import datetime\n\nfrom alignak.stats import Stats\nfrom alignak.basemodule import BaseModule\nfrom alignak.external_command import ExternalCommand\n\nfrom alignak_backend_client.client import Backend, BackendException\n\n# Set the backend client library log to ERROR level\nlogging.getLogger(\"alignak_backend_client.client\").setLevel(logging.ERROR)\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\nfor handler in logger.parent.handlers:\n if isinstance(handler, logging.StreamHandler):\n logger.parent.removeHandler(handler)\n\n# pylint: disable=invalid-name\nproperties = {\n 'daemons': ['arbiter'],\n 'type': 'backend_arbiter',\n 'external': False,\n 'phases': ['configuration'],\n}\n\n\ndef get_instance(mod_conf):\n \"\"\"\n Return a module instance for the modules manager\n\n :param mod_conf: the module properties as defined globally in this file\n :return:\n \"\"\"\n logger.info(\"Give an instance of %s for alias: %s\", mod_conf.python_name, mod_conf.module_alias)\n\n return AlignakBackendArbiter(mod_conf)\n\n\nclass AlignakBackendArbiter(BaseModule):\n # pylint: disable=too-many-public-methods\n \"\"\" This class is used to get configuration from alignak-backend\n \"\"\"\n\n def __init__(self, mod_conf):\n \"\"\"Module initialization\n\n mod_conf is a dictionary that contains:\n - all the variables declared in the module configuration file\n - a 'properties' value that is the module properties as defined globally in this file\n\n :param mod_conf: module configuration file as a dictionary\n \"\"\"\n BaseModule.__init__(self, mod_conf)\n\n # pylint: disable=global-statement\n global logger\n logger = logging.getLogger('alignak.module.%s' % self.alias)\n logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO))\n\n logger.debug(\"inner properties: %s\", self.__dict__)\n logger.debug(\"received configuration: %s\", mod_conf.__dict__)\n\n self.my_arbiter = None\n\n # Alignak backend importation script is running\n self.backend_import = False\n if 'ALIGNAK_BACKEND_IMPORT_RUN' in os.environ and os.environ['ALIGNAK_BACKEND_IMPORT_RUN']:\n logger.info(\"Alignak backend importation script is active.\")\n self.backend_import = True\n\n self.client_processes = int(getattr(mod_conf, 'client_processes', 1))\n logger.info(\"Number of processes used by backend client: %s\", self.client_processes)\n\n logger.info(\"StatsD configuration: %s:%s, prefix: %s, enabled: %s\",\n getattr(mod_conf, 'statsd_host', 'localhost'),\n int(getattr(mod_conf, 'statsd_port', '8125')),\n getattr(mod_conf, 'statsd_prefix', 'alignak'),\n (getattr(mod_conf, 'statsd_enabled', '0') != '0'))\n self.statsmgr = Stats()\n self.statsmgr.register(self.alias, 
'module',\n statsd_host=getattr(mod_conf, 'statsd_host', 'localhost'),\n statsd_port=int(getattr(mod_conf, 'statsd_port', '8125')),\n statsd_prefix=getattr(mod_conf, 'statsd_prefix', 'alignak'),\n statsd_enabled=(getattr(mod_conf, 'statsd_enabled', '0') != '0'))\n\n self.url = getattr(mod_conf, 'api_url', 'http://localhost:5000')\n logger.info(\"Alignak backend endpoint: %s\", self.url)\n self.backend = Backend(self.url, self.client_processes)\n self.backend.token = getattr(mod_conf, 'token', '')\n self.backend_connected = False\n self.backend_errors_count = 0\n self.backend_username = getattr(mod_conf, 'username', '')\n self.backend_password = getattr(mod_conf, 'password', '')\n self.backend_generate = getattr(mod_conf, 'allowgeneratetoken', False)\n\n self.backend_count = int(getattr(mod_conf, 'backend_count', '50'))\n logger.info(\"backend pagination count: %d items\", self.backend_count)\n\n if not self.backend.token:\n logger.warning(\"no user token configured. \"\n \"It is recommended to set a user token rather than a user login \"\n \"in the configuration. Trying to get a token from the provided \"\n \"user login information...\")\n self.getToken()\n else:\n self.backend_connected = True\n\n self.bypass_verify_mode = int(getattr(mod_conf, 'bypass_verify_mode', 0)) == 1\n logger.info(\"bypass objects loading when Arbiter is in verify mode: %s\",\n self.bypass_verify_mode)\n\n self.verify_modification = int(getattr(mod_conf, 'verify_modification', 5))\n logger.info(\"configuration reload check period: %s minutes\", self.verify_modification)\n\n self.action_check = int(getattr(mod_conf, 'action_check', 15))\n logger.info(\"actions check period: %s seconds\", self.action_check)\n self.daemons_state = int(getattr(mod_conf, 'daemons_state', 60))\n logger.info(\"daemons state update period: %s seconds\", self.daemons_state)\n self.retention_actived = int(getattr(mod_conf, 'retention_actived', 1))\n self.next_check = 0\n self.next_action_check = 0\n self.next_daemons_state = 0\n\n # Configuration load/reload\n self.backend_date_format = \"%a, %d %b %Y %H:%M:%S GMT\"\n self.time_loaded_conf = datetime.utcnow().strftime(self.backend_date_format)\n self.configuration_reload_required = False\n self.configuration_reload_changelog = []\n\n self.configraw = {}\n self.highlevelrealm = {\n 'level': 30000,\n 'name': ''\n }\n self.daemonlist = {'arbiter': {}, 'scheduler': {}, 'poller': {}, 'reactionner': {},\n 'receiver': {}, 'broker': {}}\n self.config = {'commands': [],\n 'timeperiods': [],\n 'hosts': [],\n 'hostgroups': [],\n 'services': [],\n 'contacts': [],\n 'contactgroups': [],\n 'servicegroups': [],\n 'realms': [],\n 'hostdependencies': [],\n 'hostescalations': [],\n 'servicedependencies': [],\n 'serviceescalations': [],\n 'triggers': []}\n self.backend_nb_hosts = 0\n self.backend_nb_services = 0\n self.default_tp_always = None\n self.default_tp_never = None\n self.default_host_check_command = None\n self.default_service_check_command = None\n self.default_user = None\n\n self.alignak_configuration = {}\n\n # Common functions\n def do_loop_turn(self):\n \"\"\"This function is called/used when you need a module with\n a loop function (and use the parameter 'external': True)\n \"\"\"\n logger.info(\"In loop\")\n time.sleep(1)\n\n def hook_read_configuration(self, arbiter):\n \"\"\"Hook in arbiter used on configuration parsing start. 
This is useful to get our arbiter\n object and its parameters.\n\n :param arbiter: alignak.daemons.arbiterdaemon.Arbiter\n :type arbiter: object\n :return: None\n \"\"\"\n self.my_arbiter = arbiter\n\n def getToken(self):\n \"\"\"Authenticate and get the token\n\n :return: None\n \"\"\"\n if self.backend_import:\n # Do no try to login when importing a configuration into the backend\n logger.info(\"Alignak backend importation script is active. \"\n \"No backend connection.\")\n return\n\n generate = 'enabled'\n if not self.backend_generate:\n generate = 'disabled'\n\n try:\n start = time.time()\n self.backend_connected = self.backend.login(self.backend_username,\n self.backend_password,\n generate)\n self.statsmgr.counter('backend-login', 1)\n self.statsmgr.timer('backend-login-time', time.time() - start)\n if not self.backend_connected:\n logger.warning(\"Backend login failed\")\n self.token = self.backend.token\n self.backend_errors_count = 0\n except BackendException as exp: # pragma: no cover - should not happen\n self.backend_connected = False\n self.backend_errors_count += 1\n logger.warning(\"Alignak backend is not available for login. \"\n \"No backend connection, attempt: %d\", self.backend_errors_count)\n logger.debug(\"Exception: %s\", exp)\n\n def raise_backend_alert(self, errors_count=10):\n \"\"\"Raise a backend alert\n\n :return: True if the backend is not connected and the error count\n is greater than a defined threshold\n \"\"\"\n logger.debug(\"Check backend connection, connected: %s, errors count: %d\",\n self.backend_connected, self.backend_errors_count)\n if not self.backend_connected and self.backend_errors_count >= errors_count:\n return True\n\n return False\n\n def single_relation(self, resource, mapping, ctype):\n \"\"\"Convert single embedded data to name of relation_data\n Example:\n {'contacts': {'_id': a3659204fe,'name':'admin'}}\n converted to:\n {'contacts': 'admin'}\n\n :param resource: dictionary got from alignak-backend\n :type resource: dict\n :param mapping: key value of resource\n :type mapping: str\n :param ctype: type of configraw (hosts, services, commands...)\n :type ctype: str\n \"\"\"\n if mapping in resource:\n if resource[mapping] is not None:\n if resource[mapping] in self.configraw[ctype]:\n resource[mapping] = self.configraw[ctype][resource[mapping]]\n\n def multiple_relation(self, resource, mapping, ctype):\n \"\"\"Convert multiple embedded data to name of relation_data\n Example:\n {'contacts': [{'_id': a3659204fe,'contact_name':'admin'},\n {'_id': a3659204ff,'contact_name':'admin2'}]}\n converted to:\n {'contacts': 'admin,admin2'}\n\n :param resource: dictionary got from alignak-backend\n :type resource: dict\n :param mapping: key value of resource\n :type mapping: str\n :param ctype: type of configraw (hosts, services, commands...)\n :type ctype: str\n \"\"\"\n if mapping in resource:\n members = []\n for member in resource[mapping]:\n if member in self.configraw[ctype]:\n members.append(self.configraw[ctype][member])\n resource[mapping] = ','.join(members)\n\n @classmethod\n def clean_unusable_keys(cls, resource):\n \"\"\"Delete keys of dictionary not used\n\n :param resource: dictionary got from alignak-backend\n :type resource: dict\n :return:\n \"\"\"\n fields = [\n '_links', '_updated', '_created', '_etag', '_id', 'name', 'ui', '_realm',\n '_sub_realm', '_users_read', '_users_update', '_users_delete', '_parent',\n '_tree_parents', '_all_children', '_level', 'customs', 'host', 'service',\n 'back_role_super_admin', 'token', 
'_templates', '_template_fields', 'note',\n '_is_template', '_templates_with_services', '_templates_from_host_template',\n 'merge_host_users', 'hosts_critical_threshold', 'hosts_warning_threshold',\n 'services_critical_threshold', 'services_warning_threshold',\n 'global_critical_threshold', 'global_warning_threshold', '_children',\n 'hostgroups', 'hosts', 'dependent_hostgroups', 'dependent_hosts',\n 'servicegroups', 'services', 'dependent_servicegroups', 'dependent_services',\n 'usergroups', 'users',\n 'location',\n 'duplicate_foreach', 'tags',\n # 'ls_acknowledged', 'ls_acknowledgement_type', 'ls_current_attempt', 'ls_attempt',\n # 'ls_downtimed', 'ls_execution_time',\n # 'ls_grafana', 'ls_grafana_panelid', 'ls_impact', 'ls_last_check', 'ls_last_state',\n # 'ls_last_state_changed', 'ls_last_hard_state_changed', 'ls_last_state_type',\n # 'ls_latency', 'ls_long_output',\n # 'ls_max_attempts', 'ls_next_check', 'ls_output', 'ls_perf_data',\n # 'ls_state', 'ls_state_id', 'ls_state_type',\n # 'ls_last_time_up', 'ls_last_time_down',\n # 'ls_last_time_ok', 'ls_last_time_warning', 'ls_last_time_critical',\n # 'ls_last_time_unknown', 'ls_last_time_unreachable',\n # 'ls_passive_check', 'ls_last_notification',\n '_overall_state_id',\n 'trigger', 'schema_version'\n ]\n # Add live state fields\n for field in resource:\n if field.startswith('ls_'):\n fields.append(field)\n for field in fields:\n if field in resource:\n del resource[field]\n\n @classmethod\n def convert_lists(cls, resource):\n \"\"\"Convert lists into string with values separated with comma\n\n :param resource: ressource\n :type resource: dict\n :return: None\n \"\"\"\n for prop in resource:\n if isinstance(resource[prop], list):\n resource[prop] = ','.join(str(e) for e in resource[prop])\n # Is it really useful ... 
considered as not useful!\n        # elif isinstance(resource[prop], dict):\n        #     logger.warning(\"=====> %s\", prop)\n        #     logger.warning(resource[prop])\n\n    def get_realms(self):\n        \"\"\"Get realms from alignak_backend\n\n        :return: None\n        \"\"\"\n        self.configraw['realms'] = {}\n        self.configraw['realms_name'] = {}\n        params = {\"max_results\": self.backend_count,\n                  \"embedded\": json.dumps({'_children': 1})}\n        all_realms = self.backend.get_all('realm', params)\n        logger.info(\"Got %d realms\",\n                    len(all_realms['_items']))\n        for realm in all_realms['_items']:\n            logger.debug(\"- %s\", realm['name'])\n            self.configraw['realms'][realm['_id']] = realm['name']\n            # we store the relation name => id because we will use it to add / update\n            # the alignak daemon state in the backend\n            self.configraw['realms_name'][realm['name']] = realm['_id']\n            if realm['_level'] < self.highlevelrealm['level']:\n                # remember the level as well, so the comparison keeps the\n                # highest-level (top) realm instead of the last realm iterated\n                self.highlevelrealm['name'] = realm['name']\n                self.highlevelrealm['level'] = realm['_level']\n            realm['imported_from'] = 'alignak-backend'\n            if 'definition_order' in realm and realm['definition_order'] == 100:\n                realm['definition_order'] = 50\n            realm['realm_name'] = realm['name']\n            realm['realm_members'] = []\n            for child in realm['_children']:\n                realm['realm_members'].append(child['name'])\n            self.clean_unusable_keys(realm)\n            del realm['notes']\n            del realm['alias']\n            self.convert_lists(realm)\n\n            logger.debug(\"- realm: %s\", realm)\n            self.config['realms'].append(realm)\n\n        self.statsmgr.counter('objects.realm', len(self.config['realms']))\n\n    def get_commands(self):\n        \"\"\"Get commands from alignak_backend\n\n        :return: None\n        \"\"\"\n        self.configraw['commands'] = {}\n        params = {\"max_results\": self.backend_count}\n        all_commands = self.backend.get_all('command', params)\n        logger.info(\"Got %d commands\",\n                    len(all_commands['_items']))\n        for command in all_commands['_items']:\n            logger.debug(\"- %s\", command['name'])\n            self.configraw['commands'][command['_id']] = command['name']\n            command['imported_from'] = 'alignak-backend'\n            if 'definition_order' in command and command['definition_order'] == 100:\n                command['definition_order'] = 50\n            command['command_name'] = command['name']\n            # poller_tag empty\n            if 'poller_tag' in command and command['poller_tag'] == '':\n                del command['poller_tag']\n            self.clean_unusable_keys(command)\n            del command['alias']\n            del command['notes']\n            self.convert_lists(command)\n\n            # Set default host/service check commands\n            if command['command_name'] == \"_internal_host_up\":\n                self.default_host_check_command = command\n            if command['command_name'] == \"_echo\":\n                self.default_service_check_command = command\n\n            logger.debug(\"- command: %s\", command)\n            self.config['commands'].append(command)\n\n        self.statsmgr.counter('objects.command', len(self.config['commands']))\n\n    def get_timeperiods(self):\n        \"\"\"Get timeperiods from alignak_backend\n\n        :return: None\n        \"\"\"\n        self.configraw['timeperiods'] = {}\n        params = {\"max_results\": self.backend_count}\n        all_timeperiods = self.backend.get_all('timeperiod', params)\n        logger.info(\"Got %d timeperiods\",\n                    len(all_timeperiods['_items']))\n        for timeperiod in all_timeperiods['_items']:\n            logger.debug(\"- %s\", timeperiod['name'])\n            self.configraw['timeperiods'][timeperiod['_id']] = timeperiod['name']\n            timeperiod['imported_from'] = 'alignak-backend'\n            if 'definition_order' in timeperiod and timeperiod['definition_order'] == 100:\n                timeperiod['definition_order'] = 50\n            timeperiod['timeperiod_name'] = timeperiod['name']\n            for daterange in timeperiod['dateranges']:\n                timeperiod.update(daterange)\n            del 
timeperiod['dateranges']\n self.clean_unusable_keys(timeperiod)\n del timeperiod['notes']\n self.convert_lists(timeperiod)\n\n # Set default timeperiod\n if timeperiod['timeperiod_name'] == \"24x7\":\n self.default_tp_always = timeperiod\n if timeperiod['timeperiod_name'] == \"Never\":\n self.default_tp_never = timeperiod\n\n logger.debug(\"- timeperiod: %s\", timeperiod)\n self.config['timeperiods'].append(timeperiod)\n\n self.statsmgr.counter('objects.timeperiod', len(self.config['timeperiods']))\n\n def get_contactgroups(self):\n \"\"\"Get contactgroups from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['contactgroups'] = {}\n params = {\"max_results\": self.backend_count}\n all_contactgroups = self.backend.get_all('usergroup', params)\n logger.info(\"Got %d contactgroups\",\n len(all_contactgroups['_items']))\n for contactgroup in all_contactgroups['_items']:\n logger.debug(\"- %s\", contactgroup['name'])\n self.configraw['contactgroups'][contactgroup['_id']] = contactgroup['name']\n\n for contactgroup in all_contactgroups['_items']:\n contactgroup['imported_from'] = 'alignak-backend'\n if 'definition_order' in contactgroup and contactgroup['definition_order'] == 100:\n contactgroup['definition_order'] = 50\n contactgroup['contactgroup_name'] = contactgroup['name']\n contactgroup['contactgroup_members'] = contactgroup['usergroups']\n contactgroup['members'] = contactgroup['users']\n # members\n self.multiple_relation(contactgroup, 'members', 'contacts')\n # contactgroup_members\n self.multiple_relation(contactgroup, 'contactgroup_members', 'contactgroups')\n self.clean_unusable_keys(contactgroup)\n del contactgroup['notes']\n self.convert_lists(contactgroup)\n\n logger.debug(\"- contacts group: %s\", contactgroup)\n self.config['contactgroups'].append(contactgroup)\n\n self.statsmgr.counter('objects.contactgroup', len(self.config['contactgroups']))\n\n def get_contacts(self):\n \"\"\"Get contacts from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['contacts'] = {}\n params = {\"max_results\": self.backend_count,\n \"where\": '{\"_is_template\": false}'}\n all_contacts = self.backend.get_all('user', params)\n logger.info(\"Got %d contacts\",\n len(all_contacts['_items']))\n for contact in all_contacts['_items']:\n logger.debug(\"- %s\", contact['name'])\n self.configraw['contacts'][contact['_id']] = contact['name']\n contact['imported_from'] = 'alignak-backend'\n if 'definition_order' in contact and contact['definition_order'] == 100:\n contact['definition_order'] = 50\n contact['contact_name'] = contact['name']\n\n # host_notification_period\n self.single_relation(contact, 'host_notification_period', 'timeperiods')\n # service_notification_period\n self.single_relation(contact, 'service_notification_period', 'timeperiods')\n # host_notification_commands\n self.multiple_relation(contact, 'host_notification_commands', 'commands')\n # service_notification_commands\n self.multiple_relation(contact, 'service_notification_commands', 'commands')\n # contactgroups\n self.multiple_relation(contact, 'contactgroups', 'contactgroups')\n\n # todo: perhaps those properties should have a default value in the backend?\n if 'host_notification_commands' not in contact:\n contact['host_notification_commands'] = ''\n if 'service_notification_commands' not in contact:\n contact['service_notification_commands'] = ''\n\n # todo: how should it be possible to not have those properties in the backend?\n # they are defined as required!\n if 'host_notification_period' not in contact:\n 
contact['host_notification_period'] = \\\n self.config['timeperiods'][0]['timeperiod_name']\n contact['host_notifications_enabled'] = False\n if 'service_notification_period' not in contact:\n contact['service_notification_period'] = \\\n self.config['timeperiods'][0]['timeperiod_name']\n contact['service_notifications_enabled'] = False\n\n for key, value in contact['customs'].items():\n if key[0] not in ['_']:\n key = '_' + key\n contact[key.upper()] = value\n self.clean_unusable_keys(contact)\n del contact['notes']\n del contact['ui_preferences']\n del contact['can_update_livestate']\n del contact['skill_level']\n self.convert_lists(contact)\n\n # Set default user\n if contact['contact_name'] == \"admin\":\n self.default_user = contact\n\n logger.debug(\"- contact: %s\", contact)\n self.config['contacts'].append(contact)\n\n self.statsmgr.counter('objects.contact', len(self.config['contacts']))\n\n def get_hostgroups(self):\n \"\"\"Get hostgroups from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['hostgroups'] = {}\n params = {\"max_results\": self.backend_count}\n all_hostgroups = self.backend.get_all('hostgroup', params)\n logger.info(\"Got %d hostgroups\",\n len(all_hostgroups['_items']))\n for hostgroup in all_hostgroups['_items']:\n logger.debug(\"- %s\", hostgroup['name'])\n self.configraw['hostgroups'][hostgroup['_id']] = hostgroup['name']\n\n for hostgroup in all_hostgroups['_items']:\n self.configraw['hostgroups'][hostgroup['_id']] = hostgroup['name']\n hostgroup['imported_from'] = 'alignak-backend'\n if 'definition_order' in hostgroup and hostgroup['definition_order'] == 100:\n hostgroup['definition_order'] = 50\n hostgroup['hostgroup_name'] = hostgroup['name']\n hostgroup['hostgroup_members'] = hostgroup['hostgroups']\n hostgroup['members'] = hostgroup['hosts']\n # members\n self.multiple_relation(hostgroup, 'members', 'hosts')\n # hostgroup_members\n self.multiple_relation(hostgroup, 'hostgroup_members', 'hostgroups')\n self.clean_unusable_keys(hostgroup)\n self.convert_lists(hostgroup)\n\n logger.debug(\"- hosts group: %s\", hostgroup)\n self.config['hostgroups'].append(hostgroup)\n\n self.statsmgr.counter('objects.hostgroup', len(self.config['hostgroups']))\n\n def get_hosts(self):\n \"\"\"Get hosts from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['hosts'] = {}\n params = {\"max_results\": self.backend_count,\n \"where\": '{\"_is_template\": false}'}\n all_hosts = self.backend.get_all('host', params)\n logger.info(\"Got %d hosts\", len(all_hosts['_items']))\n\n for host in all_hosts['_items']:\n logger.debug(\"- %s\", host['name'])\n self.configraw['hosts'][host['_id']] = host['name']\n host['host_name'] = host['name']\n host['imported_from'] = 'alignak-backend'\n\n # If default backend definition order is set, set as default alignak one\n if 'definition_order' in host and host['definition_order'] == 100:\n host['definition_order'] = 50\n\n # Check command\n if 'check_command' in host:\n if host['check_command'] in self.configraw['commands']:\n host['check_command'] = self.configraw['commands'][host['check_command']]\n else:\n host['check_command'] = self.default_host_check_command['command_name']\n else:\n host['check_command'] = self.default_host_check_command['name']\n\n # event handler\n if 'event_handler' in host:\n if host['event_handler'] in self.configraw['commands']:\n host['event_handler'] = self.configraw['commands'][host['event_handler']]\n else:\n del host['event_handler']\n\n # snapshot command\n if 'snapshot_command' in host:\n 
if host['snapshot_command'] in self.configraw['commands']:\n                    host['snapshot_command'] = self.configraw['commands'][host['snapshot_command']]\n                else:\n                    del host['snapshot_command']\n\n            for command_arg in ['check_command', 'event_handler']:\n                arg = command_arg + \"_args\"\n                if arg in host:\n                    if command_arg not in host:\n                        host[command_arg] = ''\n                    elif host[arg] != '':\n                        host[command_arg] += '!'\n                    host[command_arg] += host[arg]\n                    del host[arg]\n                    logger.debug(\"Host %s, %s: '%s'\",\n                                 host['name'], command_arg, host[command_arg])\n\n            # poller and reactionner tags are empty - Alignak defaults to the string 'None'\n            if not host['poller_tag']:\n                host['poller_tag'] = 'None'\n            if not host['reactionner_tag']:\n                host['reactionner_tag'] = 'None'\n\n            # Contacts\n            host['contacts'] = host['users']\n            host['contact_groups'] = host['usergroups']\n\n            # notification period - set default as 24x7\n            if 'notification_period' not in host or not host['notification_period']:\n                host['notification_period'] = self.default_tp_always['timeperiod_name']\n            # maintenance period - set default as Never\n            if 'maintenance_period' not in host or not host['maintenance_period']:\n                host['maintenance_period'] = self.default_tp_never['timeperiod_name']\n            # snapshot period - set default as Never\n            if 'snapshot_period' not in host or not host['snapshot_period']:\n                host['snapshot_period'] = self.default_tp_never['timeperiod_name']\n\n            # realm\n            self.single_relation(host, '_realm', 'realms')\n            host['realm'] = host['_realm']\n            # check period\n            self.single_relation(host, 'check_period', 'timeperiods')\n            # notification_period\n            self.single_relation(host, 'notification_period', 'timeperiods')\n            # maintenance_period\n            self.single_relation(host, 'maintenance_period', 'timeperiods')\n            # snapshot_period\n            self.single_relation(host, 'snapshot_period', 'timeperiods')\n            # event_handler\n            self.single_relation(host, 'event_handler', 'commands')\n\n            # parents\n            # todo: why is it always an empty list ???\n            # ## self.multiple_relation(host, 'parents', 'host_name')\n            host['parents'] = ''\n\n            # hostgroups\n            self.multiple_relation(host, 'hostgroup_name', 'hostgroups')\n            # contacts\n            self.multiple_relation(host, 'contacts', 'contacts')\n            # contact_groups\n            self.multiple_relation(host, 'contact_groups', 'contactgroups')\n            # escalations\n            # ## self.multiple_relation(host, 'escalations', 'escalation_name')\n            if 'escalations' in host:\n                del host['escalations']\n\n            if 'alias' in host and host['alias'] == '':\n                del host['alias']\n            if 'realm' in host:\n                if host['realm'] is None:\n                    del host['realm']\n            for key, value in host['customs'].items():\n                if key[0] not in ['_']:\n                    key = '_' + key\n                host[key.upper()] = value\n\n            # Fix #9: inconsistent state when no retention module exists\n            # guard on 'ls_state', which is the key actually read below\n            if not self.retention_actived and 'ls_state' in host:\n                if host['ls_state'] == 'UNREACHABLE':\n                    host['initial_state'] = 'u'\n                if host['ls_state'] == 'DOWN':\n                    host['initial_state'] = 'd'\n                if host['ls_state'] == 'UP':\n                    host['initial_state'] = 'o'\n\n                # use .get(): initial_state is only set for the states mapped above\n                logger.debug(\n                    \"- host current live state is %s, \"\n                    \"set initial_state as '%s'\", host['ls_state'], host.get('initial_state')\n                )\n            self.clean_unusable_keys(host)\n            self.convert_lists(host)\n\n            logger.debug(\"- host: %s\", host)\n            self.config['hosts'].append(host)\n        self.backend_nb_hosts = len(self.config['hosts'])\n\n        self.statsmgr.counter('objects.host', len(self.config['hosts']))\n\n    def get_servicegroups(self):\n        \"\"\"Get servicegroups from alignak_backend\n\n        :return: None\n        \"\"\"\n        self.configraw['servicegroups'] = {}\n        params = 
{\"max_results\": self.backend_count}\n all_servicegroups = self.backend.get_all('servicegroup', params)\n logger.info(\"Got %d servicegroups\",\n len(all_servicegroups['_items']))\n for servicegroup in all_servicegroups['_items']:\n logger.debug(\"- %s\", servicegroup['name'])\n self.configraw['servicegroups'][servicegroup['_id']] = servicegroup['name']\n\n for servicegroup in all_servicegroups['_items']:\n self.configraw['servicegroups'][servicegroup['_id']] = servicegroup['name']\n servicegroup['imported_from'] = 'alignak-backend'\n if 'definition_order' in servicegroup and servicegroup['definition_order'] == 100:\n servicegroup['definition_order'] = 50\n servicegroup['servicegroup_name'] = servicegroup['name']\n servicegroup['servicegroup_members'] = servicegroup['servicegroups']\n # members\n members = []\n for service in servicegroup['services']:\n if service not in self.configraw['services']:\n continue\n for svc in self.config['services']:\n if self.configraw['services'][service] == svc['service_description']:\n members.append(\"%s,%s\" % (svc['host_name'], svc['service_description']))\n servicegroup['members'] = ','.join(members)\n # servicegroup_members\n self.multiple_relation(servicegroup, 'servicegroup_members', 'servicegroups')\n self.clean_unusable_keys(servicegroup)\n self.convert_lists(servicegroup)\n\n logger.debug(\"- services group: %s\", servicegroup)\n self.config['servicegroups'].append(servicegroup)\n\n self.statsmgr.counter('objects.servicegroup', len(self.config['servicegroups']))\n\n def get_services(self):\n \"\"\"Get services from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['services'] = {}\n params = {\"max_results\": self.backend_count,\n \"where\": '{\"_is_template\": false}'}\n all_services = self.backend.get_all('service', params)\n logger.info(\"Got %d services\", len(all_services['_items']))\n\n for service in all_services['_items']:\n # Get host name from the previously loaded hosts list\n try:\n service['host_name'] = self.configraw['hosts'][service['host']]\n except KeyError:\n logger.warning(\"Got a service for an unknown host\")\n continue\n logger.debug(\"- %s/%s\", service['host_name'], service['name'])\n self.configraw['services'][service['_id']] = service['name']\n service['imported_from'] = 'alignak-backend'\n\n # If default backend definition order is set, set as default alignak one\n if 'definition_order' in service and service['definition_order'] == 100:\n service['definition_order'] = 50\n service['service_description'] = service['name']\n service['merge_host_contacts'] = service['merge_host_users']\n service['hostgroup_name'] = service['hostgroups']\n\n # Check command\n if 'check_command' in service:\n if service['check_command'] in self.configraw['commands']:\n service['check_command'] = self.configraw['commands'][service['check_command']]\n else:\n service['check_command'] = self.default_service_check_command['command_name']\n\n # event handler\n if 'event_handler' in service:\n if service['event_handler'] in self.configraw['commands']:\n service['event_handler'] = self.configraw['commands'][service['event_handler']]\n else:\n del service['event_handler']\n\n # snapshot command\n if 'snapshot_command' in service:\n if service['snapshot_command'] in self.configraw['commands']:\n service['snapshot_command'] = \\\n self.configraw['commands'][service['snapshot_command']]\n else:\n del service['snapshot_command']\n\n for command_arg in ['check_command', 'event_handler']:\n arg = command_arg + \"_args\"\n if arg in service:\n 
if command_arg not in service:\n                        service[command_arg] = ''\n                    elif service[arg] != '':\n                        service[command_arg] += '!'\n                    service[command_arg] += service[arg]\n                    del service[arg]\n                    logger.debug(\"Service %s, %s: '%s'\",\n                                 service['name'], command_arg, service[command_arg])\n\n            # poller and reactionner tags are empty - Alignak defaults to the string 'None'\n            if not service['poller_tag']:\n                service['poller_tag'] = 'None'\n            if not service['reactionner_tag']:\n                service['reactionner_tag'] = 'None'\n\n            # Contacts\n            service['contacts'] = service['users']\n            service['contact_groups'] = service['usergroups']\n\n            # notification period - set default as 24x7\n            if 'notification_period' not in service or not service['notification_period']:\n                service['notification_period'] = self.default_tp_always['timeperiod_name']\n            # maintenance period - set default as Never\n            if 'maintenance_period' not in service or not service['maintenance_period']:\n                service['maintenance_period'] = self.default_tp_never['timeperiod_name']\n            # snapshot period - set default as Never\n            if 'snapshot_period' not in service or not service['snapshot_period']:\n                service['snapshot_period'] = self.default_tp_never['timeperiod_name']\n\n            # host_name\n            self.single_relation(service, 'host_name', 'hosts')\n            # check_period\n            self.single_relation(service, 'check_period', 'timeperiods')\n            # notification_period\n            self.single_relation(service, 'notification_period', 'timeperiods')\n            # maintenance_period\n            self.single_relation(service, 'maintenance_period', 'timeperiods')\n            # snapshot_period\n            self.single_relation(service, 'snapshot_period', 'timeperiods')\n            # event_handler\n            self.single_relation(service, 'event_handler', 'commands')\n            # hostgroups\n            self.multiple_relation(service, 'hostgroup_name', 'hostgroups')\n            # servicegroups\n            self.multiple_relation(service, 'servicegroups', 'servicegroups')\n            # contacts\n            self.multiple_relation(service, 'contacts', 'contacts')\n            # contact_groups\n            self.multiple_relation(service, 'contact_groups', 'contactgroups')\n            # escalations\n            # ## self.multiple_relation(service, 'escalations', 'escalation_name')\n            if 'escalations' in service:\n                del service['escalations']\n            # service_dependencies\n            # ## self.multiple_relation(service, 'service_dependencies', 'service_name')\n            service['service_dependencies'] = ''\n\n            if 'alias' in service and service['alias'] == '':\n                del service['alias']\n            for key, value in service['customs'].items():\n                if key[0] not in ['_']:\n                    key = '_' + key\n                service[key.upper()] = value\n\n            # Fix #9: inconsistent state when no retention module exists\n            # guard on 'ls_state', which is the key actually read below\n            if not self.retention_actived and 'ls_state' in service:\n                if service['ls_state'] == 'UNKNOWN':\n                    service['initial_state'] = 'u'\n                if service['ls_state'] == 'CRITICAL':\n                    service['initial_state'] = 'c'\n                if service['ls_state'] == 'WARNING':\n                    service['initial_state'] = 'w'\n                # 'OK' is the service equivalent of the host 'UP' state\n                if service['ls_state'] == 'OK':\n                    service['initial_state'] = 'o'\n\n                # use .get(): initial_state is only set for the states mapped above\n                logger.debug(\n                    \"- service current live state is %s, \"\n                    \"set initial_state as '%s'\", service['ls_state'], service.get('initial_state')\n                )\n\n            self.clean_unusable_keys(service)\n            self.convert_lists(service)\n\n            logger.debug(\"- service: %s\", service)\n            self.config['services'].append(service)\n        self.backend_nb_services = len(self.config['services'])\n\n        self.statsmgr.counter('objects.service', len(self.config['services']))\n\n    def get_hostdependencies(self):\n        \"\"\"Get hostdependencies from alignak_backend\n\n        :return: None\n        \"\"\"\n        self.configraw['hostdependencies'] = {}\n        params = {\"max_results\": 
self.backend_count}\n all_hostdependencies = self.backend.get_all('hostdependency', params)\n logger.info(\"Got %d hostdependencies\",\n len(all_hostdependencies['_items']))\n for hostdependency in all_hostdependencies['_items']:\n logger.debug(\"- %s\", hostdependency['name'])\n self.configraw['hostdependencies'][hostdependency['_id']] = hostdependency['name']\n hostdependency['imported_from'] = 'alignak-backend'\n if 'definition_order' in hostdependency and hostdependency['definition_order'] == 100:\n hostdependency['definition_order'] = 50\n # Do not exist in Alignak\n # hostdependency['hostdependency_name'] = hostdependency['name']\n\n hostdependency['dependent_hostgroup_name'] = hostdependency['dependent_hostgroups']\n hostdependency['dependent_host_name'] = hostdependency['dependent_hosts']\n hostdependency['hostgroup_name'] = hostdependency['hostgroups']\n hostdependency['host_name'] = hostdependency['hosts']\n\n # dependent_host_name\n self.multiple_relation(hostdependency, 'dependent_host_name', 'hosts')\n # dependent_hostgroup_name\n self.multiple_relation(hostdependency, 'dependent_hostgroup_name', 'hostgroups')\n # host_name\n self.multiple_relation(hostdependency, 'host_name', 'hosts')\n # hostgroup_name\n self.multiple_relation(hostdependency, 'hostgroup_name', 'hostgroups')\n self.clean_unusable_keys(hostdependency)\n self.convert_lists(hostdependency)\n\n logger.debug(\"- hosts dependency: %s\", hostdependency)\n self.config['hostdependencies'].append(hostdependency)\n\n self.statsmgr.counter('objects.hostdependency', len(self.config['hostdependencies']))\n\n def get_hostescalations(self):\n \"\"\"Get hostescalations from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['hostescalations'] = {}\n params = {\"max_results\": self.backend_count}\n all_hostescalations = self.backend.get_all('hostescalation', params)\n logger.info(\"Got %d hostescalations\",\n len(all_hostescalations['_items']))\n for hostescalation in all_hostescalations['_items']:\n logger.debug(\"- %s\", hostescalation['name'])\n self.configraw['hostescalations'][hostescalation['_id']] = hostescalation['name']\n # hostescalation['hostescalation_name'] = hostescalation['name']\n hostescalation['imported_from'] = 'alignak-backend'\n if 'definition_order' in hostescalation and hostescalation['definition_order'] == 100:\n hostescalation['definition_order'] = 50\n hostescalation['contacts'] = []\n if 'users' in hostescalation:\n hostescalation['contacts'] = hostescalation['users']\n # host_name\n self.single_relation(hostescalation, 'host_name', 'hosts')\n # hostgroup_name\n self.multiple_relation(hostescalation, 'hostgroup_name', 'hostgroups')\n # contacts\n self.multiple_relation(hostescalation, 'contacts', 'contacts')\n # contact_groups\n self.multiple_relation(hostescalation, 'contact_groups', 'contactgroups')\n self.clean_unusable_keys(hostescalation)\n self.convert_lists(hostescalation)\n\n del hostescalation['notes']\n del hostescalation['alias']\n\n logger.debug(\"- host escalation: %s\", hostescalation)\n self.config['hostescalations'].append(hostescalation)\n\n self.statsmgr.counter('objects.hostescalation', len(self.config['hostescalations']))\n\n def get_servicedependencies(self):\n \"\"\"Get servicedependencies from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['servicedependencies'] = {}\n params = {\"max_results\": self.backend_count}\n all_servicedependencies = self.backend.get_all('servicedependency', params)\n logger.info(\"Got %d servicedependencies\",\n 
len(all_servicedependencies['_items']))\n for servicedependency in all_servicedependencies['_items']:\n logger.debug(\"- %s\", servicedependency['name'])\n self.configraw['servicedependencies'][servicedependency['_id']] = \\\n servicedependency['name']\n servicedependency['imported_from'] = 'alignak-backend'\n if 'definition_order' in servicedependency and \\\n servicedependency['definition_order'] == 100:\n servicedependency['definition_order'] = 50\n # Do not exist in Alignak\n # servicedependency['servicedependency_name'] = servicedependency['name']\n\n servicedependency['dependent_hostgroup_name'] = \\\n servicedependency['dependent_hostgroups']\n servicedependency['dependent_host_name'] = \\\n servicedependency['dependent_hosts']\n servicedependency['dependent_service_description'] = \\\n servicedependency['dependent_services']\n servicedependency['hostgroup_name'] = servicedependency['hostgroups']\n servicedependency['host_name'] = servicedependency['hosts']\n servicedependency['service_description'] = servicedependency['services']\n\n # dependent_host_name\n self.multiple_relation(servicedependency, 'dependent_host_name', 'hosts')\n # dependent_hostgroup_name\n self.multiple_relation(servicedependency, 'dependent_hostgroup_name', 'hostgroups')\n # service_description\n self.multiple_relation(servicedependency, 'service_description', 'services')\n # dependent_service_description\n self.multiple_relation(servicedependency, 'dependent_service_description', 'services')\n # host_name\n self.multiple_relation(servicedependency, 'host_name', 'hosts')\n # hostgroup_name\n self.multiple_relation(servicedependency, 'hostgroup_name', 'hostgroups')\n self.clean_unusable_keys(servicedependency)\n self.convert_lists(servicedependency)\n\n if not servicedependency['hostgroup_name']:\n del servicedependency['hostgroup_name']\n if not servicedependency['dependent_hostgroup_name']:\n del servicedependency['dependent_hostgroup_name']\n\n logger.debug(\"- services dependency: %s\", servicedependency)\n self.config['servicedependencies'].append(servicedependency)\n\n self.statsmgr.counter('objects.servicedependency', len(self.config['servicedependencies']))\n\n def get_serviceescalations(self):\n \"\"\"Get serviceescalations from alignak_backend\n\n :return: None\n \"\"\"\n self.configraw['serviceescalations'] = {}\n params = {\"max_results\": self.backend_count}\n all_serviceescalations = self.backend.get_all('serviceescalation', params)\n logger.info(\"Got %d serviceescalations\",\n len(all_serviceescalations['_items']))\n for serviceescalation in all_serviceescalations['_items']:\n logger.debug(\"- %s\", serviceescalation['name'])\n self.configraw['serviceescalations'][serviceescalation['_id']] = \\\n serviceescalation['name']\n # serviceescalation['serviceescalation_name'] = serviceescalation['name']\n serviceescalation['imported_from'] = 'alignak-backend'\n if 'definition_order' in serviceescalation and \\\n serviceescalation['definition_order'] == 100:\n serviceescalation['definition_order'] = 50\n serviceescalation['contacts'] = []\n if 'users' in serviceescalation:\n serviceescalation['contacts'] = serviceescalation['users']\n # host_name\n self.single_relation(serviceescalation, 'host_name', 'hosts')\n # hostgroup_name\n self.multiple_relation(serviceescalation, 'hostgroup_name', 'hostgroups')\n # service_description\n self.single_relation(serviceescalation, 'service_description', 'services')\n # contacts\n self.multiple_relation(serviceescalation, 'contacts', 'contacts')\n # contact_groups\n 
self.multiple_relation(serviceescalation, 'contact_groups', 'contactgroups')\n self.clean_unusable_keys(serviceescalation)\n self.convert_lists(serviceescalation)\n\n del serviceescalation['notes']\n del serviceescalation['alias']\n logger.debug(\"- service escalation: %s\", serviceescalation)\n self.config['serviceescalations'].append(serviceescalation)\n\n self.statsmgr.counter('objects.serviceescalation', len(self.config['serviceescalations']))\n\n def get_alignak_configuration(self):\n \"\"\"Get Alignak configuration from alignak-backend\n\n This function is an Arbiter hook called by the arbiter during its configuration loading.\n\n :return: alignak configuration parameters\n :rtype: dict\n \"\"\"\n self.alignak_configuration = {}\n\n if not self.backend_connected:\n self.getToken()\n if self.raise_backend_alert(errors_count=1):\n logger.error(\"Alignak backend connection is not available. \"\n \"Skipping Alignak configuration load and provide \"\n \"an empty configuration to the Arbiter.\")\n return self.alignak_configuration\n\n if self.my_arbiter and self.my_arbiter.verify_only:\n logger.info(\"My Arbiter is in verify only mode\")\n if self.bypass_verify_mode:\n logger.info(\"Configured to bypass the objects loading. \"\n \"Skipping Alignak configuration load and provide \"\n \"the last read configuration to the Arbiter.\")\n return self.alignak_configuration\n\n if self.backend_import:\n logger.info(\"Alignak backend importation script is active. \"\n \"Provide the last read Alignak configuration to the Arbiter.\")\n return self.alignak_configuration\n\n start_time = time.time()\n try:\n logger.info(\"Loading Alignak configuration...\")\n self.alignak_configuration = {}\n params = {'sort': '_id'}\n if self.my_arbiter and self.my_arbiter.arbiter_name:\n params.update({'where': '{\"name\": \"%s\"}' % self.my_arbiter.arbiter_name})\n all_alignak = self.backend.get_all('alignak', params)\n logger.info(\"Got %d Alignak configurations\", len(all_alignak['_items']))\n for alignak_cfg in all_alignak['_items']:\n logger.info(\"- %s\", alignak_cfg['name'])\n\n self.alignak_configuration.update(alignak_cfg)\n\n logger.debug(\"- configuration: %s\", alignak_cfg)\n except BackendException as exp:\n logger.warning(\"Alignak backend is not available for reading configuration. \"\n \"Backend communication error.\")\n logger.debug(\"Exception: %s\", exp)\n self.backend_connected = False\n return self.alignak_configuration\n\n self.time_loaded_conf = datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n now = time.time()\n logger.info(\"Alignak configuration loaded in %s seconds\", now - start_time)\n\n self.statsmgr.counter('objects.alignak', len(self.alignak_configuration))\n self.statsmgr.timer('objects-alignak-time', now - start_time)\n\n return self.alignak_configuration\n\n def get_objects(self):\n \"\"\"Get objects from alignak-backend\n\n This function is an Arbiter hook called by the arbiter during its configuration loading.\n\n :return: configuration objects\n :rtype: dict\n \"\"\"\n\n if not self.backend_connected:\n self.getToken()\n if self.raise_backend_alert(errors_count=1):\n logger.error(\"Alignak backend connection is not available. \"\n \"Skipping objects load and provide an empty list to the Arbiter.\")\n return self.config\n\n if self.my_arbiter and self.my_arbiter.verify_only:\n logger.info(\"my Arbiter is in verify only mode\")\n if self.bypass_verify_mode:\n logger.info(\"Configured to bypass the objects loading. 
\"\n \"Skipping objects load and provide an empty list to the Arbiter.\")\n return self.config\n\n if self.backend_import:\n logger.info(\"Alignak backend importation script is active. \"\n \"Provide an empty objects list to the Arbiter.\")\n return self.config\n\n start_time = time.time()\n try:\n logger.info(\"Loading Alignak monitored system configuration...\")\n self.get_realms()\n self.get_commands()\n self.get_timeperiods()\n self.get_contacts()\n self.get_contactgroups()\n self.get_hosts()\n self.get_hostgroups()\n self.get_services()\n self.get_servicegroups()\n self.get_hostdependencies()\n self.get_hostescalations()\n self.get_servicedependencies()\n self.get_serviceescalations()\n except BackendException as exp: # pragma: no cover - should not happen\n logger.warning(\"Alignak backend is not available for reading. \"\n \"Backend communication error.\")\n logger.exception(\"Exception: %s\", exp)\n self.backend_connected = False\n\n self.time_loaded_conf = datetime.utcnow().strftime(self.backend_date_format)\n\n now = time.time()\n logger.info(\"Alignak monitored system configuration loaded in %s seconds\", now - start_time)\n\n self.statsmgr.timer('objects-time', now - start_time)\n\n # Schedule next configuration reload check\n self.next_check = int(now) + (60 * self.verify_modification)\n self.next_action_check = int(now) + self.action_check\n self.next_daemons_state = int(now) + self.daemons_state\n\n if self.verify_modification:\n logger.info(\"next configuration reload check in %s seconds ---\",\n (self.next_check - int(now)))\n else:\n logger.info(\"no configuration reload check\")\n if self.action_check:\n logger.info(\"next actions check in %s seconds ---\",\n (self.next_action_check - int(now)))\n else:\n logger.info(\"no actions check\")\n if self.daemons_state:\n logger.info(\"next update daemons state in %s seconds ---\",\n (self.next_daemons_state - int(now)))\n else:\n logger.info(\"no daemons state update\")\n return self.config\n\n def hook_tick(self, arbiter):\n # pylint: disable=too-many-nested-blocks\n \"\"\"Hook in arbiter used to check if configuration has changed in the backend since\n last configuration loaded\n\n :param arbiter: alignak.daemons.arbiterdaemon.Arbiter\n :type arbiter: object\n :return: None\n \"\"\"\n if not self.backend_connected:\n self.getToken()\n if self.raise_backend_alert(errors_count=10):\n logger.warning(\"Alignak backend connection is not available. \"\n \"Periodical actions are disabled: configuration change checking, \"\n \"ack/downtime/forced check, and daemons state updates.\")\n return\n\n try:\n now = int(time.time())\n if self.verify_modification and now > self.next_check:\n logger.info(\"Check if system configuration changed in the backend...\")\n logger.debug(\"Now is: %s\", datetime.utcnow().strftime(self.backend_date_format))\n logger.debug(\"Last configuration loading time is: %s\", self.time_loaded_conf)\n # todo: we should find a way to declare in the backend schema\n # that a resource endpoint is concerned with this feature. 
Something like:\n # 'arbiter_reload_check': True,\n # 'schema': {...}\n logger.debug(\"Check if system configuration changed in the backend...\")\n resources = [\n 'realm', 'command', 'timeperiod',\n 'usergroup', 'user',\n 'hostgroup', 'host', 'hostdependency', 'hostescalation',\n 'servicegroup', 'service', 'servicedependency', 'serviceescalation'\n ]\n self.configuration_reload_required = False\n for resource in resources:\n ret = self.backend.get(resource, {'where': '{\"_updated\":{\"$gte\": \"%s\"}}'\n % self.time_loaded_conf})\n if ret['_meta']['total'] > 0:\n logger.info(\" - backend updated resource: %s, count: %d\",\n resource, ret['_meta']['total'])\n\n self.statsmgr.counter('updated.%s' % resource, ret['_meta']['total'])\n\n self.configuration_reload_required = True\n for updated in ret['_items']:\n logger.debug(\" -> updated: %s\", updated)\n exists = [log for log in self.configuration_reload_changelog\n if log['resource'] == resource and\n log['item']['_id'] == updated['_id'] and\n log['item']['_updated'] == updated['_updated']]\n if not exists:\n self.configuration_reload_changelog.append({\"resource\": resource,\n \"item\": updated})\n\n # Test number of host and services in backend. The goal is to detect the resources\n # deleted\n # todo: this should also be checked for other resources!\n ret = self.backend.get('host', {\"where\": '{\"_is_template\": false}'})\n if ret['_meta']['total'] < self.backend_nb_hosts:\n self.configuration_reload_required = True\n self.configuration_reload_changelog.append({\"resource\": 'host',\n \"item\": 'deleted'})\n ret = self.backend.get('service', {\"where\": '{\"_is_template\": false}'})\n if ret['_meta']['total'] < self.backend_nb_services:\n self.configuration_reload_required = True\n self.configuration_reload_changelog.append({\"resource\": 'service',\n \"item\": 'deleted'})\n\n if self.configuration_reload_required:\n self.statsmgr.counter('reload_required', 1)\n\n logger.warning(\"Hey, we must reload configuration from the backend!\")\n try:\n with open(arbiter.pidfile, 'r') as f:\n arbiter_pid = f.readline()\n os.kill(int(arbiter_pid), signal.SIGHUP)\n message = \"The configuration reload notification was \" \\\n \"raised to the arbiter (pid=%s).\" % arbiter_pid\n self.configuration_reload_changelog.append({\"resource\": \"backend-log\",\n \"item\": {\n \"_updated\": now,\n \"level\": \"INFO\",\n \"message\": message\n }})\n logger.info(message)\n except Exception as exp:\n message = \"Problem with the arbiter pid file (%s). 
\" \\\n \"Configuration reload notification was not raised.\" \\\n % arbiter.pidfile\n self.configuration_reload_changelog.append({\"resource\": \"backend-log\",\n \"item\": {\n \"_updated\": now,\n \"level\": \"ERROR\",\n \"message\": message\n }})\n logger.error(message)\n else:\n logger.debug(\"No changes found\")\n self.next_check = now + (60 * self.verify_modification)\n logger.debug(\n \"next configuration reload check in %s seconds ---\",\n (self.next_check - now)\n )\n\n if self.action_check and now > self.next_action_check:\n logger.debug(\"Check if acknowledgements are required...\")\n self.get_acknowledge(arbiter)\n logger.debug(\"Check if downtime scheduling are required...\")\n self.get_downtime(arbiter)\n logger.debug(\"Check if re-checks are required...\")\n self.get_forcecheck(arbiter)\n\n self.next_action_check = now + self.action_check\n logger.debug(\"next actions check in %s seconds ---\",\n (self.next_action_check - int(now)))\n\n if self.daemons_state and now > self.next_daemons_state:\n logger.debug(\"Update daemons state in the backend...\")\n self.update_daemons_state(arbiter)\n\n self.next_daemons_state = now + self.daemons_state\n logger.debug(\n \"next update daemons state in %s seconds ---\",\n (self.next_daemons_state - int(now))\n )\n except Exception as exp:\n logger.warning(\"hook_tick exception: %s\", str(exp))\n logger.debug(\"Exception: %s\", exp)\n\n @staticmethod\n def convert_date_timestamp(mydate):\n \"\"\"Convert date/time of backend into timestamp\n\n :param mydate: the date\n :type mydate: str\n :return: the timestamp\n :rtype: int\n \"\"\"\n return int(time.mktime(datetime.strptime(mydate, \"%a, %d %b %Y %H:%M:%S %Z\").\n timetuple()))\n\n def get_acknowledge(self, arbiter):\n \"\"\"Get acknowledge from backend\n\n :return: None\n \"\"\"\n if not self.backend_connected:\n return\n\n try:\n all_ack = self.backend.get_all('actionacknowledge',\n {'where': '{\"processed\": false}',\n 'embedded': '{\"host\": 1, \"service\": 1, \"user\": 1}'})\n except BackendException as exp: # pragma: no cover - should not happen\n logger.debug(\"Exception: %s\", exp)\n return\n\n self.statsmgr.counter('action.acknowledge', len(all_ack['_items']))\n\n for ack in all_ack['_items']:\n sticky = 1\n if ack['sticky']:\n sticky = 2\n if ack['action'] == 'add':\n # ack['comment'] = ack['comment'].encode('utf8', 'replace')\n if ack['service']:\n command = '[{}] ACKNOWLEDGE_SVC_PROBLEM;{};{};{};{};{};{};{}\\n'.\\\n format(self.convert_date_timestamp(ack['_created']), ack['host']['name'],\n ack['service']['name'], sticky, int(ack['notify']),\n 1, ack['user']['name'], ack['comment'])\n else:\n command = '[{}] ACKNOWLEDGE_HOST_PROBLEM;{};{};{};{};{};{}\\n'. \\\n format(self.convert_date_timestamp(ack['_created']), ack['host']['name'],\n sticky, int(ack['notify']), 1, ack['user']['name'], ack['comment'])\n elif ack['action'] == 'delete':\n if ack['service']:\n command = '[{}] REMOVE_SVC_ACKNOWLEDGEMENT;{};{}\\n'.\\\n format(self.convert_date_timestamp(ack['_created']), ack['host']['name'],\n ack['service']['name'])\n else:\n command = '[{}] REMOVE_HOST_ACKNOWLEDGEMENT;{}\\n'. 
\\\n format(self.convert_date_timestamp(ack['_created']), ack['host']['name'])\n\n headers = {'Content-Type': 'application/json', 'If-Match': ack['_etag']}\n data = {'processed': True}\n self.backend.patch('actionacknowledge/' + ack['_id'], data, headers)\n\n logger.info(\"build external command: %s\", str(command))\n ext = ExternalCommand(command)\n arbiter.external_commands.append(ext)\n\n def get_downtime(self, arbiter):\n \"\"\"Get downtime from backend\n\n :return: None\n \"\"\"\n if not self.backend_connected:\n return\n\n all_downt = self.backend.get_all('actiondowntime',\n {'where': '{\"processed\": false}',\n 'embedded': '{\"host\": 1, \"service\": 1, '\n '\"user\": 1}'})\n\n self.statsmgr.counter('action.downtime', len(all_downt['_items']))\n\n # pylint: disable=too-many-format-args\n for downt in all_downt['_items']:\n if downt['action'] == 'add':\n # downt['comment'] = downt['comment'].encode('utf8', 'replace')\n if downt['service']:\n command = '[{}] SCHEDULE_SVC_DOWNTIME;{};{};{};{};{};{};{};{};{}\\n'.\\\n format(self.convert_date_timestamp(downt['_created']),\n downt['host']['name'], downt['service']['name'],\n downt['start_time'], downt['end_time'], int(downt['fixed']),\n 0, downt['duration'], downt['user']['name'], downt['comment'])\n elif downt['host'] and 'name' in downt['host']:\n command = '[{}] SCHEDULE_HOST_DOWNTIME;{};{};{};{};{};{};{};{}\\n'.\\\n format(self.convert_date_timestamp(downt['_created']),\n downt['host']['name'], downt['start_time'], downt['end_time'],\n int(downt['fixed']), 0, downt['duration'],\n downt['user']['name'], downt['comment'])\n elif downt['action'] == 'delete':\n if downt['service']:\n command = '[{}] DEL_ALL_SVC_DOWNTIMES;{};{}\\n'.\\\n format(self.convert_date_timestamp(downt['_created']),\n downt['host']['name'], downt['service']['name'])\n else:\n command = '[{}] DEL_ALL_HOST_DOWNTIMES;{}\\n'. 
\\\n format(self.convert_date_timestamp(downt['_created']),\n downt['host']['name'])\n\n headers = {'Content-Type': 'application/json', 'If-Match': downt['_etag']}\n data = {'processed': True}\n self.backend.patch('actiondowntime/' + downt['_id'], data, headers)\n\n logger.info(\"build external command: %s\", str(command))\n ext = ExternalCommand(command)\n arbiter.external_commands.append(ext)\n\n def get_forcecheck(self, arbiter):\n \"\"\"Get forcecheck from backend\n\n :return: None\n \"\"\"\n if not self.backend_connected:\n return\n\n all_fcheck = self.backend.get_all('actionforcecheck',\n {'where': '{\"processed\": false}',\n 'embedded': '{\"host\": 1, \"service\": 1}'})\n\n self.statsmgr.counter('action.force_check', len(all_fcheck['_items']))\n\n for fcheck in all_fcheck['_items']:\n timestamp = self.convert_date_timestamp(fcheck['_created'])\n if fcheck['service']:\n command = '[{}] SCHEDULE_FORCED_SVC_CHECK;{};{};{}\\n'.\\\n format(timestamp, fcheck['host']['name'], fcheck['service']['name'], timestamp)\n else:\n command = '[{}] SCHEDULE_FORCED_HOST_CHECK;{};{}\\n'.\\\n format(timestamp, fcheck['host']['name'], timestamp)\n\n headers = {'Content-Type': 'application/json', 'If-Match': fcheck['_etag']}\n data = {'processed': True}\n self.backend.patch('actionforcecheck/' + fcheck['_id'], data, headers)\n\n logger.info(\"build external command: %s\", str(command))\n ext = ExternalCommand(command)\n arbiter.external_commands.append(ext)\n\n def update_daemons_state(self, arbiter):\n \"\"\"Update the daemons status in the backend\n\n :param arbiter:\n :return:\n \"\"\"\n if not self.backend_connected:\n return\n\n if not self.daemonlist['arbiter']:\n all_daemons = self.backend.get_all('alignakdaemon')\n for item in all_daemons['_items']:\n self.daemonlist[item['type']][item['name']] = item\n\n for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver', 'broker']:\n for daemon in getattr(arbiter.conf, s_type + 's'):\n data = {'type': s_type}\n data['name'] = getattr(daemon, s_type + '_name')\n for field in ['address', 'port', 'alive', 'reachable', 'passive', 'spare']:\n data[field] = getattr(daemon, field)\n data['last_check'] = int(getattr(daemon, 'last_check'))\n if s_type == 'arbiter' and data['last_check'] == 0 and data['reachable']:\n data['last_check'] = int(time.time())\n if getattr(daemon, 'realm_name') == '':\n # it's arbiter case not have realm refined\n data['_realm'] = self.configraw['realms_name'][self.highlevelrealm['name']]\n if len(self.configraw['realms']) == 1:\n data['_sub_realm'] = False\n else:\n data['_sub_realm'] = True\n else:\n data['_realm'] = self.configraw['realms_name'][getattr(daemon, 'realm_name')]\n if hasattr(daemon, 'manage_sub_realms'):\n data['_sub_realm'] = getattr(daemon, 'manage_sub_realms')\n\n if data['name'] in self.daemonlist[s_type]:\n headers = {\n 'Content-Type': 'application/json',\n 'If-Match': self.daemonlist[s_type][data['name']]['_etag']\n }\n response = self.backend.patch(\n 'alignakdaemon/%s' % self.daemonlist[s_type][data['name']]['_id'],\n data, headers, True)\n else:\n response = self.backend.post('alignakdaemon', data)\n self.daemonlist[s_type][data['name']] = response\n","repo_name":"Alignak-monitoring-contrib/alignak-module-backend","sub_path":"alignak_module_backend/arbiter/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":71715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30012581931","text":"import numpy as np\n\na = 
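The acknowledge, downtime and forcecheck handlers in the module above all follow the same recipe: convert the backend document's _created date with convert_date_timestamp, format a Nagios-style external command string, mark the document processed, and queue an ExternalCommand on the arbiter. A minimal sketch of the command-building step, using a hypothetical ack document that carries only the fields the module reads:

import time
from datetime import datetime

def convert_date_timestamp(mydate):
    # same conversion as the module's static method above
    return int(time.mktime(datetime.strptime(mydate, "%a, %d %b %Y %H:%M:%S %Z").timetuple()))

ack = {  # hypothetical backend document, for illustration only
    '_created': 'Thu, 01 Jun 2017 10:58:30 GMT',
    'host': {'name': 'srv001'}, 'service': {'name': 'http'},
    'user': {'name': 'admin'}, 'notify': True, 'sticky': True,
    'comment': 'working on it',
}
sticky = 2 if ack['sticky'] else 1  # mirrors the sticky handling above
command = '[{}] ACKNOWLEDGE_SVC_PROBLEM;{};{};{};{};{};{};{}\n'.format(
    convert_date_timestamp(ack['_created']), ack['host']['name'],
    ack['service']['name'], sticky, int(ack['notify']), 1,
    ack['user']['name'], ack['comment'])
print(command)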
np.array(\n [\n [1,2,3],\n [5,6,7],\n [9,10,11]\n ]\n)\n\n'''\nprint(a)\nprint(a.shape,'\\n')\n\nb = a[:2,1:3]\nprint(b,'\\n')\n\nc = a[1:3,1:3]\nprint(c,'\\n')\n\nd = a[1:3,:2]\nprint(d)\n'''\n# Reverse the elements of each row, left to right\nd = np.fliplr(a)\nprint(d)\n\n# Reverse the array along every axis\nd = np.flip(a)\nprint(d)\n\n","repo_name":"IsraelArbona/mision_tic_gp_37","sub_path":"ciclo_1/semana4/ejercicio13.py","file_name":"ejercicio13.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6293162607","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.prev = None\n self.next = None\n\nclass DoublyLinkedList:\n def __init__(self, value):\n self.head = Node(value)\n self.tail = self.head\n self.length = 1\n\n\n def append(self, value):\n ''' Adds a value to the end of a doubly linked list\n type: value\n '''\n self.length += 1\n postNode = Node(value)\n\n # Wire the postNode\n self.tail.next = postNode\n postNode.prev = self.tail\n\n # Sets new tail node\n self.tail = postNode\n\n\n def prepend(self, value):\n ''' Adds a value to the beginning of a doubly linked list\n type: value\n '''\n\n self.length += 1\n preNode = Node(value)\n\n # Wire the preNode \n preNode.next = self.head\n self.head.prev = preNode\n \n # Sets new head node\n self.head = preNode\n\n\n def insert(self, value, index):\n ''' Inserts a value in the DLL at a provided index position\n type: value\n type: index: int\n '''\n\n if not index in range(self.length):\n print(\"ERROR! 
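A quick check of the two flips used in the exercise above clarifies which axes they act on: fliplr reverses each row, while flip with no axis argument reverses along every axis.

import numpy as np

a = np.array([[1, 2, 3], [5, 6, 7], [9, 10, 11]])
print(np.fliplr(a))        # each row reversed left-to-right: [[3 2 1] ...]
print(np.flip(a))          # reversed along every axis: [[11 10 9] ...]
print(np.flip(a, axis=0))  # rows in reverse order (np.flipud equivalent)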
This index does not exist!\")\n return\n \n if index == 0:\n # Remove head of the DLL\n self.head = self.head.next\n self.head.prev = None\n elif index == self.length - 1:\n # Remove tail of the DLL\n self.tail = self.tail.prev\n self.tail.next = None\n else:\n # Introduce a temporary node for \n # traversing through the list\n currentNode = self.head\n\n for position in range(self.length - 1):\n if position == index:\n currentNode.prev.next = currentNode.next\n currentNode.next.prev = currentNode.prev\n break\n \n currentNode = currentNode.next\n\n # Decrease length of the list\n self.length -= 1\n\n def print_list(self):\n ''' \n Print the linked list\n '''\n\n currentNode = self.head\n\n print(f\"<<<<<<< {self.length} >>>>>>>\")\n for index in range(self.length):\n nextValue = currentNode.next.value if currentNode.next else 'None'\n print(f\"{index}: {currentNode.value} <-> {nextValue}\")\n currentNode = currentNode.next\n print(f\"<<<<<<<<.>>>>>>>>\")\n\n\n def print_head(self):\n print(f\">> head: {self.head.value}\") if self.head else print(\">> head: None\")\n\n\n def print_tail(self):\n print(f\">> tail: {self.tail.value}\") if self.tail else print(\">> tail: None\")\n\n\nif __name__ == \"__main__\":\n dlinkedList = DoublyLinkedList(10)\n\n dlinkedList.append(20)\n dlinkedList.append(30)\n \n dlinkedList.prepend(-5)\n dlinkedList.prepend(-8)\n\n dlinkedList.insert(value=12, index=2)\n dlinkedList.print_list()\n\n \n dlinkedList.remove(index=5)\n dlinkedList.insert(value=30, index=4)\n dlinkedList.append(55)\n\n dlinkedList.print_list()\n dlinkedList.print_head()\n dlinkedList.print_tail()\n\n","repo_name":"lukaschoebel/LUMOS","sub_path":"code/python_scripts/dlinked_list.py","file_name":"dlinked_list.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"888506074","text":"import collections\nimport numpy as np\nimport os\nimport time\nfrom tqdm import tqdm\n\nfrom apex import amp\nimport torch\nimport torch.nn.functional as F\nfrom pycocotools.cocoeval import COCOeval\n\nfrom simpleAICV.classification.common import ClassificationDataPrefetcher, AverageMeter, accuracy\n# from simpleAICV.detection.common import DetectionDataPrefetcher\n# from simpleAICV.segmentation.common import SegmentationDataPrefetcher\n\n\ndef validate_classification(val_loader, model, criterion, config):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n model_on_cuda = next(model.parameters()).is_cuda\n for images, targets in tqdm(val_loader):\n if model_on_cuda:\n images, targets = images.cuda(), targets.cuda()\n\n data_time.update(time.time() - end)\n end = time.time()\n\n outputs = model(images)\n batch_time.update(time.time() - end)\n\n loss = criterion(outputs, targets)\n acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))\n # print('acc1', acc1)\n # print('acc5', acc5)\n # acc1 tensor([78.9062], device='cuda:1')\n # acc5 tensor([92.9688], device='cuda:1')\n losses.update(loss.item(), images.size(0))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n end = time.time()\n\n # per image data load time(ms) and inference time(ms)\n per_image_load_time = data_time.avg / config.batch_size * 1000\n per_image_inference_time = batch_time.avg / config.batch_size * 1000\n\n return top1.avg, top5.avg, 
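The demo in __main__ only ever prints the list head-to-tail; walking the prev pointers is a quick way to confirm the backward links are wired correctly as well. A small sketch using the classes above:

dll = DoublyLinkedList(10)
dll.append(20)
dll.append(30)
node = dll.tail
while node:                     # traverse tail -> head through the prev links
    print(node.value, end=' ')  # 30 20 10
    node = node.prev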
losses.avg, per_image_load_time, per_image_inference_time\n\n\ndef train_classification(train_loader, model, criterion, optimizer, scheduler,\n epoch, logger, config):\n '''\n train classification model for one epoch\n '''\n top1 = AverageMeter()\n top5 = AverageMeter()\n losses = AverageMeter()\n\n # switch to train mode\n model.train()\n\n local_rank = torch.distributed.get_rank() if config.distributed else None\n if config.distributed:\n gpus_num = torch.cuda.device_count()\n iters = len(train_loader.dataset) // (\n config.batch_size * gpus_num) if config.distributed else len(\n train_loader.dataset) // config.batch_size\n else:\n iters = len(train_loader.dataset) // config.batch_size\n\n prefetcher = ClassificationDataPrefetcher(train_loader)\n images, targets = prefetcher.next()\n iter_index = 1\n\n while images is not None:\n images, targets = images.cuda(), targets.cuda()\n outputs = model(images)\n loss = criterion(outputs, targets)\n loss = loss / config.accumulation_steps\n\n if config.apex:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if iter_index % config.accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n images, targets = prefetcher.next()\n\n if iter_index % config.print_interval == 0:\n log_info = f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, loss: {loss.item():.4f}'\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n iter_index += 1\n\n scheduler.step()\n\n return top1.avg, top5.avg, losses.avg\n\n\ndef validate_KD(val_loader, model, criterion):\n top1 = AverageMeter()\n top5 = AverageMeter()\n total_losses = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n model_on_cuda = next(model.parameters()).is_cuda\n for images, targets in tqdm(val_loader):\n if model_on_cuda:\n images, targets = images.cuda(), targets.cuda()\n\n tea_outputs, stu_outputs = model(images)\n total_loss = 0\n for loss_name in criterion.keys():\n if 'KD' in loss_name:\n temp_loss = criterion[loss_name](stu_outputs, tea_outputs)\n else:\n temp_loss = criterion[loss_name](stu_outputs, targets)\n\n total_loss += temp_loss\n\n acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))\n\n total_losses.update(total_loss.item(), images.size(0))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n return top1.avg, top5.avg, total_losses.avg\n\n\ndef train_KD(train_loader, model, criterion, optimizer, scheduler, epoch,\n logger, config):\n '''\n train classification model for one epoch\n '''\n top1 = AverageMeter()\n top5 = AverageMeter()\n total_losses = AverageMeter()\n\n # switch to train mode\n model.train()\n\n local_rank = torch.distributed.get_rank() if config.distributed else None\n if config.distributed:\n gpus_num = torch.cuda.device_count()\n iters = len(train_loader.dataset) // (\n config.batch_size * gpus_num) if config.distributed else len(\n train_loader.dataset) // config.batch_size\n else:\n iters = len(train_loader.dataset) // config.batch_size\n\n prefetcher = ClassificationDataPrefetcher(train_loader)\n images, targets = prefetcher.next()\n iter_index 
= 1\n\n while images is not None:\n images, targets = images.cuda(), targets.cuda()\n tea_outputs, stu_outputs = model(images)\n loss = 0\n loss_value = {}\n for loss_name in criterion.keys():\n if 'KD' in loss_name:\n temp_loss = criterion[loss_name](stu_outputs, tea_outputs)\n else:\n temp_loss = criterion[loss_name](stu_outputs, targets)\n\n loss_value[loss_name] = temp_loss\n loss += temp_loss\n\n total_losses.update(loss.item(), images.size(0))\n loss = loss / config.accumulation_steps\n\n if config.apex:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if iter_index % config.accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n images, targets = prefetcher.next()\n\n log_info = ''\n if iter_index % config.print_interval == 0:\n log_info += f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, total_loss: {loss.item():.4f} '\n for loss_name in criterion.keys():\n log_info += f'{loss_name}: {loss_value[loss_name].item():.4f} '\n logger.info(log_info) if (config.distributed and local_rank\n == 0) or not config.distributed else None\n\n iter_index += 1\n\n scheduler.step()\n\n return top1.avg, top5.avg, total_losses.avg\n\n\ndef compute_voc_ap(recall, precision, use_07_metric=True):\n if use_07_metric:\n # use voc 2007 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(recall >= t) == 0:\n p = 0\n else:\n # get max precision for recall >= t\n p = np.max(precision[recall >= t])\n # average 11 recall point precision\n ap = ap + p / 11.\n else:\n # use voc>=2010 metric,average all different recall precision as ap\n # recall add first value 0. and last value 1.\n mrecall = np.concatenate(([0.], recall, [1.]))\n # precision add first value 0. and last value 0.\n mprecision = np.concatenate(([0.], precision, [0.]))\n\n # compute the precision envelope\n for i in range(mprecision.size - 1, 0, -1):\n mprecision[i - 1] = np.maximum(mprecision[i - 1], mprecision[i])\n\n # to calculate area under PR curve, look for points where X axis (recall) changes value\n i = np.where(mrecall[1:] != mrecall[:-1])[0]\n\n # sum (\\Delta recall) * prec\n ap = np.sum((mrecall[i + 1] - mrecall[i]) * mprecision[i + 1])\n\n return ap\n\n\ndef compute_ious(a, b):\n '''\n :param a: [N,(x1,y1,x2,y2)]\n :param b: [M,(x1,y1,x2,y2)]\n :return: IoU [N,M]\n '''\n\n a = np.expand_dims(a, axis=1) # [N,1,4]\n b = np.expand_dims(b, axis=0) # [1,M,4]\n\n overlap = np.maximum(0.0,\n np.minimum(a[..., 2:], b[..., 2:]) -\n np.maximum(a[..., :2], b[..., :2])) # [N,M,(w,h)]\n\n overlap = np.prod(overlap, axis=-1) # [N,M]\n\n area_a = np.prod(a[..., 2:] - a[..., :2], axis=-1)\n area_b = np.prod(b[..., 2:] - b[..., :2], axis=-1)\n\n iou = overlap / (area_a + area_b - overlap)\n\n return iou\n\n\n","repo_name":"guohaoyu110/taivision","sub_path":"tests/test_examples/imagenet_test/test/tools/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":9546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18077506475","text":"# Random xlsx selection. 
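A toy sanity check for compute_voc_ap above (assuming the function from this script is in scope): a detector with precision 1.0 at every recall level should score an AP of about 1.0 under both the 11-point 2007 metric and the area-under-curve metric used from 2010 on.

import numpy as np

recall = np.linspace(0.1, 1.0, 10)
precision = np.ones_like(recall)
print(compute_voc_ap(recall, precision, use_07_metric=True))   # ~1.0 (11-point average)
print(compute_voc_ap(recall, precision, use_07_metric=False))  # ~1.0 (PR-curve area)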
Used while instead of for: removing items inside a for loop makes the iterator skip every other element, so an order built from 200 files only listed 100 of them.\n\nimport random\nimport os\nimport pandas as pd\n\npath = os.getcwd()\nfiles = os.listdir(path)\nfiles\n\nfiles_xlsx = [f for f in files if f[-4:]=='xlsx' and f!='fifam-uids.xlsx' and f!='Teams.xlsx'] \n\ng = []\n\nwhile len(files_xlsx):\n a = random.choice(files_xlsx)\n print (a)\n\n g.append(a)\n files_xlsx.remove(a)\n","repo_name":"muratcansarkalkan/ReadExcelsFM","sub_path":"order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"124037595","text":"from sqlalchemy import Column, String, Text, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship, lazyload\n\nfrom app.utils.base import BaseModel\nfrom app.utils.db import db\n\n\nclass ArticleModel(BaseModel):\n __tablename__ = 'liang_article'\n\n title = Column(String(50), index=True, comment='标题')\n cat = relationship('LabelModel', secondary='liang_article_label', backref=db.backref('articles', lazy='dynamic'), lazy='dynamic')\n author = Column(String(50), comment='作者')\n summary = Column(String(200), nullable=True, comment='简介')\n cover = Column(String(50), nullable=True, comment='封面图')\n content = Column(Text(), comment='内容')\n comments = Column(Integer(), default=0, comment='评论数')\n likes = Column(Integer(), default=0, comment='点赞数')\n\n\nclass ArticleLabelModel(db.Model):\n __tablename__ = 'liang_article_label'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n article = Column(Integer, ForeignKey('liang_article.id'))\n label = Column(Integer, ForeignKey('liang_label.id'))\n\n\n","repo_name":"RoyalLiang/Awesome","sub_path":"app/models/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25373698305","text":"import spacy\nnlp = spacy.load('en_core_web_sm')\n\n\ndef apply(input):\n # Find named entities, phrases and concepts\n doc = nlp(input)\n ents = [{\"text\": entity.text, \"entity\": entity.label_} for entity in doc.ents]\n return \"Entities {0}\".format(ents)\n\n\nif __name__ == \"__main__\":\n input_t = \"The Mars Orbiter Mission (MOM), informally known as Mangalyaan, \" \\\n \"was launched into Earth orbit on 5 November 2013 by the \" \\\n \"Indian Space Research Organisation (ISRO) and has entered\" \\\n \" Mars orbit on 24 September 2014. India thus became\" \\\n \" the first country to enter Mars orbit on its first attempt. 
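The pitfall that comment describes can be reproduced in miniature: removing items from a list while a for loop iterates over it skips every other element, which is exactly why only 100 of the 200 files showed up.

items = list(range(6))
for x in items:        # the iterator advances while the list shrinks
    items.remove(x)
print(items)           # [1, 3, 5] - half the elements survive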
\" \\\n \"It was completed at a record low cost of $74 million.\"\n print(apply(input_t))","repo_name":"olahsymbo/conv-ai","sub_path":"test_spacy.py","file_name":"test_spacy.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41147368726","text":"from django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom .views import HomeView, PostView, CategoriesList, CategoryView, Contact\n\napp_name = 'blog'\n\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n path('article/<slug:slug>/', PostView.as_view(), name='post'),\n path('category/', CategoriesList.as_view(), name='category'),\n path('category/<slug:slug>/', CategoryView.as_view(), name=\"categoryDetail\"),\n path('contact/', Contact.as_view(), name='contact')\n]\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"mohammadh1380/Blog-Django","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43968535895","text":"import hashlib\ndef _get_unique_id(node_handle):\n m = hashlib.md5(node_handle.encode())\n return str(str(int(m.hexdigest(), 16))[0:16])\n\ndef ProcessNodeData(dataObj,name):\n data=dict(dataObj)\n for key,val in data.iteritems():\n data[key]=getValFromWeaver(val)\n data['caption']=name\n data['handle']=name\n if data['labels'][0][1:8]=='Concept':\n data['type']='Concept'\n else:\n data['type']='Media'\n return data\n\ndef ProcessEdgeData(edge,edgeProps):\n props=dict()\n if 'edgeDirection' in edgeProps:\n if edgeProps['edgeDirection']=='B':\n props['source']=edge.end_node\n props['target']=edge.start_node\n else:\n props['source']=edge.start_node\n props['target']=edge.end_node\n\n props['type']=getValFromWeaver(edge.properties['label'])\n props['handle']=edge.handle\n return props\n\ndef getValFromWeaver(val):\n return val[0]\n\ndef mergeProps(l1,l2):\n l2=getValFromWeaver(l2)\n l1=l1.split(',')\n l2=l2.split(',')\n l3=list()\n l3=l1+list(set(l2)-set(l1))\n return \",\".join(l3)\n\ndef getUpdatedProperty(newProps,oldProps):\n for key,val in newProps.iteritems():\n updateVal=val\n if key in oldProps:\n updateVal=mergeProps(newProps[key],oldProps[key])\n newProps[key]=updateVal\n\n return newProps\n","repo_name":"sid17/weaverLib","sub_path":"weaverWrapper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41698076291","text":"from torch.utils.data import Dataset\nimport torch\nimport torchvision.transforms as transforms\nimport collections\nfrom glob import glob\nimport os\nfrom PIL import Image\n\n\"\"\"\nmain.py 함수를 참고하여 다음을 생각해봅시다.\n\n1. CRNN_dataset은 어떤 모듈을 상속받아야 할까요? torch.utils.data.Dataset을 상속받아야 한다.\n\n2. CRNN_dataset의 역할은 무엇일까요? 왜 필요할까요? 데이터셋을 좀더 쉽게 다루기 위해서 필요하다. - 미니 배치 학습,shuffle, 병렬 처리까지 간단히 수행할 수 있다. \n\n3. 1.의 모듈을 상속받는다면 __init__, __len__, __getitem__을 필수로 구현해야 합니다. 
각 함수의 역할을 설명해주세요.\n 1)__init__ : 초기화 함수\n 2)__len__ : 데이터셋의 길이(총 샘플의 수)를 반환하는 함수\n 3)__getitem__ : 데이터셋에서 특정 i번째 샘플을 가져오도록 하는 인덱싱을 위한 함수 \n\n\"\"\"\n\n\nclass CRNN_dataset(Dataset):\n def __init__(self, path, w=100, h=32, alphabet='0123456789abcdefghijklmnopqrstuvwxyz', max_len=36):\n self.max_len=max_len\n self.path = path\n self.files = glob(path+'/*.jpg') \n self.n_image = len(self.files)\n assert (self.n_image > 0), \"해당 경로에 파일이 없습니다. :)\"\n\n self.transform = transforms.Compose([\n transforms.Resize((w, h)), # image 사이즈를 w, h를 활용하여 바꿔주세요.\n transforms.ToTensor() # tensor로 변환해주세요.\n ])\n \"\"\"\n strLabelConverter의 역할을 설명해주세요.\n 1. text 문제를 풀기 위해 해당 함수는 어떤 역할을 하고 있을까요?\n\n text를 숫자로, 숫자를 text��� 바꾼다\n\n 2. encode, decode의 역할 설명\n\n encode : 입력된 정보를 어떻게 저장하여 압축할지 정하는것(text의 각 단어들을 숫자로 바꾼다)\n decode : 인코더에서 전달된 정보들을 어떻게 출어서 다시 생성할지 정하는것( 숫자열을 문자열로 바꾸어준다)\n\n \"\"\"\n self.converter = strLabelConverter(alphabet) \n \n def __len__(self):\n return self.n_image # hint: __init__에 정의한 변수 중 하나\n\n def __getitem__(self,idx):\n label = self.files[idx].split('_')[1]\n img = Image.open(self.files[idx]).convert('L')\n img = self.transform(img)\n \"\"\"\n max_len이 왜 필요할까요? # hint: text data라는 점\n\n sequence같은 물체의 길이 변화에 불변하여 순차적표현을 가능하게 하기 위해서,,?\n\n \"\"\"\n\n if len(label) > self.max_len:\n label = label[:self.mfax_len]\n label_text, label_length = self.converter.encode(label)\n\n if len(label_text) < self.max_len:\n temp = torch.ones(self.max_len-len(label), dtype=torch.int)\n label_text = torch.cat([label_text, temp])\n\n return img, (label_text, label_length) # hint: main.py를 보면 알 수 있어요 :)\n\n\n\n# 아래 함수는 건드리지 마시고, 그냥 쓰세요 :)\nclass strLabelConverter(object):\n def __init__(self, alphabet, ignore_case=True):\n self._ignore_case = ignore_case\n if self._ignore_case:\n alphabet = alphabet.lower()\n self.alphabet = alphabet + '-' \n\n self.dict = {}\n for i, char in enumerate(alphabet):\n self.dict[char] = i + 1\n\n def encode(self, text):\n if isinstance(text, str):\n text = [\n self.dict[char.lower() if self._ignore_case else char]\n for char in text\n ]\n length = [len(text)]\n elif isinstance(text, collections.Iterable):\n length = [len(s) for s in text]\n text = ''.join(text)\n text, _ = self.encode(text)\n return (torch.IntTensor(text), torch.IntTensor(length))\n\n def decode(self, t, length, raw=False):\n if length.numel() == 1:\n length = length[0]\n assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(t.numel(), length)\n if raw:\n return ''.join([self.alphabet[i - 1] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(self.alphabet[t[i] - 1])\n return ''.join(char_list)\n else:\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.IntTensor([l]), raw=raw))\n index += l\n return texts\n","repo_name":"Yu-Jin22/Tobigs-14th-assignment","sub_path":"wk9/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70639478935","text":"'''\r\n사탕 게임\r\nhttps://www.acmicpc.net/problem/3085\r\n'''\r\nN = int(input())\r\nmat = []\r\nfor _ in range(N):\r\n row = list(map(str, input()))\r\n mat.append(row)\r\n\r\n\r\ndef reset_colors(colors):\r\n 
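A short round trip through the converter defined above shows the encoding contract: characters map to 1-based indices into the alphabet, and decode collapses the CTC-style index sequence back into a string.

conv = strLabelConverter('0123456789abcdefghijklmnopqrstuvwxyz')
text, length = conv.encode('cat')
print(text, length)               # indices of 'c', 'a', 't' and length tensor([3])
print(conv.decode(text, length))  # 'cat'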
colors[0] = 0\r\n colors[1] = 0\r\n colors[2] = 0\r\n colors[3] = 0\r\n\r\n\r\ndef color_to_index(color):\r\n if color == 'C':\r\n return 0\r\n elif color == 'P':\r\n return 1\r\n elif color == 'Z':\r\n return 2\r\n else: # Y\r\n return 3\r\n\r\n\r\ndef count_max_continuous(r, c, d): # row, column, direction\r\n global cnt_max\r\n\r\n if d == 'r': # move along one row\r\n c = 0\r\n else: # move along one column\r\n r = 0\r\n reset_colors(colors_max)\r\n reset_colors(colors)\r\n prev_color = ''\r\n for _ in range(N):\r\n idx = color_to_index(mat[r][c])\r\n if prev_color == '' or prev_color == mat[r][c]:\r\n colors[idx] += 1\r\n colors_max[idx] = max(colors[idx], colors_max[idx])\r\n else:\r\n colors[idx] = 1\r\n\r\n prev_color = mat[r][c]\r\n if d == 'r':\r\n c += 1\r\n else:\r\n r += 1\r\n\r\n cnt_max = max(cnt_max, max(colors_max))\r\n\r\n\r\n# right, down\r\ndr = [0, 1]\r\ndc = [1, 0]\r\n\r\ncnt_max = 0\r\ncolors = [0, 0, 0, 0] # C, P, Z, Y\r\ncolors_max = [0, 0, 0, 0] # max of C, P, Z, Y\r\nfor r in range(N):\r\n for c in range(N):\r\n # find the longest run before any swap\r\n count_max_continuous(r, c, 'r') # check the row\r\n count_max_continuous(r, c, 'c') # check the column\r\n\r\n # find the longest run after swapping with the right/lower neighbour\r\n for i in range(2):\r\n rr = r+dr[i]\r\n cc = c+dc[i]\r\n\r\n if rr >= N or cc >= N: # out of range, move on\r\n continue\r\n\r\n if mat[r][c] != mat[rr][cc]:\r\n mat[r][c], mat[rr][cc] = mat[rr][cc], mat[r][c] # swap\r\n count_max_continuous(r, c, 'r')\r\n count_max_continuous(r, c, 'c')\r\n count_max_continuous(rr, cc, 'r')\r\n count_max_continuous(rr, cc, 'c')\r\n mat[r][c], mat[rr][cc] = mat[rr][cc], mat[r][c] # re-swap\r\n\r\nprint(cnt_max)\r\n","repo_name":"buyeolim/ps_prac","sub_path":"BOJ/python3/3085.py","file_name":"3085.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15536938734","text":"import os\n\nimport pytest\n\nimport oddt\nfrom oddt.datasets import pdbbind, dude\n\ntest_data_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_pdbbind():\n\n results = {\n 'core': (['4yef', '10gs'],\n [5.35, 6.4]),\n 'refined': (['1nlp', '1imx', '4yef', '10gs'],\n [4.96, 3.52, 5.35, 6.4]),\n 'general_PL': (['1k9q', '1nlo', '1nlp', '1imx', '4yef', '10gs'],\n [3.15, 5.47, 4.96, 3.52, 5.35, 6.4]),\n }\n\n with pytest.raises(ValueError):\n pdbbind(home=os.path.join(test_data_dir, 'data', 'pdbbind'))\n\n for year in [2007, 2013, 2016]:\n pdbbind_db = pdbbind(home=os.path.join(test_data_dir, 'data', 'pdbbind'),\n version=year, default_set='core')\n\n for set_name, (ids, activities) in results.items():\n if set_name == 'general_PL' and year == 2007:\n set_name = 'general'\n pdbbind_db.default_set = set_name\n assert pdbbind_db.ids == ids\n assert pdbbind_db.activities == activities\n\n for pid in pdbbind_db:\n assert isinstance(pid.pocket, oddt.toolkit.Molecule)\n assert len(pid.pocket.atoms) > 0\n assert isinstance(pid.ligand, oddt.toolkit.Molecule)\n assert len(pid.ligand.atoms) > 0\n if pid.id == '10gs':\n assert pid.protein is None\n else:\n assert isinstance(pid.protein, oddt.toolkit.Molecule)\n assert len(pid.protein.atoms) > 0\n\n # reset the pdbbind set\n pdbbind_db.default_set = 'refined'\n\n # getting by name\n assert pdbbind_db['1imx'].id == '1imx'\n\n # getting by id\n assert pdbbind_db[-3].id == '1imx'\n assert pdbbind_db[1].id == '1imx'\n\n with pytest.raises(KeyError):\n pdbbind_db['xxxx']\n with pytest.raises(KeyError):\n pdbbind_db[123456]\n with pytest.raises(KeyError):\n pdbbind_db[-123456]\n\n pid = 
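The nested loop above only ever tries swapping a cell with its right and lower neighbour, which is enough to cover every adjacent pair exactly once; the candidate pairs for a small board can be enumerated the same way.

N = 3
dr, dc = [0, 1], [1, 0]                  # right, down - as in the solution above
pairs = [((r, c), (r + dr[i], c + dc[i]))
         for r in range(N) for c in range(N) for i in range(2)
         if r + dr[i] < N and c + dc[i] < N]
print(len(pairs))                        # 12 adjacent pairs on a 3x3 board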
pdbbind_db['1imx']\n # get ligand\n ligand = pid.ligand\n ligand.removeh()\n assert len(ligand.atoms) == 60\n\n # get pocket\n pocket = pid.pocket\n pocket.removeh()\n assert len(pocket.atoms) == 234\n\n # protein do exist\n protein = pid.protein\n protein.removeh()\n assert len(protein.atoms) == 478\n\n\ndef test_dude():\n results = {\n 'fabp4': (1022, 36, 57, 2855),\n 'inha': (1857, 22, 71, 2318),\n }\n\n dude_db = dude(home=os.path.join(test_data_dir, 'data', 'dude'))\n\n for target in dude_db:\n if target.dude_id == 'xiap':\n # different file names\n assert target.protein is None\n assert target.ligand is None\n assert target.actives is None\n assert target.decoys is None\n continue\n\n prot_atoms, lig_atoms, num_act, num_dec = results[target.dude_id]\n\n prot = target.protein\n prot.removeh()\n assert len(prot.atoms) == prot_atoms\n lig = target.ligand\n lig.removeh()\n assert len(lig.atoms) == lig_atoms\n\n assert len(list(target.actives)) == num_act\n for a in target.actives:\n assert len(a.atoms) > 0\n assert len(list(target.decoys)) == num_dec\n for d in target.decoys:\n assert len(d.atoms) > 0\n\n with pytest.raises(KeyError):\n dude_db['xxxx']\n","repo_name":"oddt/oddt","sub_path":"tests/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"67"} +{"seq_id":"1306015450","text":"import torch\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image\r\nimport argparse\r\nimport os\r\nimport copy\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport matplotlib as plt\r\n\r\nLABELS_Severity = {35: 0,\r\n 43: 0,\r\n 47: 1,\r\n 53: 1,\r\n 61: 2,\r\n 65: 2,\r\n 71: 2,\r\n 85: 2}\r\n\r\n\r\nmean = (.1706)\r\nstd = (.2112)\r\nnormalize = transforms.Normalize(mean=mean, std=std)\r\n\r\n# resize to (1,224,224)\r\ntransform = transforms.Compose([\r\n transforms.Resize(size=(256,512)),\r\n transforms.ToTensor(),\r\n normalize,\r\n])\r\n \r\nclass OCTDataset(Dataset):\r\n def __init__(self, args, subset='train', transform=None,):\r\n if subset == 'train':\r\n self.annot = pd.read_csv(args.annot_train_prime)\r\n elif subset == 'test':\r\n self.annot = pd.read_csv(args.annot_test_prime)\r\n \r\n self.annot['Severity_Label'] = [LABELS_Severity[drss] for drss in copy.deepcopy(self.annot['DRSS'].values)] \r\n # print(self.annot)\r\n self.root = os.path.expanduser(args.data_root)\r\n self.transform = transform\r\n # self.subset = subset\r\n self.nb_classes=len(np.unique(list(LABELS_Severity.values())))\r\n self.path_list = self.annot['File_Path'].values\r\n self._labels = self.annot['Severity_Label'].values\r\n assert len(self.path_list) == len(self._labels)\r\n # idx_each_class = [[] for i in range(self.nb_classes)]\r\n\r\n def __getitem__(self, index):\r\n img, target = Image.open(self.root+self.path_list[index]).convert(\"L\"), self._labels[index]\r\n\r\n if self.transform is not None:\r\n img = self.transform(img) # reshape\r\n\r\n return img, target\r\n\r\n def __len__(self):\r\n return len(self._labels) \r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--annot_train_prime', type = str, default = 'df_prime_train.csv')\r\n parser.add_argument('--annot_test_prime', type = str, default = 'df_prime_test.csv')\r\n parser.add_argument('--data_root', type = str, default = 'S:/8803Proj')\r\n return parser.parse_args()\r\n\r\n# 
Define the autoencoder architecture\r\nclass Autoencoder(nn.Module):\r\n def __init__(self):\r\n super(Autoencoder, self).__init__()\r\n ## encoder layers ##\r\n # conv layer (depth from 1 --> 8), 3x3 kernels\r\n self.conv1 = nn.Conv2d(1, 8, 3, padding=1) \r\n # conv layer (depth from 8 --> 4), 3x3 kernels\r\n self.conv2 = nn.Conv2d(8, 4, 3, padding=1)\r\n # conv layer (depth from 4 --> 2), 3x3 kernels\r\n self.conv3 = nn.Conv2d(4, 2, 3, padding=1)\r\n # conv layer (depth from 2 --> 1), 3x3 kernels\r\n self.conv4 = nn.Conv2d(2, 1, 3, padding=1)\r\n # pooling layer to reduce x-y dims by two\r\n self.pool = nn.MaxPool2d(2, 2)\r\n self.pool2 = nn.MaxPool2d(4, 4)\r\n self.pool3 = nn.MaxPool2d(8, 8)\r\n self.pool4 = nn.MaxPool2d(8, 4)\r\n \r\n ## decoder layers ##\r\n ## a kernel of 2 and a stride of 2 will increase the spatial dims by 2\r\n self.t_conv2 = nn.ConvTranspose2d(1, 2, 8, stride=8)\r\n self.t_conv3 = nn.ConvTranspose2d(2, 4, 4, stride=4)\r\n self.t_conv4 = nn.ConvTranspose2d(4, 8, 4, stride=4)\r\n self.t_conv5 = nn.ConvTranspose2d(8, 1, 2, stride=2)\r\n\r\n def forward(self, x):\r\n ## encode ##\r\n x = F.relu(self.conv1(x))\r\n x = self.pool(x)\r\n # add second hidden layer\r\n # 128\r\n x = F.relu(self.conv2(x))\r\n x = self.pool2(x) # compressed representation\r\n # 32\r\n x = F.relu(self.conv3(x))\r\n x = self.pool2(x) # compressed representation\r\n # 8\r\n x = F.relu(self.conv4(x))\r\n x = self.pool3(x) # compressed representation\r\n \r\n lower_rep = x\r\n \r\n ## decode ##\r\n # add transpose conv layers, with relu activation function after each conv layer \r\n x = F.relu(self.t_conv2(x))\r\n x = F.relu(self.t_conv3(x))\r\n x = F.relu(self.t_conv4(x))\r\n # output layer (with sigmoid for scaling from 0 to 1)\r\n x = F.sigmoid(self.t_conv5(x))\r\n return x, lower_rep\r\n\r\n\r\nif __name__ == '__main__':\r\n labels = [35,43,47,53,61,65,71,85]\r\n args = parse_args()\r\n trainset = OCTDataset(args, 'train', transform=transform)\r\n testset = OCTDataset(args, 'test', transform=transform)\r\n \r\n # Feature extraction is required before using the SVM model \r\n # Extract HOG features for training images \r\n os.environ['TORCH_HOME'] = \"S:/8803Proj\"\r\n #print(torch.cuda.is_available()) # check the environment \r\n\r\n train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True)\r\n test_dataloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=True) \r\n\r\n # use autoencoder to convert 1x224x224 img to 2 dimensions\r\n ae = Autoencoder().to('cuda')\r\n #encoded_data,decoded_data = ae.forward(X_train)\r\n \r\n # train the autoencoder \r\n optimizer = torch.optim.Adam(ae.parameters(), lr=1e-3)\r\n loss_fn = nn.CrossEntropyLoss()\r\n num_epochs = 50\r\n\r\n \r\n for epoch in range(num_epochs):\r\n for i, (x,y) in enumerate(train_dataloader): \r\n ae.train()\r\n optimizer.zero_grad() \r\n input=x.cuda()#.permute(1,0,2,3)\r\n input = (input - input.min())/(input.max()-input.min())\r\n out, rep = ae(input)\r\n print(rep.detach().cpu().numpy()[0][0])\r\n loss_val=loss_fn(input, out).cuda()\r\n loss_val.backward()\r\n optimizer.step()\r\n #print(\"done\",i)\r\n\r\n print('Epoch: {} | Loss:{:0.6f}'.format(epoch, loss_val.item()))\r\n \r\n model_scripted = torch.jit.script(ae) # Export to TorchScript\r\n model_scripted.save('AE_for_visual_finals.pt') # 
Save\r\n","repo_name":"ziyul893/fml_final_project","sub_path":"AE.py","file_name":"AE.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19157969113","text":"import sys\nimport codecs\nimport re\n# filename: processfileV5.py\n#\n# text-mines mpaa data file and builds a set of Movie objects\n#\n# Fifth iteration of file, used after analysis of early iteration results\n\n\n# Movie class\nclass Movie:\n\n\tdef __init__(self, called, when):\n\t\tself.name = called\n\t\tself.year = when\n\t\tself.reasons = []\t# list of reasons\n\t\tself.lines = ''\n\n\tdef rate(self, rated):\n\t\tself.rating = rated\n\n\tdef addreason(self, reason):\n\t\tself.reasons.append(reason)\n\n\tdef addline(self, line):\n\t\tself.lines = self.lines + line\n\n# main procedure\ndef main():\n\t# for handling between loops\n\tactiveobject = None\n\t# stores the Movie objects\n\tmovies = []\n\t# all unique reason 'names' get stored with their frequencies\n\treasons = {}\n\t# line count\n\tlcount = 0\n\n\t# initial object creation\n\tprint('creating movie objects..')\n\n\t# open file\n\twith codecs.open('edited-reasons.list', \"r\", encoding='utf-8', errors='ignore') as fdata:\n\t\tfor line in fdata:\n\t\t\tlcount += 1\n\t\t\tif line[0] == '-':\n\t\t\t\t# ignore because just separator\n\t\t\t\tcontinue\n\n\t\t\tif line[0] == 'M':\n\t\t\t\tstring = line[3:].strip(' ')\n\t\t\t\ttokens = string.split('(')\n\t\t\t\tif len(tokens) > 3:\n\t\t\t\t\tprint('error af')\n\t\t\t\tname = tokens[0]\n\t\t\t\tyear_ = tokens[1]\n\t\t\t\tyear = year_[0:4]\n\t\t\t\t# make new Movie object\n\t\t\t\tnew = Movie(name,year)\n\t\t\t\tif activeobject is not None:\n\t\t\t\t\tmovies.append(activeobject)\n\t\t\t\tactiveobject = new\n\n\t\t\tif line[0] == 'R':\n\t\t\t\tif activeobject is None:\n\t\t\t\t\tprint('Error: No Active Object')\n\n\n\t\t\t\tactiveobject.addline(line[4:-1])\n\n\t\t\t\ttokens = line.split()\n\t\t\t\tif len(tokens) > 1 and tokens[1].upper() == 'RATED':\n\t\t\t\t\t# begins with Rated, must be followed with a rating\n\t\t\t\t\trating = tokens[2]\n\t\t\t\t\tactiveobject.rate(rating)\n\n\t\t\t\t\t'''\n\t\t\t\t\tfor tok in tokens[4:]:\n\t\t\t\t\t\ttoken = tok.lower()\n\t\t\t\t\t\t# store token in object and in reasons dictionary\n\t\t\t\t\t\tif token not in reasons:\n\t\t\t\t\t\t\treasons[token] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treasons[token] += 1\n\t\t\t\t\t\tactiveobject.addreason(token)\n\t\t\t\t\t'''\n\t\t\t\t'''\t\n\t\t\t\telse:\n\t\t\t\t\tfor tok in tokens[1:]:\n\t\t\t\t\t\ttoken = tok.lower()\n\t\t\t\t\t\t# store token\n\t\t\t\t\t\tif token not in reasons:\n\t\t\t\t\t\t\treasons[token] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treasons[token] += 1\n\t\t\t\t\t\tactiveobject.addreason(token)\n\t\t\t\t'''\n\n\tprint('done\\nreading acjectives and nouns...')\n\n\toutv = open('validation_1984', 'w')\n\toutt = open('training_1984', 'w')\n\tafile = open('adj', 'r')\n\tnfile = open('noun','r')\n\tadjectives = []\n\tnouns = []\n\tfor line in afile:\n\t\tadjectives.append(line.strip())\n\tfor line in nfile:\n\t\tnouns.append(line.strip())\n\n\tl = []\n\tprint('done\\nReasoning...')\n\n\tfor m_i in range(0,len(movies)):\n\t\tmovie = movies[m_i]\n\n\t\tfor i in range(0,len(movie.lines)):\n\t\t\tc = movie.lines[i]\n\t\t\tif c == '/':\n\t\t\t\tmovie.lines = movie.lines[:i+1]+' '+movie.lines[i+1:]\n\t\t\n\t\t# remove stuff in parens\n\t\tmovie.lines = a(movie.lines)\n\n\t\ttokens = movie.lines.split()\n\n\t\tactive = ''\t\t\t\t\t\t# this 
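Tracing one batch through the Autoencoder above shows how aggressively the encoder compresses: with the (256, 512) inputs produced by the transform, the four conv/pool stages shrink each slice to a 1x2 code before the transposed convolutions rebuild the full resolution. A minimal sketch, assuming the Autoencoder class above is in scope:

import torch

ae = Autoencoder()
x = torch.rand(1, 1, 256, 512)  # one normalized OCT slice, as the transform emits
out, rep = ae(x)
print(rep.shape)  # torch.Size([1, 1, 1, 2]) - the compressed representation
print(out.shape)  # torch.Size([1, 1, 256, 512]) - reconstruction at input size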
guy is where we hold a 'reason' in progress\n\t\t\n\t\tfor i in range(3,len(tokens)):\n\t\t\tslash = False\n\t\t\ttoken = tokens[i].strip(',.')\t\n\t\t\tif '/' in token:\n\t\t\t\tslash = True\n\t\t\t\ttoken = token[:len(token)-1]\n\t\t\tif '/' in token:\n\t\t\t\tprint('Error: Unhandled Slash')\n\n\t\t\tif token.lower() in adjectives:\n\t\t\t\tactive = active + token + ' '\n\n\t\t\telif token.lower() in nouns:\n\t\t\t\tif slash:\n\t\t\t\t\ttemp = active + token\n\t\t\t\t\tmovie.addreason(temp)\n\t\t\t\telse:\n\t\t\t\t\tactive = active + token\n\t\t\t\t\tmovie.addreason(active)\n\t\t\t\t\tactive = ''\t\t# clear\n\t\t\telif token.lower() != 'and':\n\t\t\t\tactive = ''\t\t# clear\n\n\n\n\tprint('done\\nCounting...')\n\n\tfor movie in movies:\n\t\tfor reason in movie.reasons:\n\t\t\tif reason in reasons:\n\t\t\t\treasons[reason] += 1\n\t\t\telse:\n\t\t\t\treasons[reason] = 1\n\n\t# dictionaries\n\tars = {}\n\tthr = {}\n\tpgs = {}\n\tgen = {}\n\tnc7 = {}\n\n\tfor movie in movies:\n\t\tr = movie.rating\n\t\tfor reason in movie.reasons:\n\t\t\tif reason not in gen:\n\t\t\t\tgen[reason] = 0\n\t\t\t\tpgs[reason] = 0\n\t\t\t\tthr[reason] = 0\n\t\t\t\tars[reason] = 0\n\t\t\t\tnc7[reason] = 0\n\t\t\t\t\n\t\t\tif r == 'G':\n\t\t\t\tgen[reason] += 1\n\t\t\telif r == 'PG':\n\t\t\t\tpgs[reason] += 1\n\n\t\t\telif r == 'PG-13':\n\t\t\t\tthr[reason] += 1\n\n\t\t\telif r == 'R':\n\t\t\t\tars[reason] += 1\n\n\t\t\telif r == 'NC-17':\n\t\t\t\tnc7[reason] += 1\n\t\t\t\t\n\n\n\tprint('done\\nWriting...')\n\n\t#for reason in reasons:\n\t\t#line = reason+', ' +str(reasons[reason])+', ' \n\t\t#+str(gen[reason])+', ' +str(pgs[reason])+', ' \n\t\t#+str(thr[reason])+', ' +str(ars[reason])+', ' \n\t\t#+str(nc7[reason])+'\\n'\n\n\tpfile = open('phrases', 'r')\n\n\tphrases = []\n\n\tfor line in pfile:\n\t\tphrases.append(line.strip())\n\n\n\tm_id = 1\n\tfor movie in movies:\n\t\ttry:\n\t\t\tnu = int(movie.year)\n\t\texcept:\n\t\t\tcontinue\n\t\t\t \n\t\tif nu < 1985:\n\t\t\tcontinue\n\t\tline = movie.rating\n\t\tfor phrase in phrases:\n\t\t\tline = line + ','\n\t\t\tif phrase in movie.reasons:\n\t\t\t\tline = line + '1'\n\t\t\telse:\n\t\t\t\tline = line + '0'\n\n\t\tif m_id%2 == 0:\t\n\t\t\toutv.write(line+'\\n')\n\t\telse:\n\t\t\toutt.write(line+'\\n')\n\t\n\t\tm_id += 1 \t# increment\n\n\n\t\t'''\n\t\tout.write(movie.name + '\\n')\n\t\tout.write(movie.year + ', ')\n\t\tout.write(movie.rating + '\\n')\n\t\tout.write(movie.lines + '\\n')\n\t\tout.write('\\n --- \\n\\n')\n\t\t'''\t\n\n\tprint('done\\n')\n\n\n\n\n# function from stackoverflow\n# removes text within ANY parentheses \ndef a(test_str):\n ret = ''\n skip1c = 0\n skip2c = 0\n for i in test_str:\n if i == '[':\n skip1c += 1\n elif i == '(':\n skip2c += 1\n elif i == ']' and skip1c > 0:\n skip1c -= 1\n elif i == ')'and skip2c > 0:\n skip2c -= 1\n elif skip1c == 0 and skip2c == 0:\n ret += i\n return ret\n\n\n# n00b main call\nmain()","repo_name":"aaronlarouco/imdbparser","sub_path":"processfileV5.py","file_name":"processfileV5.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72691267732","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated Nov 2020\n\n@author: hassi\n\"\"\"\n\nprint(\"Ch 7: Adding the noise profile of an IBM Q machine to your local simulator\")\nprint(\"--------------------------------------------------------------------------\")\n\n# Import Qiskit and load account\nfrom qiskit import Aer, IBMQ, QuantumCircuit, 
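The helper a() at the end of the record above deserves a quick behavioural check, since it silently drops bracketed spans, nested ones included, along with the brackets themselves.

print(a('one (two [three] four) five'))  # -> 'one  five' - nesting handled by the counters
print(a('Rated R for violence (strong) and [some] language'))
# -> 'Rated R for violence  and  language'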
execute\nfrom qiskit.providers.aer.noise import NoiseModel\nfrom qiskit.tools.visualization import plot_histogram\nfrom qiskit.tools.monitor import job_monitor\n\nimport numpy as np\nnp.set_printoptions(precision=3)\nfrom IPython.core.display import display\n\nprint(\"Getting provider...\")\nif not IBMQ.active_account():\n IBMQ.load_account()\nprovider = IBMQ.get_provider()\n\nglobal backend, noise_model\n\ndef select_backend():\n # Get all available and operational backends.\n available_backends = provider.backends(filters=lambda b: not b.configuration().simulator and b.configuration().n_qubits > 1 and b.status().operational)\n \n # Fish out criteria to compare\n print(\"{0:20} {1:<10} {2:<10}\".format(\"Name\",\"#Qubits\",\"Pending jobs\"))\n print(\"{0:20} {1:<10} {2:<10}\".format(\"----\",\"-------\",\"------------\"))\n \n for n in range(0, len(available_backends)):\n backend = provider.get_backend(str(available_backends[n]))\n print(\"{0:20} {1:<10}\".format(backend.name(),backend.configuration().n_qubits),backend.status().pending_jobs)\n\n select_backend=input(\"Select a backend ('exit' to end): \")\n \n if select_backend!=\"exit\":\n backend = provider.get_backend(select_backend)\n else:\n backend=select_backend\n return(backend)\n\ndef build_noise_model(backend):\n\n # Construct the noise model from backend\n noise_model = NoiseModel.from_backend(backend)\n print(noise_model)\n return(noise_model)\n \ndef execute_circuit(backend, noise_model):\n # Basis gates for the noise model\n basis_gates = noise_model.basis_gates\n \n # Coupling map\n coupling_map = backend.configuration().coupling_map\n \n print(\"Coupling map: \",coupling_map)\n \n # Construct the GHZ-state quantum circuit\n circ = QuantumCircuit(3, 3)\n circ.h(0)\n circ.cx(0, 1)\n circ.cx(0, 2)\n circ.measure([0,1,2], [0,1,2])\n print(circ)\n\n \n # Execute on QASM simulator and get counts\n counts = execute(circ, Aer.get_backend('qasm_simulator')).result().get_counts(circ)\n display(plot_histogram(counts, title='Ideal counts for 3-qubit GHZ state on local qasm_simulator'))\n \n # Execute noisy simulation on QASM simulator and get counts\n counts_noise = execute(circ, Aer.get_backend('qasm_simulator'), noise_model=noise_model, coupling_map=coupling_map, basis_gates=basis_gates).result().get_counts(circ)\n display(plot_histogram(counts_noise, title=\"Counts for 3-qubit GHZ state with noise model on local qasm simulator\"))\n\n # Execute noisy simulation on the ibmq_qasm_simulator and get counts\n counts_noise_ibmq = execute(circ, provider.get_backend('ibmq_qasm_simulator'), noise_model=noise_model, coupling_map=coupling_map, basis_gates=basis_gates).result().get_counts(circ)\n display(plot_histogram(counts_noise_ibmq, title=\"Counts for 3-qubit GHZ state with noise model on IBMQ qasm simulator\"))\n \n # Execute job on IBM Q backend and get counts\n job = execute(circ, backend)\n job_monitor(job)\n counts_ibmq=job.result().get_counts()\n \n title=\"Counts for 3-qubit GHZ state on IBMQ backend \" + backend.name()\n display(plot_histogram(counts_ibmq, title=title))\n\n # Display the results for all runs\n display(plot_histogram([counts, counts_noise, counts_noise_ibmq, counts_ibmq], bar_labels=True, legend=[\"Baseline\",\"Noise on simulator\", \"Noise on IBMQ simulator\", \"IBM Q backend\"], title=\"Comparison\"))\n\nwhile True:\n # Select backend\n back=select_backend()\n if back==\"exit\":\n break\n # Build noise model and then run the circuit\n noise=build_noise_model(back)\n execute_circuit(back, 
noise)\n","repo_name":"PacktPublishing/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience","sub_path":"Chapter07/ch7_r3_noise.py","file_name":"ch7_r3_noise.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"67"} +{"seq_id":"7044743164","text":"import pytest\n\nfrom ....product.utils.preparing_product import prepare_product\nfrom ....shop.utils.preparing_shop import prepare_shop\nfrom ....utils import assign_permissions\nfrom ....vouchers.utils import create_voucher, create_voucher_channel_listing\nfrom ...utils import (\n checkout_add_promo_code,\n checkout_complete,\n checkout_create,\n checkout_delivery_method_update,\n checkout_dummy_payment_create,\n)\n\n\ndef prepare_voucher_for_staff_only(\n e2e_staff_api_client,\n channel_id,\n voucher_code,\n voucher_discount_type,\n voucher_discount_value,\n):\n input = {\n \"code\": voucher_code,\n \"discountValueType\": voucher_discount_type,\n \"type\": \"ENTIRE_ORDER\",\n \"onlyForStaff\": True,\n }\n voucher_data = create_voucher(e2e_staff_api_client, input)\n\n voucher_id = voucher_data[\"id\"]\n channel_listing = [\n {\n \"channelId\": channel_id,\n \"discountValue\": voucher_discount_value,\n }\n ]\n create_voucher_channel_listing(\n e2e_staff_api_client,\n voucher_id,\n channel_listing,\n )\n\n return voucher_discount_value, voucher_code\n\n\n@pytest.mark.e2e\ndef test_staff_can_use_voucher_for_staff_only_in_checkout_core_0904(\n e2e_staff_api_client,\n e2e_no_permission_staff_api_client,\n permission_manage_products,\n permission_manage_channels,\n permission_manage_shipping,\n permission_manage_product_types_and_attributes,\n permission_manage_discounts,\n):\n # Before\n\n permissions = [\n permission_manage_products,\n permission_manage_channels,\n permission_manage_shipping,\n permission_manage_product_types_and_attributes,\n permission_manage_discounts,\n ]\n assign_permissions(e2e_staff_api_client, permissions)\n\n (\n warehouse_id,\n channel_id,\n channel_slug,\n shipping_method_id,\n ) = prepare_shop(e2e_staff_api_client)\n\n (\n _product_id,\n product_variant_id,\n product_variant_price,\n ) = prepare_product(\n e2e_staff_api_client,\n warehouse_id,\n channel_id,\n variant_price=\"9.99\",\n )\n\n voucher_discount_value, voucher_code = prepare_voucher_for_staff_only(\n e2e_staff_api_client,\n channel_id,\n \"VOUCHER001\",\n \"FIXED\",\n 1,\n )\n\n # Step 1 - Create checkout for product on sale\n lines = [\n {\n \"variantId\": product_variant_id,\n \"quantity\": 1,\n },\n ]\n checkout_data = checkout_create(\n e2e_no_permission_staff_api_client,\n lines,\n channel_slug,\n email=\"testEmail@example.com\",\n set_default_billing_address=True,\n set_default_shipping_address=True,\n )\n checkout_id = checkout_data[\"id\"]\n shipping_method_id = checkout_data[\"shippingMethods\"][0][\"id\"]\n checkout_lines = checkout_data[\"lines\"][0]\n assert checkout_lines[\"unitPrice\"][\"gross\"][\"amount\"] == float(\n product_variant_price\n )\n assert checkout_data[\"isShippingRequired\"] is True\n\n # Step 2 Add voucher code to checkout\n checkout_data = checkout_add_promo_code(\n e2e_no_permission_staff_api_client,\n checkout_id,\n voucher_code,\n )\n unit_price_with_voucher = float(product_variant_price) - voucher_discount_value\n assert (\n checkout_data[\"lines\"][0][\"unitPrice\"][\"gross\"][\"amount\"]\n == unit_price_with_voucher\n )\n\n # Step 4 - Set DeliveryMethod for checkout.\n checkout_data = checkout_delivery_method_update(\n 
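Before layering noise on top, it is worth asserting the ideal behaviour of the 3-qubit GHZ circuit that execute_circuit builds: with a noise-free simulator, only the all-zeros and all-ones bitstrings ever appear. A small check using the same Qiskit API as the script above:

from qiskit import Aer, QuantumCircuit, execute

circ = QuantumCircuit(3, 3)
circ.h(0)
circ.cx(0, 1)
circ.cx(0, 2)
circ.measure([0, 1, 2], [0, 1, 2])
counts = execute(circ, Aer.get_backend('qasm_simulator')).result().get_counts(circ)
assert set(counts) <= {'000', '111'}  # ideal GHZ: no other outcomes
print(counts)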
e2e_no_permission_staff_api_client,\n checkout_id,\n shipping_method_id,\n )\n assert checkout_data[\"deliveryMethod\"][\"id\"] == shipping_method_id\n shipping_price = checkout_data[\"deliveryMethod\"][\"price\"][\"amount\"]\n total_gross_amount = round(unit_price_with_voucher + shipping_price, 2)\n assert checkout_data[\"totalPrice\"][\"gross\"][\"amount\"] == total_gross_amount\n\n # Step 5 - Create payment for checkout.\n checkout_dummy_payment_create(\n e2e_no_permission_staff_api_client,\n checkout_id,\n total_gross_amount,\n )\n\n # Step 6 - Complete checkout.\n order_data = checkout_complete(\n e2e_no_permission_staff_api_client,\n checkout_id,\n )\n assert order_data[\"status\"] == \"UNFULFILLED\"\n assert order_data[\"discounts\"][0][\"type\"] == \"VOUCHER\"\n assert order_data[\"voucher\"][\"code\"] == voucher_code\n assert order_data[\"discounts\"][0][\"value\"] == voucher_discount_value\n order_total_gross_amount = order_data[\"total\"][\"gross\"][\"amount\"]\n assert order_total_gross_amount == total_gross_amount\n","repo_name":"saleor/saleor","sub_path":"saleor/tests/e2e/checkout/discounts/vouchers/test_staff_user_can_use_voucher_for_staff_only.py","file_name":"test_staff_user_can_use_voucher_for_staff_only.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"21787040607","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization! \nimport math\n\ndef regularnn(NHIDDEN=24, INPUTDIM=1, OUTPUTDIM=1, STDEV=0.5):\n x = tf.placeholder(dtype=tf.float32, shape=[None,INPUTDIM], name=\"x\")\n y = tf.placeholder(dtype=tf.float32, shape=[None,OUTPUTDIM], name=\"y\")\n W = tf.Variable(tf.random_normal([INPUTDIM,NHIDDEN], stddev=STDEV, dtype=tf.float32))\n b = tf.Variable(tf.random_normal([NHIDDEN], stddev=STDEV, dtype=tf.float32))\n W_out = tf.Variable(tf.random_normal([NHIDDEN,OUTPUTDIM], stddev=STDEV, dtype=tf.float32))\n b_out = tf.Variable(tf.random_normal([OUTPUTDIM], stddev=STDEV, dtype=tf.float32))\n hidden_layer = tf.nn.tanh(tf.matmul(x, W) + b)\n output = tf.matmul(hidden_layer,W_out) + b_out\n return x,y,output\n\ndef mdn(NHIDDEN=24, INPUTDIM=1, OUTPUTDIM=1, STDEV=0.5, KMIX=24):\n NOUT = KMIX * (2+OUTPUTDIM)\n x = tf.placeholder(dtype=tf.float32, shape=[None,INPUTDIM], name=\"x\")\n y = tf.placeholder(dtype=tf.float32, shape=[None,OUTPUTDIM], name=\"y\")\n Wh = tf.Variable(tf.random_normal([INPUTDIM,NHIDDEN], stddev=STDEV, dtype=tf.float32))\n bh = tf.Variable(tf.random_normal([NHIDDEN], stddev=STDEV, dtype=tf.float32))\n Wo = tf.Variable(tf.random_normal([NHIDDEN,NOUT], stddev=STDEV, dtype=tf.float32))\n bo = tf.Variable(tf.random_normal([NOUT], stddev=STDEV, dtype=tf.float32))\n hidden_layer = tf.nn.tanh(tf.matmul(x, Wh) + bh)\n output = tf.matmul(hidden_layer,Wo) + bo\n return x,y,output\n\ndef get_mixture_coef(output, KMIX=24, OUTPUTDIM=1):\n out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name=\"mixparam\")\n out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name=\"mixparam\")\n out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX*OUTPUTDIM], name=\"mixparam\")\n splits = tf.split(1, 2 + OUTPUTDIM, output)\n out_pi = splits[0]\n out_sigma = splits[1]\n out_mu = tf.pack(splits[2:], axis=2)\n out_mu = tf.transpose(out_mu, [1,0,2])\n # use softmax to normalize pi into prob distribution\n max_pi = tf.reduce_max(out_pi, 1, 
keep_dims=True)\n out_pi = tf.sub(out_pi, max_pi)\n out_pi = tf.exp(out_pi)\n normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))\n out_pi = tf.mul(normalize_pi, out_pi)\n # use exponential to make sure sigma is positive\n out_sigma = tf.exp(out_sigma)\n return out_pi, out_sigma, out_mu\n\ndef tf_normal(y, mu, sigma):\n oneDivSqrtTwoPI = 1 / math.sqrt(2*math.pi)\n result = tf.sub(y, mu)\n result = tf.transpose(result, [2,1,0])\n result = tf.mul(result,tf.inv(sigma + 1e-8))\n result = -tf.square(result)/2\n result = tf.mul(tf.exp(result),tf.inv(sigma + 1e-8))*oneDivSqrtTwoPI\n result = tf.reduce_prod(result, reduction_indices=[0])\n return result\n\ndef get_lossfunc(out_pi, out_sigma, out_mu, y):\n result = tf_normal(y, out_mu, out_sigma)\n kernel = result\n result = tf.mul(result, out_pi)\n result = tf.reduce_sum(result, 1, keep_dims=True)\n beforelog = result\n result = -tf.log(result + 1e-8)\n return tf.reduce_mean(result),kernel,beforelog\n\ndef generate_ensemble(out_pi, out_mu, out_sigma, x_test, M = 10, OUTPUTDIM=1):\n NTEST = x_test.size\n result = np.random.rand(NTEST, M, OUTPUTDIM) # initially random [0, 1]\n rn = np.random.randn(NTEST, M) # normal random matrix (0.0, 1.0)\n mu = 0\n std = 0\n idx = 0\n # transforms result into random ensembles\n for j in range(0, M):\n for i in range(0, NTEST):\n for d in range(0, OUTPUTDIM):\n idx = np.random.choice(24, 1, p=out_pi[i])\n mu = out_mu[idx,i,d]\n std = out_sigma[i, idx]\n result[i, j, d] = mu + rn[i, j]*std\n return result\n\n# 1d to 1d test case\ndef oned2oned():\n NSAMPLE = 250\n\n y_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\n r_data = np.float32(np.random.normal(size=(NSAMPLE,1)))\n x_data = np.float32(np.sin(0.75*y_data)*7.0+y_data*0.5+r_data*1.0)\n\n x,y,output = mdn()\n\n out_pi, out_sigma, out_mu = get_mixture_coef(output)\n lossfunc,k,bl = get_lossfunc(out_pi, out_sigma, out_mu, y)\n train_op = tf.train.AdamOptimizer().minimize(lossfunc)\n\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n\n plt.figure(figsize=(8, 8))\n plt.plot(x_data,y_data,'ro', alpha=0.3)\n plt.show()\n\n NEPOCH = 10000\n loss = np.zeros(NEPOCH) # store the training progress here.\n for i in range(NEPOCH):\n sess.run(train_op,feed_dict={x: x_data, y: y_data})\n loss[i] = sess.run(lossfunc, feed_dict={x: x_data, y: y_data})\n print(loss[i])\n\n plt.figure(figsize=(8, 8))\n plt.plot(np.arange(100, NEPOCH,1), loss[100:], 'r-')\n plt.show()\n\n x_test = np.float32(np.arange(-15,15,0.1))\n NTEST = x_test.size\n x_test = x_test.reshape(NTEST,1) # needs to be a matrix, not a vector\n\n out_pi_test, out_sigma_test, out_mu_test = sess.run(get_mixture_coef(output), feed_dict={x: x_test})\n\n y_test = generate_ensemble(out_pi_test, out_mu_test, out_sigma_test, x_test, M=1)\n\n plt.figure(figsize=(8, 8))\n plt.plot(x_data,y_data,'ro', x_test,y_test[:,:,0],'bo',alpha=0.3)\n plt.show()\n\n# 1d to 2d test case\ndef oned2twod():\n NSAMPLE = 250\n fig = plt.figure()\n ax = Axes3D(fig) \n z_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T\n r_data = np.float32(np.random.normal(size=(NSAMPLE,1)))\n x1_data = np.float32(np.sin(0.75*z_data)*7.0+z_data*0.5+r_data*1.0)\n x2_data = np.float32(np.sin(0.5*z_data)*7.0+z_data*0.5+r_data*1.0)\n\n ax.scatter(x1_data, x2_data, z_data)\n ax.legend()\n plt.show()\n\n x_data = np.dstack((x1_data,x2_data))\n\n x,y,output = mdn(INPUTDIM=1, OUTPUTDIM=2)\n out_pi, out_sigma, out_mu = get_mixture_coef(output, OUTPUTDIM=2)\n lossfunc,kernel,beforelog = 
get_lossfunc(out_pi, out_sigma, out_mu, y)\n\n train_op = tf.train.AdamOptimizer().minimize(lossfunc)\n\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n\n NEPOCH = 10000\n loss = np.zeros(NEPOCH) # store the training progress here.\n for i in range(NEPOCH):\n sess.run(train_op,feed_dict={x: z_data, y: x_data[:,0,:]})\n loss[i] = sess.run(lossfunc, feed_dict={x: z_data, y: x_data[:,0,:]})\n print(str(i) + \":\" + str(loss[i]))\n\n #loss[i],k,bl = sess.run([lossfunc,kernel,beforelog], feed_dict={x: z_data, y: x_data[:,0,:]})\n #print(str(i) + \":\" + str(loss[i]) + \",\" + str(k) + \"\" + str(bl))\n\n plt.figure(figsize=(8, 8))\n plt.plot(np.arange(100, NEPOCH,1), loss[100:], 'r-')\n plt.show()\n\n x_test = np.float32(np.arange(-10.5,10.5,0.1))\n NTEST = x_test.size\n x_test = x_test.reshape(NTEST,1) # needs to be a matrix, not a vector\n\n out_pi_test, out_sigma_test, out_mu_test = sess.run(get_mixture_coef(output, OUTPUTDIM=2), feed_dict={x: x_test})\n\n y_test = generate_ensemble(out_pi_test, out_mu_test, out_sigma_test, x_test, M=1,OUTPUTDIM=2)\n\n fig = plt.figure()\n ax = Axes3D(fig) \n ax.scatter(y_test[:,0,0], y_test[:,0,1], x_test, c='r')\n ax.scatter(x1_data, x2_data, z_data, c='b')\n ax.legend()\n plt.show()\n\noned2oned()\noned2twod()\n","repo_name":"yanji84/keras-mdn","sub_path":"tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"67"} +{"seq_id":"72094840535","text":"fin = open(\"input.txt\")\nfout = open(\"output.txt\",\"w\")\n\nn, i, j = map(int, fin.readline().split())\n\nr1 = abs(i - j) - 1\nr2 = n - max(i, j) + min(i, j) - 1\n\nfout.write(str(min(r1, r2)))\n\nfin.close()\nfout.close()","repo_name":"kokosda/sport-programming","sub_path":"src/acmp.ru/task_263.py","file_name":"task_263.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2210054512","text":"from torch import nn\n\nfrom manet.mac import MLP\n\n\nclass MNModel1(nn.Module):\n def __init__(self):\n super().__init__()\n self.recognizer = nn.Sequential(\n nn.Conv2d(1, 10, kernel_size=5, padding=2),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Conv2d(10, 20, kernel_size=5, padding=2),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Conv2d(20, 40, kernel_size=5, padding=2),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Conv2d(40, 80, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Flatten(),\n MLP(80, [40, 20, 10]),\n nn.LogSoftmax(dim=1)\n )\n\n def forward(self, x):\n return self.recognizer(x)\n\n\ndef _model_():\n return MNModel1()\n","repo_name":"mountain/manet","sub_path":"demo/mnist/mnist1.py","file_name":"mnist1.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5526041864","text":"#!/usr/bin/env python3\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport pyfiglet\nimport logging\nimport datetime\nimport base64\n\ndef main():\n get_domain_screenshots()\n\ndef get_domain_screenshots():\n logging.basicConfig(level=logging.INFO, filename='logs/app-gds.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n logging.info(pyfiglet.figlet_format(\"metadevil\"))\n console_line = \"*\" * 80\n logging.info(console_line)\n logging.info(\"Starting: \" + str(datetime.datetime.now()))\n window_size = \"window-size=1200,1200\"\n domains = 
[]\n output_path = \"../metadevil-screenshots/\"\n\n try:\n with open(\"../apex_file.txt\", \"r\") as file:\n domains = file.readlines()\n\n for domain in domains:\n domain = domain.strip()\n get_image(domain, window_size, output_path)\n except Exception as e:\n logging.critical(e)\n\n logging.info(console_line)\n logging.info(\"Finished: \" + str(datetime.datetime.now()))\n\ndef get_image(domain, window_size, output_path):\n image_name = \"\"\n image_id = \"\"\n\n try:\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument(window_size)\n with webdriver.Chrome(options=options) as driver:\n # the with-statement already creates the driver; re-instantiating it here leaked a second headless Chrome per domain\n driver.set_page_load_timeout(15)\n url = \"http://\" + domain\n logging.info(\"Getting url: \" + url)\n\n driver.get(url)\n source = driver.page_source\n source_len = len(source)\n logging.info(\"Source length: \" + str(source_len))\n cookies = driver.get_cookies()\n logging.info(\"Cookies: \" + str(len(cookies)))\n\n image_id = to_base64(domain)\n image_name = image_id + \".png\"\n logging.info(\"Saving image: \" + image_name)\n driver.save_screenshot(output_path + image_name)\n\n data = []\n data.append(domain)\n data.append(str(source_len))\n data.append(str(len(cookies)))\n data_out = ','.join(data)\n logging.info(data)\n data_name = image_id + \".csv\"\n\n with open(output_path + data_name, 'w') as data_file:\n data_file.writelines(data_out)\n\n except Exception as e:\n logging.critical(\"Failed: \" + domain)\n logging.critical(e)\n\n return image_name\n\ndef to_base64(item):\n domain_bytes = item.encode('ascii')\n base64_bytes = base64.b64encode(domain_bytes)\n return str(base64_bytes.decode('ascii'))\n\nif __name__ == \"__main__\":\n main()","repo_name":"mindrash/metadevil","sub_path":"get-domain-screenshots.py","file_name":"get-domain-screenshots.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"11165855512","text":"from datetime import datetime\nimport requests\nimport re\nimport time\nimport telebot\nimport flask\nimport glob\nimport os\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport logging\nAPI_TOKEN = '5528813146:AAHtgSpySLIp-8Av6LNGQpnVx4iLvs3-Yu4'\nimport instaloader\n\nbot = telebot.TeleBot(API_TOKEN)\n\n\nWEBHOOK_HOST = 'instauzbek.herokuapp.com'\nWEBHOOK_PORT = 8443 # 443, 80, 88 or 8443 (port need to be 'open')\nWEBHOOK_LISTEN = '0.0.0.0' # In some VPS you may need to put here the IP addr\n\n# WEBHOOK_SSL_CERT = './webhook_cert.pem' # Path to the ssl certificate\n# WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Path to the ssl private key\n\n# Quick'n'dirty SSL certificate generation:\n#\n# openssl genrsa -out webhook_pkey.pem 2048\n# openssl req -new -x509 -days 3650 -key webhook_pkey.pem -out webhook_cert.pem\n#\n# When asked for \"Common Name (e.g. 
server FQDN or YOUR name)\" you should reply\n# with the same value in you put in WEBHOOK_HOST\n\nWEBHOOK_URL_BASE = \"https://%s:%s\" % (WEBHOOK_HOST, WEBHOOK_PORT)\nWEBHOOK_URL_PATH = \"/%s/\" % (API_TOKEN)\n\nlogger = telebot.logger\ntelebot.logger.setLevel(logging.INFO)\n\nbot = telebot.TeleBot(API_TOKEN)\n\napp = flask.Flask(__name__)\n\nL = instaloader.Instaloader()\n\n# Empty webserver index, return nothing, just http 200\n@app.get('/')\ndef index():\n bot.remove_webhook()\n\n time.sleep(0.1)\n\n # Set webhook\n bot.set_webhook(url=WEBHOOK_HOST + WEBHOOK_URL_PATH,\n )\n\n\n\n USER = \"fastlogzapp\"\n\n\n PASSWORD = \"asaka.uz1\"\n L.login(USER , PASSWORD)\n print('Successfully Logged in to profile:' , USER ,'!')\n return ''\n\n\n# Process webhook calls\n@app.route(WEBHOOK_URL_PATH, methods=['POST'])\ndef webhook():\n if flask.request.headers.get('content-type') == 'application/json':\n json_string = flask.request.get_data().decode('utf-8')\n update = telebot.types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n else:\n flask.abort(403)\n\n\n\ngroup_id = '-1001600708495'\n\n\n\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n f_read = open(\"podpis.txt\",\"r\")\n f1 = f_read.readlines()\n if str(message.chat.id)+'\\n' in f1:\n pass\n else:\n f = open(\"podpis.txt\",\"a+\")\n f.write(f\"{message.chat.id}\\n\")\n f.close()\n bot.send_message(group_id,f\"{message.chat.id}, @{message.chat.username if message.chat.username else 'no username'}\")\n bot.send_message(message.chat.id,\"Please, send a link! I'm ready to download it!😎\")\n@bot.message_handler()\ndef linkto(message):\n if (message.text).startswith('https://www.instagram.com'):\n try: \n urldetail = (message.text).split('/')\n if 'p' in urldetail:\n url_index = urldetail.index('p')\n elif 'tv' in urldetail:\n url_index = urldetail.index('tv')\n elif 'reel' in urldetail:\n url_index = urldetail.index('reel')\n url = urldetail[int(url_index)+1]\n bbbb = bot.send_message(message.chat.id,'Please, wait a few seconds! Your post is downloading!😊')\n post = instaloader.Post.from_shortcode(L.context, url)\n L.download_post(post,target=\"posts\")\n\n\n files_path = os.path.join('posts', '*.jpg')\n files_path2 = os.path.join('posts', '*.mp4')\n files_path3 = os.path.join('posts', '*.txt')\n if files_path and files_path2 == None:\n files = sorted(\n glob.iglob(files_path), key=os.path.getctime, reverse=True) \n files3 = sorted(\n glob.iglob(files_path3), key=os.path.getctime, reverse=True) \n print(files[0])\n f = open(f\"{files[0]}\", \"rb\")\n txt=\"\"\n\n bot.send_photo(message.chat.id,f,caption=txt+\"\\n\\nDownloaded by @instasave_new_bot\")\n if files_path2:\n files = sorted(\n glob.iglob(files_path2), key=os.path.getctime, reverse=True) \n files3 = sorted(\n glob.iglob(files_path3), key=os.path.getctime, reverse=True) \n print(files[0])\n f = open(f\"{files[0]}\", \"rb\")\n txt=\"\"\n\n bot.send_video(message.chat.id,f,caption=txt+\"\\n\\nDownloaded by @instasave_new_bot\")\n \n time.sleep(3)\n files = glob.glob('posts/*')\n for f in files:\n os.remove(f)\n bot.delete_message(message.chat.id,bbbb.id)\n except:\n bot.send_message(message.chat.id,'Too many requests! 
Please try again in a few seconds!😊')\n\n else:\n bot.send_message(message.chat.id,'Send only Instagram links!')\n \n# Remove the webhook first: setting it sometimes fails if a previous webhook exists\n\n\n# Start flask server\napp.run(host=WEBHOOK_LISTEN,\n port=WEBHOOK_PORT,\n debug=True)","repo_name":"islombek751/inst","sub_path":"instagram_downloader.py","file_name":"instagram_downloader.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"31829647274","text":"import time\nimport json\nfrom config import Config\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom helper_func.progress_bar import progress_bar\nfrom helper_func.tools import execute, clean_up\n\nDATA = {}\n\nasync def download_file(client, message):\n media = message.reply_to_message\n if media.empty:\n await message.reply_text('Why did you delete that?? 😕', True)\n return\n\n msg = await client.send_message(\n chat_id=message.chat.id,\n text=\"`Downloading`\",\n reply_to_message_id=media.id\n )\n filetype = media.document or media.video\n\n c_time = time.time()\n\n download_location = await client.download_media(\n message=media,\n file_name=Config.DOWNLOAD_DIR+'/', \n progress=progress_bar,\n progress_args=(\n \"`Downloading`\",\n msg,\n c_time\n )\n )\n\n await msg.edit_text(\"`Analyzing your file`\")\n\n output = await execute(f\"ffprobe -hide_banner -show_streams -print_format json '{download_location}'\")\n \n if not output:\n await clean_up(download_location)\n await msg.edit_text(\"`Error while analyzing`\")\n return\n\n details = json.loads(output[0])\n buttons = []\n DATA[f\"{message.chat.id}-{msg.id}\"] = {}\n for stream in details[\"streams\"]:\n mapping = stream[\"index\"]\n stream_name = stream[\"codec_name\"]\n stream_type = stream[\"codec_type\"]\n if stream_type in (\"audio\", \"subtitle\"):\n pass\n else:\n continue\n try: \n lang = stream[\"tags\"][\"language\"]\n except:\n lang = mapping\n \n DATA[f\"{message.chat.id}-{msg.id}\"][int(mapping)] = {\n \"map\" : mapping,\n \"name\" : stream_name,\n \"type\" : stream_type,\n \"lang\" : lang,\n \"location\" : download_location\n }\n buttons.append([\n InlineKeyboardButton(\n f\"{stream_type.upper()} - {str(lang).upper()}\", f\"{stream_type}_{mapping}_{message.chat.id}-{msg.id}\"\n )\n ])\n\n buttons.append([\n InlineKeyboardButton(\"CANCEL\",f\"cancel_{mapping}_{message.chat.id}-{msg.id}\")\n ]) \n\n await msg.edit_text(\n \"`Pick the stream you want, buddy..`\",\n reply_markup=InlineKeyboardMarkup(buttons)\n )\n\n\n","repo_name":"ali-mmagneto/aadder3","sub_path":"helper_func/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"3532883533","text":"from bs4 import BeautifulSoup\n#import requests\nimport pathlib\nimport os\n\n\"\"\"\nfn = os.path.join(os.path.dirname(__file__), 'html_files/Galaxy Note 20+ ‘Final’ Design Reveals Stunning New Display.htm')\nsource = requests.get('').text\nsoup = BeautifulSoup(source, 'lxml')\nwith open(fn) as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\n\n\narticle = soup.find('div', class_='article-body fs-article fs-responsive-text current-article')\n\narticle_text = \"\" \nfor summary in article.find_all('p'):\n article_text+=summary.text\n\nprint(article_text)\n\"\"\"\n\n\ndef writeText(file_name, article_text):\n with open(file_name, \"w\") as text_file: \n 
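# persist the extracted article text to a file so later runs can inspect it\n 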
text_file.write(article_text)\n\nimport urllib.request\nurl = \"https://news.google.com/?hl=en\"\nuser_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0'\n\nheaders = {'User-Agent': user_agent}\n\n\nresults = []\ndeamplify = False\n\nreq = urllib.request.Request(url, headers=headers)\nresponse = urllib.request.urlopen(req)\npage = response.read()\ncontent = BeautifulSoup(page, \"html.parser\")\ncontent = content.find(\"h2\").parent.parent.parent\nresult = content.findChildren(\"div\", recursive=False)\nsection = None\nfor item in result:\n try:\n try:\n section = item.find(\"h2\").find(\"a\").text\n except Exception as sec_e:\n pass\n title = item.find(\"h3\").text\n if deamplify:\n try:\n link = item.find(\"article\").get(\"jslog\").split('2:')[1].split(';')[0]\n except Exception as deamp_e:\n print(deamp_e)\n link = 'news.google.com/' + item.find(\"h3\").find(\"a\").get(\"href\")\n else:\n link = item.find(\"h3\").find(\"a\").get(\"href\")\n try:\n datetime = item.find(\"time\").get(\"datetime\")\n except:\n datetime = None\n try:\n time = item.find(\"time\").text\n except:\n time = None\n try:\n site = item.find(\"time\").parent.find(\"a\").text\n except:\n site = None\n try:\n img = item.find(\"img\").get(\"src\")\n except:\n img = None\n desc = None\n if link.startswith('https://www.youtube.com/watch?v='):\n desc = 'video'\n\n results.append(\n {'section': section,\n 'title': title,\n 'datetime': datetime,\n 'time': time,\n 'site': site,\n 'desc': desc,\n 'link': link,\n 'media': None,\n 'img': img})\n a = True\n for key in results[0].keys():\n print(key, \" \", results[0][key]) \n if a:\n break \n except Exception as big_e:\n pass\nresponse.close()\n\n","repo_name":"jinseo99/Newscraper","sub_path":"garbage/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37460242361","text":"# beats 90% in time and 40% in space\n\nclass Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n n = len(nums)\n nums_set = set(nums)\n missing = []\n for i in range(1, n + 1):\n if i not in nums_set:\n missing.append(i)\n return missing ","repo_name":"DavidSober/Data-Structures-and-Algorithms-","sub_path":"LeetcodeProblems/448.py","file_name":"448.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"946300718","text":"\"\"\"\nProduct sorting class\n\"\"\"\n\nclass Filter:\n \"\"\"\n holds the page elements\n with products after sorting by product name\n \"\"\"\n product_name_field = \"input-name1\"\n meta_title_field = \"input-meta-title1\"\n model_field = \"input-model\"\n","repo_name":"verizm/otus_pytest","sub_path":"opencart_locators/Filter.py","file_name":"Filter.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"41215476980","text":"import pytest\nimport projectq\n\nfrom projqube.projectq.cengines import CliffordSimulator, MultiqubitMeasurementCliffordEngine\n\n\nclass A:\n\tdef __init__(self):\n\t\tself.commands = []\n\t\n\tdef receive(self, cmd):\n\t\tself.commands += cmd\n\n\n\ndef test_qubit_initialization():\n\teng = projectq.MainEngine()\n\tinit = [(eng.allocate_qubit(), \"Z\")]\n\tsim = CliffordSimulator()\n\n\tsim.add_stabilizer(init)\n\tassert(len(sim._stabilizers) == 
1)\n\tassert(len(sim._stabilizers[0][0])==1)\n\n\t# does the commutation check work?\n\tinit2 = [(init[0][0], \"X\")]\n\twith pytest.raises(ValueError):\n\t\tsim.add_stabilizer(init2)\n\tassert(len(sim._stabilizers) == 1)\n\tassert(len(sim._stabilizers[0][0])==1)\n\t\n\tinit += [(eng.allocate_qubit(), \"X\")]\n\tprint(init)\n\tsim.add_stabilizer(init)\n\tprint(sim._stabilizers)\n\tassert(len(sim._stabilizers) == 2)\n\tassert(len(sim._stabilizers[0][0])==1)\n\tassert(len(sim._stabilizers[1][0])==2)\n\n\ndef test_operation():\n\teng = projectq.MainEngine()\n\tqubit1 = eng.allocate_qubit()\n\tqubit2 = eng.allocate_qubit()\n\tinit1 = [(qubit1, \"Z\")]\n\tinit2 = [(qubit2, \"Z\")]\n\n\tsim = CliffordSimulator()\n\tsim.add_stabilizer(init1)\n\tsim.add_stabilizer(init2)\n\n\tcmd1 = projectq.ops.H.generate_command(qubit1)\n\n\tsim.apply_operation(cmd1)\n\n\tprint(sim._stabilizers)\n\tassert(len(sim._stabilizers) == 2)\n\tassert(len(sim._stabilizers[0][0])==1)\n\n\tassert(sim._stabilizers[0][0][qubit1[0].id]==\"X\")\n\tassert(sim._stabilizers[1][0][qubit2[0].id]==\"Z\")\n\n\ndef test_MultiqubitMeasurementCliffordEngine():\n\tdebugger = A()\n\n\tengine_list = [MultiqubitMeasurementCliffordEngine()]\n\teng = projectq.MainEngine(engine_list=engine_list)\n\n\tengine_list[0].next_engine = debugger\n\n\tqubit1 = eng.allocate_qubit()\n\tqubit2 = eng.allocate_qubit()\n\tprojectq.ops.H | qubit1\n\tprojectq.ops.CNOT | (qubit1[0], qubit2[0])\n\tprojectq.ops.Measure | qubit1\n\tprojectq.ops.Measure | qubit2\n\teng.flush()\n\n\tprint(debugger.commands[2].gate._bases)\n\tprint(debugger.commands[3].gate._bases)\n\tassert(False)\n\n\ndef test_CNOT():\n\tpass","repo_name":"quantumresource/projqube","sub_path":"tests/test_CliffordSimulator.py","file_name":"test_CliffordSimulator.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"19587946779","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\n\n\nclass Ninja:\n DB = 'dojos_and_ninjas_schema'\n def __init__(self,data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.age = data['age']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.dojo_id = data['dojo_id']\n\n @classmethod\n def get_all(cls):\n query = '''\n SELECT *\n FROM ninjas;\n '''\n\n results = connectToMySQL(cls.DB).query_db(query)\n\n all_ninjas=[]\n\n for ninja in results:\n all_ninjas.append( cls(ninja) )\n\n return all_ninjas\n \n @classmethod\n def add_ninja(cls,data):\n query=\"\"\"\n INSERT \n INTO ninjas (first_name, last_name, age, dojo_id)\n VALUES( %(first_name)s , %(last_name)s , %(age)s , %(dojo_id)s );\n \"\"\"\n\n results = connectToMySQL(cls.DB).query_db(query,data)\n\n return results\n \n @classmethod\n def update_ninja(cls,data):\n query=\"\"\"\n UPDATE ninjas\n SET first_name = %(first_name)s , last_name = %(last_name)s , age = %(age)s , dojo_id = %(dojo_id)s\n WHERE ninjas.id = %(ninja_id)s ;\n \"\"\"\n\n results = connectToMySQL(cls.DB).query_db(query, data)\n\n return results","repo_name":"trewoliver/Dojos_and_Ninjas","sub_path":"flask_app/models/ninja_model.py","file_name":"ninja_model.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71105549973","text":"def solution(phone_book):\n answer = True\n myL = sorted(phone_book)\n sz = len(myL)\n for i in range(0, sz-1):\n if len(myL[i]) < len(myL[i+1]):\n 
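# after sorting, if any number is a prefix of another then some adjacent pair witnesses it\n # (e.g. ['119', '97674223', '1195524421'] sorts so that '119' lands right before '1195524421'),\n # hence comparing each entry only with its immediate successor is enough\n 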
if myL[i] == myL[i+1][:len(myL[i])]:\n answer = False\n break\n return answer\n\n\n","repo_name":"KaJaeHyeob/Coding_Test","sub_path":"py/Programmers/전화번호 목록/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"2153069626","text":"\r\n\r\n# Function that generates the symbols for the constants\r\nfrom apendix import *\r\ndef constant_gen(number_of_vibrational_degrees=4,max_indignation_step=4,TYPE_ANGARMONIC_CONST='A',title=''):\r\n CONST_A_LIST=\"\"\r\n CONST_n_LIST=\"\"\r\n CONST_W_LIST=\"\"\r\n CONST_D_LIST=\"\"\r\n CONST_KEY_KEY=\"\"\r\n f=open(f'const.py','w')\r\n f.write('import sympy as sy\\n')\r\n f.write('\\n')\r\n f.write(f'number_of_vibrational_degrees={number_of_vibrational_degrees}\\n')\r\n f.write(f'max_indignation_step={max_indignation_step}\\n')\r\n f.write(f\"TYPE_ANGARMONIC_CONST='{TYPE_ANGARMONIC_CONST}'\\n\")\r\n f.write(f\"title='{title}'\\n\")\r\n for i in range(1,number_of_vibrational_degrees+1):# changed from (for i in VECTOR_INDEX[:number_of_vibrational_degrees]:) because a 0 was prepended to VECTOR_INDEX\r\n i=VECTOR_INDEX[i]\r\n a='n_%s'%(i)\r\n A='%s=sy.symbols(''\"%s\"'')'%(a,a)\r\n CONST_n_LIST+=\"%s:0,\"%(a)\r\n f.write(A+'\\n')\r\n a='omega_%s'%(i)\r\n B='%s=sy.symbols(''\"%s\"'')'%(a,a)\r\n CONST_W_LIST+=\"%s:0,\"%(a)\r\n f.write(B+'\\n')\r\n f.write('\\n')\r\n f.write('\\n')\r\n f.write('###########################################\\n')\r\n f.write('\\n')\r\n C=1\r\n while C<=max_indignation_step:\r\n for i in INDEX(C,number_of_vibrational_degrees):\r\n a='A_%s'%(i)\r\n A='%s=sy.symbols(''\"%s\"'')'%(a,a)\r\n CONST_A_LIST+=\"%s:0,\"%(a)\r\n f.write(A+'\\n')\r\n f.write('\\n')\r\n f.write('###########################################\\n')\r\n f.write('\\n')\r\n C+=1\r\n C=-1\r\n f.write(\"D_0=sy.symbols('D_0')\\n\")\r\n CONST_D_LIST+=\"D_0:0,\"\r\n while C<max_indignation_step-1:\r\n for i in INDEX(C,number_of_vibrational_degrees):\r\n a='D_%s'%(i)\r\n A='%s=sy.symbols(''\"%s\"'')'%(a,a)\r\n CONST_D_LIST+=\"%s:0,\"%(a)\r\n f.write(A+'\\n')\r\n \r\n f.write('\\n')\r\n C+=1\r\n CONST_n_LIST='const_n_dikt={%s}'%(CONST_n_LIST[:-1])\r\n CONST_W_LIST='const_omega_dikt={%s}'%(CONST_W_LIST[:-1])\r\n CONST_A_LIST='const_angarmonik_dikt={%s}'%(CONST_A_LIST[:-1])\r\n CONST_D_LIST='const_dipol_dikt={%s}'%(CONST_D_LIST[:-1])\r\n\r\n f.write(CONST_n_LIST+'\\n')\r\n f.write(CONST_W_LIST+'\\n')\r\n f.write(CONST_A_LIST+'\\n')\r\n f.write(CONST_D_LIST+'\\n')\r\n f.write(\"ZAMENA={**const_n_dikt,**const_angarmonik_dikt,**const_dipol_dikt}\\n\")\r\n f.write(\"DATA_TABEL=[]\\n\")\r\n \r\n f.close()\r\n return #from const import *\r\n","repo_name":"Mixailyu/Spectrum-builder-of-non-linear-molecules-in-the-gas-phase","sub_path":"constant_gen.py","file_name":"constant_gen.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"20678771511","text":"# Set representation\n\n# The first line contains n and m.\n# Each of the next m lines contains one operation.\n# A union is given as 0 a b / a query asking whether a and b are in the same set is given as 1 a b.\n#\n# For every input starting with 1, print YES/NO, one result per line.\n\n# Solution using Union-Find\nimport sys\ninput=sys.stdin.readline\n\ndef find(x):\n if parent[x]==x:\n return x\n parent[x]=find(parent[x])\n return parent[x]\n\ndef union(x,y):\n x=find(x)\n y=find(y)\n\n if x!=y:\n parent[y]=x\n\ndef find_parent(x):\n if parent[x]==x:\n return x\n return find_parent(parent[x])\n\nn,m=map(int,input().split())\nparent={}\n\nfor i in range(n+1):\n 
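# every element starts as the root of its own singleton set\n 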
parent[i]=i\n\nfor _ in range(m):\n cmd,num_1,num_2=map(int,input().split())\n\n if not cmd:\n union(num_1,num_2)\n\n if cmd:\n if find_parent(num_1)==find_parent(num_2):\n print('YES')\n else:\n print('NO')\n\n\n# Memory limit exceeded (first attempt)\n# import sys\n#\n# n,m=map(int,sys.stdin.readline().split())\n#\n# a=[]\n# for i in range(n+1):\n# a.append(set([i]))\n#\n# lis=[list(map(int,sys.stdin.readline().split())) for _ in range(m)]\n#\n# for i in lis:\n# if i[0]==0:\n# a[i[1]]=a[i[1]]|a[i[2]]\n# a[i[2]]=a[i[1]]|a[i[2]]\n# else:\n# if i[1]==i[2]:\n# print(\"YES\")\n# else:\n# print(\"NO\")\n\n\n","repo_name":"Minoolian/Coding_Test","sub_path":"Baekjoon/단계별 코딩테스트/28. Union_Find/1717 Set_representation.py","file_name":"1717 Set_representation.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"31473224960","text":"import typing\nfrom copy import copy\n\nfrom .form_parser import FieldList\n\n\nclass Filter:\n def __init__(self):\n self.__operations: typing.List[typing.Tuple[str, Filter]] = []\n\n def __and__(self, other):\n self.__operations.append(('and', other))\n return self\n\n def __or__(self, other):\n self.__operations.append(('or', other))\n return copy(self)\n\n def filter(self, fields: FieldList) -> FieldList:\n filtered_fields = self._filter(fields)\n for operation, other in self.__operations:\n if operation == 'and':\n filtered_fields = other.filter(filtered_fields)\n if operation == 'or':\n filtered_fields.extend(other.filter(fields))\n\n return filtered_fields\n\n def _filter(self, fields: FieldList) -> FieldList:\n raise NotImplementedError\n\n\nclass Pages(Filter):\n \"\"\"\n Restricts the fields to the given pages.\n\n Note that page numbers start with 0, so 0 represents the first page, 1 the second page etc.\n\n :param pages: List of pages to take into account (starts with 0)\n \"\"\"\n def __init__(self, pages: typing.List[int]):\n super(Pages, self).__init__()\n self.__pages = pages\n\n def _filter(self, fields: FieldList) -> FieldList:\n filtered_fields = []\n for field in fields:\n if field.page in self.__pages:\n filtered_fields.append(field)\n\n return filtered_fields\n\n\nclass Page(Pages):\n \"\"\"\n Restricts the fields to a single page.\n\n Note that page numbers start with 0, so 0 represents the first page, 1 the second page etc.\n\n :param page: Page to take into account\n \"\"\"\n def __init__(self, page: int):\n super(Page, self).__init__([page])\n\n\nclass Location(Filter):\n \"\"\"\n Restricts the fields to a given location on a page.\n\n Restrictions are indicated as ratios in the range [0.0, 1.0], so the tuple (.0, 1.0) would indicate the full\n range and (.5, 1.0) the right/bottom half and (.0, 0.5) the left/upper half.\n\n If a dimension is None, it is not checked (and equal to the full range (.0, 1.0)).\n\n :param horizontal: Horizontal range to take into account (left, right), default None\n :param vertical: Vertical range to take into account (top, bottom), default None\n \"\"\"\n optional_dimension = typing.Optional[typing.Tuple[float, float]]\n\n def __init__(self, horizontal: optional_dimension = None, vertical: optional_dimension = None):\n super(Location, self).__init__()\n self.__horizontal = horizontal\n self.__vertical = vertical\n\n def _filter(self, fields: FieldList) -> FieldList:\n filtered_fields = []\n for field in fields:\n tx_field = field.field\n if (self.__horizontal is None or self.__horizontal[0] < tx_field.geometry.boundingBox.left <\n 
self.__horizontal[1]) and \\\n (self.__vertical is None or self.__vertical[0] < tx_field.geometry.boundingBox.top <\n self.__vertical[1]):\n filtered_fields.append(field)\n\n return filtered_fields\n\n\nclass Selected(Filter):\n \"\"\"\n Restricts the fields to those that are selected (i.e. have a checked checkbox).\n \"\"\"\n def _filter(self, fields: FieldList) -> FieldList:\n filtered_fields = []\n for field in fields:\n tx_field = field.field\n if tx_field.value is not None and tx_field.value.text == 'SELECTED':\n filtered_fields.append(field)\n\n return filtered_fields\n","repo_name":"Futsch1/form-analyzer","sub_path":"form_analyzer/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11581140398","text":"\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\n\ndirname = os.path.dirname(__file__)\n\nclass ChallengeAlreadyExist(Exception): pass\n\nif __name__ == '__main__':\n \n if len(sys.argv) == 2:\n ch_name = sys.argv[1]\n \n newdir = os.path.join(dirname, ch_name)\n \n if not os.path.exists(newdir):\n os.makedirs(newdir)\n else:\n raise ChallengeAlreadyExist(\"{} already exists\".format(ch_name))\n \n ch_initpy = open(newdir + '/__init__.py', mode='wt')\n \n ch_initpy.write('''\n# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint\n\n{ch_name}_blueprint = Blueprint('{ch_name}_blueprint', __name__)\n\nfrom . import {ch_name}'''.format(ch_name=ch_name))\n\n ch_initpy.close()\n\n ch_file = open(newdir + '/{ch_name}.py'.format(ch_name=ch_name), mode='wt')\n \n ch_file.write('''\n# -*- coding: utf-8 -*-\n\nfrom flask import render_template\n\nfrom . import {ch_name}_blueprint\n\n@{ch_name}_blueprint.route('/{ch_name}')\ndef {ch_name}_view():\n return render_template('{ch_name}.html')'''.format(ch_name=ch_name))\n \n ch_file.close()\n \n \n app_initpy = open(dirname + '/__init__.py', mode='at')\n app_initpy.write('''\nfrom .{ch_name} import {ch_name}_blueprint\napp.register_blueprint({ch_name}_blueprint)'''.format(ch_name=ch_name))\n\n app_initpy.close()\n \n template = open(dirname + '/templates/{ch_name}.html'.format(ch_name=ch_name), mode='wt')\n \n template.write(open(dirname + '/templates/ch_0x00.html', mode='rt').read())\n \n template.close()\n \n\n","repo_name":"NQysit/pybwap","sub_path":"pybwap/newchallenge.py","file_name":"newchallenge.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13891220939","text":"\"\"\"\nGeneral utility functions\n\"\"\"\nimport logging\nimport numpy as np\nimport pandas as pd\nimport joblib\n\nlogger = logging.getLogger(__name__)\n\ndef setup_logging(filename):\n \"\"\"\n Set up logging with basic configuration\n \"\"\"\n fh = logging.FileHandler(filename)\n sh = logging.StreamHandler()\n fmt = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\n logging.basicConfig(format=fmt, handlers=[fh,sh], level=logging.INFO)\n\nclass LoggerMixin():\n \"\"\"\n Solves pickling problem of loggers\n \"\"\"\n @property\n def logger(self):\n component = \"{}.{}\".format(type(self).__module__, type(self).__name__)\n return logging.getLogger(component)\n\n_marker = object()\n\ndef first(iterable, default=_marker):\n \"\"\"\n Return the first item of *iterable*, or *default* if *iterable* is\n empty.\n\n >>> first([0, 1, 2, 3])\n 0\n >>> first([], 'some default')\n 'some default'\n\n If *default* is not provided and there are no 
items in the iterable,\n raise ``ValueError``.\n\n :func:`first` is useful when you have a generator of expensive-to-retrieve\n values and want any arbitrary one. It is marginally shorter than\n ``next(iter(iterable), default)``.\n\n \"\"\"\n try:\n return next(iter(iterable))\n except StopIteration as e:\n if default is _marker:\n raise ValueError(\n 'first() was called on an empty iterable, and no '\n 'default value was provided.'\n ) from e\n return default\n \ndef columns_with_nans(df):\n mask = df.isnull().any()\n vars_with_missing = mask[mask == True].index.tolist()\n return vars_with_missing\n\ndef chunks(lst, n):\n \"\"\"\n Yield successive n-sized chunks from lst\n \"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i+n]","repo_name":"pwjvr/local-model-pipeline","sub_path":"code/utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"7655400131","text":"import itertools\nimport os\nimport subprocess\nimport sys\n\nallOpts = ['-vw', '--stats']\nmatrix = [[\n [],\n ['--compression', 'none'],\n ['--compression', 'jpeg'],\n ['--compression', 'jpeg', '-q', '95'],\n ['--compression', 'jpeg', '-q', '90'],\n ['--compression', 'jpeg', '-q', '80'],\n ['--compression', 'jpeg', '-q', '70'],\n ['--compression', 'deflate'],\n ['--compression', 'deflate', '--predictor', 'none'],\n ['--compression', 'deflate', '--predictor', 'horizontal'],\n ['--compression', 'deflate', '--level', '1'],\n ['--compression', 'deflate', '--level', '9'],\n ['--compression', 'lzw'],\n ['--compression', 'lzw', '--predictor', 'none'],\n ['--compression', 'lzw', '--predictor', 'horizontal'],\n ['--compression', 'zstd'],\n ['--compression', 'zstd', '--predictor', 'none'],\n ['--compression', 'zstd', '--predictor', 'horizontal'],\n ['--compression', 'zstd', '--level', '1'],\n ['--compression', 'zstd', '--level', '9'],\n ['--compression', 'zstd', '--level', '22'],\n ['--compression', 'packbits'],\n ['--compression', 'packbits', '--predictor', 'none'],\n ['--compression', 'packbits', '--predictor', 'horizontal'],\n # ['--compression', 'jbig'],\n # ['--compression', 'lzma'],\n ['--compression', 'webp'],\n ['--compression', 'webp', '-q', '0'],\n ['--compression', 'webp', '-q', '100'],\n ['--compression', 'webp', '-q', '95'],\n ['--compression', 'webp', '-q', '90'],\n ['--compression', 'webp', '-q', '80'],\n ['--compression', 'webp', '-q', '70'],\n ['--compression', 'jp2k'],\n ['--compression', 'jp2k', '--psnr', '80'],\n ['--compression', 'jp2k', '--psnr', '70'],\n ['--compression', 'jp2k', '--psnr', '60'],\n ['--compression', 'jp2k', '--psnr', '50'],\n ['--compression', 'jp2k', '--psnr', '40'],\n ['--compression', 'jp2k', '--cr', '100'],\n ['--compression', 'jp2k', '--cr', '1000'],\n ['--compression', 'jp2k', '--cr', '10000'],\n], [\n [], # 256\n ['--tile', '512'],\n ['--tile', '1024'],\n]]\n\nif len(sys.argv) < 3:\n print(\"\"\"test_compression.py (output directory) (input file ...)\"\"\")\n sys.exit(0)\nfor input in sys.argv[2:]:\n root = os.path.join(sys.argv[1], os.path.splitext(os.path.basename(input))[0])\n for optList in itertools.product(*matrix):\n opts = [opt for subList in optList for opt in subList]\n output = root + '.' 
+ '.'.join(str(opt).strip('-') for opt in opts) + '.tiff'\n cmd = ['large_image_converter', input, output] + opts + allOpts\n print(' '.join(cmd))\n subprocess.call(cmd)\n","repo_name":"ncihtan/dsa-htan","sub_path":"compressionStats/test_compression.py","file_name":"test_compression.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"sr","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15767151549","text":"import streamlit as st\r\nfrom utils import load_lottie\r\nfrom streamlit_lottie import st_lottie\r\nfrom streamlit_extras.switch_page_button import switch_page\r\n\r\n\r\n\r\nlottie = \"https://assets2.lottiefiles.com/packages/lf20_6ljswtij.json\"\r\nst.set_page_config(page_title='Case Description', page_icon='📄', layout='wide')\r\n\r\nst.markdown(\"<h1 style='margin-top:5px;padding-top:10px;text-align: center'>Enter Case Description</h1>\", unsafe_allow_html=True)\r\nlottie = load_lottie(lottie)\r\nanimation, descrip = st.columns(2)\r\nwith animation:\r\n st_lottie(lottie,height=200,width=200)\r\nwith descrip:\r\n description = st.text_area(label='Case Description')\r\n clicked = st.button(\"Submit\")\r\n\r\n\r\nif len(description) != 0:\r\n from utils import citations_using_embeddings, df, text_df, use, svm, generate_pdf, scaling\r\n distances,similarity,precedents = citations_using_embeddings(description,use,df,text_df,svm)\r\n \r\n scaled = scaling([i for i,_ in distances])\r\n \r\n st.write('---')\r\n st.write(\"## Most Relevant Case:\")\r\n st.markdown(f\"<h4><u>{precedents.iloc[0]['case_name']}</u></h4>\", unsafe_allow_html=True)\r\n st.write(f\"{precedents.iloc[0]['author_name'].title()}\")\r\n st.write(f\"{precedents.iloc[0]['date_filed']}\")\r\n st.write(f\"Link: {precedents.iloc[0]['absolute_url']}\")\r\n # case1 = st.progress(0,text=\"Relevance\")\r\n # case1.progress(float(similarity),text='Relevance')\r\n if st.button(\"Download PDF\",key=0):\r\n generate_pdf(f\"{precedents.iloc[0]['case_name']}_{st.session_state['username']}\",precedents,0)\r\n \r\n st.write('---') \r\n st.write(\"## Other relevant cases:\")\r\n one,two= st.columns(2)\r\n with one:\r\n st.markdown(f\"<h4><u>{precedents.iloc[1]['case_name']}</u></h4>\", unsafe_allow_html=True)\r\n st.write(f\"{precedents.iloc[1]['author_name'].title()}\")\r\n st.write(f\"{precedents.iloc[1]['date_filed']}\")\r\n st.write(f\"Link: {precedents.iloc[1]['absolute_url']}\")\r\n case2 = st.progress(0,text=\"Relevance\")\r\n case2.progress(int(scaled[0]*100),text='Relevance')\r\n if st.button(\"Download PDF\",key=1):\r\n generate_pdf(f\"{precedents.iloc[1]['case_name']}_{st.session_state['username']}\",precedents,1)\r\n with two:\r\n st.markdown(f\"<h4><u>{precedents.iloc[2]['case_name']}</u></h4>\", unsafe_allow_html=True)\r\n st.write(f\"{precedents.iloc[2]['author_name'].title()}\")\r\n st.write(f\"{precedents.iloc[2]['date_filed']}\")\r\n st.write(f\"Link: {precedents.iloc[2]['absolute_url']}\")\r\n case3 = st.progress(0,text=\"Relevance\")\r\n case3.progress(int(scaled[1]*100),text='Relevance')\r\n if st.button(\"Download PDF\",key=2):\r\n generate_pdf(f\"{precedents.iloc[2]['case_name']}_{st.session_state['username']}\",precedents,2)\r\n three,four= st.columns(2)\r\n with three:\r\n st.markdown(f\"<h4><u>{precedents.iloc[3]['case_name']}</u></h4>\", unsafe_allow_html=True)\r\n st.write(f\"{precedents.iloc[3]['author_name'].title()}\")\r\n st.write(f\"{precedents.iloc[3]['date_filed']}\")\r\n st.write(f\"Link: {precedents.iloc[3]['absolute_url']}\")\r\n case4 = 
st.progress(0,text=\"Relevance\")\r\n case4.progress(int(scaled[2]*100),text='Relevance')\r\n if st.button(\"Download PDF\",key=3):\r\n generate_pdf(f\"{precedents.iloc[3]['case_name']}_{st.session_state['username']}\",precedents,3)\r\n with four:\r\n st.markdown(f\"<h4><u>{precedents.iloc[4]['case_name']}</u></h4>\", unsafe_allow_html=True)\r\n st.write(f\"{precedents.iloc[4]['author_name'].title()}\")\r\n st.write(f\"{precedents.iloc[4]['date_filed']}\")\r\n st.write(f\"Link: {precedents.iloc[4]['absolute_url']}\")\r\n case5 = st.progress(0,text=\"Relevance\")\r\n case5.progress(int(scaled[3]*100),text='Relevance')\r\n if st.button(\"Download PDF\",key=4):\r\n generate_pdf(f\"{precedents.iloc[4]['case_name']}_{st.session_state['username']}\",precedents,4)\r\n \r\ntry:\r\n \r\n st.sidebar.markdown(f\"<h2>Welcome, {st.session_state['username']}</h2>\",unsafe_allow_html=True)\r\n st.sidebar.write(f\"ID: {st.session_state['id']+1}\")\r\n st.sidebar.write(f\"Occupation: {st.session_state['occupation']}\")\r\n st.sidebar.write(f\"Email: {st.session_state['email']}\")\r\n logout = st.sidebar.button('Logout')\r\n if logout and len(st.session_state) != 0:\r\n st.session_state.clear()\r\n switch_page(\"Login\")\r\n elif logout and len(st.session_state) == 0:\r\n st.sidebar.error(\"Please login first\")\r\n\r\nexcept:\r\n switch_page(\"Login\")\r\n\r\n\r\nhide_st_style = \"\"\"\r\n <style>\r\n footer {visibility: hidden;}\r\n </style>\r\n \"\"\"\r\nst.markdown(hide_st_style, unsafe_allow_html=True)","repo_name":"AkshatMittu/Precedent-Analysis","sub_path":"3_Case.py","file_name":"3_Case.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73305581973","text":"from input import *\r\n\r\n\r\ndef dichotomy_method(a, b, epsilon, k=1):\r\n my_counter = Counter()\r\n while b - a > epsilon:\r\n k += 1\r\n middle = (a + b) / 2\r\n x1 = middle - delta\r\n x2 = middle + delta\r\n if f(my_counter, x2) > f(my_counter, x1):\r\n a, b = a, x2\r\n else:\r\n a, b = x1, b\r\n return (a + b) / 2, f(Counter(), (a + b) / 2), k, my_counter.get_count()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(dichotomy_method(A, B, EPSILON))\r\n","repo_name":"babtiss/ITMO_Math","sub_path":"Primat/lab1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"8111107587","text":"from collections import deque\ndef solution(n, computers):\n queue = deque()\n ret = 0\n visited = dict()\n\n for i in range(n):\n if i in visited:\n continue\n visited[i] = True\n queue.append(i)\n while queue:\n node = queue.popleft()\n for neighbor in range(n):\n if neighbor in visited or computers[node][neighbor] == 0:\n continue\n visited[neighbor] = True\n queue.append(neighbor)\n ret += 1\n\n return ret\n\nprint(solution(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]))\nprint(solution(3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]]))","repo_name":"taehooK/Programmers","sub_path":"레벨3/네트워크.py","file_name":"네트워크.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73885431574","text":"import torch\r\n\r\ndef fgsm_attack(model, criterion, point, labels, eps, x = False, m = True) :\r\n point.requires_grad = True\r\n if m == True:\r\n outputs, __, __ = model(point.transpose(1,2), x) #Fgsm Attack to PointNet Network\r\n else :\r\n outputs = 
model(point.permute(0,2,1)) #Fgsm Attack to DGCNN Network\r\n model.zero_grad()\r\n cost = criterion(outputs, labels.long())\r\n cost.backward()\r\n attack_data = eps*point.grad.sign()\r\n return attack_data\r\n\r\ndef pgd_linf(model, x, y, loss_fn, num_steps, step_size, eps, xs = False , m = True):\r\n delta = torch.zeros_like(x, requires_grad=True)\r\n for i in range(num_steps):\r\n if m == True :\r\n prediction, __, __ = model((x+delta).transpose(1,2), xs) #Pgd Attack to PointNet Network\r\n else :\r\n prediction = model((x+delta).permute(0,2,1)) #Pgd Attack to DGCNN Network\r\n loss = loss_fn(prediction,y)\r\n loss.backward()\r\n delta.data = (delta + step_size*delta.grad.detach().sign()).clamp(-eps,eps)\r\n delta.grad.zero_()\r\n return delta.detach()\r\n","repo_name":"saiteja1012/Expediting-Adversarial-Training-with-Prediction-Methods-for-Point-Cloud-Classification","sub_path":"attacks.py","file_name":"attacks.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"29314368083","text":"# Given a number, build the list of Fibonacci numbers,\n# including those for negative indices.\n\nvalue = int(input(\"Enter an index:\"))\nlist1 = [0,1]\nlist2 = []\n\nfor i in range(2,value+1):\n list1.append(list1[i-1]+list1[i-2])\n list2.append((-1)**(i+1)*list1[i])\n\nlist2.reverse()\nprint(f'Fibonacci numbers for this index: {list2 + list1}')\n","repo_name":"replica-rs/gb_py_hw","sub_path":"lesson_3/task_1-4.py","file_name":"task_1-4.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"74649608213","text":"################################################################################################################\r\n## This code is meant to take in openphish data and eventually produce a cumulative_urls.txt ##\r\n## Cumulative_urls.txt includes url and brand name, separated by a delimiter of a tab \"\\t\" ##\r\n## Usage: pass the folder containing openphish text files, e.g. python cumulate_urls.py -f 1mayto13june ##\r\n################################################################################################################\r\n\r\nimport os\r\nimport argparse\r\nimport json\r\n\r\n\r\ndef readData(dataUrl):\r\n fopen=open(dataUrl, 'r')\r\n x=fopen.read()\r\n rawdata=x.split('}')\r\n listdict=[]\r\n for i in rawdata:\r\n try:\r\n i=i.lstrip(', ')\r\n i=i.lstrip('[')\r\n i=i.replace('}]','')\r\n i=i+'}'\r\n listdict.append(json.loads(i))\r\n except Exception:\r\n continue\r\n fopen.close()\r\n return listdict\r\n\r\n\r\n# Ensure no duplicates\r\ncumulative_urls = set()\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-f', \"--folder\", help='Input folder for collected openphish urls', required=True)\r\nargs = parser.parse_args()\r\n\r\nfor file in os.listdir(args.folder):\r\n\tfull_path_file = os.path.join(args.folder, file)\r\n\turl_target = readData(full_path_file)\r\n\tfor item in url_target:\r\n\t\tcumulative_urls.add((item['url'], item['brand']))\r\n\t\r\n\tprint(\"Done with: \" + str(file))\r\n\r\nfile_to_write = \"cumulative_urls.txt\"\r\n\r\nprint(len(cumulative_urls))\r\nwith open(file_to_write, \"w+\") as f:\r\n\tfor item in cumulative_urls:\r\n\t\turl, brand = 
item\r\n\t\tf.write(url+\"\\t\")\r\n\t\tf.write(brand+\"\\n\")","repo_name":"junyango/webpage_saver","sub_path":"cumulate_urls.py","file_name":"cumulate_urls.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42378102980","text":"from flectra import api, fields, models\nfrom flectra.osv import expression\n\n\nclass Employee(models.Model):\n _inherit = \"hr.employee\"\n\n vehicle = fields.Char(string='Company Vehicle', groups=\"hr.group_hr_user\")\n contract_ids = fields.One2many('hr.contract', 'employee_id', string='Employee Contracts')\n contract_id = fields.Many2one('hr.contract', string='Current Contract',\n groups=\"hr.group_hr_user\",domain=\"[('company_id', '=', company_id)]\", help='Current contract of the employee')\n calendar_mismatch = fields.Boolean(related='contract_id.calendar_mismatch')\n contracts_count = fields.Integer(compute='_compute_contracts_count', string='Contract Count')\n contract_warning = fields.Boolean(string='Contract Warning', store=True, compute='_compute_contract_warning', groups=\"hr.group_hr_user\")\n first_contract_date = fields.Date(compute='_compute_first_contract_date', groups=\"hr.group_hr_user\")\n\n def _get_first_contracts(self):\n self.ensure_one()\n return self.sudo().contract_ids.filtered(lambda c: c.state != 'cancel')\n\n @api.depends('contract_ids.state', 'contract_ids.date_start')\n def _compute_first_contract_date(self):\n for employee in self:\n contracts = employee._get_first_contracts()\n if contracts:\n employee.first_contract_date = min(contracts.mapped('date_start'))\n else:\n employee.first_contract_date = False\n\n @api.depends('contract_id', 'contract_id.state', 'contract_id.kanban_state')\n def _compute_contract_warning(self):\n for employee in self:\n employee.contract_warning = not employee.contract_id or employee.contract_id.kanban_state == 'blocked' or employee.contract_id.state != 'open'\n\n def _compute_contracts_count(self):\n # read_group as sudo, since contract count is displayed on form view\n contract_data = self.env['hr.contract'].sudo().read_group([('employee_id', 'in', self.ids)], ['employee_id'], ['employee_id'])\n result = dict((data['employee_id'][0], data['employee_id_count']) for data in contract_data)\n for employee in self:\n employee.contracts_count = result.get(employee.id, 0)\n\n def _get_contracts(self, date_from, date_to, states=['open'], kanban_state=False):\n \"\"\"\n Returns the contracts of the employee between date_from and date_to\n \"\"\"\n state_domain = [('state', 'in', states)]\n if kanban_state:\n state_domain = expression.AND([state_domain, [('kanban_state', 'in', kanban_state)]])\n\n return self.env['hr.contract'].search(\n expression.AND([[('employee_id', 'in', self.ids)],\n state_domain,\n [('date_start', '<=', date_to),\n '|',\n ('date_end', '=', False),\n ('date_end', '>=', date_from)]]))\n\n def _get_incoming_contracts(self, date_from, date_to):\n return self._get_contracts(date_from, date_to, states=['draft'], kanban_state=['done'])\n\n @api.model\n def _get_all_contracts(self, date_from, date_to, states=['open']):\n \"\"\"\n Returns the contracts of all employees between date_from and date_to\n \"\"\"\n return self.search([])._get_contracts(date_from, date_to, states=states)\n\n def write(self, vals):\n res = super(Employee, self).write(vals)\n if vals.get('contract_id'):\n for employee in self:\n 
employee.resource_calendar_id.transfer_leaves_to(employee.contract_id.resource_calendar_id, employee.resource_id)\n employee.resource_calendar_id = employee.contract_id.resource_calendar_id\n return res\n","repo_name":"flectra-hq/flectra","sub_path":"addons/hr_contract/models/hr_employee.py","file_name":"hr_employee.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"25485497541","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom string import Template\n\nfrom os.path import expanduser\n\n\n__author__ = 'Giacomo Lacava'\n\nfrom twistscraper import TwisterScraper\n\nTEMPLATE = None\nwith open(\"map.html\", \"rb\") as mapTemplate:\n TEMPLATE = Template(mapTemplate.read().decode('utf-8'))\n\n\ndef generate_map(userdb):\n ts = TwisterScraper(userdb)\n loc_users = [u for u in ts.db.users.values() if hasattr(u, 'location') and u.location != '']\n noLoc_user_num = len(ts.db.users) - len(loc_users)\n loc_users_fake_num = 0\n locDb = {}\n\n for u in loc_users:\n if u.location in locDb:\n locDb[u.location]['users'].append(u.username)\n else:\n locData = u.locate()\n if locData is not None:\n locDb[u.location] = {}\n locDb[u.location]['coordinates'] = locData\n locDb[u.location]['users'] = [u.username]\n else:\n loc_users_fake_num += 1\n # second pass to aggregate misspellings\n done = []\n newLocDb = {}\n for loc, locDict in locDb.items():\n # find all elements with same coordinates\n sameCoord = [(l, lObj['users']) for l, lObj in locDb.items() if lObj['coordinates'] == locDict['coordinates']]\n if len(sameCoord) == 1:\n # if only one element, copy it straight to the new dict\n newLocDb[loc] = locDict\n\n elif len(sameCoord) > 1:\n # if we're here, multiple locations have the same name\n\n # find the most popular name\n locMax = max(sameCoord, key=lambda x: len(x[1]))\n location = locMax[0]\n coordHash = '/'.join([str(locDict['coordinates'][0]), str(locDict['coordinates'][1])])\n # if we haven't seen this set of coordinates yet...\n if coordHash not in done:\n\n # ... collect all users ...\n users = []\n for l, us in sameCoord:\n for u in us:\n users.append(u)\n users.sort()\n\n # ... and add the aggregated result\n if location not in newLocDb:\n newLocDb[location] = {}\n newLocDb[location]['users'] = users\n newLocDb[location]['coordinates'] = locDict['coordinates']\n done.append(coordHash)\n\n locStrings = []\n for k in newLocDb.keys():\n locStrings.append(\"['<h4>{name} - {numusers}</h4><small>{users}</small>', {lat}, {lng}]\".format(\n name=k.replace(\"'\", \"'\"),\n lat=newLocDb[k]['coordinates'][0],\n lng=newLocDb[k]['coordinates'][1],\n users=',<br />'.join(newLocDb[k]['users']),\n numusers=len(newLocDb[k]['users'])))\n locStrings.sort()\n return TEMPLATE.substitute(locations=',\\n'.join(locStrings),\n users_real_loc=len(loc_users),\n users_fake_loc=loc_users_fake_num,\n users_no_loc=noLoc_user_num,\n timestamp=datetime.now().isoformat())\n\n\nif __name__ == '__main__':\n html = generate_map(expanduser('~/.twister/_localusersdb'))\n with open(expanduser('~/twistermap.html'), 'wb') as tmf:\n tmf.write(html.encode('utf-8'))","repo_name":"toyg/pytwister","sub_path":"Twistmapper.py","file_name":"Twistmapper.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"44922625018","text":"\"\"\"\n339. 
Nested List Weight Sum\nGiven a nested list of integers, return the sum of all integers in the list weighted by their depth.\n\nEach element is either an integer, or a list -- whose elements may also be integers or other lists.\n\nExample 1:\nGiven the list [[1,1],2,[1,1]], return 10. (four 1's at depth 2, one 2 at depth 1)\n\nExample 2:\nGiven the list [1,[4,[6]]], return 27. (one 1 at depth 1, one 4 at depth 2, and one 6 at depth 3; 1 + 4*2 + \n6*3 = 27)\n\"\"\"\n\n# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class NestedInteger:\n# def __init__(self, value=None):\n# \"\"\"\n# If value is not specified, initializes an empty list.\n# Otherwise initializes a single integer equal to value.\n# \"\"\"\n#\n# def isInteger(self):\n# \"\"\"\n# @return True if this NestedInteger holds a single integer, rather than a nested list.\n# :rtype bool\n# \"\"\"\n#\n# def add(self, elem):\n# \"\"\"\n# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.\n# :rtype void\n# \"\"\"\n#\n# def setInteger(self, value):\n# \"\"\"\n# Set this NestedInteger to hold a single integer equal to value.\n# :rtype void\n# \"\"\"\n#\n# def getInteger(self):\n# \"\"\"\n# @return the single integer that this NestedInteger holds, if it holds a single integer\n# Return None if this NestedInteger holds a nested list\n# :rtype int\n# \"\"\"\n#\n# def getList(self):\n# \"\"\"\n# @return the nested list that this NestedInteger holds, if it holds a nested list\n# Return None if this NestedInteger holds a single integer\n# :rtype List[NestedInteger]\n# \"\"\"\n\nclass Solution:\n def depthSum(self, nestedList):\n \"\"\"\n :type nestedList: List[NestedInteger]\n :rtype: int\n \"\"\"\n return self.depthSum_helper(nestedList, 1)\n \n def depthSum_helper(self, nestedList, level):\n result = 0\n\n for n in nestedList:\n if n.isInteger():\n result += level*n.getInteger()\n else:\n result += self.depthSum_helper(n.getList(), level+1)\n \n return result\n \n\"\"\"\n256. Paint House\n\nThere are a row of n houses, each house can be painted with one of the three colors: red, blue or green. The \ncost of painting each house with a certain color is different. You have to paint all the houses such that no \ntwo adjacent houses have the same color.\n\nThe cost of painting each house with a certain color is represented by a n x 3 cost matrix. For example, \ncosts[0][0] is the cost of painting house 0 with color red; costs[1][2] is the cost of painting house 1 with \ncolor green, and so on... Find the minimum cost to paint all houses.\n\nNote:\nAll costs are positive integers.\n\nExample:\nInput: [[17,2,17],[16,16,5],[14,3,19]]\nOutput: 10\nExplanation: Paint house 0 into blue, paint house 1 into green, paint house 2 into blue. \n Minimum cost: 2 + 5 + 3 = 10.\n\"\"\"\n\nclass Solution:\n def minCost(self, costs):\n \"\"\"\n :type costs: List[List[int]]\n :rtype: int\n \"\"\"\n if len(costs) <= 0: \n return 0\n \n last_red = costs[0][0]\n last_green = costs[0][1]\n last_blue = costs[0][2]\n \n for i in range(1, len(costs)):\n curr_red = min(last_green, last_blue) + costs[i][0]\n curr_green = min(last_red, last_blue) + costs[i][1]\n curr_blue = min(last_red, last_green) + costs[i][2]\n last_red = curr_red\n last_green = curr_green\n last_blue = curr_blue\n \n return min(last_red, min(last_green, last_blue))\n\n\"\"\"\n746. 
Min Cost Climbing Stairs\n\nOn a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).\n\nOnce you pay the cost, you can either climb one or two steps. You need to find minimum cost to reach the top \nof the floor, and you can either start from the step with index 0, or the step with index 1.\n\nExample 1:\nInput: cost = [10, 15, 20]\nOutput: 15\nExplanation: Cheapest is start on cost[1], pay that cost and go to the top.\n\nExample 2:\nInput: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\nOutput: 6\nExplanation: Cheapest is start on cost[0], and only step on 1s, skipping cost[3].\n\nNote:\ncost will have a length in the range [2, 1000].\nEvery cost[i] will be an integer in the range [0, 999].\n\"\"\"\n\nclass Solution:\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n prev = 0\n curr = 0\n for i in range(len(cost)):\n temp = prev\n prev = curr\n curr = cost[i] + min(temp, curr)\n return min(prev, curr)\n\n\"\"\"\n121. Best Time to Buy and Sell Stock\n\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nIf you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the \nstock), design an algorithm to find the maximum profit.\n\nNote that you cannot sell a stock before you buy one.\n\nExample 1:\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\n Not 7-1 = 6, as selling price needs to be larger than buying price.\n\nExample 2:\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n\"\"\"\n\nclass Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if len(prices) <= 1: \n return 0\n \n max_profit = 0\n curr_low = prices[0]\n \n for i in range(1, len(prices)):\n if prices[i] < curr_low:\n curr_low = prices[i]\n max_profit = max(max_profit, prices[i] - curr_low)\n\n return max_profit\n\n\"\"\"\n70. Climbing Stairs\n\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n\nExample 1:\nInput: 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\n\nExample 2:\nInput: 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 2 steps + 1 step\n\"\"\"\n\nclass Solution:\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 1: \n return 1\n \n a = 1\n b = 1\n \n for i in range(2, n):\n c = b + a\n a = b\n b = c\n \n return b + a\n\n\"\"\"\n53. Maximum Subarray\nGiven an integer array nums, find the contiguous subarray (containing at least one number) which has the \nlargest sum and return its sum.\n\nExample:\nInput: [-2,1,-3,4,-1,2,1,-5,4],\nOutput: 6\nExplanation: [4,-1,2,1] has the largest sum = 6.\n\nFollow up:\nIf you have figured out the O(n) solution, try coding another solution using the divide and conquer \napproach, which is more subtle.\n\"\"\"\n\nclass Solution:\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) <= 0: \n return 0\n \n max_seq = nums[0]\n running_sum = 0\n \n for n in nums:\n running_sum += n\n max_seq = max(max_seq, running_sum)\n \n if running_sum < 0: \n running_sum = 0\n \n return max_seq\n\n\"\"\"\n198. 
House Robber\n\nYou are a professional robber planning to rob houses along a street. Each house has a certain amount of \nmoney stashed; the only constraint stopping you from robbing each of them is that adjacent houses have \nsecurity systems connected and they will automatically contact the police if two adjacent houses were broken \ninto on the same night.\n\nGiven a list of non-negative integers representing the amount of money of each house, determine the maximum \namount of money you can rob tonight without alerting the police.\n\nExample 1:\nInput: [1,2,3,1]\nOutput: 4\nExplanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n             Total amount you can rob = 1 + 3 = 4.\n\nExample 2:\nInput: [2,7,9,3,1]\nOutput: 12\nExplanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).\n             Total amount you can rob = 2 + 9 + 1 = 12.\n\"\"\"\n\nclass Solution:\n    def rob(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        prev = 0\n        curr = 0\n\n        for n in nums:\n            temp = curr\n            curr = max(curr, prev+n)\n            prev = temp\n\n        return curr\n\n\"\"\"\n276. Paint Fence\n\nThere is a fence with n posts, each post can be painted with one of the k colors.\n\nYou have to paint all the posts such that no more than two adjacent fence posts have the same color.\n\nReturn the total number of ways you can paint the fence.\n\nNote:\nn and k are non-negative integers.\n\nExample:\nInput: n = 3, k = 2\nOutput: 6\nExplanation: Take c1 as color 1, c2 as color 2. All possible ways are:\n\n            post1  post2  post3 \n -----      -----  -----  ----- \n   1         c1     c1     c2 \n   2         c1     c2     c1 \n   3         c1     c2     c2 \n   4         c2     c1     c1 \n   5         c2     c1     c2\n   6         c2     c2     c1\n\"\"\"\n\nclass Solution:\n    def numWays(self, n, k):\n        \"\"\"\n        :type n: int\n        :type k: int\n        :rtype: int\n        \"\"\"\n        if k == 0 or n == 0: \n            return 0\n        if n == 1: \n            return k\n        \n        same = k\n        diff = k * (k-1)\n\n        for i in range(2, n):\n            same, diff = diff, (same + diff) * (k-1)\n        return same + diff","repo_name":"maxthecabbie/LeetCode","sub_path":"Recursion and Dynamic Programming/Easy/Solutions.py","file_name":"Solutions.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"20876389531","text":"\"\"\"\nCoding test practice\n2023 KAKAO BLIND RECRUITMENT\nMaze Escape Commands (미로 탈출 명령어)\nhttps://school.programmers.co.kr/learn/courses/30/lessons/150365\n\"\"\"\n\nfrom collections import defaultdict\nfrom collections import deque\n\ndef solution(n, m, x, y, r, c, k):\n    x, y, r, c = x - 1, y - 1, r - 1, c - 1\n    answer = 'impossible'\n    dic = defaultdict(int)\n    stk = deque([(x, y, '')])\n\n    while stk:\n        i, j, w = stk.popleft() # coordinates, command string\n        t = len(w) # string length: time spent so far\n\n        if t == k: # once k time is used up\n            if i == r and j == c: # return answer if the goal was reached\n                answer = w\n                break\n            continue\n\n        if k - t < abs(i - r) + abs(j - c): # skip if the goal cannot be reached in the remaining time\n            continue\n\n        # If the next cell was already reached at time t+1, skip it (a lexicographically earlier route already exists).\n        # Otherwise record the coordinates with time t+1 and push.\n\n        if not dic[(i + 1, j, t + 1)] and i + 1 < n:\n            dic[(i + 1, j, t + 1)] = 1\n            stk.append((i + 1, j, w + 'd'))\n\n        if not dic[(i, j - 1, t + 1)] and j - 1 >= 0:\n            dic[(i, j - 1, t + 1)] = 1\n            stk.append((i, j - 1, w + 'l'))\n\n        if not dic[(i, j + 1, t + 1)] and j + 1 < m:\n            dic[(i, j + 1, t + 1)] = 1\n            stk.append((i, j + 1, w + 'r'))\n\n        if not dic[(i - 1, j, t + 1)] and i - 1 >= 0:\n            dic[(i - 1, j, t + 1)] = 1\n            stk.append((i - 1, j, w + 'u'))\n\n    return 
answer","repo_name":"seoda0000/TIL","sub_path":"AlgorithmProblemSolving/02_프로그래머스/programmers_level_3/미로_탈출_명령어.py","file_name":"미로_탈출_명령어.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"20703159054","text":"\nimport requests\nimport json\n\nhost = 'http://localhost:9200/'\n\nindex = 'companydatabase'\n\nsettings = {\n \"index\": {\n \"max_ngram_diff\": 20\n },\n \"analysis\": {\n \"analyzer\": {\n \"autocomplete\": {\n \"type\": \"custom\",\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"lowercase\",\n \"autocomplete_filter\"\n ]\n },\n },\n \"filter\": {\n \"autocomplete_filter\": {\n \"type\": \"ngram\",\n \"min_gram\": 1,\n \"max_gram\": 20\n }\n }\n }\n }\n\nmappings = {\n \"properties\": {\n \"full_name\": {\n \"type\": \"text\",\n \"analyzer\": \"autocomplete\",\n \"fields\": {\n \"keyword\": {\n \"type\": \"keyword\"\n }\n }\n },\n \"Designation\": {\n \"type\": \"text\"\n },\n \"Salary\": {\n \"type\": \"text\"\n },\n \"DateOfJoining\": {\n \"type\": \"date\",\n \"format\": \"strict_date_optional_time||epoch_millis\"\n },\n \"Address\": {\n \"type\": \"text\"\n },\n \"Gender\": {\n \"type\": \"text\"\n },\n \"Age\": {\n \"type\": \"long\"\n },\n \"MaritalStatus\": {\n \"type\": \"text\"\n },\n \"Interests\": {\n \"type\": \"text\"\n }\n }\n}\n\ndef del_index():\n\n url = host + index\n r = requests.delete(url, verify=True)\n if r.status_code == 200:\n print(f'{index} - deleted successfully')\n else:\n print(r.text)\n\n\n\ndef apply_mapping():\n headers = {\n 'Content-Type': 'application/json',\n }\n\n params = (\n ('pretty', ''),\n )\n\n templete = {\"settings\" : settings, \"mappings\" : mappings}\n\n \n r = requests.put(host + index, headers=headers, params=params, data=json.dumps(templete), verify=True)\n if r.status_code == 200:\n print(f'{index} - mapping applied successfully')\n else:\n print(r.text)\n\n\n\ndef index_data():\n print(f'{index} - inserting data...')\n headers = {\n 'Content-Type': 'application/json',\n }\n data = open('Employees50K.json', 'rb').read()\n r = requests.put(f'{host + index}/_bulk', headers=headers,data=data, verify=True)\n\n if r.status_code == 200:\n print(f'{index} - data inserted successfully')\n else:\n print(r.text)\n\n\ndef count_documents():\n r = requests.get(f'{host + index}/_count', verify=True)\n\n if r.status_code == 200:\n print(f'{index} - There are {r.json()[\"count\"]} documents in the index')\n else:\n print(r.text)\n\n\ndef get_indexes():\n r = requests.get(f'{host}_cat/indices?pretty', verify=True)\n\n print(r.text)\n\n\ndef analyzer_test():\n\n search = 'elvis'\n\n headers = {\n 'Content-Type': 'application/json',\n }\n\n params = (\n ('pretty', ''),\n )\n\n data = {\n \"analyzer\": \"autocomplete\",\n \"text\": search\n }\n\n \n r = requests.post(host + index + '/_analyze', headers=headers, params = params, data=json.dumps(data), verify=True)\n print(f\"{search} - {','.join([i['token'] for i in r.json()['tokens']])}\")\n\n\n\n\ndef search_test():\n headers = {\n 'Content-Type': 'application/json',\n }\n\n\n params = (\n ('pretty', ''),\n )\n\n data = {\n \"query\": {\n \"match\": {\n \"full_name\": {\n \"query\": \"INNERT\",\n \"analyzer\": \"standard\"\n }\n }\n }\n }\n\n \n r = requests.post(host + index + '/_search', headers=headers, params = params, data=json.dumps(data), verify=True)\n 
print(r.text)\n\n\n\ndel_index()\napply_mapping()\nindex_data()\ncount_documents()\n\n#get_indexes()\n#analyzer_test()\n#search_test()\n","repo_name":"igal8183/es_search_as_you_type","sub_path":"reindex_with_ngram.py","file_name":"reindex_with_ngram.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73709825492","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n    readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n    history = history_file.read()\n\nrequirements = [ 'credstash>=1.16.1' ]\n\nsetup_requirements = [ ]\n\ntest_requirements = [ ]\n\nsetup(\n    author=\"Thomas Knox\",\n    author_email='knoxilla@gmail.com',\n    python_requires='>=3.5',\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Natural Language :: English',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n    ],\n    description=\"Retrieves secrets from AWS via credstash, and returns a dictionary.\",\n    install_requires=requirements,\n    license=\"MIT license\",\n    long_description=readme + '\\n\\n' + history,\n    include_package_data=True,\n    keywords='credstash_envvar_helper',\n    name='credstash_envvar_helper',\n    packages=find_packages(include=['credstash_envvar_helper', 'credstash_envvar_helper.*']),\n    setup_requires=setup_requirements,\n    test_suite='tests',\n    tests_require=test_requirements,\n    url='https://github.com/knoxilla/credstash_envvar_helper',\n    version='0.1.3',\n    zip_safe=False,\n)\n","repo_name":"knoxilla/credstash_envvar_helper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28215059233","text":"from flask import Flask\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    a = 1\n    b = 0\n    c = a/b  # division by zero: triggers the in-browser error page described below\n    return 'Hello123!!!'\n\n#Benefits of debug mode:\n#1. Hot reloading: after updating the code there is no need to restart the service by hand, the system restarts it for us automatically\n#2. Error details can be loaded straight into the browser\nif __name__ == \"__main__\":\n    # app.run(debug=True)\n    app.debug = True\n    app.run()","repo_name":"Bingo021/python_code","sub_path":"Flask框架/03_debug模式的使用.py","file_name":"03_debug模式的使用.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21749033600","text":"#----------------------------------------------------------------------\n# panelThree.py\n#\n# Created 11/26/2009\n#\n# Author: Mike Driscoll - mike@pythonlibrary.org\n#\n# Note: The TreeListCtrl code was taken from the Official wxPython Demo\n#\n#----------------------------------------------------------------------\n\nimport wx\nimport wx.gizmos as gizmos\n\nclass TabPanel(wx.Panel):\n    \"\"\"\n    This will be the third notebook tab\n    \"\"\"\n    #----------------------------------------------------------------------\n    def __init__(self, parent):\n        \"\"\"\"\"\"\n        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)\n\n        self.Bind(wx.EVT_SIZE, self.OnSize)\n\n        # self.tree is required by the handlers below, so the TreeListCtrl is created here\n        self.tree = gizmos.TreeListCtrl(self, -1, style =\n                                        wx.TR_DEFAULT_STYLE\n                                        | wx.TR_FULL_ROW_HIGHLIGHT\n                                        )\n\n        isz = 
(16,16)\n il = wx.ImageList(isz[0], isz[1])\n fldridx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, isz))\n fldropenidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, isz))\n fileidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, isz))\n bulbidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_TIP, wx.ART_OTHER, isz))\n\n self.tree.SetImageList(il)\n self.il = il\n\n # create some columns\n self.tree.AddColumn(\"Main column\")\n self.tree.AddColumn(\"Column 1\")\n self.tree.AddColumn(\"Column 2\")\n self.tree.SetMainColumn(0) # the one with the tree in it...\n self.tree.SetColumnWidth(0, 175)\n\n\n self.root = self.tree.AddRoot(\"The Root Item\")\n self.tree.SetItemText(self.root, \"col 1 root\", 1)\n self.tree.SetItemText(self.root, \"col 2 root\", 2)\n self.tree.SetItemImage(self.root, fldridx, which = wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(self.root, fldropenidx, which = wx.TreeItemIcon_Expanded)\n\n\n for x in range(15):\n txt = \"Item %d\" % x\n child = self.tree.AppendItem(self.root, txt)\n self.tree.SetItemText(child, txt + \"(c1)\", 1)\n self.tree.SetItemText(child, txt + \"(c2)\", 2)\n self.tree.SetItemImage(child, fldridx, which = wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(child, fldropenidx, which = wx.TreeItemIcon_Expanded)\n\n for y in range(5):\n txt = \"item %d-%s\" % (x, chr(ord(\"a\")+y))\n last = self.tree.AppendItem(child, txt)\n self.tree.SetItemText(last, txt + \"(c1)\", 1)\n self.tree.SetItemText(last, txt + \"(c2)\", 2)\n self.tree.SetItemImage(last, fldridx, which = wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(last, fldropenidx, which = wx.TreeItemIcon_Expanded)\n\n for z in range(5):\n txt = \"item %d-%s-%d\" % (x, chr(ord(\"a\")+y), z)\n item = self.tree.AppendItem(last, txt)\n self.tree.SetItemText(item, txt + \"(c1)\", 1)\n self.tree.SetItemText(item, txt + \"(c2)\", 2)\n self.tree.SetItemImage(item, fileidx, which = wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(item, bulbidx, which = wx.TreeItemIcon_Selected)\n\n\n self.tree.Expand(self.root)\n\n self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_UP, self.OnRightUp)\n self.tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivate)\n\n\n def OnActivate(self, evt):\n print('OnActivate: %s' % self.tree.GetItemText(evt.GetItem()))\n\n def OnRightUp(self, evt):\n pos = evt.GetPosition()\n item, flags, col = self.tree.HitTest(pos)\n if item:\n print('Flags: %s, Col:%s, Text: %s' % (flags, col,\n self.tree.GetItemText(item, col)))\n\n def OnSize(self, evt):\n self.tree.SetSize(self.GetSize())\n\n\nclass DemoFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, wx.ID_ANY, \"Panel Tutorial\")\n panel = TabPanel(self)\n self.Show()\n\n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n app = wx.PySimpleApp()\n frame = DemoFrame()\n app.MainLoop()\n","repo_name":"Dimon0014/Real","sub_path":"panelThree.py","file_name":"panelThree.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38949622590","text":"# Given a collection of numbers that might contain duplicates, return all possible unique permutations.\n\n# For example,\n# [1,1,2] have the following unique permutations:\n# [\n# [1,1,2],\n# [1,2,1],\n# [2,1,1]\n# ]\n\nclass Solution(object):\n def permuteUnique(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n visited = set()\n nums = sorted(nums)\n 
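# sorting keeps duplicate values adjacent, so equal neighbours can be skipped during backtracking\n        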
self.backtracking(nums, [], res, visited)\n        return res\n    \n    def backtracking(self, nums, temp, ans, visited):\n        if len(temp) == len(nums):\n            ans.append(list(temp))\n        for i in range(len(nums)):\n            if i in visited or i>0 and nums[i]==nums[i-1] and i-1 not in visited:\n                continue\n            temp.append(nums[i])\n            visited.add(i)\n            self.backtracking(nums, temp, ans, visited)\n            visited.remove(i)\n            temp.pop()\n    \n\nclass Solution(object):\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        visited = set()\n        self.backtracking(res, nums,[],visited)\n        return res\n\n    def backtracking(self, res, nums, temp,visited):\n        if len(temp) == len(nums) and temp not in res:\n            res.append(list(temp))\n        for i in range(len(nums)):\n            if nums[i] in temp and i in visited:\n                continue\n            visited.add(i)\n            temp.append(nums[i])\n            self.backtracking(res, nums, temp, visited)\n            visited.remove(i)\n            temp.pop()\n        ","repo_name":"youhusky/Facebook_Prepare","sub_path":"047. Permutations II.py","file_name":"047. Permutations II.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"15672310222","text":"class node():\n    def __init__(self, item=None):\n        self.item = item\n        self.next = None\n        self.prev = None\n    \n    \nclass queue():\n    def __init__(self):\n        self.head = None\n        self.tail = None\n        self.size = 0\n    \n    def enqueue(self, item):\n        new_node = node(item)\n        if(self.size == 0):\n            self.head = new_node\n            self.tail = new_node\n        else:\n            self.tail.next = new_node\n            new_node.prev = self.tail\n            self.tail = new_node\n        self.size += 1\n    \n    def dequeue(self):\n        x = self.head.item\n        self.head = self.head.next\n        self.size -= 1\n        if (self.size == 0):\n            self.tail = None\n        return x\n    \n    def front(self):\n        return self.head.item\n    \n    def isEmpty(self):\n        if(self.size == 0):\n            return True\n        return False\n","repo_name":"degtrdg/math-team","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40412708211","text":"import matplotlib.pyplot as plt\nimport myokit\nimport numpy as np\n\n\"\"\"\nMinor discrepancies in validation. Curves match well. 
Voltage at maximum current should be -48.4mV for hERG1a and -28.4mV for hERG1b (this code gives -48.56mV for hERG1a and -28.28mV for hERG1b). That is only off by 1 timestep (-48.44 is 1 ms earlier, -28.4 is 1 ms later), so it is probably just a solver detail, perhaps in the rounding of parameters.\n\"\"\" \n\nmodel = myokit.load_model('.\\Models\\larsen2010.mmt')\n\nlog = myokit.DataLog.load_csv('.\\Protocols\\Larsen 2010\\larsenRamp.csv')\n\nprint(log.keys())\n\ntimes = log.time()\nvoltages = log['voltage']\n\nsim = myokit.Simulation(model)\n\nplt.figure(1)\nplt.xlabel('Time (ms)')\nplt.ylabel('Voltage (mV)')\nplt.plot(times, voltages,label='Voltage')\nplt.show()\n\nhERG1bRatio = [0, 20, 40, 60, 80, 100]\nprint('-----------')\nfor i in range(len(hERG1bRatio)):\n    sim.set_constant('iKr_Markov.hERG1b',hERG1bRatio[i])\n    sim.set_fixed_form_protocol(times, voltages)\n    \n    tmax = times[-1] + 1\n    plotTimes = np.arange(0,tmax,1)\n    log2 = sim.run(tmax,log_times=plotTimes)\n    \n    plt.xlabel('Time (ms)')\n    plt.ylabel('Normalised Current (mA)')\n    plt.plot(log2['cell.V'],np.divide(log2['IKr.i_Kr'],max(log2['IKr.i_Kr'])))\n    plt.xlim((40,-80))\n    print('Voltage at max current:')\n    print(log2['cell.V'][np.argmax(log2['IKr.i_Kr'])])\n    sim.reset()\nplt.show()\n","repo_name":"mjowen/hERG1a1b-Modelling","sub_path":"Code/larsen2010validation.py","file_name":"larsen2010validation.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30349224669","text":"import sys\nfrom collections import deque\n\nn, m = map(int, input().split(' '))\nmatrix = []\n\nfor _ in range(n):\n    matrix.append(list(map(int, list(input()))))\n\ndp = [[[float(\"inf\")] * 2 for _ in range(m)] for _ in range(n)]\nvisited = [[[0] * 2 for _ in range(m)] for _ in range(n)]\ndp[0][0][0] = 0\ndp[0][0][1] = 0\nd = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n\nif n == 1 and m == 1:\n    if matrix[0][0] == 1:\n        print(-1)\n    else:\n        print(1)\n    sys.exit()\n\n\ndef solution():\n    global n, m\n\n    queue = deque([(0, 0, 0, False)])\n\n    while queue:\n        x, y, dist, broke = queue.popleft()\n\n        for dx, dy in d:\n            nx, ny = x + dx, y + dy\n\n            if 0 <= nx < n and 0 <= ny < m:\n                if broke:\n                    if matrix[nx][ny] == 1:\n                        pass\n                    else:\n                        if not visited[nx][ny][0]:\n                            dp[nx][ny][0] = min(dp[nx][ny][0], dist + 1)\n                            visited[nx][ny][0] = True\n                            queue.append((nx, ny, dist + 1, True))\n                else:\n                    if matrix[nx][ny] == 1:\n                        if not visited[nx][ny][0]:\n                            dp[nx][ny][0] = min(dp[nx][ny][0], dist + 1)\n                            visited[nx][ny][0] = True\n                            queue.append((nx, ny, dist + 1, True))\n\n                    else:\n                        if not visited[nx][ny][1]:\n                            dp[nx][ny][1] = min(dp[nx][ny][1], dist + 1)\n                            visited[nx][ny][1] = True\n                            queue.append((nx, ny, dist + 1, False))\n\n\n\nsolution()\n\nif min(dp[-1][-1]) != float(\"inf\"):\n    print(min(dp[-1][-1]) + 1)\nelse:\n    print(-1)\n","repo_name":"Kim-Young-Hoo/boj_algorithms","sub_path":"백준/Gold/2206. 
벽 부수고 이동하기/벽 부수고 이동하기.py","file_name":"벽 부수고 이동하기.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14246462273","text":"import csv\nimport os\n\n\n# Helper that reads a csv file\ndef reader(filename):\n    # First locate the current path\n    base_path = os.path.dirname(__file__)\n    # Then swap it for the test_data/ directory\n    path = base_path.replace(\"func\", \"test_data/\" + filename)\n    table_list = []\n    # Open the file; it is closed automatically afterwards\n    with open(path) as file:\n        # Read the file data\n        table = csv.reader(file)\n        num = 0\n        # Read every row in a loop and return a list; the first row is the header and is skipped\n        for row in table:\n            if num != 0:\n                table_list.append(row)\n            num = num + 1\n        return table_list\n","repo_name":"ccckaijie/test01","sub_path":"func/csv_file_message.py","file_name":"csv_file_message.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41787852349","text":"import unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import join\n\nfrom src.naive_bayes.classifier.naive_bayes import NaiveBayesClassifier\nfrom src.utility.loader import *\n\ninput_path = './data'\ntraining_images_filepath = join(input_path, 'train-images-idx3-ubyte/train-images-idx3-ubyte')\ntraining_labels_filepath = join(input_path, 'train-labels-idx1-ubyte/train-labels-idx1-ubyte')\ntest_images_filepath = join(input_path, 't10k-images-idx3-ubyte/t10k-images-idx3-ubyte')\ntest_labels_filepath = join(input_path, 't10k-labels-idx1-ubyte/t10k-labels-idx1-ubyte')\n\nclass Testing(unittest.TestCase):\n    # NaiveBayesClassifier constructor\n    def test_nb_constructor(self):\n        nb = NaiveBayesClassifier(np.arange(9), None)\n        \n        i = 0\n        for x in nb.categories:\n            self.assertEqual(x,i)\n            i += 1\n        \n    # train\n    def test_training(self):\n        # load data\n        mnist_dataloader = MnistDataloader(training_images_filepath, training_labels_filepath, test_images_filepath, test_labels_filepath)\n        # (images, labels)\n        (x_train, y_train), (x_validation, y_validation), (x_test, y_test) = mnist_dataloader.load_data()\n        nbdata = Data((x_train, y_train),(x_validation, y_validation),(x_test, y_test))\n\n        # create a NB classifier and train it\n        nb_classifier = NaiveBayesClassifier(np.arange(10), nbdata)\n        nb_classifier.train()\n        \n        self.assertTrue(nb_classifier.evaluate_on_validation_set() >= 0.83)\n        \n    # save_nb, load_nb\n    def test_save_and_load_nb(self):\n        # load data\n        mnist_dataloader = MnistDataloader(training_images_filepath, training_labels_filepath, test_images_filepath, test_labels_filepath)\n        # (images, labels)\n        (x_train, y_train), (x_validation, y_validation), (x_test, y_test) = mnist_dataloader.load_data()\n        nbdata = Data((x_train, y_train),(x_validation, y_validation),(x_test, y_test))\n        \n        # create a NB classifier and train it\n        nb_classifier = NaiveBayesClassifier(np.arange(10), nbdata)\n        nb_classifier.train()\n        \n        # save the NB classifier\n        nb_classifier.save_nb(\"test_save_nb.txt\")\n        \n        # load the NB classifier, get the deployable model\n        deployable_nb = NaiveBayesClassifier.load_nb(\"test_save_nb.txt\")\n        \n        # assert that the priors remain the same\n        self.assertTrue(all([nb_classifier.log_priors.get(k) == v for k,v in deployable_nb.log_priors.items()]))\n        # assert that the likelihoods remain the same\n        self.assertTrue(all([nb_classifier.log_likelihoods[feat,cat] == v for (feat,cat),v in deployable_nb.log_likelihoods.items()]))\n        \n    # save_nb, load_nb\n    def test_save_and_load_nb_accuracy(self):\n        # load data\n        mnist_dataloader = 
MnistDataloader(training_images_filepath, training_labels_filepath, test_images_filepath, test_labels_filepath)\n # (images, labels)\n (x_train, y_train), (x_validation, y_validation), (x_test, y_test) = mnist_dataloader.load_data()\n # for NB, we don't flatten inputs (they remain 28x28), and we don't normalise inputs (they remain in range 0-255)\n nbdata = Data((x_train, y_train),(x_validation, y_validation),(x_test, y_test))\n\n # load the NB, get the deployable model\n deployable_nb = NaiveBayesClassifier.load_nb(\"bayes.txt\")\n\n # assert that the accuracy on training and validation sets are at least 80%\n self.assertTrue(deployable_nb.evaluate_on_dataset(nbdata.training_set) >= 0.83)\n self.assertTrue(deployable_nb.evaluate_on_dataset(nbdata.validation_set) >= 0.83)\n \n\nif __name__ == '__main__':\n unittest.main()","repo_name":"losmi247/doodle-classifier","sub_path":"test/test_naive_bayes.py","file_name":"test_naive_bayes.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7730262691","text":"import datetime\nimport unittest\nimport genetic\n\n\ndef color(self, file, colors):\n (rules, nodes) = load_data(file)\n optimalValue = len(rules)\n colorLookup = {color[0]: color for color in colors}\n geneset = list(colorLookup.keys())\n startTime = datetime.datetime.now()\n nodeIndexLookup = {key: index for (index, key) in enumerate(sorted(nodes))}\n\n def fnDisplay(candidate):\n display(candidate, startTime)\n\n def fnGetFitness(genes):\n return get_fitness(genes, rules, nodeIndexLookup)\n best = genetic.get_best(fnGetFitness, len(nodes), optimalValue, geneset, fnDisplay)\n self.assertTrue((not (optimalValue > best.Fitness)))\n keys = sorted(nodes)\n for index in range(len(nodes)):\n print(((keys[index] + ' is ') + colorLookup[best.Genes[index]]))\n","repo_name":"menna161/API-Wizard","sub_path":"PyAroma/datasets/datetime/snippets/snippet885414.py","file_name":"snippet885414.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12214342187","text":"import math \r\n\r\ndef IsPrime(n):\r\n if n <= 1:\r\n return False\r\n elif n < 4:\r\n return True \r\n elif n % 2 == 0:\r\n return False \r\n elif n < 9:\r\n return True \r\n elif n % 3 == 0:\r\n return False \r\n else:\r\n r = math.floor(math.sqrt(n))\r\n f = 5 \r\n while f <= r:\r\n if n % f == 0:\r\n return False\r\n if n % (f + 2) == 0:\r\n return False\r\n f += 6\r\n return True \r\n\r\nlargestN = 0\r\nlist = [[0, 0, 0]]\r\nfor a in range(-999, 1000):\r\n for b in range(-1000, 1001):\r\n n = 0\r\n prime = True \r\n while(prime):\r\n x = n ** 2 + ( a * n ) + b \r\n prime = IsPrime(x)\r\n n += 1 \r\n if(n > largestN):\r\n largestN = n \r\n list.append([largestN, a, b])\r\n\r\nfor i in range(len(list)):\r\n if(list[i][0] == largestN):\r\n product = list[i][1] * list[i][2]\r\n print(\"\")\r\n print(product)","repo_name":"Jasasul2/privateNichePythonProjects","sub_path":"ProjectEuler/ProjectEulerProblem27.py","file_name":"ProjectEulerProblem27.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15498439385","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n max_length = 0\n left = rght = 0\n\n substring = set()\n while rght < len(s):\n while s[rght] in substring:\n substring.remove(s[left])\n left += 1\n 
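# every earlier occurrence of s[rght] has been evicted, so the window stays duplicate-free\n            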
substring.add(s[rght])\n max_length = max(max_length, rght - left + 1)\n rght += 1\n return max_length","repo_name":"Ahmad-Abdalmageed/Problem-Solving","sub_path":"LeetCodeSolutions/problems/longest_substring_without_repeating_characters/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"1698739454","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import ProductCategory, Product, ProductType\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.conf import settings\nfrom django.core.cache import cache\n# from django.views.decorators.cache import cache_page\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\n\n\ndef get_products_type(type_product):\n if settings.LOW_CACHE:\n type = type_product.split(' ')[0]\n key = f'products_{type}'\n products_type = cache.get(key) # from cache\n if products_type is None:\n products_type = Product.objects.filter(type__name=type_product, is_active=True, category__is_active=True).select_related('type').order_by(\"?\")\n cache.set(key, products_type) # added in cache\n return products_type\n else:\n return Product.objects.filter(type__name=type_product, is_active=True, category__is_active=True).select_related('type').order_by(\"?\")\n\n\ndef get_same_products(same_product):\n same_products = Product.objects.filter(category=same_product.category, is_active=True, category__is_active=True).exclude(pk=same_product.pk)\n return same_products\n\n\ndef get_links_menu():\n if settings.LOW_CACHE:\n key = 'links_menu'\n links_menu = cache.get(key) # from cache\n if links_menu is None:\n links_menu = ProductCategory.objects.filter(is_active=True)\n cache.set(key, links_menu) # added in cache\n return links_menu\n else:\n return ProductCategory.objects.filter(is_active=True)\n\n\ndef get_category(pk):\n if settings.LOW_CACHE:\n key = f'category_{pk}'\n category = cache.get(key)\n if category is None:\n category = get_object_or_404(ProductCategory, pk=pk)\n cache.set(key, category)\n return category\n else:\n return get_object_or_404(ProductCategory, pk=pk)\n\n\ndef get_types():\n if settings.LOW_CACHE:\n key = 'types'\n types = cache.get(key)\n if types is None:\n types = ProductType.objects.all()\n cache.set(key, types)\n return types\n else:\n return ProductType.objects.all()\n\n\ndef get_products():\n if settings.LOW_CACHE:\n key = 'products'\n products = cache.get(key)\n if products is None:\n products = Product.objects.filter(is_active=True, category__is_active=True).select_related('type').order_by('price')\n cache.set(key, products)\n return products\n else:\n return Product.objects.filter(is_active=True, category__is_active=True).select_related('type').order_by('price')\n\n\ndef get_product(pk):\n if settings.LOW_CACHE:\n key = f'product_{pk}'\n product = cache.get(key)\n if product is None:\n product = get_object_or_404(Product, pk=pk)\n cache.set(key, product)\n return product\n else:\n return get_object_or_404(Product, pk=pk)\n\n\ndef get_products_in_category(pk):\n if settings.LOW_CACHE:\n key = f'products_in_category_{pk}'\n products = cache.get(key)\n if products is None:\n products = Product.objects.filter(category__pk=pk, is_active=True, category__is_active=True)\\\n .order_by('price').select_related('category')\n cache.set(key, products)\n return products\n else:\n return Product.objects.filter(category__pk=pk, 
is_active=True, category__is_active=True).order_by('price').select_related()\n\n\ndef main(request):\n exclusive_product = get_products_type('Exclusive')[:2]\n trending_products = get_products_type('Trending')[:6]\n types = get_types()\n same_products = get_same_products(exclusive_product[0])[:4]\n featured_products = get_products_type('Hot deal')[:4]\n\n context = {\n 'user': request.user,\n 'title': 'interior',\n 'exclusive_product': exclusive_product,\n 'trending_products': trending_products,\n 'types': types,\n 'same_products': same_products,\n 'featured_products': featured_products,\n }\n return render(request, 'mainapp/index.html', context=context)\n\n\n# @cache_page(3600)\ndef products(request, pk=None, num=None, page=1):\n title = 'Products'\n links_menu = get_links_menu()\n types = get_types()\n exclusive_product = get_products_type('Exclusive')[:2]\n\n if pk:\n if pk == '0':\n category = {'name': 'all', 'pk': 0}\n products_list = get_products()\n else:\n category = get_category(pk)\n products_list = get_products_in_category(pk)\n\n paginator = Paginator(products_list, 3)\n try:\n products_paginator = paginator.page(page)\n except PageNotAnInteger:\n products_paginator = paginator.page(1)\n except EmptyPage:\n products_paginator = paginator.page(paginator.num_pages)\n\n context = {\n 'title': title,\n 'links_menu': links_menu,\n 'category': category,\n 'products': products_paginator,\n 'types': types,\n }\n return render(request, 'mainapp/products_list.html', context=context)\n\n elif num:\n category = get_object_or_404(ProductType, pk=num)\n products_list = Product.objects.filter(type__pk=num).filter(is_active=True, category__is_active=True).select_related('type')\n context = {\n 'title': 'products',\n 'category': category,\n 'links_menu': links_menu,\n 'products': products_list,\n 'exclusive_product': exclusive_product,\n 'types': types,\n }\n\n return render(request, 'mainapp/products.html', context=context)\n\n\ndef products_ajax(request, pk=None, page=1):\n if request.is_ajax():\n links_menu = get_links_menu()\n\n if pk:\n if pk == '0':\n category = {'name': 'all', 'pk': 0}\n products_list = get_products()\n else:\n category = get_category(pk)\n products_list = get_products_in_category(pk)\n\n paginator = Paginator(products_list, 3)\n try:\n products_paginator = paginator.page(page)\n except PageNotAnInteger:\n products_paginator = paginator.page(1)\n except EmptyPage:\n products_paginator = paginator.page(paginator.num_pages)\n\n context = {\n 'title': 'products',\n 'links_menu': links_menu,\n 'category': category,\n 'products': products_paginator,\n }\n\n result = render_to_string(\n 'mainapp/includes/inc_products_list_content.html',\n context=context,\n request=request)\n\n return JsonResponse({'result': result})\n\n\ndef contacts(request):\n types = get_types()\n context = {\n 'title': 'contacts',\n\n 'types': types,\n }\n return render(request, 'mainapp/contacts.html', context=context)\n\n\ndef product(request, pk=None):\n links_menu = get_links_menu()\n title = 'Product'\n\n product_entry = get_product(pk)\n same_product = get_same_products(product_entry)[:3]\n\n context = {\n 'title': title,\n 'links_menu': links_menu,\n 'category': product_entry.category,\n 'product': product_entry,\n 'same_product': same_product,\n }\n return render(request, 'mainapp/product.html', 
context=context)\n","repo_name":"MariaAfanaseva/Django","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6085599376","text":"import threading\n\nimport torch\nimport torch.distributed.rpc as rpc\nfrom utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format\n\nfrom .ParameterServerBase import ParameterServerBase\n\n\nclass AverageParameterServer(ParameterServerBase):\n\n    lock = threading.Lock()\n\n    def __init__(\n        self,\n        rank,\n        trainer_count,\n        use_cuda_rpc\n    ):\n        r\"\"\"\n        A parameter server that averages the gradients\n        from trainers for each training iteration step.\n        Gradients are added as they are received from trainers.\n        When all gradients have been received, the sum is\n        divided by the number of trainers.\n        Args:\n            rank (int): worker rank\n            trainer_count (int): count of trainers sending\n                gradients to the server\n            use_cuda_rpc (bool): indicator for CUDA RPC\n        \"\"\"\n        super().__init__(rank)\n\n        self.rank = rank\n        self.trainer_count = trainer_count\n        self.use_cuda_rpc = use_cuda_rpc\n\n        self.batch_number = 0\n        self.futures = {}\n        self.gradient_dict = {}\n\n    @staticmethod\n    def reset_state(server_rref):\n        r\"\"\"\n        A method that clears the state of the server.\n        Args:\n            server_rref (RRef): remote reference to the server\n        \"\"\"\n        self = server_rref.local_value()\n        self.batch_number = 0\n        self.futures.clear()\n        self.gradient_dict.clear()\n        self.clear_metrics()\n\n    def param_key(self, param_loc):\n        r\"\"\"\n        A method that returns an encoded key that represents\n        the current batch and param location.\n        Args:\n            param_loc (int): bucket location sent by the trainer\n                containing the gradient\n        \"\"\"\n        return f\"{self.batch_number},{param_loc}\"\n\n    def clear_batch_state(self):\n        r\"\"\"\n        Clears the current server batch state.\n        \"\"\"\n        self.futures.clear()\n        self.gradient_dict.clear()\n\n    def process_gradient(self, gradient, param_loc):\n        r\"\"\"\n        Stores the gradient if param_loc is not in gradient_dict.\n        Adds the gradient to param_loc if it is in gradient_dict.\n        Args:\n            gradient (torch.Tensor): tensor sent from trainer\n            param_loc (int): bucket location sent by the trainer\n                containing the gradient\n        \"\"\"\n        if param_loc not in self.gradient_dict:\n            self.record_straggler_start(self.param_key(param_loc))\n            self.record_batch_start(self.param_key(param_loc))\n            self.gradient_dict[param_loc] = gradient\n        else:\n            self.gradient_dict[param_loc] += gradient\n\n    @ParameterServerBase.record_method(name=\"average computation\")\n    def average(self, param_loc):\n        r\"\"\"\n        Obtains the tensor at the param_loc in the gradient_dict\n        and then divides it by the number of trainers.\n        Args:\n            param_loc (int): bucket location sent by the trainer\n                containing the gradient\n        \"\"\"\n        param_loc_avg = self.gradient_dict[param_loc]\n        param_loc_avg = param_loc_avg / (1.0 * self.trainer_count)\n        return param_loc_avg\n\n    @staticmethod\n    @rpc.functions.async_execution\n    def average_gradient(\n        server_rref,\n        received_batch_number,\n        param_loc,\n        gradient\n    ):\n        r\"\"\"\n        An asynchronous function that will average gradients\n        sent from trainers.\n        Args:\n            server_rref (RRef): remote reference to the server\n            received_batch_number (int): batch number sent by\n                the trainer\n            param_loc (int): bucket location sent by the trainer\n                containing the gradient\n            gradient (torch.Tensor or list): tensor sent by the trainer\n        \"\"\"\n        self = server_rref.local_value()\n        if 
type(gradient) is list:\n            gradient = sparse_rpc_format_to_tensor(gradient)\n            gradient = gradient.cuda(self.rank)\n        fut = torch.futures.Future()\n        with self.lock:\n            if self.batch_number < received_batch_number:\n                self.batch_number = received_batch_number\n                self.clear_batch_state()\n            self.process_gradient(gradient, param_loc)\n            if param_loc not in self.futures:\n                self.futures[param_loc] = []\n            self.futures[param_loc].append(fut)\n            if len(self.futures[param_loc]) == self.trainer_count:\n                self.record_straggler_end(self.param_key(param_loc))\n                param_loc_avg = self.average(param_loc)\n                if not self.use_cuda_rpc:\n                    param_loc_avg = param_loc_avg.cpu()\n                if param_loc_avg.is_sparse:\n                    param_loc_avg = sparse_tensor_to_rpc_format(param_loc_avg)\n                for cur_fut in self.futures[param_loc]:\n                    cur_fut.set_result(param_loc_avg)\n                self.record_batch_end(self.param_key(param_loc))\n        return fut\n","repo_name":"xiaoqi25478/Job","sub_path":"PyTorch框架/pytorch-master/benchmarks/distributed/rpc/parameter_server/servers/AverageParameterServer.py","file_name":"AverageParameterServer.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"73980649172","text":"import copy as c\nimport pandas\n\npandas.set_option('display.max_columns', 500)\npandas.options.display.float_format = '{:,.0f}'.format\n\n\nclass Sequence:\n    \"\"\" \n    Class representing an amino acid Sequence object \n    \"\"\"\n\n    def __init__(self, title = None, seq = None):\n        \"\"\" Creates a Sequence object \"\"\"\n\n        if title != None and seq != None:\n            self.__seq = seq # Sequence: string\n            self.__title = title #Title of the sequence\n\n    def get_acids(self):\n        \"\"\" Returns the letters of the sequence \"\"\"\n        return self.__seq \n\n    def length(self):\n        \"\"\" Returns the length of the sequence \"\"\"\n        return len(self.__seq)\n\n    def set_title(self, title):\n        \"\"\" Gives the sequence a name \"\"\"\n        self.__title = title \n    \n    def set_seq(self, seq):\n        \"\"\" updates the sequence \"\"\"\n        self.__seq = seq \n\n    def display(self):\n        \"\"\" Prints the sequence in FASTA format \"\"\"\n        print(self.__title)\n        print(self.__seq, end=\"\\n\\n\")\n\n\nclass ParserSequence:\n    \"\"\" \n    Class representing a Parser that reads a file of sequences\n    \"\"\"\n\n    def __init__(self, file_name):\n        \"\"\" Creates a Parser object \"\"\"\n\n        self.lines = open(file_name, \"r\").readlines() #Lines of the file\n        self.nb_lines = len(self.lines)\n        self.sequences = [] #List of Sequence objects\n\n    def create_seq(self, seq, title):\n        \"\"\" Adds a sequence to the list of sequences \"\"\"\n\n        new_S = Sequence() # Create a new Sequence object\n        new_S.set_seq(seq) # give it the corresponding sequence\n        new_S.set_title(title) # title of the sequence\n\n        self.sequences.append(new_S) #append the new Sequence object\n\n    def get_seq(self, i):\n        \"\"\" Returns a sequence from the list \"\"\"\n        return self.sequences[i]\n    \n    def parse(self): \n        \"\"\" Collects the sequences from the file \"\"\"\n\n        tmp = \"\"\n        title = \"\"\n        for line in self.lines: # lines of the file\n            if line[0] == \">\":\n                # print(tmp + \"\\n\")\n                if tmp != \"\": \n                    self.create_seq(tmp, title) # create a Sequence object \n                    tmp = \"\"\n\n                title = line\n            else:\n                tmp += line.strip(\"\\n\")\n\n        self.create_seq(tmp, title)\n\n\n\nclass Matrix:\n    \"\"\" \n    Class representing a Matrix object \n    \"\"\"\n\n    def __init__(self):\n        \"\"\" Creates a Matrix object \"\"\"\n\n        self.mat = []\n        self.n = 0 # columns (seq1)\n        
self.m = 0 # lines (seq2)\n        self.letters_seq1 = {} # dictionary key = letter, value = index in the matrix\n        self.letters_seq2 = {} # \"\n        self.letters_orders1 = \"\" #The column letters in order \n        self.letters_orders2 = \"\" #The row letters in order\n\n\n    def get_acid_score(self, i, j):\n        \"\"\" Returns the score of a cell identified by 2 letters \"\"\"\n        if i == \"-\" or j == \"-\":\n            return -1\n        else:\n            return self.mat[self.letters_seq1[i]][self.letters_seq2[j]]\n\n    def get_score(self, i, j):\n        \"\"\" Returns the score of the cell at mat[i][j] \"\"\"\n        return self.mat[i][j]\n\n    def set_score(self, i, j, score):\n        \"\"\" assigns a score to a cell \"\"\"\n        self.mat[i][j] = score\n\n    def addline(self):\n        \"\"\" adds a row to the matrix \"\"\"\n        self.mat.append([])\n        self.m += 1\n\n    def add_cell(self, i, score):\n        \"\"\" adds an element (score) to a row \"\"\"\n        if isinstance(score, str):\n            score = int(score)\n\n        self.mat[i].append(score)\n\n    def set_lign(self, scores):\n        \"\"\" adds a whole row of scores \"\"\"\n        self.mat.append(scores)\n\n    def inc_nb_col(self):\n        \"\"\" increases the column count by 1 \"\"\"\n        self.n += 1\n\n    def set_nb_col(self):\n        \"\"\" determines the number of columns \"\"\"\n        self.n = len(self.mat[0])\n\n    def inc_nb_lign(self):\n        \"\"\" increases the row count by 1 \"\"\"\n        self.m += 1 \n\n    def get_letters(self, i, j):\n        \"\"\" Returns the letters corresponding to cell i,j \"\"\"\n        return self.letters_orders2[i] + self.letters_orders1[j]\n\n    def set_letters_seq(self, seq1, seq2):\n        \"\"\" fills the dictionaries with\n        key: amino acid, value: index in the matrix\n        \"\"\"\n\n        seq1 = seq1.replace(\" \", \"\")\n        seq2 = seq2.replace(\" \", \"\")\n        self.letters_orders1 = seq1\n        self.letters_orders2 = seq2\n\n        self.letters_seq1 = dict(zip(seq1, (i for i in range(len(seq1)))))\n        self.letters_seq2 = dict(zip(seq2, (i for i in range(len(seq2)))))\n\n    def display(self):\n        \"\"\" Prints the matrix \"\"\"\n\n        for l in self.letters_orders1:\n            if l == \"-\":\n                print(\"\\t-\", end=\"\\t\")\n            else:\n                print(l, end= \"\\t\")\n        print(\"\\n\")\n\n        for i in range(self.m): # rows\n            print(self.letters_orders2[i], end =\"\\t\")\n            if self.letters_orders2[i] == \"-\":\n                #print(\"  \", end= \"\")\n                pass\n            for j in range(self.n): # column\n                print(self.mat[i][j], end= \"\\t\")\n            print(\"\\n\")\n        print(\"\\n\") \n\n    def panda(self):\n        \"\"\" Alternative, more compact way to print the matrix \"\"\"\n        \n        print(pandas.DataFrame(self.mat, list(self.letters_orders2), \\\n        pandas.Index(list(self.letters_orders1), name=\"*\")), end=\"\\n\\n\")\n\n    def get_max(self):\n        \"\"\" Returns the position of the maximal element of the matrix \"\"\"\n\n        maxi = -float(\"inf\") # - inf \n        i, j = 0, 0\n        for line in range(self.m):\n            current_max = max(self.mat[line])\n            if current_max > maxi:\n                maxi = current_max\n                i = line \n                j = self.mat[line].index(current_max)\n\n        return (i,j)\n\n    \n    def set_zero(self, positions):\n        \"\"\" Zeroes the given matrix elements \"\"\"\n\n        for pos in positions:\n            self.mat[pos[0]][pos[1]] = 0\n\n\nclass MatSubstitution(Matrix):\n    \"\"\" \n    Class representing a Parser that reads a file containing a substitution\n    matrix\n    This class inherits from Matrix\n    \"\"\"\n\n    def __init__(self, file_name):\n        \"\"\" Creates a Parser object \"\"\"\n\n        Matrix.__init__(self) \n        self.lines = open(file_name, \"r\").readlines() #Lines of the file\n        self.nb_lines = len(self.lines)\n\n    
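# Editor's sketch of intended use (blosum62.txt is the matrix file used in main() below):\n    #   t = MatSubstitution(\"blosum62.txt\"); t.parse(); t.get_acid_score(\"A\", \"R\")\n    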
def get_mat_sub(self):\n        \"\"\" Returns the parsed substitution matrix \"\"\"\n\n        return self.mat\n\n    def parse(self): \n        \"\"\" Collects the matrix from the file \"\"\"\n\n        i = 0\n        for line in self.lines: # lines of the file\n\n            line = line.strip(\"\\n\")\n            if len(line) > 0:\n                if line[0] == \" \":\n                    self.set_letters_seq(line, line) # letters of seq 1 and 2\n                else:\n                    if line[0] != \"#\":\n                        self.addline()\n                        all_line = line.split(\" \")\n                        for l in all_line:\n                            if l != \"\" and not l.isalpha() and l != \"*\":\n                                self.add_cell(i, l)\n                        i += 1\n\n        self.set_nb_col()\n\n\nclass MatScoring(Matrix):\n    \"\"\" Class representing a matrix holding the scores with an affine\n    gap penalty\n    This class inherits from Matrix\n    \"\"\" \n\n    def __init__(self, I, E, n, m):\n        Matrix.__init__(self)\n\n        self.n = n \n        self.m = m \n        self.I = I\n        self.E = E \n\n    def init_S_global(self):\n        \"\"\" Initialises the first row and first column of S for the global method \"\"\"\n\n        first_col = [0]\n        for i in range(self.m):\n\n            newline = []\n            if i == 0:\n                newline += [0, -self.I] # first row [0, -I, -I-E, ...]\n                for j in range(2, self.n):\n                    newline.append(newline[j-1] - self.E)\n                \n\n            elif i == 1:\n                newline += [-self.I] + (self.n-1)*[\"\"]\n                first_col += [-self.I]\n            else:\n                first_col.append(first_col[i-1] - self.E)\n                newline += [first_col[i]] + (self.n-1)*[\"\"]\n            \n            self.set_lign(newline)\n\n    def init_S_local(self):\n        \"\"\" Initialises the first row and first column of S for the local method \"\"\"\n\n        for i in range(self.m):\n            newline = []\n            if i == 0:\n                newline += [0]*self.n \n            else:\n                newline += [0] + (self.n-1)*[\"\"]\n\n            self.set_lign(newline)\n\n\nclass MatV(Matrix):\n    \"\"\" Class representing a matrix used to store row-related values \n    while computing the scoring matrix \n    This class inherits from Matrix\n    \"\"\"\n\n    def __init__(self, n, m):\n        Matrix.__init__(self)\n\n        self.n = n \n        self.m = m \n\n    def init_V(self):\n        \"\"\" Initialises the first row and first column of V \"\"\"\n\n        for i in range(self.m):\n            if i == 0:\n                self.set_lign([-float(\"inf\")]*self.n) # -inf -inf -inf ...\n            else:\n                self.set_lign([0] + (self.n-1)*[\"\"]) # 0 ...\n\n\nclass MatW(Matrix):\n    \"\"\" Class representing a matrix used to store column-related values \n    while computing the scoring matrix \n    This class inherits from Matrix\n    \"\"\"\n\n    def __init__(self, n, m):\n        Matrix.__init__(self)\n\n        self.n = n\n        self.m = m \n\n    def init_W(self):\n        \"\"\" Initialises the first row and first column of W \"\"\"\n\n        for i in range(self.m):\n            if i == 0:\n                self.set_lign([-float(\"inf\")] + [0]*(self.n-1)) # -inf 0 0 0 0 ...\n            else:\n                self.set_lign([-float(\"inf\")] + (self.n-1)*[\"\"]) # 0 ... 
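(row seeded with -inf in column 0; the rest is filled in during scoring)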
\n\n\nclass Alignment:\n    \"\"\" Class representing an object that finds the alignment of 2 amino acid\n    sequences\n    \"\"\"\n\n    def __init__(self, I, E, mat_file, seq1, seq2, p):\n        \n        self.I = I \n        self.E = E\n        self.p = p\n        self.seq1 = seq1\n        self.seq2 = seq2\n        self.n = self.seq1.length()+1\n        self.m = self.seq2.length()+1\n\n        if self.p == 1 or p == 2: # If we want to print the information\n            print(\"Sequence 1 of length {0}: \".format(self.n))\n            self.seq1.display() \n            print(\"Sequence 2 of length {0}: \".format(self.m))\n            self.seq2.display()\n            print(\"substitution matrix used: {0}\".format(mat_file))\n            print(\"Affine gap penalty: I = {0} | E = {1}\".format(self.I, self.E))\n\n        # =================== SUBSTITUTION ==============================\n        self.t = MatSubstitution(mat_file)\n        self.t.parse()\n\n        # =================== SCORING ===============================\n\n        self.S = MatScoring(I, E, self.n, self.m) \n        # create a scoring matrix object\n\n        # ===================== V AND W ===================================\n\n        self.V = MatV(self.n, self.m)\n        self.W = MatW(self.n, self.m)\n\n        self.V.init_V() # Initialise V\n        self.V.set_letters_seq(\"-\"+self.seq1.get_acids(), \"-\"+self.seq2.get_acids())\n        self.W.init_W() # Initialise W\n        self.W.set_letters_seq(\"-\"+self.seq1.get_acids(), \"-\"+self.seq2.get_acids())\n\n        self.current_sol = [] # For backtracking\n        self.all_solutions = []\n\n    def get_v(self, i, j):\n        return max( self.S.get_score(i-1, j) - self.I, \n                    self.V.get_score(i-1,j) - self.E )\n\n    def get_w(self, i, j):\n        return max( self.S.get_score(i, j-1) - self.I,\n                    self.W.get_score(i, j-1) - self.E )\n\n    def is_previous(self, mat, i, j):\n        \"\"\" Checks whether element ij results from the diagonal element,\n        the one above, or the one to the left \n        \"\"\"\n\n        res = False\n        if mat == \"v\":\n            if self.V.get_score(i,j) == self.S.get_score(i,j):\n                res = True \n\n        elif mat == \"w\":\n            if self.W.get_score(i,j) == self.S.get_score(i,j):\n                res = True \n\n        elif mat == \"s\":\n            letters_ij = self.S.get_letters(i,j) # 'AB' for example\n            t_ij = self.t.get_acid_score(letters_ij[0], letters_ij[1]) # t(i,j)\n\n            if self.S.get_score(i-1, j-1) + t_ij == self.S.get_score(i,j):\n                res = True \n\n        return res \n\n\n\nclass GlobalAlignment(Alignment):\n    \"\"\"\n    Class that finds an alignment of amino acid sequences with\n    an affine penalty and a global method\n    \"\"\"\n\n    def __init__(self, k, I, E, mat_file, seq1, seq2, p):\n        Alignment.__init__(self, I, E, mat_file, seq1, seq2, p)\n        self.k = k\n\n        self.S.init_S_global() # Initialise the scoring matrix\n        self.S.set_letters_seq(\"-\"+self.seq1.get_acids(), \"-\"+self.seq2.get_acids())\n\n    def update_s_global(self, i, j, v_ij, w_ij):\n        letters_ij = self.S.get_letters(i,j) # 'AB' for example\n        t_ij = self.t.get_acid_score(letters_ij[0], letters_ij[1])\n\n        s_ij = max( self.S.get_score(i-1,j-1) + t_ij, v_ij, w_ij )\n        self.S.set_score(i,j, s_ij)\n\n    def Needleman_Wunsch(self):\n        \"\"\" \n        Algorithm that computes the scoring matrix for the global alignment \n        using the affine penalty, then backtracks to collect\n        all possible optimal alignments\n        \"\"\"\n\n        # ==================== SCORING CONSTRUCTION ===========================\n\n        for i in range(1, self.m):\n            for j in range(1, self.n):\n                v_ij = self.get_v(i, j) # V(i,j)\n                self.V.set_score(i,j, v_ij)\n                w_ij = self.get_w(i, j)\n                self.W.set_score(i,j, w_ij) # W(i,j)\n\n                self.update_s_global(i, j, v_ij, w_ij)\n\n        if self.p == 2: # If we want to display the matrices\n            
self.V.display()\n            self.W.panda()\n            self.S.panda()\n\n        self.current_sol.append((self.m-1, self.n-1)) \n        self.backtracking_global(self.m-1, self.n-1) #call on the element in the last\n        #row, last column\n\n        \n        R = Result(self.S, self.t, self.all_solutions, self.p)\n        res = R.compute_result()\n        return res\n\n    def backtracking_global(self, i, j):\n        \"\"\" Walks back up the scoring matrix from the last element to [0][0]\n        to obtain the k alignments\n        \"\"\"\n\n        if i == 0 or j == 0:\n            if i == 0 and j == 0:\n                if len(self.current_sol) > 0:\n                    self.current_sol.pop() # If we are at (0,0)\n                if len(self.all_solutions) == self.k: # If k alignments were already found\n                    return \n                if self.current_sol not in self.all_solutions:\n                    #print(\"1 solution found: \", self.current_sol)\n                    self.all_solutions.append(c.deepcopy(self.current_sol)) \n\n        else:\n            for pos in range(3):\n\n                new_i = i \n                new_j = j \n                valid = False \n                if pos == 0 and self.is_previous(\"v\", i, j): # up\n                    new_i -= 1 # i-1\n                    valid = True \n                elif pos == 1 and self.is_previous(\"w\", i, j): # left\n                    new_j -= 1 # j-1 \n                    valid = True \n                elif pos == 2 and self.is_previous(\"s\", i, j): # diagonal\n                    new_i -= 1 # i - 1\n                    new_j -= 1 # j - 1\n                    valid = True \n\n                if valid: \n                    self.current_sol.append((new_i, new_j))\n                    self.backtracking_global(new_i, new_j) # recurse on the next cell\n                    if len(self.current_sol) != 0:\n                        self.current_sol.pop() # discard the partial solution\n\n\n\nclass LocalAlignment(Alignment):\n    \"\"\"\n    Class that finds an alignment of amino acid sequences with\n    an affine penalty and a local method\n    \"\"\"\n\n    def __init__(self, l, I, E, mat_file, seq1, seq2, p):\n        Alignment.__init__(self, I, E, mat_file, seq1, seq2, p)\n        self.l = l\n\n        self.S.init_S_local() # Initialise the scoring matrix\n        self.S.set_letters_seq(\"-\"+self.seq1.get_acids(), \"-\"+self.seq2.get_acids())\n\n        self.zeros = []\n        self.found = False\n\n    def update_s_local(self, i, j, v_ij, w_ij):\n        \"\"\" determines the value of S from V, W and t\"\"\"\n\n        letters_ij = self.S.get_letters(i,j) # 'AB' for example\n        t_ij = self.t.get_acid_score(letters_ij[0], letters_ij[1])\n        s_ij = max( self.S.get_score(i-1,j-1) + t_ij, v_ij, w_ij, 0 )\n        self.S.set_score(i,j, s_ij) \n\n    \n    def compute_scoring(self, start_i, start_j):\n        \"\"\" Recomputes the scoring\n        matrix for local alignments with an affine penalty \n        after a local alignment has been found\n        \"\"\"\n\n        # ==================== SCORING CONSTRUCTION ===========================\n\n        for i in range(start_i, self.m):\n            for j in range(start_j, self.n):\n                if (i,j) not in self.zeros:\n                    v_ij = self.get_v(i, j) # V(i,j)\n                    self.V.set_score(i,j, v_ij)\n                    w_ij = self.get_w(i, j) # W(i,j)\n                    self.W.set_score(i,j, w_ij)\n\n                    self.update_s_local(i, j, v_ij, w_ij)\n\n        if self.p == 2:\n            print(\"the Scoring matrix: \")\n            self.S.panda()\n\n    \n    def sol_found(self, i,j):\n        \"\"\" Determines whether a local alignment is finished \"\"\"\n\n        return (i == 0 or j == 0 or self.S.get_score(i,j) == 0)\n\n    def bottom_up(self, i,j):\n        \"\"\" Walks back up the scoring matrix from its maximal element\n        to an element of the first row or first column,\n        or to an element equal to 0\n        \"\"\"\n        self.current_sol.append((i,j))\n        while not self.sol_found(i,j):\n            if self.is_previous(\"v\", i, j): # up\n                i-=1\n            elif self.is_previous(\"w\", i, j): # left\n                j-=1\n            elif self.is_previous(\"s\", i, j): # diagonal\n                i-=1\n                j-=1\n            if self.S.get_score(i,j) != 0:\n                self.current_sol.append((i,j))\n\n        
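# record the finished local alignment; its cells go into self.zeros so the rescoring pass skips them (Waterman-Eggert style declumping)\n        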
self.all_solutions.append(self.current_sol)\n        self.zeros += self.current_sol\n        self.current_sol = []\n\n    \n    def Smith_Waterman(self):\n        self.compute_scoring(1,1)\n\n        for i in range(self.l):\n            current_max = self.S.get_max()\n            self.bottom_up(current_max[0], current_max[1])\n            if i == self.l-1: # If l solutions were found\n                break\n            # once the backtrack is done, zero out the alignment \n            self.S.set_zero(self.all_solutions[-1])\n            self.V.set_zero(self.all_solutions[-1])\n            self.W.set_zero(self.all_solutions[-1])\n            if self.p == 2:\n                print(\"zeroing: \")\n                self.S.panda()\n                self.V.panda()\n                self.W.panda()\n\n                print(\"recomputing the matrix: \")\n            \n\n            self.compute_scoring(self.all_solutions[-1][-1][0], \\\n            self.all_solutions[-1][-1][1])\n\n        R = Result(self.S, self.t, self.all_solutions, self.p)\n        return R.compute_result()\n\n\n\nclass Result:\n    \"\"\" Class representing a Result object in which the 2 sequences are aligned\n    according to the scoring matrix obtained\n    \"\"\"\n\n    def __init__(self, S, t, all_sol, p):\n        self.S = S # scoring matrix \n        self.t = t # substitution matrix\n        self.all_solutions = all_sol\n        self.gap = \"-\"\n        self.p = p # Whether or not the result should be printed\n    \n    def compute_result(self):\n        \"\"\" finds the correspondences between the letters of the alignment \"\"\"\n        \n        scores_sim = [] # stores the similarity score of each result\n\n        for sol in self.all_solutions:\n            used = {} # dictionary of indices already used \n            # key = columns, value = rows\n            res = []\n            for pos in range(len(sol)-1, -1, -1):\n                \n                letters = self.S.get_letters(sol[pos][0], sol[pos][1])\n                score = self.t.get_acid_score(letters[0], letters[1])\n                if sol[pos][0] in used.values():\n                    letters = self.gap + letters[1] # '-B' for example\n                if sol[pos][1] in used.keys():\n                    letters = letters[0] + self.gap # 'B-' for example\n\n                res.append((letters, score)) # Append its score as well\n\n                used[sol[pos][1]] = sol[pos][0]\n\n            #self.bind(res)\n            scores_sim.append(self.bind(res))\n        \n        return scores_sim\n\n\n    def bind(self, seq):\n        \"\"\" creates the link between the 2 sequences (similarity, identity, ...)\n        as well as the scores and percentages\n        \"\"\"\n\n        # print(seq)\n        seq1, seq2, links = \"\", \"\", \"\"\n        for i in range(len(seq)):\n            # print(\"seq: \", seq[i][0], \" score: \", seq[i][1])\n            seq1 += seq[i][0][1] # 2nd letter\n            seq2 += seq[i][0][0] # 1st letter\n\n            if seq1[i] == self.gap or seq2[i] == self.gap:\n                links += \" \" # No correspondence\n            else:\n                if seq1[i] == seq2[i]:\n                    links += \":\" # Identical\n                elif seq[i][1] >= 0: # If the score is positive => similar\n                    links += \".\"\n                else:\n                    links += \" \"\n        \n        similarity = ((links.count(\".\")+links.count(\":\"))/len(links)) * 100 # similarity percentage\n        identity = (links.count(\":\")/len(links)) * 100 # identity percentage\n        \n        if self.p == 1 or self.p == 2:\n            self.print_result(seq1, seq2, links, similarity, identity)\n        \n        return similarity\n\n    \n    def print_result(self, seq1, seq2, links, similarity, identity):\n        \"\"\" Pretty-prints the 2 aligned sequences \n        seq has the form: [('AB', scoreAB), .. 
]\n \"\"\"\n\n print(\"\\n>>> Alignement: \")\n\n i, j = 0, 0\n while i < (len(seq1) // 60):\n print(seq1[j:j+60]+\"\\n\"+links[j:j+60]+\"\\n\"+seq2[j:j+60]+\"\\n\\n\")\n i += 1\n j += 60\n end = len(seq1) - len(seq2)%60\n print(seq1[end:]+\"\\n\"+links[end:]+\"\\n\"+seq2[end:]+\"\\n\\n\")\n\n print(\"==> similarité: {0} %\".format(similarity))\n print(\"==> Identité: {0} %\".format(identity))\n\n\n\ndef main():\n # seq1 = Sequence(\"Séquence 1 de l'exemple\",\"AZAP\")\n # seq2 = Sequence(\"Séquence 2 de l'exemple\", \"AI\")\n # G0 = GlobalAlignment(3, 12, 2, \"blosum62.txt\", seq1, seq2, 2)\n # G0.Needleman_Wunsch()\n\n P2 = ParserSequence(\"protein-sequences.fasta\")\n P2.parse()\n\n # seq1 = Sequence(\"Séquence 1 de l'exemple\",\"ISALIGNED\")\n # seq2 = Sequence(\"Séquence 2 de l'exemple\", \"THISLINE\")\n # L0 = LocalAlignment(2, 4, 4, \"blosum62.txt\", seq1, seq2, 2)\n # L0.Smith_Waterman()\n\n L3 = LocalAlignment(3, 12, 2, \"blosum62.txt\", P2.get_seq(0), P2.get_seq(1), 1)\n L3.Smith_Waterman()\n\nif __name__ == \"__main__\":\n main()","repo_name":"AlexandreHnf/Projet_Bioinfo","sub_path":"Projet part 1/jupyter/AlignmentPart1.py","file_name":"AlignmentPart1.py","file_ext":"py","file_size_in_byte":23664,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16758855094","text":"from collections import deque\r\n\r\nnums_pumps = int(input())\r\npumps = deque()\r\n\r\nfor i in range(nums_pumps):\r\n pump = [int(x) for x in input().split(\" \")]\r\n pumps.append(pump)\r\n\r\nfor j in range(nums_pumps):\r\n total = 0\r\n found = True\r\n for pair in pumps:\r\n total = total + pair[0] - pair[1]\r\n if total < 0:\r\n pumps.append(pumps.popleft())\r\n found = False\r\n break\r\n if found:\r\n print(j)\r\n break\r\n","repo_name":"nikichhh/SoftUni-Python","sub_path":"SoftUni_Python_Advanced_and_OOP/1. Advanced/1. 
Lists as Stacks and Queues/exercise/5_truck_tour.py","file_name":"5_truck_tour.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41102074646","text":"\n# coding: utf-8\n\n# # Box Office\n\n# In[143]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#data = pd.read_csv('movie_info_2016.csv')\n\n\n# In[117]:\n\n\n# clean the data, convert the box office figures to numeric\n# delete the rows whose value is '-'\n# the data is the final dataframe obtained from the web scraping\ndef clean_data(data):\n    data = data[data['Box Office'] != '-']\n    data = data[data['Genre'] != '-']\n    data['Critic Rating'] = data[data['Critic Rating'] != '-']['Critic Rating'].apply(pd.to_numeric)\n    data['Critic Numbers'] = data[data['Critic Numbers'] != '-']['Critic Numbers'].apply(pd.to_numeric)\n    data['User Rating'] = data[data['User Rating'] != '-']['User Rating'].apply(pd.to_numeric)\n    data['User Numbers'] = data[data['User Numbers'] != '-']['User Numbers'].apply(pd.to_numeric)\n    data['Box Office'] = data['Box Office'].apply(lambda x: x.strip('$').replace(',','')).apply(pd.to_numeric)\n    return data\n\ndef box_office_visual(dataframe):\n    # firstly clean the data\n    data = clean_data(dataframe)\n    revenue = data[['Critic Rating', 'Critic Numbers', 'User Rating', 'User Numbers', 'Box Office']]\n    \n    fig = plt.figure(figsize=(12,12))\n    ax1 = plt.subplot(2,2,1)\n    ax1 = sns.regplot(x='Critic Rating', y='Box Office', data=revenue, x_jitter=.1)\n    plt.title('Box Office by Critic Rating',fontsize=15)\n    plt.xlabel('Critic Rating',fontsize=13)\n    plt.ylabel('Box office',fontsize=13)\n\n    ax2 = plt.subplot(2,2,2)\n    ax2 = sns.regplot(x='Critic Numbers', y='Box Office', data=revenue, x_jitter=.1,color='g',marker='+')\n    plt.title('Box office by Critic Numbers',fontsize=15)\n    plt.xlabel('Critic Numbers',fontsize=13)\n    plt.ylabel('Box office',fontsize=13)\n\n    ax3 = plt.subplot(2,2,3)\n    ax3 = sns.regplot(x='User Rating', y='Box Office', data=revenue, x_jitter=.1,color='g',marker='+')\n    plt.title('Box office by User Rating',fontsize=15)\n    plt.xlabel('User Rating',fontsize=13)\n    plt.ylabel('Box office',fontsize=13)\n\n    ax4 = plt.subplot(2,2,4)\n    ax4 = sns.regplot(x='User Numbers', y='Box Office', data=revenue, x_jitter=.1,color='g',marker='+')\n    plt.title('Box office by User Numbers',fontsize=15)\n    plt.xlabel('User Numbers',fontsize=13)\n    plt.ylabel('Box office',fontsize=13)\n    \n    plt.savefig(\"Box_Office_Visual.jpg\")\n    \n# input is the final dataframe \ndef corr_heatmap(dataframe):\n    data = clean_data(dataframe)\n    data_corr = data[['Critic Rating','Critic Numbers','User Rating','User Numbers','Box Office']].corr()\n    f, ax = plt.subplots(figsize=(10,7))\n    sns.heatmap(data_corr,cbar=True, annot=True,vmax=.8, cmap='PuBu',square=True)\n    plt.savefig(\"corr_heatmap.jpg\")\n\n\n# # Movie Genre\n\n# In[140]:\n\n\n# create the set of all genres\n# input is the final data table\ndef all_genre_set(data):\n    data = clean_data(data)\n    movie_genre = data['Genre']\n    genre = set()  \n    genre_set = set()\n    for item in movie_genre:\n        genre.update(str(item).strip().split(','))\n    for item in genre:\n        genre_set.add(str(item).strip())\n    genre_set.remove('nan')\n    return genre_set\n\ndef movie_genre_df(dataframe):\n    data = clean_data(dataframe)\n    genre_set = all_genre_set(dataframe)\n    genre_df = pd.DataFrame()\n    for genre in genre_set:\n        genre_df[genre] = data['Genre'].str.contains(genre).map(lambda x: 1 if x else 0)\n    return genre_df\n\n\ndef genre_count_visual(dataframe,year:str): \n    genre_df = movie_genre_df(dataframe)\n    genre_sum = genre_df.sum().sort_values(ascending = False)\n    fig = plt.figure(figsize = (10,10))\n    ax = plt.subplot(1,1,1)\n    ax = genre_sum.plot.bar()\n    plt.xticks(rotation=70)\n    plt.title(f'Film genre in {year}',fontsize = 18)\n    plt.xlabel('genre', fontsize = 18)\n    plt.ylabel('count', fontsize =18)\n    plt.tight_layout()\n    plt.savefig(\"genre_count_visual.jpg\")\n\ndef genre_profit_visual(dataframe,year:str):\n    genre_set = all_genre_set(dataframe)\n    data = clean_data(dataframe)\n    profit_by_genre = pd.Series(index = list(genre_set))\n    genre_df = movie_genre_df(dataframe)\n    genre_df['Box Office'] = data['Box Office']\n    genre_sum = genre_df.sum().sort_values(ascending = False)\n    \n    for genre in genre_set:\n        profit_by_genre.loc[genre] = genre_df[genre_df[genre] == 1]['Box Office'].sum()/genre_sum[genre]\n    # plot the revenue of movie genre\n    profit_by_genre = profit_by_genre.sort_values(ascending =False)\n    \n    fig = plt.figure(figsize = (10,10))\n    ax = plt.subplot(1,1,1)\n    ax = profit_by_genre.plot.bar()\n    plt.xticks(rotation=70)\n    plt.title(f'Film genre in {year}',fontsize = 18)\n    plt.xlabel('genre', fontsize = 18)\n    plt.ylabel('average profit', fontsize =18)\n    plt.tight_layout()\n    plt.savefig(\"genre_profit_visual.jpg\")\n\n\n# # Studio Analysis\n\n# In[163]:\n\n\n# compare studio revenue\ndef compare_studio_revenue(dataframe):\n    company_list = ['Universal Pictures', 'Paramount Pictures', '20th Century Fox','Sony','Warner']\n    company_df = pd.DataFrame()\n    genre_df = movie_genre_df(dataframe)\n    data = clean_data(dataframe)\n    \n    for company in company_list:\n        company_df[company]=data['Studio'].str.contains(company).map(lambda x:1 if x else 0)\n    company_df = pd.concat([company_df,genre_df.iloc[:,:-1],data['Box Office']],axis=1)\n    studio = pd.DataFrame(index=company_list,columns=company_df.columns[5:])\n    \n    for item in company_list:\n        if company_df[item].sum() > 0:   \n            studio.loc[item]=company_df.groupby(item,as_index=False).sum().iloc[1,5:]\n    \n    fig = plt.figure(figsize=(8,8))\n    ax = fig.add_subplot(111)\n    studio['Box Office'].sort_values(ascending = False).plot(ax=ax,kind='bar')\n    plt.xticks(rotation=60)\n    plt.title('Studio comparison')\n    plt.ylabel('Box office')\n    plt.savefig(\"compare_studio_revenue.jpg\")\n    \ndef studio_different_genre(dataframe):\n    company_list = ['Universal Pictures', 'Paramount Pictures', '20th Century Fox','Sony','Warner']\n    company_df = pd.DataFrame()\n    genre_df = movie_genre_df(dataframe)\n    data = clean_data(dataframe)\n    \n    for company in company_list:\n        company_df[company]=data['Studio'].str.contains(company).map(lambda x:1 if x else 0)\n    company_df = pd.concat([company_df,genre_df.iloc[:,:-1],data['Box Office']],axis=1)\n    studio = pd.DataFrame(index=company_list,columns=company_df.columns[5:])\n    \n    for item in company_list:\n        if company_df[item].sum() > 0:   \n            studio.loc[item]=company_df.groupby(item,as_index=False).sum().iloc[1,5:]   \n    \n    plt.figure(figsize=(20,15))\n    for i in range(len(company_list)):\n        item = company_list[i]\n        if company_df[item].sum() > 0:  \n            a = studio.loc[item].iloc[:-1]\n            a['others'] = a.sort_values(ascending=False).iloc[8:].sum()\n            a = a.sort_values(ascending=True).iloc[-9:]\n            a = a[a>0]\n            \n            plt.subplot(2,3,1+i)\n            plt.pie(a, 
labels=a.index, autopct='%.2f%%',startangle=90,pctdistance=0.75)\n plt.title(f'{item}',fontsize=15)\n plt.savefig(\"studio_different_genre.jpg\")\n\n","repo_name":"XiaoxuanXia/Project","sub_path":"Data_Visualization.py","file_name":"Data_Visualization.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4909526076","text":"\"\"\"Task bout goods and tuples\"\"\"\ngoods = []\ndef fill_database():\n i = 1\n ans = input(f'Enter new position? Yes/No: ').lower()\n while ans == 'yes':\n tur = (i, {'name': input(f'Enter new name: '),\n 'price': input(f'Enter new price: '),\n 'amount': input(f'Enter amount: '),\n 'units': input(f'Enter units: ')})\n goods.append(tur)\n i += 1\n ans = input(f'Enter new position? Yes/No: ').lower()\n return goods\n\n\ndef goods_analytics(goods):\n temp_names = []\n temp_prices = []\n temp_quantities = []\n temp_unit = []\n for element in goods:\n temp_names.append(element[1]['name'])\n temp_prices.append(element[1]['price'])\n temp_quantities.append(element[1]['amount'])\n temp_unit.append(element[1]['units'])\n result_dict = {'name': temp_names, 'price': temp_prices,\n 'amount': temp_quantities, 'units': temp_unit}\n return [f'\\t{el}\\n' for el in result_dict.items()]\n\n\nfill_database()\nprint(*goods_analytics(goods))","repo_name":"VikkMoor/py_seminars","sub_path":"sem_three/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9479408367","text":"import os\nimport datetime\n\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom django.core.files.images import ImageFile\nfrom django.contrib.auth.models import User\n\nfrom components.prizes.models import RafflePrize, RaffleTicket, RaffleDeadline\nfrom components.floors.models import Dorm, Floor\n\nclass RafflePrizeTests(TestCase):\n \"\"\"\n Tests the RafflePrize model.\n \"\"\"\n def setUp(self):\n \"\"\"\n Sets up a test individual prize for the rest of the tests.\n This prize is not saved, as the round field is not yet set.\n \"\"\"\n self.saved_rounds = settings.COMPETITION_ROUNDS\n start = datetime.date.today()\n end = start + datetime.timedelta(days=7)\n \n settings.COMPETITION_ROUNDS = {\n \"Round 1\" : {\n \"start\": start.strftime(\"%Y-%m-%d\"),\n \"end\": end.strftime(\"%Y-%m-%d\"),\n },\n }\n \n # Create a test user\n self.user = User.objects.create_user(\"user\", \"user@test.com\", password=\"changeme\")\n \n # Set up raffle deadline\n self.deadline = RaffleDeadline(\n round_name=\"Round 1\", \n pub_date=datetime.datetime.today() - datetime.timedelta(hours=1),\n end_date=datetime.datetime.today() + datetime.timedelta(days=5),\n )\n self.deadline.save()\n \n image_path = os.path.join(settings.PROJECT_ROOT, \"fixtures\", \"test_images\", \"test.jpg\")\n image = ImageFile(open(image_path, \"r\"))\n self.prize = RafflePrize(\n title=\"Super prize!\",\n description=\"A test prize\",\n image=image,\n value=5,\n deadline=self.deadline,\n )\n \n def testTicketAllocation(self):\n \"\"\"\n Tests that a user can allocate a ticket.\n \"\"\"\n self.prize.round_name = \"Round 1\"\n self.prize.save()\n \n profile = self.user.get_profile()\n profile.add_points(25, datetime.datetime.today(), \"test\")\n profile.save()\n \n # Add a ticket to the prize\n self.assertEqual(profile.available_tickets(), 1, \"User should have one raffle ticket.\")\n 
self.prize.add_ticket(self.user)\n self.assertEqual(profile.available_tickets(), 0, \"User should not have any raffle tickets.\")\n self.assertEqual(self.prize.allocated_tickets(), 1, \"1 ticket should be allocated to this prize.\")\n self.assertEqual(self.prize.allocated_tickets(self.user), 1, \"1 ticket should be allocated by this user to this prize.\")\n \n # Have another user add a ticket to the prize.\n user2 = User.objects.create_user(\"user2\", \"user2@test.com\", password=\"changeme\")\n \n profile = user2.get_profile()\n profile.add_points(25, datetime.datetime.today(), \"test\")\n profile.save()\n \n # Add a ticket to the prize\n self.prize.add_ticket(user2)\n self.assertEqual(self.prize.allocated_tickets(), 2, \"2 tickets should be allocated to this prize.\")\n self.assertEqual(self.prize.allocated_tickets(user2), 1, \"1 ticket should be allocated by this user to this prize.\")\n \n # Add another ticket to the prize.\n profile.add_points(25, datetime.datetime.today(), \"test\")\n profile.save()\n \n self.prize.add_ticket(user2)\n self.assertEqual(self.prize.allocated_tickets(), 3, \"3 tickets should be allocated to this prize.\")\n self.assertEqual(self.prize.allocated_tickets(user2), 2, \"2 tickets should be allocated by this user to this prize.\")\n \n # Remove a ticket from the prize.\n self.prize.remove_ticket(self.user)\n self.assertEqual(self.prize.allocated_tickets(), 2, \"2 tickets should be allocated to this prize.\")\n self.assertEqual(self.prize.allocated_tickets(self.user), 0, \"No tickets should be allocated by this user to this prize.\")\n \n self.prize.remove_ticket(user2)\n self.assertEqual(self.prize.allocated_tickets(), 1, \"1 ticket should be allocated to this prize.\")\n self.assertEqual(self.prize.allocated_tickets(user2), 1, \"1 ticket should be allocated by this user to this prize.\")\n \n def tearDown(self):\n \"\"\"\n Deletes the created image file in prizes.\n \"\"\"\n settings.COMPETITION_ROUNDS = self.saved_rounds\n self.prize.image.delete()\n self.prize.delete()","repo_name":"keokilee/makahiki","sub_path":"makahiki/apps/components/prizes/tests/raffle_tests.py","file_name":"raffle_tests.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"37458224856","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom graphene.types.resolver import dict_resolver\nimport graphene\nimport sys\nimport os\nimport json\nimport re\n\nfrom pprint import pprint\n\n# Articut Result 檔案路徑\nresultFilePath = ''\n\n\n\"\"\"\nArticut GraphQL Schema\n\"\"\"\nclass Persons(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n\nclass Nouns(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n\nclass Numbers(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n\nclass Sites(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n\nclass Userdefined(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n\nclass Meta(graphene.ObjectType):\n class Meta:\n default_resolver = 
dict_resolver\n \n lang = graphene.String()\n description = graphene.String()\n\nclass Tokens(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n pos_ = graphene.String()\n tag_ = graphene.String()\n isStop = graphene.Boolean()\n isEntity = graphene.Boolean()\n isVerb = graphene.Boolean()\n isTime = graphene.Boolean()\n isClause = graphene.Boolean()\n isKnowledge = graphene.Boolean()\n\nclass Ents(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n persons = graphene.List(Persons)\n nouns = graphene.List(Nouns)\n numbers = graphene.List(Numbers)\n sites = graphene.List(Sites)\n #userdefined = graphene.List(Userdefined)\n\nclass Doc(graphene.ObjectType):\n class Meta:\n default_resolver = dict_resolver\n \n text = graphene.String()\n tokens = graphene.List(Tokens)\n ents = graphene.Field(Ents)\n\nclass Nlp(graphene.ObjectType):\n meta = graphene.Field(Meta)\n doc = graphene.Field(Doc)\n\nclass Query(graphene.ObjectType):\n nlp = graphene.Field(\n Nlp,\n filepath = graphene.String(),\n model = graphene.String()\n )\n \n def resolve_nlp(self, info, filepath, model):\n if model != \"TW\":\n return Nlp(\n meta = {\n \"lang\": model,\n \"description\": 'Articut-GraphQL Model Unsupported.'\n }\n )\n \n if filepath[-5:] == '.json':\n try:\n with open(filepath, 'r', encoding='utf-8') as resultFile:\n result = json.loads(resultFile.read())\n textTagLIST = posList2TextTag(result[\"result_pos\"])\n \n return Nlp(\n meta = {\n \"lang\": model,\n \"description\": 'Articut GraphQL Query Result.'\n },\n doc = {\n \"text\": result[\"result_segmentation\"].replace('/', ''),\n \"tokens\": getTokens(textTagLIST),\n \"ents\": getEnts(textTagLIST)\n }\n )\n except Exception as e:\n print('[Articut-GraphQL ERROR] {}'.format(e))\n return Nlp(\n meta = {\n \"lang\": model,\n \"description\": 'Articut-GraphQL Error.'\n }\n )\n\n\n\"\"\"\nUsed by ArticutAPI.py\n\"\"\"\nclass GraphQL():\n def query(self, filePath, query=\"\"\"\n {\n meta {\n lang\n description\n }\n doc {\n text\n tokens {\n text\n pos_\n tag_\n isStop\n isEntity\n isVerb\n isTime\n isClause\n isKnowledge\n }\n ents {\n persons {\n text\n pos_\n tag_\n }\n nouns {\n text\n pos_\n tag_\n }\n numbers {\n text\n pos_\n tag_\n }\n sites {\n text\n pos_\n tag_\n }\n }\n }\n }\"\"\"):\n query = \"\"\"{\\n nlp(filepath: \"{{filePath}}\", model: \"TW\") {{query}}\\n}\"\"\".replace('{{filePath}}', filePath).replace('{{query}}', query)\n result = graphene.Schema(query=Query).execute(query)\n return json.loads(json.dumps({\"data\": result.data}))\n\n\"\"\"\n將 result_pos 拆開成 [{\"text\", \"tag_\", \"pos_\"} ...]\n\"\"\"\ndef posList2TextTag(posLIST):\n textTagLIST = []\n textPosPat = re.compile(\"<[^>]*?>.*?</[^>]*?>\")\n posPat = re.compile(\"(?<=>).*?</[^>]*?>\")\n posLIST.reverse()\n for pos in posLIST:\n if pos[0] == '<' and pos[-1] == '>':\n textPosLIST = [p.group(0) for p in reversed(list(textPosPat.finditer(pos)))]\n for t in textPosLIST:\n textLIST = [tp.group(0).split(\"</\") for tp in posPat.finditer(t)]\n textTagLIST.append({\n \"text\": textLIST[0][0],\n \"tag_\": textLIST[0][1][:-1],\n \"pos_\": pos2UniversalPOS(textLIST[0][1][:-1])\n })\n else:\n textTagLIST.append({\n \"text\": pos,\n \"tag_\": 'PUNCTUATION',\n \"pos_\": 'PUNCT'\n })\n textTagLIST.reverse()\n return textTagLIST\n\n\"\"\"\nArticut POS 轉換 Universal Part-of-speech Tags\n\"\"\"\ndef pos2UniversalPOS(pos):\n if pos in ['FUNC_inner']:\n return 'ADP'\n if pos in ['FUNC_determiner']:\n return 'DET'\n if pos in 
['AUX', 'MODAL']:\n return 'AUX'\n if pos in ['ASPECT', 'FUNC_negation']:\n return 'PART'\n if pos in ['FUNC_inter', 'FUNC_conjunction']:\n return 'CONJ'\n if pos in ['ENTITY_person', 'ENTITY_pronoun']:\n return 'PERSON'\n if pos in ['TIME_justtime', 'RANGE_period']:\n return 'TIME'\n if pos in ['QUANTIFIER', 'ENTITY_measurement']:\n return 'QUANTITY'\n if pos in ['MODIFIER', 'MODIFIER_color', 'FUNC_modifierHead']:\n return 'ADJ'\n if pos in ['LOCATION', 'RANGE_locality', 'KNOWLEDGE_place', 'KNOWLEDGE_addTW', 'KNOWLEDGE_route']:\n return 'LOC'\n if pos in ['VerbP', 'ACTION_verb', 'ACTION_lightVerb', 'ACTION_quantifiedVerb']:\n return 'VERB'\n if pos in ['TIME_day', 'TIME_week', 'TIME_month', 'TIME_season', 'TIME_year', 'TIME_decade', 'TIME_holiday']:\n return 'DATE'\n if pos in ['IDIOM', 'ENTITY_noun', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP', 'ENTITY_nounHead', 'ENTITY_num', 'ENTITY_classifier', 'ENTITY_possessive']:\n return 'NOUN'\n\n return 'OTHER' # ['UserDefined', 'CLAUSE_AnotAQ', 'CLAUSE_YesNoQ', 'CLAUSE_WhoQ', 'CLAUSE_WhatQ', 'CLAUSE_WhereQ', 'CLAUSE_WhenQ', 'CLAUSE_HowQ', 'CLAUSE_WhyQ', 'CLAUSE_Particle', 'KNOWLEDGE_url']\n\n\"\"\"\nArticut-GraphQL Content\n\"\"\"\ndef getTokens(textTagLIST):\n resultLIST = []\n for textTag in textTagLIST:\n resultDICT = {\n \"text\": textTag[\"text\"],\n \"tag_\": textTag[\"tag_\"],\n \"pos_\": textTag[\"pos_\"],\n \"isStop\": posIsStop(textTag[\"tag_\"]),\n \"isEntity\": posIsEntity(textTag[\"tag_\"]),\n \"isVerb\": posIsVerb(textTag[\"tag_\"]),\n \"isTime\": posIsTime(textTag[\"tag_\"]),\n \"isClause\": posIsClause(textTag[\"tag_\"]),\n \"isKnowledge\": posIsKnowledge(textTag[\"tag_\"])\n }\n resultLIST.append(resultDICT)\n return resultLIST\n\ndef getEnts(textTagLIST):\n resultDICT = {\n \"persons\": getPersons(textTagLIST),\n \"nouns\": getNouns(textTagLIST),\n \"numbers\": getNumbers(textTagLIST),\n \"sites\": getSites(textTagLIST)\n #\"userdefined\": getUserdefined(textTagLIST)\n }\n return resultDICT\n\n\"\"\"\nArticut-GraphQL Function\n\"\"\"\ndef posIsStop(pos):\n if pos in ['ACTION_lightVerb', 'FUNC_determiner', 'FUNC_modifierHead', 'FUNC_negation', 'FUNC_conjunction', 'RANGE_locality', 'RANGE_period']:\n return True\n return False\n\ndef posIsEntity(pos):\n if pos in ['ENTITY_num', 'ENTITY_classifier', 'ENTITY_measurement', 'ENTITY_person', 'ENTITY_pronoun', 'ENTITY_possessive', 'ENTITY_noun', 'ENTITY_nounHead', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP']:\n return True\n return False\n\ndef posIsVerb(pos):\n if pos in ['ACTION_verb', 'ACTION_quantifiedVerb', 'VerbP']:\n return True\n return False\n\ndef posIsTime(pos):\n if pos in ['TIME_justtime', 'TIME_holiday', 'TIME_day', 'TIME_week', 'TIME_month', 'TIME_season', 'TIME_year', 'TIME_decade']:\n return True\n return False\n\ndef posIsClause(pos):\n if pos in ['CLAUSE_AnotAQ', 'CLAUSE_YesNoQ', 'CLAUSE_WhoQ', 'CLAUSE_WhatQ', 'CLAUSE_WhereQ', 'CLAUSE_WhenQ', 'CLAUSE_HowQ', 'CLAUSE_WhyQ', 'CLAUSE_Particle']:\n return True\n return False\n\ndef posIsKnowledge(pos):\n if pos in ['KNOWLEDGE_addTW', 'KNOWLEDGE_url', 'KNOWLEDGE_place', 'KNOWLEDGE_route', 'LOCATION', 'UserDefined']:\n return True\n return False\n\ndef getPersons(textTagLIST):\n resultLIST = []\n for textTag in textTagLIST:\n if textTag[\"tag_\"] in ['ENTITY_person', 'ENTITY_pronoun']:\n resultLIST.append(textTag)\n return resultLIST\n\ndef getNouns(textTagLIST):\n resultLIST = []\n for textTag in textTagLIST:\n if textTag[\"tag_\"] in ['ENTITY_noun', 'ENTITY_nounHead', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP']:\n 
resultLIST.append(textTag)\n    return resultLIST\n\ndef getNumbers(textTagLIST):\n    resultLIST = []\n    for textTag in textTagLIST:\n        if textTag[\"tag_\"] in ['ENTITY_num', 'ENTITY_classifier', 'ENTITY_measurement']:\n            resultLIST.append(textTag)\n    return resultLIST\n\ndef getSites(textTagLIST):\n    resultLIST = []\n    for textTag in textTagLIST:\n        if textTag[\"tag_\"] in ['KNOWLEDGE_addTW', 'KNOWLEDGE_place', 'LOCATION', 'KNOWLEDGE_route']:\n            resultLIST.append(textTag)\n    return resultLIST\n\n# Uncompleted\ndef getUserdefined(textTagLIST):\n    resultLIST = []\n    for textTag in textTagLIST:\n        if textTag[\"tag_\"] in ['']:\n            resultLIST.append(textTag)\n    return resultLIST\n\n\n\n\"\"\"\nUsed by python ArticutGraphQL.py articutResult.json\nStarlette server (http://0.0.0.0:8000)\n\"\"\"\ndef serverStart():\n    from starlette.applications import Starlette\n    from starlette.routing import Router\n    from starlette.routing import Route\n    import uvicorn\n    \n    app = Router([Route('/', endpoint=graphQL, methods=['GET', 'POST'])])\n    uvicorn.run(app, host='0.0.0.0', port=8000)\n    return None\n\nasync def graphQL(request):\n    from starlette.templating import Jinja2Templates\n    from starlette.status import HTTP_400_BAD_REQUEST\n    from starlette.responses import PlainTextResponse\n    from starlette.responses import JSONResponse\n    \n    if request.method == 'POST':\n        content_type = request.headers.get(\"Content-Type\", \"\")\n        if content_type == 'application/json':\n            data = await request.json()\n        else:\n            return PlainTextResponse('Bad Request!', status_code=HTTP_400_BAD_REQUEST)\n        \n        try:\n            query = data[\"query\"]\n            variables = data.get(\"variables\")\n        except KeyError:\n            return PlainTextResponse('Bad Request!', status_code=HTTP_400_BAD_REQUEST)\n        \n        result = graphene.Schema(query=Query).execute(query, variables=variables)\n        return JSONResponse({\"data\": result.data})\n    else:\n        return Jinja2Templates(directory='Toolkit').TemplateResponse('graphQL.html', {\n            \"request\": request,\n            \"resultFilePath\": resultFilePath\n        })\n\nif __name__ == '__main__':\n    if len(sys.argv) >= 2:\n        resultFilePath = sys.argv[1]\n        if os.path.isfile(resultFilePath):\n            serverStart()\n        else:\n            print('{} file does not exist!'.format(resultFilePath))\n    else:\n        print('Please provide the path of the segmentation result file, e.g.: python ArticutGraphQL.py articutResult.json')","repo_name":"Droidtown/ArticutAPI","sub_path":"ArticutAPI/Toolkit/graphQL.py","file_name":"graphQL.py","file_ext":"py","file_size_in_byte":12197,"program_lang":"python","lang":"en","doc_type":"code","stars":400,"dataset":"github-code","pt":"67"} +{"seq_id":"16874097243","text":"#!/usr/bin/python3\n\nletters = input(\"Enter a string: \").lower().strip()\nvowels = ['a', 'e', 'i', 'o', 'u', 'y']\n\nfor i in letters:\n    if i not in vowels:\n        print(i, end=\" \")\n","repo_name":"FrankOnyemaOrji/negpod5-gc3","sub_path":"loops/num2.py","file_name":"num2.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9236630408","text":"'''\nA script for adding a new (sentence-split, POS-tagged and lemmatized, xml) subcorpus to the master corpus\n(add to the escaped xml version of the corpus).\nA re-run of tagged2vert.py and unescape_s.py is necessary after adding the new subcorpus.\n'''\nimport argparse\nfrom lxml import etree\nimport gc\nfrom pathlib import Path\n\ngc.set_threshold(1000, 15, 15)   # setting higher thresholds for garbage collection, in order to avoid memory peaks\n\n# define cmd arguments\nparser = argparse.ArgumentParser(description=\"A 
script for adding a subcorpus to the master corpus\")\nparser.add_argument(\"masterCorpus\", help=\"the master corpus in xml format\")\nparser.add_argument(\"subCorpus\", help=\"the subcorpus in xml format\")\nargs = parser.parse_args()\n\n# processing arguments\nmasterCorpus = args.masterCorpus\nsubCorpus = args.subCorpus\n\n\nwith open(masterCorpus, \"r+\", encoding=\"utf-8\") as master_corpus:\n    parser = etree.XMLParser(remove_blank_text=True, encoding='utf-8')\n    tree_master = etree.parse(master_corpus, parser)\n    root_master = tree_master.getroot()\n\n    with open(subCorpus, \"r+\", encoding=\"utf-8\") as subcorp:\n        tree_subc = etree.parse(subcorp, parser)\n        root_subc = tree_subc.getroot()\n        for child in list(root_subc):  # snapshot the children, since appending reparents them\n            root_master.append(child)\n\n\nfilename_old = Path(masterCorpus).stem\nfilename_new = filename_old + \"_subcorpus-added.xml\"\ntree_master.write(filename_new, encoding=\"utf-8\")\n\nprint(\"Done\")\n\n\n\n","repo_name":"antcont/GeLeCo","sub_path":"corpus_processing/add_subcorpus.py","file_name":"add_subcorpus.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"27742988439","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 11 17:35:25 2020\r\n\r\n@author: PORTATIL\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport nltk\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.naive_bayes import BernoulliNB\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split  \r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.model_selection import ShuffleSplit\r\nfrom sklearn.model_selection import learning_curve\r\nfrom scipy.sparse import coo_matrix\r\nfrom sklearn.utils import shuffle\r\n\r\n\r\n# Load the CSV\r\n\r\ndf = pd.read_csv(r\"C:\\Users\\PORTATIL\\Desktop\\TFG\\dataset_tfg\\isot_dataset\\combinado.csv\")\r\n\r\n# Replace empty strings with NaN, then drop those rows\r\nnan_value = float(\"NaN\")\r\n\r\ndf.replace(\"\", nan_value, inplace=True)\r\n\r\ndf.dropna( inplace=True)\r\n\r\nprint((df.columns))\r\n\r\n\r\n# Define the data\r\n\r\nX= df['title'].astype(str) + ' ' + df['body'].astype(str)\r\ny = df['Category']\r\nW1=X\r\nz=y\r\n\r\n#y_list=y.tolist()\r\n\r\n# Split the data: 70% train, 30% test\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.70,test_size=0.30)\r\n\r\n# Apply TF-IDF.\r\ntfIdfVectorizer=TfidfVectorizer(smooth_idf=True,max_features=1000,use_idf=True)\r\n#tfIdfVectorizer = TfidfVectorizer()\r\n\r\n\r\n\r\nX_vectorizer = tfIdfVectorizer.fit_transform(X_train)\r\nX_vector=tfIdfVectorizer.transform(X_test)\r\n\r\nX_train2=X_vectorizer.toarray()\r\nX_test2=X_vector.toarray()\r\n\r\nW_vectorizer=tfIdfVectorizer.fit_transform(W1)\r\nW_learning=W_vectorizer.toarray()\r\n\r\nW = coo_matrix(W_learning)\r\n\r\nW,z=shuffle(W,z)\r\n\r\n# NAIVE BAYES TRAINING\r\n# Define the classification models: Naive Bayes, Decision Tree and Random Forest. \r\nnaive_bayes_classifier = BernoulliNB()\r\nnaive_bayes_classifier=naive_bayes_classifier.fit(X_train2, y_train)\r\n\r\npred_NB = naive_bayes_classifier.predict(X_test2)\r\n\r\n# DECISION TREE TRAINING.\r\ndecision_tree_classifier=DecisionTreeClassifier()\r\ndecision_tree_classifier=decision_tree_classifier.fit(X_train2, y_train)\r\n\r\npred_DT=decision_tree_classifier.predict(X_test2)\r\n\r\n\r\n#tree.plot_tree(decision_tree)  \r\n\r\n# RANDOM FOREST TRAINING\r\nrandom_forest=RandomForestClassifier(n_estimators=3)\r\nrandom_forest=random_forest.fit(X_train2,y_train)\r\n\r\npred_RF=random_forest.predict(X_test2)\r\n\r\n# compute the performance measures\r\nscore1 = metrics.accuracy_score(y_test,pred_NB )\r\nscore2 = metrics.accuracy_score(y_test,pred_DT)\r\nscore3 = metrics.accuracy_score(y_test,pred_RF)\r\n\r\n##LEARNING CURVES\r\n#NAIVE BAYES\r\ntrain_sizes, train_scores, test_scores = learning_curve(BernoulliNB(), W, z, cv=10, scoring='accuracy', n_jobs=-1, train_sizes=np.linspace(0.01, 1.0, 50))\r\n\r\ntrain_mean = np.mean(train_scores, axis=1)\r\ntrain_std = np.std(train_scores, axis=1)\r\n\r\ntest_mean = np.mean(test_scores, axis=1)\r\ntest_std = np.std(test_scores, axis=1)\r\n\r\nplt.subplots(1, figsize=(10,10))\r\nplt.plot(train_sizes, train_mean, '--', color=\"m\",  label=\"Training score\")\r\nplt.plot(train_sizes, test_mean, color=\"y\", label=\"Cross-validation score\")\r\n\r\nplt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, color=\"#DDDDDD\")\r\nplt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color=\"#DDDDDD\")\r\n\r\nplt.title(\"Learning Curve\")\r\nplt.xlabel(\"Training Set Size\"), plt.ylabel(\"Accuracy Score\"), plt.legend(loc=\"best\")\r\nplt.tight_layout()\r\nplt.savefig('NB')\r\nplt.show()\r\n\r\n\r\n#DECISION TREE\r\ntrain_sizes2, train_scores2, test_scores2 = learning_curve(DecisionTreeClassifier(), W, z, cv=10, scoring='accuracy', n_jobs=-1, train_sizes=np.linspace(0.01, 1.0, 50))\r\n\r\ntrain_mean2 = np.mean(train_scores2, axis=1)\r\ntrain_std2 = np.std(train_scores2, axis=1)\r\n\r\ntest_mean2 = np.mean(test_scores2, axis=1)\r\ntest_std2 = np.std(test_scores2, axis=1)\r\n\r\nplt.subplots(1, figsize=(10,10))\r\nplt.plot(train_sizes2, train_mean2, '--', color=\"m\",  label=\"Training score\")\r\nplt.plot(train_sizes2, test_mean2, color=\"y\", label=\"Cross-validation score\")\r\n\r\nplt.fill_between(train_sizes2, train_mean2 - train_std2, train_mean2 + train_std2, color=\"#DDDDDD\")\r\nplt.fill_between(train_sizes2, test_mean2 - test_std2, test_mean2 + test_std2, color=\"#DDDDDD\")\r\n\r\nplt.title(\"Learning Curve\")\r\nplt.xlabel(\"Training Set Size\"), plt.ylabel(\"Accuracy Score\"), plt.legend(loc=\"best\")\r\nplt.tight_layout()\r\nplt.savefig('DT')\r\nplt.show()\r\n\r\n\r\n#RANDOM FOREST\r\ntrain_sizes3, train_scores3, test_scores3 = learning_curve(RandomForestClassifier(), W, z, cv=10, scoring='accuracy', n_jobs=-1, train_sizes=np.linspace(0.01, 1.0, 50))\r\n\r\ntrain_mean3 = np.mean(train_scores3, axis=1)\r\ntrain_std3 = np.std(train_scores3, axis=1)\r\n\r\ntest_mean3 = np.mean(test_scores3, axis=1)\r\ntest_std3 = np.std(test_scores3, axis=1)\r\n\r\nplt.subplots(1, figsize=(10,10))\r\nplt.plot(train_sizes3, train_mean3, '--', color=\"m\",  label=\"Training score\")\r\nplt.plot(train_sizes3, test_mean3, color=\"y\", label=\"Cross-validation score\")\r\n\r\nplt.fill_between(train_sizes3, train_mean3 - train_std3, train_mean3 + train_std3, color=\"#DDDDDD\")\r\nplt.fill_between(train_sizes3, test_mean3 - test_std3, test_mean3 + test_std3, color=\"#DDDDDD\")\r\n\r\nplt.title(\"Learning Curve\")\r\nplt.xlabel(\"Training Set Size\"), plt.ylabel(\"Accuracy Score\"), plt.legend(loc=\"best\")\r\nplt.tight_layout()\r\nplt.savefig('RF')\r\nplt.show()\r\n\r\n\r\n# NAIVE BAYES CONFUSION MATRIX\r\nprint(\"accuracy:   %0.3f\" % score1)\r\n\r\nprint(metrics.classification_report(y_test, pred_NB,\r\n                                            target_names=['Positive', 'Negative']))\r\n\r\nprint(\"confusion matrix:\")\r\nprint(metrics.confusion_matrix(y_test, pred_NB))\r\n\r\nprint('------------------------------')\r\n# DECISION TREE CONFUSION MATRIX\r\nprint(\"accuracy:   %0.3f\" % score2)\r\n\r\nprint(metrics.classification_report(y_test,pred_DT ,\r\n                                            target_names=['Positive', 'Negative']))\r\n\r\nprint(\"confusion matrix:\")\r\nprint(metrics.confusion_matrix(y_test, pred_DT))\r\n\r\nprint('------------------------------')\r\n\r\n# RANDOM FOREST CONFUSION MATRIX\r\nprint(\"accuracy:   %0.3f\" % score3)\r\n\r\nprint(metrics.classification_report(y_test, pred_RF,\r\n                                            target_names=['Positive', 'Negative']))\r\n\r\nprint(\"confusion matrix:\")\r\nprint(metrics.confusion_matrix(y_test,pred_RF))\r\n\r\nprint('------------------------------')\r\n\r\n\r\n","repo_name":"Mircea667/Fake-news-","sub_path":"FAKE_NEWS.py","file_name":"FAKE_NEWS.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22811809699","text":"'''In this window, code is written for an html hyperlink'''\n\nfrom tkinter import *\nfrom tkhtmlview import HTMLLabel   #view as html file\nimport subprocess  \n\nroot = Tk()\nroot.geometry(\"400x400\")\nroot.wm_iconbitmap(\"facebook.ico\")\nroot.title(\"facebook\")\n \n\nmy_label = HTMLLabel(root, html=\"\"\"\n<li> LINK FOR FACEBOOK</LI>\n    <ul>\n        <li><a href='https://www.facebook.com//'>Facebook</a></li>\n       \n    </ul>\n    \"\"\")\n   \nmy_label.pack(pady=20, padx=20)\n\n\n# function for home button\n\ndef run_home():\n\n    root.destroy()\n    subprocess.call([\"python\",\"project.py\"])\n\n\n\nb=Button(root,text=\"⌂ home\",bg=\"black\",fg=\"white\",command=run_home)\nb.place(x=180,y=250)\n    \nroot.mainloop()","repo_name":"NABIN777/individual","sub_path":"facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1753819888","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nPROBLEM\n\nYou need to create an instance, but want to bypass the execution of the\n__init__() method for some reason.\n\"\"\"\n\n\n\"\"\"\nDISCUSSION\n\"\"\"\n\n\nclass Date:\n    def __init__(self, year, month, day):\n        self.year = year\n        self.month = month\n        self.day = day\n\n\nd = Date.__new__(Date)\nprint(d)\n\ndata = {\n    'year' : 2012,\n    'month' : 8,\n    'day' : 29\n}\n\nfor key, value in data.items():\n    setattr(d, key, value)\n\nprint(d.year)\nprint(d.month)\nprint('')\n\n\n\"\"\"\nDISCUSSION\n\"\"\"\n\nfrom time import localtime\n\n\nclass Date2:\n    def __init__(self, year, month, day):\n        self.year = year\n        self.month = month\n        self.day = day\n\n    @classmethod\n    def today(cls):\n        d = cls.__new__(cls)\n        t = localtime()\n        d.year = t.tm_year\n        d.month = t.tm_mon\n        d.day = t.tm_mday\n        return d\n\n    @classmethod\n    def makedate(cls, data):\n        d = cls.__new__(cls)\n        for key, value in data.items():\n            setattr(d, key, value)\n        return d\n\n\nd2 = 
Date2.makedate(data)\nprint(d2.year)\nprint(d2.month)\nprint(d2.day)\n","repo_name":"irobot0/pycookbook","sub_path":"ch8/8.17/py3_example.py","file_name":"py3_example.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38590679821","text":"from environments import GymEnvironment\nfrom algorithms import BaseAlgorithm # SAC, DDQN, DDPG\nfrom algorithms import SAC\nfrom laserhockey import LaserHockeyEnv, HockeyEnv\n\ndef train(env: GymEnvironment, agent: BaseAlgorithm,\n n_episodes: int = 1000, max_timesteps: int = 1000,\n save_model: bool = False, test_model: bool = False):\n \"\"\"\n Train the agent on the given environment.\n\n :param env: The environment to train on.\n :param agent: The agent to train.\n :param n_episodes: The number of episodes to train for. (default: 1000)\n :param save_model: Whether to save the model after training. (default: False)\n :param test_model: If True, the model will be tested after training. (default: False)\n \"\"\"\n # Initialize the environment\n state = env.reset()\n\n # Train the agent\n for episode in range(n_episodes):\n if episode % 10000 == 0:\n print(f'Episode {episode + 1}/{n_episodes}')\n # Train for max_timesteps\n for t in range(max_timesteps):# Choose an action\n action = agent.choose_action(state)\n\n # Perform the action\n next_state, reward, done, truncated, info = env.step(action)\n\n # Store the transition\n agent.store_transition({\n 'state': state,\n 'action': action,\n 'reward': reward,\n 'next_state': next_state,\n 'done': done\n })\n\n # Update the state\n state = next_state\n\n # Update the agent\n agent.learn()\n\n # Render the environment\n env.render()\n\n # Check if the episode is done\n if done:\n break\n if reward > -100:\n print(f'Episode {episode + 1}/{n_episodes} finished in {t + 1} timesteps')\n print(f'Episode reward: {reward}')\n print()\n\n\n # Reset the environment\n state = env.reset()\n done = False\n\n\n # Close the environment\n env.close()\n\nif __name__ == '__main__':\n pass\n","repo_name":"SNicolas99/RLPowerPucks","sub_path":"environments/train_env.py","file_name":"train_env.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33793411754","text":"from sqlalchemy.exc import IntegrityError\n\nfrom app import db\nfrom faker import Faker\n\nclass ResourceCategory(db.Model):\n \"\"\"\n Schema for categories associated with a resource\n \"\"\"\n\n __tablename__ = 'resource_categories'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(256), index=True)\n\n @staticmethod\n def get_resource_categories_as_dict():\n cats = ResourceCategory.query.all()\n cats_as_dicts = []\n for cat in cats:\n cat = cat.__dict__\n\n if '_sa_instance_state' in cat:\n del cat['_sa_instance_state']\n\n cats_as_dicts.append(cat)\n return cats_as_dicts\n\n @staticmethod\n def get_resource_category_name(id):\n return ResourceCategory.query.get(id).name\n\n @staticmethod\n def generate_fake(count=15):\n fake = Faker()\n for i in range(count):\n cat = fake.bs()\n\n resource = ResourceCategory(name=cat)\n\n db.session.add(resource)\n\n try:\n db.session.commit()\n except IntegrityError:\n 
db.session.rollback()\n\n","repo_name":"littlefieldnick/flask-angular-test","sub_path":"flask-angular/app/models/resource_category.py","file_name":"resource_category.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33565383293","text":"import json\nimport csv\n\n\nADS_CSV = 'ad.csv'\nADS_JSON = 'ad.json'\nADS_MODEL_NAME = 'ads.ad'\n\nCATEGORY_CSV = 'category.csv'\nCATEGORY_JSON = 'category.json'\nCATEGORY_MODEL_NAME = 'ads.category'\n\nUSER_CSV = 'user.csv'\nUSER_JSON = 'user.json'\nUSER_MODEL_NAME = 'users.user'\n\nLOCATION_CSV = 'location.csv'\nLOCATION_JSON = 'location.json'\nLOCATION_MODEL_NAME = 'users.location'\n\n\ndef convert_csv_to_json(csv_filename, json_filename, model_name):\n\n with open(csv_filename, encoding='utf-8') as csv_file:\n csv_read = csv.DictReader(csv_file)\n result = []\n for row in csv_read:\n res = {\"model\": model_name, \"pk\": int(row['id'] if 'id' in row else row['Id'])}\n if 'id' in row:\n del row['id']\n elif 'Id' in row:\n del row['Id']\n if 'is_published' in row:\n if row['is_published'] == 'TRUE':\n row['is_published'] = True\n else:\n row['is_published'] = False\n if 'price' in row:\n row['price'] = int(row['price'])\n if 'author_id' in row:\n row['author_id'] = int(row['author_id'])\n if 'location_id' in row:\n loc_lst: list = []\n loc_lst.append(int(row['location_id']))\n row['location'] = loc_lst\n del row['location_id']\n if 'age' in row:\n row['age'] = int(row['age'])\n res['fields'] = row\n result.append(res)\n\n with open(json_filename, 'w', encoding='utf-8') as json_file:\n json_file.write(json.dumps(result, indent=3, ensure_ascii=False))\n\n\nconvert_csv_to_json(ADS_CSV, ADS_JSON, ADS_MODEL_NAME)\nconvert_csv_to_json(CATEGORY_CSV, CATEGORY_JSON, CATEGORY_MODEL_NAME)\nconvert_csv_to_json(LOCATION_CSV, LOCATION_JSON, LOCATION_MODEL_NAME)\nconvert_csv_to_json(USER_CSV, USER_JSON, USER_MODEL_NAME)\n","repo_name":"Vikver74/lesson28","sub_path":"data/convert_csv_to_json.py","file_name":"convert_csv_to_json.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39057505177","text":"import decimal\n\nfrom django.conf import settings\n\nfrom sorl.thumbnail.fields import ImageField as SorlImageField\n\n__all__ = ('ImageField')\n\n\ndef resize_image(file):\n from PIL import Image\n max_width = decimal.Decimal(getattr(settings, 'MAXIMUM_IMAGE_WIDTH', 1280))\n max_height = decimal.Decimal(getattr(settings, 'MAXIMUM_IMAGE_HEIGHT', 1024))\n\n try:\n im = Image.open(file.path)\n except:\n im = None\n if im:\n current_width, current_height = im.size\n current_width, current_height = decimal.Decimal(current_width), decimal.Decimal(current_height)\n if current_width > max_width or current_height > max_height:\n if current_width > max_width and current_height > max_height:\n ratios = (max_width/current_width, max_height/current_height)\n ratio = min(ratios)\n elif current_width > max_width:\n ratio = max_width/current_width\n elif current_height > max_height:\n ratio = max_height/current_height\n new_width = int(current_width * ratio)\n new_height = int(current_height * ratio)\n new_size = (new_width, new_height)\n im.thumbnail(new_size, Image.ANTIALIAS)\n im.save(file.path)\n\n\nclass ImageField(SorlImageField):\n\n def pre_save(self, model_instance, add):\n \"Returns field's value just before saving.\"\n file = super(ImageField, 
self).pre_save(model_instance, add)\n resize_image(file)\n return file\n\n","repo_name":"fruitschen/custom-sorl-thumbnail","sub_path":"custom_sorl_thumbnail/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"21815245397","text":"\"\"\"\n# 239\n https://leetcode.com/problems/sliding-window-maximum/description/\n Naive n^2 implementations. Check and append maximum from current windiw (temp array here)\n can use heaps to optimize (possibly in one pass)\n python3\n submission details : \n\t Runtime: 1093 ms\n\t Runtime: 1052 ms (without accessing temp array)\n\"\"\"\nclass Solution:\n def maxSlidingWindow(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n if not nums:\n return nums\n left,right = 0,k-1\n #if k == 1:\n # return [max(nums)]\n res = []\n while right <= len(nums)-1:\n #print(left,right)\n temp = nums[left:right+1] # temporary array for current window\n #print(left,right,temp)\n res.append(max(temp)) # can rewrite it as res.append(max(nums[left:right+1]))\n right += 1\n left += 1\n \n return(res)","repo_name":"sk-g/Leetcode","sub_path":"python/239. Sliding Window Maximum.py","file_name":"239. Sliding Window Maximum.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"31348478826","text":"import boto3\nfrom boto3.dynamodb.conditions import Key, Attr\n\nclass DatabaseHandler:\n\n def __init__(self):\n #self.dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url=\"http://localhost:8000\")\n self.session = boto3.session.Session(aws_access_key_id='AKIAJZ5AB4UMBOILNJBQ', aws_secret_access_key='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', region_name='us-east-1')\n self.dynamodb = self.session.resource('dynamodb')\n self.table = self.dynamodb.Table('TweetsDB')\n\n\n\n def getItem(self, keyword):\n response = self.table.scan(\n FilterExpression=Attr('text').contains(keyword.lower()) | Attr('text').contains(keyword.upper()) | Attr('text').contains(keyword.title())\n )\n items = response['Items']\n '''\n while (response.get('LastEvaluatedKey')):\n response = self.table.scan(\n FilterExpression=Attr('text').contains(keyword.lower()) | Attr('text').contains(keyword.upper()) | Attr(\n 'text').contains(keyword.title()),\n ExclusiveStartKey=response['LastEvaluatedKey'])\n items.extend(response['Items'])\n '''\n return items\n\n\n def getAllItem(self):\n response = self.table.scan()\n items = response['Items']\n while (response.get('LastEvaluatedKey')):\n response = self.table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n items.extend(response['Items'])\n\n return len(items)\n\n\n\n","repo_name":"williamla0907/SocialMediaAnalyzer","sub_path":"Analyzer/database_handler.py","file_name":"database_handler.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15820071987","text":"import base64\r\nimport requests\r\nimport pickle\r\nimport requests\r\nimport urllib.parse\r\nimport json\r\nimport mysql.connector\r\nimport threading\r\n\r\n\r\nclass Db:\r\n user = 'root'\r\n password = ''\r\n host = 'localhost'\r\n database = 'carttest'\r\n\r\n def connectdb(self):\r\n conn = mysql.connector.connect(user = self.user,password = self.password,host = self.host, database = self.database)\r\n return 
conn\r\n\r\n    def close_connect(self,conn):\r\n        conn.close()\r\n    \r\n    def selectdb(self,conn,query):\r\n        mycursor=conn.cursor()\r\n        mycursor.execute(query)\r\n        result = mycursor.fetchone()\r\n        if result:\r\n            result_id_desc = {}\r\n            result_id_desc['id_desc'] = result[4]\r\n            return result_id_desc\r\n        else:\r\n            return False\r\n\r\n\r\n    def insertdb(self,conn,query):\r\n        mycursor=conn.cursor()\r\n        try:\r\n            mycursor.execute(query)\r\n            conn.commit()\r\n            return mycursor.lastrowid\r\n        except:\r\n            conn.rollback()\r\n            return False\r\n\r\n    def updatedb(self,conn,query):\r\n        mycursor=conn.cursor()\r\n        try:\r\n            mycursor.execute(query)\r\n            conn.commit()\r\n        except:\r\n            conn.rollback()\r\n\r\n        \r\n\r\n    def deletedb(self,conn,query):\r\n        mycursor=conn.cursor()\r\n        try:\r\n            mycursor.execute(query)\r\n            conn.commit()\r\n        except:\r\n            conn.rollback()\r\n\r\n    def escape(self, value):\r\n        # quote a value as an SQL string literal\r\n        return \"'\" + str(value).replace(\"'\", \"''\") + \"'\"\r\n\r\n    def arrayToInCondition(self,array):\r\n        if not array:\r\n            return \"('None')\"\r\n        array = map(self.escape, array)\r\n        result = ','.join(array)\r\n        result = '(' + result + ')'\r\n        return result\r\n\r\n\r\n    def arrayToInsertCondition(self,array, allow_keys = None):\r\n        if not array:\r\n            return False\r\n\r\n        data_key = []\r\n        data_value = []\r\n        if allow_keys is None:\r\n            data_key = list(array.keys())\r\n            for value in array.values():\r\n                if isinstance(value, int):\r\n                    data_value.append(str(value))\r\n                else:\r\n                    data_value.append(self.escape(value))\r\n        else:\r\n            for key in array.keys():\r\n                if key in allow_keys:\r\n                    data_key.append(key)\r\n                    value = array[key]\r\n                    if isinstance(value, int):\r\n                        data_value.append(str(value))\r\n                    else:\r\n                        data_value.append(self.escape(value))\r\n\r\n        if not data_key:\r\n            return False\r\n        \r\n        key_condition = '(`' + '`, `'.join(data_key) + '`)'\r\n        value_condition = \"(\" + \", \".join(data_value) + \")\"\r\n        condition = key_condition + \" VALUES \" + value_condition\r\n        return condition\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndb = Db()\r\nconnect = db.connectdb()\r\nquery = \"\"\"insert into log(url_src,url_tar,id_src,type) values('%s','%s','%s','%s')\"\"\" %('http://localhost/oscommerce/catalog/','http://localhost/opencart/',10,'Customer')\r\ndb.insertdb(connect,query)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"namlv94/testpy","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73542543254","text":"\"\"\"This defines a quick I/O framework for sending to Mathematica\n\n\"\"\"\n\nfrom __future__ import print_function\nimport os\n\nmathematica_type_exporters = {}\nmathematica_export_parameters = {\n    \"BufferMaxSize\" : 5000\n} #These are set on the Mathematica side\n\ndef mathematica_safe_json(obj):\n\n    import json\n\n    try:\n        m_string = json.dumps(obj)\n    except TypeError:\n        m_string = repr(obj)\n\n    return m_string\n\ndef mathematica_default_export(obj):\n    import base64\n    if isinstance(obj, bytes):\n        exp_data = {\n            \"ReturnType\":\"Bytes\",\n            \"ReturnValue\":str(base64.b64encode(obj))\n        }\n    else:\n        exp_data = { \"ReturnType\":\"String\", \"ReturnValue\":mathematica_safe_json(obj) }\n    return exp_data\n\ndef mathematica_register_type_exporter(t, 
f):\n mathematica_type_exporters[t] = f;\n\nmathematica_exporter_dir = os.path.join(os.path.dirname(__file__), \"exporters\")\ndef mathematica_load_exporter(obj):\n m_typ = type(obj)\n obj_mod = m_typ.__module__ + \".\" if hasattr(m_typ, \"__module__\") else \"\"\n obj_name = m_typ.__name__\n if m_typ not in mathematica_type_exporters:\n m_path = os.path.join(\n mathematica_exporter_dir,\n obj_mod + obj_name +\".py\"\n );\n if not os.path.exists(m_path) and len(obj_mod) > 0:\n m_path= os.path.join(\n mathematica_exporter_dir,\n obj_mod + \"py\"\n );\n if os.path.exists( m_path ):\n try:\n exp_file = open(m_path)\n exec(exp_file.read())\n finally:\n exp_file.close()\n if m_typ in mathematica_type_exporters:\n m_exporter = mathematica_type_exporters[m_typ]\n else:\n m_exporter = mathematica_default_export\n\n return m_exporter\n\ndef _mathematica_export_core(obj):\n exp = mathematica_load_exporter(obj)\n return exp(obj)\n\ndef mathematica_export(obj):\n import json\n\n exp_data = _mathematica_export_core(obj)\n if isinstance(exp_data, str):\n m_dict = {\n \"ReturnType\":\"String\",\n \"ReturnValue\":exp_data\n }\n elif isinstance(exp_data, bytes):\n import base64\n m_dict = {\n \"ReturnType\": \"Bytes\",\n \"ReturnValue\": str(base64.b64encode(exp_data))\n }\n elif isinstance(exp_data, dict):\n if \"ReturnValue\" in exp_data:\n m_dict = exp_data\n else:\n m_dict = {\n \"ReturnType\":\"String\",\n \"ReturnValue\":mathematica_safe_json(exp_data)\n }\n else:\n m_dict = {\n \"ReturnType\":\"String\",\n \"ReturnValue\":mathematica_safe_json(exp_data)\n }\n\n if isinstance(m_dict[\"ReturnValue\"], bytes):\n import base64\n m_dict[\"ReturnType\"] = \"Bytes\"\n m_dict[\"ReturnValue\"] = str(base64.b64encode(m_dict[\"ReturnValue\"]))\n elif not isinstance(m_dict[\"ReturnValue\"], str):\n m_dict[\"ReturnValue\"] = mathematica_safe_json(m_dict[\"ReturnValue\"])\n\n print(mathematica_export_parameters[\"Delimiter\"])\n if len(m_dict[\"ReturnValue\"]) > mathematica_export_parameters[\"BufferMaxSize\"]:\n import tempfile as tmp\n dump_tmp = tmp.NamedTemporaryFile(delete=False)\n dump_f = dump_tmp.name\n dump_tmp.close()\n try:\n tmp_f = open(dump_f, \"w\")\n tmp_f.write(m_dict[\"ReturnValue\"])\n finally:\n tmp_f.close()\n m_dict[\"ReturnValue\"] = dump_f\n m_dict[\"ReturnType\"] = \"TemporaryFile\"\n print(json.dumps(m_dict))\n print(mathematica_export_parameters[\"Delimiter\"])\n","repo_name":"b3m2a1/mathematica-PyTools","sub_path":"Resources/MLib/MExport.py","file_name":"MExport.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"71495814934","text":"# ---------------------------------------------------------------------------\n# Cosmica - All rights reserved by NeuroJump Trademark 2018\n# marketsystemsellvalue.py\n# Written by Chris Lewis\n# ---------------------------------------------------------------------------\n# This uses scroll mouse up/down or plus and minus buttons to modify the\n# system resource selling orders for the galactic market\n# ---------------------------------------------------------------------------\nfrom rootbutton import RootButton\nfrom anw.gui import valuebar, textonscreen\nfrom anw.func import globals\n\nclass MarketSystemSellValue(RootButton):\n \"\"\"The System Market Gui for setting a price and amount to sell a resource from a system\"\"\"\n def __init__(self, path, mySystemDict, resource, currentPrice):\n self.mySystemDict = mySystemDict\n self.resource = resource\n 
self.currentPrice = currentPrice\n self.maxPrice = currentPrice\n self.maxPriceIncrease = self.maxPrice*2.0\n y = -0.4\n x = 0.65\n RootButton.__init__(self, path, x=x, y=y, name='sys_sell')\n self.allKeys = ['A','S','D','F','G','Z','X','C','V']\n self.scale = 0.25\n self.priceBar = None\n self.amountBar = None\n self.maxAmount = mySystemDict[resource]\n self.currentAmount = mySystemDict[resource]\n self.disableButtonTime = 0.5\n self.disableButtonIgnore = ['S','F','G','Z','X','C','V']\n self.scrollFactor = 0\n self.focus = 'price'\n self.pressButton('G')\n self.pressButton('V')\n self.myTitle = 'Set Price and Amount to Sell on Market:'\n self.createTitleCard('marketTitle','Create Sell Order for %s:' % self.resource,\n 30,x-0.04, y+0.35)\n self.createPriceBar()\n self.createAmountBar()\n self.enableSubmit()\n \n def createPriceBar(self):\n \"\"\"Market Price from 0 to Max\"\"\"\n self.priceBar = valuebar.ValueBar(self.path, scale=self.scale, extraText = ' CREDITS PER UNIT', showOverValues=1)\n self.priceBar.setMyValues(self.currentPrice, self.maxPrice)\n self.priceBar.setMyPosition(self.posInitX+0.20, 0, self.posInitY+0.31)\n color = globals.resourceColors['CR']\n self.priceBar.setColor(globals.colors[color])\n self.myWidgets.append(self.priceBar)\n \n def createAmountBar(self):\n \"\"\"Market Amount from 0 to Max\"\"\"\n self.amountBar = valuebar.ValueBar(self.path, scale=self.scale, extraText = ' %s' % self.resource, showOverValues=0)\n self.amountBar.setMyValues(self.currentAmount, self.maxAmount)\n barHeight = self.amountBar.myBar.getHeight()*self.scale\n self.amountBar.setMyPosition(self.posInitX+0.20, 0, self.posInitY+0.31-barHeight)\n color = globals.resourceColors[self.resource]\n self.amountBar.setColor(globals.colors[color])\n self.myWidgets.append(self.amountBar)\n \n def setValue(self, value):\n \"\"\"Set either the price or the amount\"\"\"\n if self.focus == 'price':\n self.setPrice(value)\n else:\n self.setAmount(value)\n self.enableSubmit()\n \n def enableSubmit(self):\n \"\"\"Make sure submit button only enabled when it should be\"\"\"\n if self.currentAmount > 0 and self.currentPrice > 0:\n self.enableButton('S')\n else:\n self.disableButton('S')\n \n def setPrice(self, value):\n \"\"\"Update priceBar amount by value\"\"\"\n validValue = self.getValidValue(value, self.currentPrice, self.maxPriceIncrease)\n self.currentPrice = validValue\n self.priceBar.setMyValues(self.currentPrice, self.maxPrice)\n color = globals.resourceColors['CR']\n self.priceBar.setColor(globals.colors[color])\n self.priceBar.setMyPosition(self.priceBar.x,self.priceBar.y,self.priceBar.z)\n self.enableButton('S')\n \n def setAmount(self, value):\n \"\"\"Update Amount Bar by value\"\"\"\n validValue = self.getValidValue(value, self.currentAmount, self.maxAmount)\n self.currentAmount = validValue\n self.amountBar.setMyValues(self.currentAmount, self.maxAmount)\n color = globals.resourceColors[self.resource]\n self.amountBar.setColor(globals.colors[color])\n self.enableButton('S')\n \n def getValidValue(self, value, current, maxValue):\n \"\"\"if value being submitted is too big or too small for max\n or min value then set value to max or min value\"\"\"\n newValue = current + value\n if value > 0:#trying to add\n if newValue > maxValue:\n return maxValue\n else:\n return newValue\n else:# trying to remove\n if newValue < 1:\n return 0\n else:\n return newValue\n \n def acceptExtraKeys(self):\n \"\"\"Allow mousewheel to increment/decrement value\"\"\"\n self.accept('wheel_up', self.pressButton, ['D'])\n 
self.accept('wheel_down', self.pressButton, ['A'])\n        \n    def createButtons(self):\n        \"\"\"Create all Buttons\"\"\"\n        x = 0\n        y = 1\n        for key in ['Z','X','C','V']:\n            buttonPosition = ((self.posInitX+x*.10),0,(self.posInitY+y*.10))\n            self.createButton(key, buttonPosition)\n            x += 1\n        x = 0\n        y = 2\n        for key in ['A','S','D','F','G']:\n            buttonPosition = ((self.posInitX+x*.10),0,(self.posInitY+y*.10))\n            self.createButton(key, buttonPosition)\n            x += 1\n        y = 0\n    \n    def pressS(self):\n        \"\"\"Submit value to server\"\"\"\n        d = {'type':'sell', 'value':self.resource, 'min':self.currentPrice, 'max':0, 'amount':self.currentAmount, 'system':self.mySystemDict['id']}\n        self.mode.submitMarketOrder(d)\n    \n    def pressD(self):\n        \"\"\"Increment Value\"\"\"\n        self.setValue(self.scrollFactor)\n    \n    def pressA(self):\n        \"\"\"Decrement Value\"\"\"\n        self.setValue(-self.scrollFactor)\n    \n    def pressZ(self):\n        \"\"\"Set scroll factor to 1\"\"\"\n        self.enableLastButton('Z')\n        self.disableButton('Z')\n        self.scrollFactor = 1\n    \n    def pressX(self):\n        \"\"\"Set scroll factor to 10\"\"\"\n        self.enableLastButton('X')\n        self.disableButton('X')\n        self.pressButton('G')\n        self.scrollFactor = 10\n    \n    def pressC(self):\n        \"\"\"Set scroll factor to 100\"\"\"\n        self.enableLastButton('C')\n        self.disableButton('C')\n        self.pressButton('G')\n        self.scrollFactor = 100\n    \n    def pressV(self):\n        \"\"\"Set scroll factor to 1000\"\"\"\n        self.enableLastButton('V')\n        self.disableButton('V')\n        self.pressButton('G')\n        self.scrollFactor = 1000\n    \n    def pressF(self):\n        \"\"\"Set focus to price\"\"\"\n        self.enableLastButton2('F')\n        self.disableButton('F')\n        self.focus = 'price'\n        self.pressButton('Z')\n    \n    def pressG(self):\n        \"\"\"Set focus to amount\"\"\"\n        self.enableLastButton2('G')\n        self.disableButton('G')\n        self.focus = 'amount'\n    \nclass MarketSystemBuyValue(MarketSystemSellValue):   \n    def __init__(self, path, mySystemDict, resource, currentPrice):\n        self.mySystemDict = mySystemDict\n        self.resource = resource\n        self.currentPrice = currentPrice\n        self.maxPrice = currentPrice\n        self.maxPriceIncrease = self.maxPrice*4.0\n        y = -0.4\n        x = -1.03\n        RootButton.__init__(self, path, x=x, y=y, name='sys_buy')\n        self.allKeys = ['A','S','D','F','G','Z','X','C','V']\n        self.scale = 0.25\n        self.priceBar = None\n        self.amountBar = None\n        self.maxAmount = 20000.0\n        self.currentAmount = 0.0\n        self.disableButtonTime = 0.5\n        self.disableButtonIgnore = ['S','F','G','Z','X','C','V']\n        self.scrollFactor = 0\n        self.focus = 'price'\n        self.pressButton('G')\n        self.pressButton('V')\n        self.myTitle = 'Set Price and Amount to Buy on Market:'\n        self.createTitleCard('marketTitle','Create Buy Order for %s:' % self.resource,\n                          30,x-0.04, y+0.35)\n        self.createPriceBar()\n        self.createAmountBar()\n        self.enableSubmit()\n\n    def pressS(self):\n        \"\"\"Submit value to server\"\"\"\n        d = {'type':'buy-any', 'value':self.resource, 'min':0, 'max':self.currentPrice, 'amount':self.currentAmount, 'system':self.mySystemDict['id']}\n        self.mode.submitMarketOrder(d)\n    \nif __name__ == \"__main__\":\n    systemDict = {'id':'1','AL':1200, 'EC':110, 'IA':330, 'name':'SystemName1'}\n    myValue = MarketSystemSellValue('media', systemDict, 'EC', 20.0)\n    run()","repo_name":"colshag/ANW","sub_path":"anw/Packages/anw/gui/marketsystemsellvalue.py","file_name":"marketsystemsellvalue.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"37719918677","text":"# example of a cnn for image classification\nfrom numpy 
import asarray\nfrom numpy import unique\nfrom numpy import argmax\nfrom tensorflow.keras.datasets.mnist import load_data\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPool2D # layers progress from shallow to deep\nfrom tensorflow.keras.layers import Flatten # flattens 2D feature maps into 1D\nfrom tensorflow.keras.layers import Dropout # randomly drops some neurons for regularization\n\n# example of loading and plotting the mnist dataset\nfrom matplotlib import pyplot as plt\n\n# # load dataset \n# ## training data (trainX, trainY), test data (testX, testy)\n# (trainX, trainy), (testX, testy) = load_data()\n\n# # summarize loaded dataset\n# print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))\n# print('Test: X=%s, y=%s' % (testX.shape, testy.shape))\n\n# # plot first few images\n# for i in range(2,15):\n# # define subplot\n# plt.subplot(5, 5, i+1)\n# # plot raw pixel data\n# plt.imshow(trainX[i], cmap=plt.get_cmap('Blues'))\n# # show the figure\n\n# plt.show()\n\n\n# load dataset\n(x_train, y_train), (x_test, y_test) = load_data()\n\n# reshape data to have a single channel\nx_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2], 1))\nx_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))\n\n# determine the shape of the input images\nin_shape = x_train.shape[1:]\n# determine the number of classes\nn_classes = len(unique(y_train))\nprint(in_shape, n_classes)\n\n# normalize pixel values\nx_train = x_train.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0\n\n# define model\nmodel = Sequential()\n\n# input: first layer is a Conv2D with 16 filters; input_shape must be given on the first layer\nmodel.add(Conv2D(16, (3,3), activation='relu', kernel_initializer='he_uniform', input_shape=in_shape))\nmodel.add(MaxPool2D((2, 2)))\n\n## stack another block on top of the previous layer, with Conv2D reduced to 8 filters\nmodel.add(Conv2D(8, (3,3), activation='relu', kernel_initializer='he_uniform'))\nmodel.add(MaxPool2D((2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(30, activation='relu', kernel_initializer='he_uniform'))\nmodel.add(Dense(30, activation='relu', kernel_initializer='he_uniform'))\nmodel.add(Dense(30, activation='relu', kernel_initializer='he_uniform'))\n\nmodel.add(Dropout(0.2))\nmodel.add(Dense(n_classes, activation='softmax'))\n\n# define loss and optimizer\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n# fit the model \n## (batch_size: images per batch; too large can exhaust memory, too small trains poorly)\nhistory = model.fit(x_train, y_train, epochs=50, batch_size=200, verbose=1)\nmodel.save('model.h5')\n\n# evaluate the model\nloss, acc = model.evaluate(x_test, y_test, verbose=1)\nprint('Accuracy: %.3f' % acc)\n\n# make a prediction\n## argmax() returns the index of the max value in the array\nimage = x_train[2]\nyhat = model.predict(asarray([image]))\nprint(yhat)\nprint('Predicted: class=%d' % argmax(yhat))\n\n\n\nplt.title(\"Learning Curves\")\nplt.xlabel('Epoch')\nplt.ylabel('Loss and Accuracy')\nplt.plot(history.history['loss'],label='train loss')\nplt.plot(history.history['accuracy'],label='train accuracy')\nplt.legend()\nplt.show()","repo_name":"lllason/Class_AI","sub_path":"object03_classification/mnist01.py","file_name":"mnist01.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22739948895","text":"\"\"\"\nYou are given an array of length n + 1 whose elements belong to the set {1, 2, ..., n}.\nBy the pigeonhole principle, there must be a duplicate. 
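For example, the four-element array [1, 2, 3, 1] draws its values\nfrom {1, 2, 3}, so some value (here 1) must appear twice. 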
Find it in linear time and space.\n\"\"\"\n# O(n) time but also O(n) space\ndef get_duplicate(arr):\n    element_set = set()\n    for num in arr:\n\n        # a duplicate will already exist in the set\n        if num in element_set:\n            return num\n\n        element_set.add(num)\n\n    return None\n\n# O(n) time and O(1) space\n# the sum of the values 1..n is (n*(n+1))/2 (Gauss's formula),\n# so sum the whole array and subtract the sum of 1..n from it;\n# the difference is the duplicated number (this assumes exactly one\n# value appears exactly twice)\ndef get_duplicate_redux(arr):\n    n = len(arr) - 1 # -1 because the array holds n + 1 elements due to the duplicate\n    # print(n)\n    return sum(arr) - (n * (n+1))//2\n\n\nif __name__ == \"__main__\":\n\n    print(\"Duplicate value: {}\".format(get_duplicate_redux([1, 2, 3, 1])))\n\n    print(\"Duplicate value: {}\".format(get_duplicate_redux([1, 2, 3, 4, 5, 6, 6])))\n","repo_name":"RafayAK/CodingPrep","sub_path":"DailyCodingProblem/164_Google_Find_Duplicate_Pigeonhole_Principal.py","file_name":"164_Google_Find_Duplicate_Pigeonhole_Principal.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"46661997917","text":"from pathlib import Path\nimport gc\nimport tensorflow as tf\nfrom tensorflow.keras.applications import vgg16, vgg19, densenet, resnet_v2, inception_v3, resnet50, resnet\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.python.framework.ops import disable_eager_execution\nimport streamlit as st\n\nDEFAULT_CHECKPOINTS_PATH = Path(__file__).resolve().parent.parent / 'models' / 'vgg16' / 'checkpoints'\nDEFAULT_LOG_PATH = Path(__file__).resolve().parent.parent / 'models' / 'vgg16' / 'logs'\n\n# TODO - change img_depth depending on features from dataset - 2048, 512 until now\ndef make_classifier(n_classes, n_hidden=512, img_height=7, img_width=7, img_depth=2048):\n    \n    inputs = layers.Input(shape=(img_height, img_width, img_depth))\n    x = layers.Flatten(name='flatten')(inputs)\n    x = layers.Dense(n_hidden, activation='relu', name='dense_1')(x)\n    x = layers.Dropout(0.2)(x)\n    x = layers.BatchNormalization()(x)\n    x = layers.Dense(n_hidden, activation='relu', name='dense_2')(x)\n    x = layers.Dropout(0.2)(x)\n    x = layers.BatchNormalization()(x)\n    x = layers.Dense(n_hidden, activation='relu', name='dense_3')(x)\n    x = layers.Dropout(0.2)(x)\n    x = layers.BatchNormalization()(x)\n    n_outputs = n_classes if n_classes != 2 else 1 # only one output neuron if it's a binary classification problem\n    activation = 'softmax' if n_classes != 2 else 'sigmoid' # sigmoid if it's a binary classification problem\n    outputs = layers.Dense(n_outputs, activation=activation, name='output')(x)\n    model = tf.keras.Model(inputs, outputs)\n\n    return model\n\n\ndef make_model(n_classes, include_top_vgg=False, n_hidden=512, img_height=224, img_width=224, transfer_learning=True, base_model='vgg16', img_depth=3):\n    \"\"\"\n    Creates a ConvNet classification model using a pre-trained base model (VGG16 by default) for transfer learning.\n    :param n_classes: int - number of classes required for the classification problem\n    :param include_top_vgg: bool - whether or not to include the top of the pre-trained model\n    :param n_hidden: int - number of units in each hidden Dense layer added on top of the pre-trained model\n    :param img_height: int - image height\n    :param img_width: int - image width\n    :param transfer_learning: bool - whether to load pre-trained 'imagenet' weights\n    :param base_model: str - name of the pre-trained base architecture (e.g. 'vgg16', 'resnet50')\n    :param img_depth: int - number of image channels\n    :return: tf.keras.Model - final model\n    \"\"\"\n    if 
transfer_learning:\n weights = 'imagenet'\n else:\n weights = None\n \n if base_model == 'vgg16':\n base_model_net = vgg16.VGG16(include_top=False, weights=weights)\n preprocess_layer = vgg16.preprocess_input\n #base_model = vgg16.VGG16(include_top=False, pooling='max', weights=weights)\n if base_model == 'vgg19':\n base_model_net = vgg19.VGG19(include_top=False, weights=weights)\n preprocess_layer = vgg19.preprocess_input\n elif base_model == 'densenet201':\n base_model_net = densenet.DenseNet201(include_top=False, weights=weights)\n preprocess_layer = densenet.preprocess_input\n elif base_model == 'densenet169':\n base_model_net = densenet.DenseNet169(include_top=False, weights=weights)\n preprocess_layer = densenet.preprocess_input\n elif base_model == 'densenet121':\n base_model_net = densenet.DenseNet121(include_top=False, weights=weights)\n preprocess_layer = densenet.preprocess_input\n elif base_model == 'resnet152v2':\n base_model_net = resnet_v2.ResNet152V2(include_top=False, weights=weights)\n preprocess_layer = resnet_v2.preprocess_input\n elif base_model == 'resnet50':\n base_model_net = resnet50.ResNet50(include_top=False, weights=weights)\n preprocess_layer = resnet50.preprocess_input\n elif base_model == 'resnet152':\n base_model_net = resnet.ResNet152(include_top=False, weights=weights)\n preprocess_layer = resnet.preprocess_input \n elif base_model == 'resnet101':\n base_model_net = resnet.ResNet101(include_top=False, weights=weights)\n preprocess_layer = resnet.preprocess_input\n elif base_model == 'inception_v3':\n base_model_net = inception_v3.InceptionV3(include_top=False, weights=weights)\n preprocess_layer = inception_v3.preprocess_input\n elif base_model == 'classifier':\n return make_classifier(n_classes, n_hidden, img_height, img_width, img_depth=img_depth)\n \n data_augmentation = tf.keras.Sequential([\n layers.experimental.preprocessing.RandomFlip('horizontal'),\n layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n\n inputs = layers.Input(shape=(img_height, img_width, img_depth))\n x = data_augmentation(inputs)\n x = preprocess_layer(x)\n x = base_model_net(x, training=False)\n #x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)\n #x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)\n #x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Flatten(name='flatten')(x)\n x = layers.Dense(n_hidden, activation='relu', name='dense_1')(x)\n x = layers.Dropout(0.2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Dense(n_hidden, activation='relu', name='dense_2')(x)\n x = layers.Dropout(0.2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Dense(n_hidden, activation='relu', name='dense_3')(x)\n x = layers.Dropout(0.2)(x)\n x = layers.BatchNormalization()(x)\n n_outputs = n_classes if n_classes != 2 else 1 # only one output neuron if it's a binary classification problem\n activation = 'softmax' if n_classes != 2 else 'sigmoid' # sigmoid if it's a binary classification problem\n outputs = layers.Dense(n_outputs, activation=activation, name='output')(x)\n model = tf.keras.Model(inputs, outputs)\n\n return model\n\n\ndef make_simple_model(n_classes, include_top_vgg=False, n_hidden=512, img_height=224, img_width=224, transfer_learning=True):\n \n data_augmentation = tf.keras.Sequential([\n layers.experimental.preprocessing.RandomFlip('horizontal'),\n layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n inputs = layers.Input(shape=(img_height, img_width, 3))\n x = data_augmentation(inputs)\n x = layers.Conv2D(32, (3, 3), 
activation='relu', padding='same')(x)\n x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)\n x = layers.MaxPooling2D((2, 2))(x)\n x = layers.Flatten(name='flatten')(x)\n x = layers.Dense(512, activation='relu', name='dense_1')(x)\n x = layers.Dropout(0.2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Dense(512, activation='relu', name='dense_2')(x)\n x = layers.Dropout(0.2)(x)\n x = layers.BatchNormalization()(x)\n n_outputs = n_classes if n_classes != 2 else 1 # only one output neuron if it's a binary classification problem\n activation = 'softmax' if n_classes != 2 else 'sigmoid' # sigmoid if it's a binary classification problem\n outputs = layers.Dense(n_outputs, activation=activation, name='output')(x)\n model = tf.keras.Model(inputs, outputs)\n\n return model\n\n\ndef freeze_all_base_model(model, base_model='vgg16'):\n for layer in model.layers:\n if base_model in layer.name:\n for base_model_layer in layer.layers:\n base_model_layer.trainable = False\n\n\ndef unfreeze_last_base_model(model, which_freeze=15, base_model='vgg16'):\n for layer in model.layers:\n if base_model in layer.name:\n for base_model_layer in layer.layers[:which_freeze]:\n base_model_layer.trainable = False\n for base_model_layer in layer.layers[which_freeze:]:\n base_model_layer.trainable = True\n\n\ndef unfreeze_all_base_model(model, base_model='vgg16'):\n for layer in model.layers:\n if base_model in layer.name:\n for base_model_layer in layer.layers:\n base_model_layer.trainable = True\n\n\ndef print_vgg_trainable(model):\n for layer in model.layers:\n if 'vgg' in layer.name:\n for i, vgg_layer in enumerate(layer.layers):\n print(i, vgg_layer.name, vgg_layer.trainable)\n\n\ndef loss_definition(n_classes):\n return tf.keras.losses.CategoricalCrossentropy() if n_classes > 2 else tf.keras.losses.BinaryCrossentropy()\n\n\ndef initial_model(n_classes, n_hidden=512, img_height=224, img_width=224, seed=None, transfer_learning=True, base_model='vgg16', img_depth=3):\n if seed is not None:\n tf.random.set_seed(seed)\n\n model = make_model(n_classes=n_classes, n_hidden=n_hidden, img_height=img_height, img_width=img_width, transfer_learning=transfer_learning, base_model=base_model, img_depth=img_depth)\n #model = make_simple_model(n_classes=n_classes, n_hidden=n_hidden, img_height=img_height, img_width=img_width,\n # transfer_learning=transfer_learning)\n #model = make_classifier(n_classes=n_classes, n_hidden=n_hidden, img_height=img_height, img_width=img_width, img_depth=2048)\n if transfer_learning:\n freeze_all_base_model(model, base_model=base_model)\n else:\n unfreeze_all_base_model(model, base_model=base_model)\n \n return model\n\n\ndef callbacks_definition(log_path=DEFAULT_LOG_PATH, 
checkpoints_path=DEFAULT_CHECKPOINTS_PATH,\n                         streamlit_callbacks=None, base_epochs=30, fine_tuning_epochs=30):\n    tb = TensorBoard(log_dir=log_path)\n    checkpoint = ModelCheckpoint(checkpoints_path / 'train_{epoch}.tf', verbose=1, save_weights_only=False,\n                                 save_best_only=True, monitor='val_loss')\n    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4, verbose=1)\n    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, restore_best_weights=True)\n    \n    \n    callbacks = [tb, checkpoint, reduce_lr, early_stopping]\n    #callbacks = [checkpoint, reduce_lr, early_stopping]\n    if streamlit_callbacks is not None: # used only with streamlit web application\n        class ProgressBarCallback(tf.keras.callbacks.Callback):\n            def __init__(self, base_epochs, fine_tuning_epochs):\n                super().__init__()\n                self.state = 'base'\n                self.base_epochs = base_epochs\n                self.actual_base_epochs = 0\n                self.fine_tuning_epochs = fine_tuning_epochs\n                self.placeholders = []\n                self.epoch = 0\n\n            def on_train_begin(self, logs=None):\n                streamlit_callbacks[0](\"###### STARTED \" + self.state.upper() + \" TRAINING\")\n                self.placeholders.append(st.empty())\n                with self.placeholders[-1]:\n                    streamlit_callbacks[1](0.0)\n\n            def on_train_end(self, logs=None):\n                with self.placeholders[-1]:\n                    streamlit_callbacks[1](1.0)\n                streamlit_callbacks[0](\"###### FINISHED \" + self.state.upper() + \" TRAINING\")\n                if self.state == 'base':\n                    self.state = 'fine_tuning'\n                    self.actual_base_epochs = self.epoch + 1 # keep the epoch number even if early stopped\n\n            def on_epoch_end(self, epoch, logs=None):\n                self.epoch = epoch\n                with self.placeholders[-1]:\n                    epoch = epoch if self.state == 'base' else (epoch - self.actual_base_epochs)\n                    streamlit_callbacks[1](\n                        (epoch + 1) / (self.base_epochs if self.state == 'base' else (self.fine_tuning_epochs +\n                                                                                     (self.base_epochs -\n                                                                                      self.actual_base_epochs))))\n        callbacks = callbacks + [ProgressBarCallback(base_epochs=base_epochs, fine_tuning_epochs=fine_tuning_epochs)]\n    # FIXME: tidy up this ad-hoc timing callback\n    import time\n    import matplotlib.pyplot as plt\n    class timecallback(tf.keras.callbacks.Callback):\n        def __init__(self):\n            self.times = []\n            # use this value as reference to calculate cumulative time taken\n            self.timetaken = time.time()\n        def on_epoch_end(self, epoch, logs=None):\n            self.times.append((epoch, time.time() - self.timetaken))\n        def on_train_end(self, logs=None):\n            plt.xlabel('Epoch')\n            plt.ylabel('Total time taken until an epoch in seconds')\n            plt.plot(*zip(*self.times))\n            plt.show()\n    callbacks = callbacks + [timecallback()]\n    return callbacks\n\n\ndef get_best_model_name(checkpoints_path=DEFAULT_CHECKPOINTS_PATH):\n    # returns the last integer number inside the name of the best model file\n    return sorted([(int(x.name.split('_')[1].split('.')[0]), x.name) for x in list(Path(checkpoints_path).iterdir()) if len(x.name.split('_')) > 1])[-1][1]\n\n\ndef load_best_model(checkpoints_path=DEFAULT_CHECKPOINTS_PATH):\n    # loads the best model from checkpoints folder\n    best_model_name = get_best_model_name(checkpoints_path=checkpoints_path)\n    print('USER - Restoring model weights from the end of the best epoch:', best_model_name.split('_')[-1].split('.')[0])\n    model = tf.keras.models.load_model(Path(checkpoints_path) / best_model_name) # loads the best model even if early_stopping does not trigger\n    return model\n\n\ndef train(model, train_ds, valid_ds, n_classes, base_epochs=30, fine_tuning_epochs=30, fine_tune_at_layer=15,\n          fine_tuning_lr=0.001, callbacks=None, seed=None, transfer_learning=True, 
base_model='vgg16', checkpoints_path=DEFAULT_CHECKPOINTS_PATH, base_lr=0.001, metrics=['accuracy']):\n if seed is not None:\n tf.random.set_seed(seed)\n \n metrics_ = [tf.keras.metrics.AUC(curve='ROC', name='roc_auc') if metric == 'roc_auc' else metric for metric in metrics]\n metrics_ = [tf.keras.metrics.AUC(curve='PR', name='pr_auc') if metric == 'pr_auc' else metric for metric in metrics_]\n loss = loss_definition(n_classes=n_classes)\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_lr), loss=loss, metrics=metrics_)\n history = model.fit(train_ds, epochs=base_epochs, validation_data=valid_ds, callbacks=callbacks)\n #history = model.fit(train_ds, epochs=base_epochs, validation_data=valid_ds, callbacks=callbacks, validation_steps=valid_ds.cardinality())\n \n gc.collect()\n del model\n gc.collect()\n \n clear_session()\n #disable_eager_execution()\n model = load_best_model(checkpoints_path=checkpoints_path)\n\n if transfer_learning and (fine_tuning_epochs > 0):\n unfreeze_last_base_model(model, which_freeze=fine_tune_at_layer, base_model=base_model)\n\n total_epochs = base_epochs + fine_tuning_epochs\n loss = loss_definition(n_classes=n_classes)\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=fine_tuning_lr), loss=loss, metrics=metrics_)\n\n if seed is not None:\n tf.random.set_seed(seed)\n\n history = model.fit(train_ds, epochs=total_epochs, validation_data=valid_ds, callbacks=callbacks,\n initial_epoch=history.epoch[-1] + 1)\n \n gc.collect()\n del model\n gc.collect()\n \n clear_session()\n #disable_eager_execution()\n model = load_best_model(checkpoints_path=checkpoints_path)\n return model, history\n","repo_name":"lucasmfaria/image_classifier","sub_path":"utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16127,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"5553894048","text":"'''\r\nCreated on 04-Nov-2016\r\n\r\n@author: BALASUBRAMANIAM\r\n'''\r\nfrom xml.dom import minidom\r\n\r\ndoc=minidom.Document()\r\n\r\nproducts = doc.createElement(\"Products\")\r\ndoc.appendChild(products)\r\nproduct1 = doc.createElement(\"Product\")\r\nid=doc.createElement(\"Id\")\r\ndata=doc.createTextNode(\"32477\")\r\nid.appendChild(data)\r\nproduct1.appendChild(id)\r\nname=doc.createElement(\"name\")\r\ndata=doc.createTextNode(\"Wheel\")\r\nname.appendChild(data)\r\nproduct1.appendChild(name)\r\ncost=doc.createElement(\"cost\")\r\ndata=doc.createTextNode(\"6000\")\r\ncost.appendChild(data)\r\nproduct1.appendChild(cost)\r\nproducts.appendChild(product1)\r\nproduct2 =doc.createElement(\"Product\")\r\nid=doc.createElement(\"Id\")\r\ndata=doc.createTextNode(\"32480\")\r\nid.appendChild(data)\r\nproduct2.appendChild(id)\r\nname=doc.createElement(\"name\")\r\ndata=doc.createTextNode(\"Bumper\")\r\nname.appendChild(data)\r\nproduct2.appendChild(name)\r\ncost=doc.createElement(\"cost\")\r\ndata=doc.createTextNode(\"1200\")\r\ncost.appendChild(data)\r\nproduct2.appendChild(cost)\r\nproducts.appendChild(product2)\r\n\r\nxml_str = doc.toprettyxml(indent=\" \")\r\nwith open(\"Products.xml\", \"w\") as f:\r\n f.write(xml_str)\r\n","repo_name":"eswaribala/pythontraining","sub_path":"productxmlwriting.py","file_name":"productxmlwriting.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11196677532","text":"from __future__ import print_function\n\n# multibody.py\n# Aritra Biswas\n#\n# 
Framework for evolving motion of multiple particles under mutual\n# interactions.\n\nfrom Vector import *\nimport math\nimport visual as vs\nimport random\nimport time\nimport cPickle as pickle\nimport copy\n\nclass Particle:\n '''A particle in 3D space, holding a position and velocity.'''\n\n def __init__(self,\n pos = Vector([0.0, 0.0, 0.0]),\n vel = Vector([0.0, 0.0, 0.0]),\n mass = 1.0,\n radius = 0.1,\n ):\n\n assert isinstance(pos, Vector) and len(pos) == 3\n assert isinstance(vel, Vector) and len(vel) == 3\n\n self.pos = pos\n self.vel = vel\n self.mass = mass\n self.radius = radius\n \n def set_pos(self, pos):\n '''Set the position in Cartesian coordinates.'''\n \n assert isinstance(pos, Vector) and len(pos) == 3\n self.pos = pos\n \n def set_vel(self, vel):\n '''Set the velocity in Cartesian coordinates.'''\n\n assert isinstance(vel, Vector) and len(vel) == 3\n self.vel = vel\n\n def set_pos_sph(self, pos_sph):\n '''Set the position in spherical coordinates, where the\n input pos_ph = [r, theta, phi]. The physics convention is\n used, where phi is the azimuthal angle.'''\n\n assert isinstance(pos_sph, Vector) and len(pos_sph) == 3\n (r, theta, phi) = list(pos_sph)\n\n x = r * math.sin(theta) * math.cos(phi)\n y = r * math.sin(theta) * math.sin(phi)\n z = r * math.cos(theta)\n\n pos = Vector([x, y, z])\n self.set_pos(pos)\n\n def set_vel_sph(self, vel_sph):\n '''Set the velocity in spherical coordinates, where the\n input vel_sph = [v, theta, phi]. The physics convention is\n used, where phi is the azimuthal angle.'''\n\n assert isinstance(vel_sph, Vector) and len(vel_sph) == 3\n (v, theta, phi) = list(vel_sph)\n\n vx = v * math.sin(theta) * math.cos(phi)\n vy = v * math.sin(theta) * math.sin(phi)\n vz = v * math.cos(theta)\n\n vel = Vector([vx, vy, vz])\n self.set_vel(vel)\n\ndef grav_force(test, source):\n '''Returns the force vector of the particle test due to\n the gravitational force from particle source.'''\n\n assert isinstance(test, Particle)\n assert isinstance(source, Particle)\n\n G = 1\n\n sep = source.pos - test.pos\n force = G * test.mass * source.mass * sep / math.pow(abs(sep), 3)\n\n return force\n\ndef soft_grav_force(test, source):\n '''Similar to grav_force, except the effective distance between two\n particles is always incremented by a small offset, such that the force\n does not blow up when particles are near each other.'''\n \n assert isinstance(test, Particle)\n assert isinstance(source, Particle)\n\n G = 1\n a = 0.1\n\n sep = source.pos - test.pos\n force = G * test.mass * source.mass * sep / (math.pow(abs(sep) + a, 3))\n\n return force\n\nclass Cluster:\n '''A cluster of particles interacting with each other under some\n force.'''\n\n # INITIALIZATION AND PARTICLE MEMBERSHIP \n\n def __init__(self):\n\n # start cluster without any member particles\n self.particles = []\n\n # interaction force between pairs of particles\n # force(test, source) should return force vector on test\n self.force = grav_force\n\n # keep track of evolution timesteps\n self.timestep = 0\n\n def add_particle(self, particle):\n '''Add a particle to the cluster.'''\n\n assert isinstance(particle, Particle)\n self.particles.append(particle)\n\n # EVOLUTION\n\n def evolve(self, dt):\n '''Evolve by flipping between two types of symplectic Euler\n steps. 
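Alternating the\n        velocity-first and position-first updates composes the two\n        symplectic Euler maps, which is closely related to the\n        second-order Stormer-Verlet (leapfrog) scheme. 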
Output the current step and time.'''\n\n        if self.timestep % 2 == 0:\n            self.evolve_symplectic1(dt)\n        else:\n            self.evolve_symplectic2(dt)\n        \n        out = \"step = {} \\t time = {}\".format(\n                self.timestep,\n                self.timestep * dt)\n        print(out, end = \"\\r\")\n        \n        self.timestep += 1\n\n\n    def evolve_symplectic1(self, dt):\n        '''Using the symplectic Euler method, update the positions of\n        all particles in the cluster for a timestep dt. Update\n        velocities before positions.'''\n\n        for test in self.particles:\n\n            # calculate the net acceleration on test\n            force = Vector([0.0, 0.0, 0.0])\n            for source in self.particles:\n                if source != test:\n                    force = force + self.force(test, source)\n            accel = force / test.mass\n\n            # use current acceleration to update velocity\n            test.set_vel(test.vel + accel * dt)\n\n            # use updated velocity to update position\n            test.set_pos(test.pos + test.vel * dt)\n    \n    def evolve_symplectic2(self, dt):\n        '''Using the symplectic Euler method, update the positions of\n        all particles in the cluster for a timestep dt. Update positions\n        before velocities.'''\n\n        for test in self.particles:\n            \n            # use current velocity to update position\n            test.set_pos(test.pos + test.vel * dt)\n\n            # calculate the net acceleration on test\n            force = Vector([0.0, 0.0, 0.0])\n            for source in self.particles:\n                if source != test:\n                    force = force + self.force(test, source)\n            accel = force / test.mass\n\n            # use updated acceleration to update velocity\n            test.set_vel(test.vel + accel * dt)\n\n    # VPYTHON VISUALIZATION\n\n    def vs_init(self):\n        '''Set up the visualization with the current particles.'''\n\n        # define a list of acceptable colors to use for particles\n        colors = [vs.color.red, vs.color.yellow, vs.color.green,\n                vs.color.orange, vs.color.white, vs.color.blue,\n                vs.color.cyan, vs.color.magenta]\n\n        # initialize a dictionary to link particles with visual objects,\n        # such that vs_map[particle] = vs_object\n        self.vs_map = {}\n\n        for particle in self.particles:\n\n            # create visual object using particle parameters\n            vs_obj = vs.sphere()\n            vs_obj.radius = particle.radius\n            vs_obj.pos = list(particle.pos)\n            vs_obj.color = random.choice(colors)\n\n            # link visual object to particle\n            self.vs_map[particle] = vs_obj\n\n    def vs_update(self):\n        '''Update the visualization with the current positions of\n        the particles.'''\n\n        for particle in self.particles:\n            vs_obj = self.vs_map[particle]\n            vs_obj.pos = list(particle.pos)\n\n    def vs_run(self, dt, tx):\n        '''Continuously evolve and update the visual simulation\n        with timestep dt and visual speed tx relative to real-time.'''\n\n        while True:\n            self.evolve(dt)\n            vs.rate(tx / dt)\n            self.vs_update()\n\n    # OUTPUT SAVING\n\n    def io_run(self, dt, steps, wrstep, name = \"out.pkl\"):\n        '''Evolve the cluster for given steps with timestep dt and write\n        out the cluster every wrstep timesteps.'''\n\n        f_output = open(name, \"wb\")\n\n        while self.timestep < steps:\n\n            self.evolve(dt)\n\n            if self.timestep % wrstep == 0:\n\n                obj = copy.deepcopy(self)\n                pickle.dump(obj, f_output)\n                del obj\n                \n                print(\"wrote @ step {}\".format(self.timestep))\n\n        f_output.close()\n","repo_name":"xerebus/ph22","sub_path":"set4/multibody.py","file_name":"multibody.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25032124796","text":"\"\"\"This module contains software for completing the LASP coding\nexercise. 
The goal of the exercise is to calculate the measured\nirradiance from the SOLar-STellar Irradiance Comparison Experiment\n(SOLSTICE).\n\nThe main tasks of this exercise are to:\n    (1) Calculate the irradiance in watts/nm/m^2 for the UpScan and\n        DownScan experiments and compare the results\n    (2) Plot the irradiance as a function of wavelength around two\n        emission lines located at ~180nm\n    (3) Calculate and plot the ratio of the irradiances at each\n        wavelength for each scan with respect to the reference spectrum\n\nAfter running this code, a 'plots/' directory will be created and\ncontain three plots:\n\n    - instrument_data.html : plots the data acquired from the\n      instrument\n    - irradiance.html : plots the measured irradiance of the UpScan and\n      DownScan experiments along with the reference spectrum\n    - irradiance_ratios.html : plots the ratio of the UpScan and\n      DownScan experiments with respect to the reference spectrum\n\nUse\n---\n\n    To perform these calculations and make corresponding plots:\n\n    From the command line:\n\n        python exercise.py\n\n    Or from within a python interpreter:\n\n        ie = IrradianceExercise()\n        ie.get_data()\n        ie.make_plots_input_data()\n        ie.run_calculations()\n        ie.make_plots_results()\n\n    It is required that a 'data/' directory exists in the working\n    directory, and that it contains the following data files:\n\n        - detectorTemp.txt : in degrees Celsius. It is roughly sampled\n          at 1 second.\n        - distanceAndDoppler.txt : These are the corrections used to\n          adjust for the changing\n          distance and velocity of the spacecraft relative to the sun.\n        - instrumentTelemetry.txt : Includes grating position and\n          measured detector counts. The detector counts correspond\n          to the number of photons detected within the currently set\n          integrationTime (in milli-seconds).\n        - integrationTime.txt : This is the currently set integration\n          time (milli-seconds) of the instrument. These are sampled\n          at a different cadence than the instrumentTelemetry. Assume\n          the value is constant until there is a new value.\n        - plans.txt : This file includes the experiment names with\n          start/end times. You can find the\n          time ranges of the plans of interest here.\n        - referenceSpectrum.txt : This is a reference spectrum with\n          accurate wavelengths. The current irradiance measurements\n          will be within 15% of this spectrum.\n\nDependencies\n------------\n\n    - bokeh\n    - numpy\n    - pandas\n\n    The user may utilize the provided 'requirements.txt' and/or\n    'environment.yml' files to create the necessary software\n    environment to run the code. 
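For example (standard\n    pip/conda usage; the exact commands are not part of this exercise):\n\n        pip install -r requirements.txt\n        # or\n        conda env create -f environment.yml\n\n    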
More details about this are provided\n in the README\n\"\"\"\n\nfrom collections import namedtuple\nimport datetime\n\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import BoxAnnotation, NumeralTickFormatter\nfrom bokeh.plotting import figure, output_file, save\nimport numpy as np\nimport pandas\n\n\nclass IrradianceExercise():\n \"\"\"Main class for completing the exercise\"\"\"\n\n def __init__(self):\n \"\"\"Initializes a IrradianceExercise class object\"\"\"\n\n # Set some constant values\n self.aperture_Area = .01 / (1E2 * 1E2) # [m^2]\n self.c = 299792458.0 # [m/s]\n self.corrFactor = 0.0061628\n self.d = 277.77777777777777 # [nm]\n self.detectorTemperatureCorr = 0.0061628\n self.h = 6.62606957E-34 # [J*s]\n self.offset = 239532.38\n self.phiGInRads = 0.08503244115716374 # [rad]\n self.stepSize = 2.4237772022101214E-6 # [rad]\n\n # A place to store results\n self.results = namedtuple('results', field_names=[])\n\n def _convert_to_datetime(self, times):\n \"\"\"Convert a list of times (in microseconds) to a list of\n datetime objects for convenience in plotting.\n\n Parameters\n ----------\n times : list\n A list of times, e.g. ['9.434207077723681E14', ...]\n \"\"\"\n\n converted_times = []\n for time in times:\n converted_times.append(self.data_collection_start + datetime.timedelta(microseconds=time))\n\n return converted_times\n\n def _find_closest_measurements(self, data_to_match, data_to_search, data_type):\n \"\"\"Convenience method to find measurements (e.g. detector\n temperatures, distance corrections) that correspond to the\n given data's time measurements.\n\n The detector temperature and distance/doppler correction values\n are sampled at a different cadence than other data (e.g. grating\n positions). This method will return a list of measurement\n values that match to the given 'data' as close as possible in\n time.\n\n Currently, this function only supports searching over detector\n temperature and distance/doppler correction data.\n\n Parameters\n ----------\n data_to_match : pandas.core.frame.DataFrame object\n The data from which to find the closest values in time\n data_to_search : pandas.core.frame.DataFrame object\n The data to search over (e.g. detector temperatures)\n data_type : str\n The type of data to search over. 
Currently only\n 'temperature', 'distance_correction', and 'doppler_factor'\n are supported.\n\n Returns\n -------\n matched_values : np.array object\n The matched temperature values\n \"\"\"\n\n matched_values = []\n for index, row in data_to_match.iterrows():\n time_to_match = row['time']\n matched_index = np.argmin(np.abs(data_to_search['time'].values - time_to_match))\n matched_value = data_to_search[data_type].values[matched_index]\n matched_values.append(matched_value)\n\n return np.array(matched_values)\n\n def _indicate_experiments(self, plot):\n \"\"\"Given a bokeh plot, add a shaded region that indicate the\n time span of the downscan, dark, and upscan experiments\n\n Parameters\n ----------\n plot : bokeh.plotting.figure object\n The bokeh plot to add shaded regions to\n \"\"\"\n\n start_times = [self.downscan_start_dt, self.dark_start_dt, self.upscan_start_dt]\n end_times = [self.downscan_end_dt, self.dark_end_dt, self.upscan_end_dt]\n colors = ['green', 'gray', 'red']\n\n for start, end, color in zip(start_times, end_times, colors):\n plot.add_layout(BoxAnnotation(left=start, right=end, fill_color=color, line_color=color, fill_alpha=0.1))\n\n return plot\n\n def calculate_count_rate(self):\n \"\"\"Calculate the count rate:\n\n count_rate = counts / integrationTime [counts/s/nm]\n\n where:\n counts is in [counts/nm]\n integrationTime is in [s]\n \"\"\"\n\n # Convert integration times from milliseconds to seconds\n # Also simplify it down to one value since it is constant throughout the experiment\n integration_time_downscan_s = [time * 0.001 for time in self.data.integration_data_downscan['integration_time']][0]\n integration_time_upscan_s = [time * 0.001 for time in self.data.integration_data_upscan['integration_time']][0]\n\n # Calculate count rates\n count_rates_downscan = self.data.grating_data_downscan['counts'].values / integration_time_downscan_s\n count_rates_upscan = self.data.grating_data_upscan['counts'].values / integration_time_upscan_s\n\n # Store results for later use in plotting\n self.results.count_rates_downscan = count_rates_downscan\n self.results.count_rates_upscan = count_rates_upscan\n\n def calculate_count_rate_corr(self):\n \"\"\"Apply correction to count rate due to changes in temperature:\n\n count_rate_corr = count_rate * (1.0 + detectorTemperatureCorr * (20.0 - detectorTemp))\n\n where:\n detectorTemperatureCorr = 0.0061628\n \"\"\"\n\n # Find detector temperature measurements closest in time with grading position data\n matched_temperatures_downscan = self._find_closest_measurements(\n self.data.grating_data_downscan, self.data.temperature_data_downscan, 'temperature')\n matched_temperatures_upscan = self._find_closest_measurements(\n self.data.grating_data_upscan, self.data.temperature_data_upscan, 'temperature')\n\n # Apply temperature correction\n count_rate_corr_downscan = self.results.count_rates_downscan * \\\n (1.0 + self.detectorTemperatureCorr * (20.0 - np.array(matched_temperatures_downscan)))\n count_rate_corr_upscan = self.results.count_rates_upscan * \\\n (1.0 + self.detectorTemperatureCorr * (20.0 - np.array(matched_temperatures_upscan)))\n\n # Store results for later use in plotting\n self.results.count_rate_corr_downscan = count_rate_corr_downscan\n self.results.count_rate_corr_upscan = count_rate_corr_upscan\n\n def calculate_energy_per_photon(self):\n \"\"\"Calculate the energy per photon:\n\n energyPerPhotons = h * c / wavelength [J]\n\n where:\n h = 6.62606957E-34 [J*s]\n c = 299792458.0 [m/s]\n wavelength is in [m]\n \"\"\"\n\n 
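# Reference arithmetic (an assumed sanity check, not taken from the\n        # data): near the ~180 nm emission lines,\n        # E = hc/lambda = (6.62606957E-34 * 299792458.0) / 180E-9 ~= 1.1E-18 J.\n        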
# Convert wavelengths from nm to m\n        wavelengths_downscan_m = self.results.wavelengths_downscan * 1E-9\n        wavelengths_upscan_m = self.results.wavelengths_upscan * 1E-9\n\n        # Calculate energy per photon\n        energyPerPhotons_downscan = self.h * self.c / wavelengths_downscan_m\n        energyPerPhotons_upscan = self.h * self.c / wavelengths_upscan_m\n\n        # Store results for later use in plotting\n        self.results.energyPerPhotons_downscan = energyPerPhotons_downscan\n        self.results.energyPerPhotons_upscan = energyPerPhotons_upscan\n\n    def calculate_irradiance(self):\n        \"\"\"Calculate the solar irradiance:\n\n            wattsPerM2_1AU = wattsPerM2 / sunObserverDistanceCorrection\n\n        where:\n            wattsPerM2 = photons_per_second_per_m2 * energyPerPhoton\n            sunObserverDistanceCorrection scales the measurement to 1 AU\n            (it corrects for the changing sun-spacecraft distance)\n        \"\"\"\n\n        # Calculate watts/m^2\n        wattsPerM2_downscan = self.results.photons_per_second_per_m2_downscan * self.results.energyPerPhotons_downscan\n        wattsPerM2_upscan = self.results.photons_per_second_per_m2_upscan * self.results.energyPerPhotons_upscan\n\n        # Find distance correction measurements closest in time with grating position data\n        matched_distances_downscan = self._find_closest_measurements(\n            self.data.grating_data_downscan, self.data.distance_data_downscan, 'distance_correction')\n        matched_distances_upscan = self._find_closest_measurements(\n            self.data.grating_data_upscan, self.data.distance_data_upscan, 'distance_correction')\n\n        # Calculate irradiance\n        irradiance_downscan = wattsPerM2_downscan / matched_distances_downscan\n        irradiance_upscan = wattsPerM2_upscan / matched_distances_upscan\n\n        # Store results for later use in plotting\n        self.results.irradiance_downscan = irradiance_downscan\n        self.results.irradiance_upscan = irradiance_upscan\n\n    def calculate_median_dark_count_rate(self):\n        \"\"\"Calculate the median dark count rate, applying the temperature\n        correction described in calculate_count_rate_corr():\n\n            median_dark_count_rate = median(dark_count_rate * ( 1.0 + detectorTemperatureCorr * ( 20.0 - detectorTemp)))\n\n        where:\n            dark_count_rate = dark_counts / dark_integrationTime  (with dark_integrationTime in [s])\n            detectorTemperatureCorr = 0.0061628\n        \"\"\"\n\n        # Calculate dark count rates; the integration time is constant during\n        # the dark experiment, so use the single value and convert ms -> s for\n        # consistency with calculate_count_rate\n        dark_counts = self.data.grating_data_dark['counts'].values\n        dark_integration_time_s = self.data.integration_data_dark['integration_time'].values[0] * 0.001\n        dark_count_rates = dark_counts / dark_integration_time_s\n\n        # Find detector temperature measurements closest in time with grating position data\n        matched_temperatures_dark = self._find_closest_measurements(\n            self.data.grating_data_dark, self.data.temperature_data_dark, 'temperature')\n\n        # Apply temperature correction\n        median_dark_count_rate = np.median(dark_count_rates * \\\n            (1.0 + self.detectorTemperatureCorr * (20.0 - np.array(matched_temperatures_dark))))\n\n        # Store results for later use in plotting\n        self.results.median_dark_count_rate = median_dark_count_rate\n\n    def calculate_photons_per_second_per_m2(self):\n        \"\"\"Calculate the number of photons per second per square meter:\n\n            photons_per_second_per_m2 = (count_rate_corr - median_dark_count_rate) / aperture_Area [photons/sec/m^2/nm]\n\n        where:\n            aperture_Area = .01 / (1E2 * 1E2) [m^2]\n        \"\"\"\n\n        # Calculate photons/s/m^2\n        aperture_area = self.aperture_Area # .01 / (1E2 * 1E2) [m^2]\n        photons_per_second_per_m2_downscan = (\n            self.results.count_rate_corr_downscan - self.results.median_dark_count_rate) / aperture_area\n        photons_per_second_per_m2_upscan = (\n            self.results.count_rate_corr_upscan - self.results.median_dark_count_rate) / aperture_area\n\n        # Store results 
for later use in plotting\n        self.results.photons_per_second_per_m2_downscan = photons_per_second_per_m2_downscan\n        self.results.photons_per_second_per_m2_upscan = photons_per_second_per_m2_upscan\n\n    def calculate_wavelengths(self):\n        \"\"\"Calculate wavelengths (in nm) from the grating positions via\n        the grating equation:\n\n            wavelength = 2 * d * sin(ang) * cos(phiGInRads / 2.0) [nm]\n\n        where:\n            d = 277.77777777777777 [nm]\n            ang = (offset - gratingPosition) * stepSize\n            phiGInRads = 0.08503244115716374 [rad]\n            offset = 239532.38\n            stepSize = 2.4237772022101214E-6 [rad]\n\n        A correction is applied to the wavelength to take the changing\n        velocity of the spacecraft into account.\n        \"\"\"\n\n        # Calculate angle of incidence\n        angles_downscan = ((self.offset - self.data.grating_data_downscan['grating_position']) * self.stepSize).values\n        angles_upscan = ((self.offset - self.data.grating_data_upscan['grating_position']) * self.stepSize).values\n\n        # Calculate wavelength\n        wavelengths_downscan = 2 * self.d * np.sin(angles_downscan) * np.cos(self.phiGInRads / 2.0)\n        wavelengths_upscan = 2 * self.d * np.sin(angles_upscan) * np.cos(self.phiGInRads / 2.0)\n\n        # Find doppler correction measurements closest in time with wavelength data\n        matched_doppler_downscan = self._find_closest_measurements(\n            self.data.grating_data_downscan, self.data.distance_data_downscan, 'doppler_factor')\n        matched_doppler_upscan = self._find_closest_measurements(\n            self.data.grating_data_upscan, self.data.distance_data_upscan, 'doppler_factor')\n\n        # Apply correction for doppler factor\n        wavelengths_downscan = wavelengths_downscan / matched_doppler_downscan\n        wavelengths_upscan = wavelengths_upscan / matched_doppler_upscan\n\n        # Store results for later use in plotting\n        self.results.wavelengths_downscan = wavelengths_downscan\n        self.results.wavelengths_upscan = wavelengths_upscan\n\n    def get_data(self):\n        \"\"\"Read in data from input data files and store data within\n        class object via a 'data' attribute. This method also creates\n        other attributes (e.g. experiment start/end times, subsets of\n        data for specific experiments, etc.) 
for convenience in\n calculations and/or plotting.\n \"\"\"\n\n # Package data into namedtuple object\n self.data = namedtuple('data', field_names=[])\n self.data.temperature_data = pandas.read_csv('data/detectorTemp.txt', names=['time', 'temperature'], header=0)\n self.data.distance_data = pandas.read_csv('data/distanceAndDoppler.txt', names=['time', 'distance_correction', 'doppler_factor'], header=0)\n self.data.grating_data = pandas.read_csv('data/instrumentTelemetry.txt', names=['time', 'grating_position', 'counts'], header=0)\n self.data.integration_data = pandas.read_csv('data/integrationTime.txt', names=['time', 'integration_time'], header=0)\n self.data.reference_spectrum_data = pandas.read_csv('data/referenceSpectrum.txt', names=['wavelength', 'irradiance'], header=0)\n self.data.plan_data = pandas.read_csv('data/plans.txt', names=['plan', 'start_time', 'end_time'], header=0)\n\n # Set some other attributes for convenience\n self.data_collection_start = datetime.datetime(1980, 1, 6, 0, 0)\n self.downscan_start = self.data.plan_data['start_time'][0]\n self.downscan_end = self.data.plan_data['end_time'][0]\n self.dark_start = self.data.plan_data['start_time'][1]\n self.dark_end = self.data.plan_data['end_time'][1]\n self.upscan_start = self.data.plan_data['start_time'][2]\n self.upscan_end = self.data.plan_data['end_time'][2]\n self.downscan_start_dt = self.data_collection_start + datetime.timedelta(microseconds=self.downscan_start)\n self.downscan_end_dt = self.data_collection_start + datetime.timedelta(microseconds=self.downscan_end)\n self.dark_start_dt = self.data_collection_start + datetime.timedelta(microseconds=self.dark_start)\n self.dark_end_dt = self.data_collection_start + datetime.timedelta(microseconds=self.dark_end)\n self.upscan_start_dt = self.data_collection_start + datetime.timedelta(microseconds=self.upscan_start)\n self.upscan_end_dt = self.data_collection_start + datetime.timedelta(microseconds=self.upscan_end)\n\n # Set some attributes for subsets of data for downscan, dark, and upscan experiments\n self.data.temperature_data_downscan = self.data.temperature_data[self.data.temperature_data['time'].between(self.downscan_start, self.downscan_end)]\n self.data.temperature_data_dark = self.data.temperature_data[self.data.temperature_data['time'].between(self.dark_start, self.dark_end)]\n self.data.temperature_data_upscan = self.data.temperature_data[self.data.temperature_data['time'].between(self.upscan_start, self.upscan_end)]\n self.data.distance_data_downscan = self.data.distance_data[self.data.distance_data['time'].between(self.downscan_start, self.downscan_end)]\n self.data.distance_data_dark = self.data.distance_data[self.data.distance_data['time'].between(self.dark_start, self.dark_end)]\n self.data.distance_data_upscan = self.data.distance_data[self.data.distance_data['time'].between(self.upscan_start, self.upscan_end)]\n self.data.grating_data_downscan = self.data.grating_data[self.data.grating_data['time'].between(self.downscan_start, self.downscan_end)]\n self.data.grating_data_dark = self.data.grating_data[self.data.grating_data['time'].between(self.dark_start, self.dark_end)]\n self.data.grating_data_upscan = self.data.grating_data[self.data.grating_data['time'].between(self.upscan_start, self.upscan_end)]\n self.data.integration_data_downscan = self.data.integration_data[self.data.integration_data['time'].between(self.downscan_start, self.downscan_end)]\n self.data.integration_data_dark = 
self.data.integration_data[self.data.integration_data['time'].between(self.dark_start, self.dark_end)]\n self.data.integration_data_upscan = self.data.integration_data[self.data.integration_data['time'].between(self.upscan_start, self.upscan_end)]\n\n def make_plots_input_data(self):\n \"\"\"Create a grid of bokeh plots displaying the data gathered\n from the instrument\n \"\"\"\n\n # Make detector temperature plot\n times_datetime = self._convert_to_datetime(self.data.temperature_data['time'])\n temperature_plot = figure(title=\"Detector Temperature\", x_axis_label='Time', y_axis_label='Temp (C)', x_axis_type='datetime')\n temperature_plot.line(times_datetime, self.data.temperature_data['temperature'], line_width=2)\n temperature_plot = self._indicate_experiments(temperature_plot)\n\n # Make Distance Correction and Doppler Factor plot\n times_datetime = self._convert_to_datetime(self.data.distance_data['time'])\n distance_plot = figure(title=\"Distance Correction and Doppler Factor\", x_axis_label='Time', x_axis_type='datetime')\n distance_plot.line(times_datetime, self.data.distance_data['distance_correction'], line_width=2, line_color='blue', legend_label='Distance Correction')\n distance_plot.line(times_datetime, self.data.distance_data['doppler_factor'], line_width=2, line_color='green', legend_label='Doppler Factor')\n distance_plot.legend.location = 'right'\n distance_plot = self._indicate_experiments(distance_plot)\n\n # Make Grating Position plot\n times_datetime = self._convert_to_datetime(self.data.grating_data['time'])\n grating_position_plot = figure(title=\"Grating Position\", x_axis_label='Time', x_axis_type='datetime')\n grating_position_plot.line(times_datetime, self.data.grating_data['grating_position'], line_width=2)\n grating_position_plot = self._indicate_experiments(grating_position_plot)\n grating_position_plot.yaxis[0].formatter = NumeralTickFormatter(format=\"0\")\n\n # Make Counts plot\n counts_plot = figure(title=\"Counts\", x_axis_label='Time', x_axis_type='datetime')\n counts_plot.line(times_datetime, self.data.grating_data['counts'], line_width=2)\n counts_plot = self._indicate_experiments(counts_plot)\n\n # Make Integration Time plot\n times_datetime = self._convert_to_datetime(self.data.integration_data['time'])\n integration_time_plot = figure(title=\"Integration Time\", x_axis_label='Time', y_axis_label='Integration Time (milli-seconds)', x_axis_type='datetime')\n integration_time_plot.line(times_datetime, self.data.integration_data['integration_time'], line_width=2)\n integration_time_plot = self._indicate_experiments(integration_time_plot)\n\n # Make Reference Spectrum plot\n reference_plot = figure(title=\"Reference Spectra\", x_axis_label='Wavelength (nm)', y_axis_label='Irradiance (watts/m^2/nm)')\n reference_plot.line(self.data.reference_spectrum_data['wavelength'], self.data.reference_spectrum_data['irradiance'], line_width=2)\n reference_plot.yaxis[0].formatter = NumeralTickFormatter(format=\"0.000\")\n\n # Arrange plots in a grid\n grid = gridplot([[temperature_plot, distance_plot, grating_position_plot],\n [counts_plot, integration_time_plot, reference_plot]], width=500, height=500)\n\n # Save plot\n filename = 'plots/instrument_data.html'\n output_file(filename=filename)\n save(grid)\n print(f'\\tPlot saved to {filename}')\n\n def make_plots_results(self):\n \"\"\"Create bokeh plots showing results of the exercise\"\"\"\n\n # Plot irradiance near ~180nm emission lines\n p = figure(title=\"Irradiance at 1AU\", x_axis_label='Wavelength (nm)', 
y_axis_label='Irradiance (watts/m^2/nm)', x_range=(180, 183))\n        p.line(self.results.wavelengths_downscan, self.results.irradiance_downscan, line_width=2, color='green', legend_label='Downscan')\n        p.line(self.results.wavelengths_upscan, self.results.irradiance_upscan, line_width=2, color='red', legend_label='Upscan')\n        p.line(self.data.reference_spectrum_data['wavelength'], self.data.reference_spectrum_data['irradiance'], line_width=2, line_color='black', legend_label='Reference')\n        filename = 'plots/irradiance.html'\n        output_file(filename=filename)\n        save(p)\n        print(f'\\tPlot saved to {filename}')\n\n        # Find the reference spectrum values that match downscan/upscan wavelengths the closest\n        reference_values_downscan, reference_values_upscan = [], []\n        for index, wavelength_to_match in enumerate(self.results.wavelengths_downscan):\n            matched_index = np.argmin(np.abs(self.data.reference_spectrum_data['wavelength'].values - wavelength_to_match))\n            matched_value = self.data.reference_spectrum_data['irradiance'].values[matched_index]\n            reference_values_downscan.append(matched_value)\n        for index, wavelength_to_match in enumerate(self.results.wavelengths_upscan):\n            matched_index = np.argmin(np.abs(self.data.reference_spectrum_data['wavelength'].values - wavelength_to_match))\n            matched_value = self.data.reference_spectrum_data['irradiance'].values[matched_index]\n            reference_values_upscan.append(matched_value)\n\n        # Plot irradiance ratio w.r.t. reference\n        downscan_ratio = self.results.irradiance_downscan / reference_values_downscan\n        upscan_ratio = self.results.irradiance_upscan / reference_values_upscan\n        p = figure(title=\"Irradiance Ratio w.r.t. Reference Spectra\", x_axis_label='Wavelength (nm)', y_axis_label='Irradiance Ratio (dimensionless)', x_range=(180, 183))\n        p.line(self.results.wavelengths_downscan, downscan_ratio, line_width=2, color='green', legend_label='Downscan')\n        p.line(self.results.wavelengths_upscan, upscan_ratio, line_width=2, color='red', legend_label='Upscan')\n        filename = 'plots/irradiance_ratios.html'\n        output_file(filename=filename)\n        save(p)\n        print(f'\\tPlot saved to {filename}')\n\n    def run_calculations(self):\n        \"\"\"Main method for running calculations necessary for\n        determining irradiance\n        \"\"\"\n\n        self.calculate_wavelengths()\n        self.calculate_energy_per_photon()\n        self.calculate_count_rate()\n        self.calculate_count_rate_corr()\n        self.calculate_median_dark_count_rate()\n        self.calculate_photons_per_second_per_m2()\n        self.calculate_irradiance()\n\n\nif __name__ == '__main__':\n\n    ie = IrradianceExercise()\n    ie.get_data()\n    ie.make_plots_input_data()\n    ie.run_calculations()\n    ie.make_plots_results()\n","repo_name":"bourque/lasp_exercise","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":26069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32655130188","text":"import pandas as pd\nimport xgboost as xgb\nimport random\nimport pickle\n\n# loading csv's\ntest_csv = pd.read_csv('testset.csv')\n\ntest_data = test_csv.copy()\n\n# Add Claim Amount when training, remove when testing\ntest_data_in = test_data.loc[:, ['feature1', 'feature2', 'feature4', 'feature5', 'feature6', 'feature7',\n                                 'feature8', 'feature9', 'feature10', 'feature11', 'feature12', 'feature14',\n                                 'feature15', 'feature16', 'feature17', 'feature18']]\n\nregr_test = test_data.copy()\ndrop_features = ['feature4', 'feature9', 'feature13', 'feature14', 'feature15',\n                 'feature18']\nregr_test = 
regr_test.drop(drop_features, axis=1)\n\nprint(\"Starting...\")\n\nloaded_clf = pickle.load(open('xgb.sav', 'rb'))\nload_predict = loaded_clf.predict(test_data_in)\n\noriginalTest = test_data.copy()\noriginalTest['PredictedCategory'] = load_predict\n\n\nclf = pickle.load(open('rf_model.sav', 'rb'))\ny_pred_val = clf.predict(regr_test)\n\nexport = pd.DataFrame(columns=['ClaimAmount'])\nexport['ClaimAmount'] = y_pred_val\nfor i in range(len(export)):\n if originalTest.iloc[i]['PredictedCategory'] == 0:\n export.iloc[i, export.columns.get_loc('ClaimAmount')] = 0\nexport.to_csv('xgbv1-randomforest-true.csv')","repo_name":"Jeffery-Wasty/ICBCIT","sub_path":"final_model.py","file_name":"final_model.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10537920597","text":"\ninput = open(\"day08_input.txt\", \"r\").read().strip().split('\\n')\n\n\ndef part1():\n lens = {2, 3, 4, 7}\n res = 0\n for idx, line in enumerate(input):\n inp = line.split(' | ')[1]\n for cur in inp.split(' '):\n if len(cur) in lens:\n res += 1\n print(res)\n\n\ndef part2():\n res = 0\n for idx, line in enumerate(input):\n sec_res = []\n segs = dict()\n inp, out = line.split(' | ')\n inp = inp.split(' ')\n for i in inp:\n if len(i) == 2:\n segs[1] = i\n elif len(i) == 3:\n segs[7] = i\n elif len(i) == 4:\n segs[4] = i\n elif len(i) == 7:\n segs[8] = i\n\n for i in inp:\n if len(i) == 6:\n if len(set(i) - set(segs[7])) == 4:\n segs[6] = i\n elif len(set(i) - set(segs[4])) == 2:\n segs[9] = i\n else:\n segs[0] = i\n elif len(i) == 5:\n if len(set(i) - set(segs[7])) == 2:\n segs[3] = i\n elif len(set(i) - set(segs[4])) == 3:\n segs[2] = i\n elif len(set(i) - set(segs[4])) == 2:\n segs[5] = i\n\n for o in out.split(' '):\n for i in segs:\n if set(o) == set(segs[i]):\n sec_res.append(str(i))\n\n res += int(\"\".join(sec_res))\n print(res)\n\n\npart1()\npart2()\n","repo_name":"andersastrom/aoc-2021","sub_path":"day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2412638411","text":"### COPIED ### VERIFIED\nfrom digraph import DiGraph\n\n\nclass CycleException(Exception):\n pass\n\n\ndef depth_traversal(g, node, visited, topology):\n visited[node] = 1 # start processing\n\n for neigh in g.vert_list[node].get_neighs():\n if visited[neigh] == 1: # found a node in processing\n raise CycleException(\"Graph contains cycle\")\n if visited[neigh] == 2: # ignore visited node\n continue\n # Recursively do DFS\n depth_traversal(g, neigh, visited, topology)\n\n visited[node] = 2 # make the node visited\n topology.append(node) # Add to topology list\n\n\ndef topological_sort(g):\n visited = {}\n topology = [] # Stack for storing the ordering\n\n # we need 3 flags (0, 1, 2) to check if there is a cycle.\n # 0=not visited, 1=in the process, 2=visited\n for node in g.get_vertices():\n visited[node] = 0\n\n # Go through each non-visited node and start a DFS from that node\n for node in g.get_vertices():\n if visited[node] != 2:\n depth_traversal(g, node, visited, topology)\n\n # Reverse the topology and print\n print(topology[::-1])\n return topology[::-1]\n\nif __name__ == '__main__':\n g = DiGraph()\n g.add_edge(6, 3)\n g.add_edge(3, 7)\n g.add_edge(3, 2)\n g.add_edge(1, 2)\n g.add_edge(10, 1)\n # g.add_edge(2, 8)\n # g.add_edge(5, 9)\n # g.add_edge(5, 4)\n # g.add_edge(4, 8)\n # g.add_edge(8, 6)\n try:\n 
topological_sort(g)\n except CycleException:\n print('Graph contains Cycle')\n","repo_name":"ravikumarvj/DS-and-algorithms","sub_path":"Graphs/topological_sort.py","file_name":"topological_sort.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6841629330","text":"import numpy as np\nfrom timeit import default_timer as timer\nfrom numba import vectorize, cuda, njit, prange\nimport numba.cuda\n\n\n@vectorize([\"float32(float32, float32)\"], target='cuda')\ndef vector_add(a, b):\n return a+b\n\n\ndef vector_add2(a, b, c):\n for i in range(a.size):\n c[i] = a[i] + b[i]\n\n\n@vectorize([\"float32(float32, float32)\"], target='cpu')\ndef vector_add3(a, b):\n return a + b\n\n\ndef main():\n n = 60000000\n a = np.ones(n, dtype=np.float32)\n\n start = timer()\n vector_add(a, a)\n vector_add_time = timer() - start\n print(\"Time gpu \" + str(vector_add_time))\n\n start = timer()\n vector_add(a, a)\n vector_add_time = timer() - start\n print(\"Time gpu \" + str(vector_add_time))\n\n start = timer()\n vector_add3(a, a)\n vector_add_time3 = timer() - start\n print(\"Time cpu \" + str(vector_add_time3))\n\n start = timer()\n vector_add3(a, a)\n vector_add_time3 = timer() - start\n print(\"Time cpu \" + str(vector_add_time3))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Wonziak/Cuda-and-CPU-computing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42599280085","text":"\n# coding: utf-8\n\n# In[2]:\n\n# -*- coding: utf-8 -*-\n\n'''\nEscreva a sua solução aqui\nCode your solution here\nEscriba su solución aquí\n'''\nfrom __future__ import print_function\nwhile True:\n inp = raw_input().split(' ');\n if(inp[0] == '0'):\n break;\n Q = int(inp[0]);\n D = int(inp[1]);\n P = int(inp[2]);\n total = Q * D * P;\n page = total / abs(P - Q);\n if(page == 1):\n print(page , 'pagina');\n else:\n print(page, 'paginas');\n\n","repo_name":"psarkerbd/Python-Core-Topics","sub_path":"Problem Solving by python/URI/URI 1542 Reading Books.py","file_name":"URI 1542 Reading Books.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4976894437","text":"from functools import cache\n\nfrom app.packages.auth import OidcAuthorizationCodeBearer\nfrom app.packages.auth.util import get_azure_oidc_config_url\n\n\nclass AzureScheme:\n _scheme: OidcAuthorizationCodeBearer | None = None\n\n @classmethod\n def init(cls, tenant_id: str, client_id: str):\n cls._scheme = OidcAuthorizationCodeBearer(\n name=\"Azure AD\",\n config_url=get_azure_oidc_config_url(tenant_id),\n client_id=client_id,\n scopes={\n f\"api://{client_id}/user_impersonation\": \"user_impersonation\",\n },\n )\n\n @classmethod\n @property\n @cache\n def instance(cls):\n if cls._scheme is None: # pragma: no cover\n raise RuntimeError(\"AzureScheme not initialized\")\n return cls._scheme\n","repo_name":"TimoWilhelm/fastapi_azure_starter","sub_path":"app/azure_scheme.py","file_name":"azure_scheme.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32784562371","text":"tc = input()\n\nfor x in range(int(tc)):\n res = 0\n day = 0\n mm = input()\n mm = mm.split(\" \")\n nD = mm[0]\n pD = mm[1]\n\n while int(res) < 
int(nD):\n        res += int(pD)\n        day += 1\n\n    print(\"Kasus #{}: {}\".format(x+1,day))\n\n","repo_name":"abdilahrf/OnlineJudgeSolution","sub_path":"JollyBee/JAWABAN/pohonapel.py","file_name":"pohonapel.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"72801420054","text":"import re\n\nfrom .errors import FormatError\nfrom .utils import match_or_error, look_next_line\n#from errors import FormatError\n#from utils import match_or_error, look_next_line\n\nMAX_LEN_NAME = 71\n\n\nclass BaseObject():\n    def __init__(self, name='', material=0):\n        self.material = material\n        self.name = name\n        self.dimensions = None\n\n    @property\n    def name(self):\n        return self._name\n\n    @name.setter\n    def name(self, name):\n        if len(name) > MAX_LEN_NAME:\n            raise FormatError(\n                'Max len for name is {}'.format(MAX_LEN_NAME))\n        else:\n            self._name = name\n\n\nclass BaseContainerObject(BaseObject):\n\n    def __init__(self, child_type, **kwargs):\n        BaseObject.__init__(self, **kwargs)\n        # list of child entities\n        self._child_list = []\n        # type of child entities\n        self._child_type = child_type\n        # define the first line of the entity (assumes the header has only one line)\n        self._begin_re = None\n        # define the end of the entity header, used only if _begin_re is None\n        self._end_header_re = None\n        # define when to start parsing the entity tail\n        self._begin_tail_re = None\n        # define the end of the entity; if None the entity ends at the end of the file\n        # (if _begin_tail_re is not defined this is required, and _tail must be implemented)\n        self._end_re = None\n        # default header and tail strings\n        self._header_str = None\n        self._tail_str = None\n\n    @property\n    def _header(self):\n        return self._header_str\n\n    @property\n    def _tail(self):\n        return self._tail_str\n\n    @property\n    def _content(self):\n        content_str = ''\n        for child in self._child_list:\n            content_str += child.serialize()\n        return content_str\n\n    def append(self, children):\n        \"\"\"Append an element to the container\n\n        :param children: instance or iterator of instances of _child_type\n        :return:\n        \"\"\"\n        # only allow insertion of typed elements\n        if self._child_type is None:\n            raise NotImplementedError()\n        if not isinstance(children, list):\n            children = [children]\n\n        def _check_and_add_child(child):\n            if not isinstance(child, self._child_type):\n                raise FormatError(\n                    'Object is not a \"{}\" \"{}\"'.format(\n                        self._child_type, child))\n            self._child_list.append(child)\n        for child in children:\n            _check_and_add_child(child)\n\n    def clear(self):\n        self._child_list = []\n\n    def translate(self, v):\n        for child in self._child_list:\n            child.translate(v)\n\n    def serialize(self):\n        mstr = ''\n        mstr += self._header\n        mstr += self._content\n        mstr += self._tail\n        return mstr\n\n    def write(self, filename):\n        with open(filename, 'w', newline='\\r\\n') as dst_file:\n            dst_file.write(self.serialize())\n\n    def _parse_head(self, infile):\n        \"\"\"Parse the start of the entity\n\n        if _begin_re is defined read only the first line, which must match _begin_re\n        if _begin_re is not defined read until _end_header_re is found\n\n        :param infile: opened input file\n        :return:\n        \"\"\"\n        self._header_str = ''\n        # if _begin_re is defined it must match the first line and the processing ends\n        #print(self._begin_re)\n        if self._begin_re is not None:\n            match_or_error(self._begin_re, infile)\n        # if _begin_re is not defined read until _end_header_re\n        elif self._end_header_re is not None:\n            while True:\n                line = 
look_next_line(infile)\n                if line == '':\n                    raise FormatError(\n                        'Could not find \"{}\"'.format(self._end_header_re)\n                    )\n                if re.match(self._end_header_re, line):\n                    break\n                self._header_str += line\n                # consumes the line\n                infile.readline()\n\n        else:\n            raise NotImplementedError()\n\n    def _parse_tail(self, infile):\n        \"\"\"Parse the end of the entity\n\n        read the file until _end_re is found and save it in _tail_str\n        if _end_re is None the file is read until its end\n\n        :param infile: opened input file\n        :return:\n        \"\"\"\n        self._tail_str = ''\n        while True:\n            line = infile.readline()\n            self._tail_str += line\n            if line == '':\n                # if the end of file is reached and _end_re was not found\n                if self._end_re is not None:\n                    raise FormatError(\n                        'Could not find \"{}\"'.format(self._end_re)\n                    )\n                # if _end_re is None the processing ends\n                else:\n                    break\n            # if _end_re is defined, search for it\n            if self._end_re is not None:\n                if re.match(self._end_re, line):\n                    break\n\n    def _parse_content(self, infile, mimo_id=-1):\n        if mimo_id == -1:\n            child = self._child_type.from_file(infile)\n        else:\n            child = self._child_type.from_file(infile, mimo_id)\n        self.append(child)\n\n    def __getitem__(self, key):\n        for child in self._child_list:\n            if child.name == key:\n                return child\n        raise KeyError()\n\n    def __iter__(self):\n        return iter(self._child_list)\n\n    def keys(self):\n        keys = []\n        for child in self._child_list:\n            keys.append(child.name)\n        return keys\n\n    def from_file(self, infile, MIMO=False):\n        \"\"\"Parse entity\n\n        Parse the head and then find children, defined by:\n        * if _begin_tail is defined, call _parse_tail when _begin_tail is matched\n        * if _begin_tail is None, _end_re must be defined and children are parsed until it is found\n\n        :param infile: opened input file\n        :return: entity instance\n        \"\"\"\n        # consumes the entity header\n        self._parse_head(infile)\n        mimo_id = -1\n        while True:\n            line = look_next_line(infile)\n            # are we searching for the beginning of the tail\n            if self._begin_tail_re is not None:\n                if re.match(self._begin_tail_re, line):\n                    self._parse_tail(infile)\n                    break\n            # if not we have to search for the end of the entity\n            elif self._end_re is not None:\n                if re.match(self._end_re, line):\n                    infile.readline()\n                    break\n            # if it is not the start of the tail nor the end of the entity,\n            # it is a child entity\n            if MIMO:\n                mimo_id += 1\n                self._parse_content(infile, mimo_id=mimo_id)\n            else:\n                self._parse_content(infile)","repo_name":"lasseufpa/5gm-rwi-3d-modeling","sub_path":"rwimodeling/basecontainerobject.py","file_name":"basecontainerobject.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
{"seq_id":"31006442711","text":"from conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass TgcConan(ConanFile):\n    name = \"tgc\"\n    description = \"A Tiny Garbage Collector for C.\"\n    license = \"BSD-2-Clause\"\n    topics = (\"conan\", \"tgc\", \"garbage-collector\")\n    homepage = \"https://github.com/orangeduck/tgc\"\n    url = \"https://github.com/conan-io/conan-center-index\"\n\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    options = {\n        \"shared\": [True, False],\n        \"fPIC\": [True, False],\n    }\n    default_options = {\n        \"shared\": False,\n        \"fPIC\": True,\n    }\n\n    exports_sources = \"CMakeLists.txt\"\n    generators = \"cmake\"\n    _cmake = None\n\n    @property\n    def _source_subfolder(self):\n        return \"source_subfolder\"\n\n    def config_options(self):\n        if self.settings.os 
== \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE.md\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"tgc\"]\n","repo_name":"SpaceIm/conan-tgc","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34277934632","text":"import logging\nimport os\n\nfrom util.Event import Event\nfrom GUI.LogTextEdit import QTextEditLogger\nfrom GUI.LogFormatter import CustomFormatter\nfrom PyQt6 import QtCore, QtGui\nfrom PyQt6.QtWidgets import *\n\nDEFAULT_BUTTON_HEIGHT = 150\nDEFAULT_BUTTON_WIDTH = 50\nICON_NAME = './resources/thebenlogo.jpg'\nWINDOW_TITLE = \"Theben: Main Window\"\nBACKGROUND_COLOR = \"#F4A999\"\n\nlog = logging.getLogger(\"log\")\nhome_dir = os.path.expanduser(\"~/Desktop\")\n\n\n# noinspection PyUnresolvedReferences\nclass MainWindow(QWidget):\n \"\"\"\n Main Window of the application.\n Features creating all widgets and creating Events\n for their use.\n \"\"\"\n\n # If not verified the program will not start\n verified = False\n\n log_textbox = None\n image_widget = None\n # The actual image\n pixmap = None\n\n clear_button = None\n save_button = None\n continue_button = None\n start_button = None\n stop_button = None\n\n brightness_slider = None\n contrast_slider = None\n gamma_slider = None\n\n brightness_label = None\n contrast_label = None\n gamma_label = None\n\n # Path that is transferred from the File Dialog\n save_image_path = \"\"\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle(WINDOW_TITLE)\n self.setWindowIcon(QtGui.QIcon(ICON_NAME))\n self.setContentsMargins(10, 10, 10, 10)\n self.setStyleSheet(f\"background-color: {BACKGROUND_COLOR};\")\n # The current picture from the camera\n self.image_widget = QLabel(self)\n self.init_layout()\n\n self.on_do_save = Event()\n self.on_do_continue = Event()\n self.on_do_start = Event()\n self.on_do_stop = Event()\n self.on_brightness_changed = Event()\n self.on_contrast_changed = Event()\n self.on_gamma_changed = Event()\n\n self.show()\n self.activateWindow()\n\n def init_layout(self):\n \"\"\"Initializes the layout of the main window.\"\"\"\n\n self.log_textbox = self.init_log_textbox()\n self.init_buttons()\n self.init_sliders()\n\n layout_outer = QVBoxLayout()\n upper = QHBoxLayout()\n lower = QHBoxLayout()\n button_box = QVBoxLayout()\n\n layout_outer.addLayout(upper)\n layout_outer.addLayout(lower)\n\n upper.addWidget(self.log_textbox.widget)\n lower.addWidget(self.image_widget)\n lower.addLayout(button_box)\n\n self.brightness_label = QLabel(self)\n self.brightness_label.setText('Brightness: ' + \"0\")\n self.contrast_label = QLabel(self)\n self.contrast_label.setText('Contrast: ' + \"1\")\n self.gamma_label = QLabel(self)\n self.gamma_label.setText('Gamma: ' + \"1\")\n\n button_box.addWidget(self.clear_button)\n 
button_box.addWidget(self.save_button)\n        button_box.addWidget(self.continue_button)\n        button_box.addWidget(self.brightness_label)\n        button_box.addWidget(self.brightness_slider)\n        button_box.addWidget(self.contrast_label)\n        button_box.addWidget(self.contrast_slider)\n        button_box.addWidget(self.gamma_label)\n        button_box.addWidget(self.gamma_slider)\n        button_box.addWidget(self.start_button)\n        button_box.addWidget(self.stop_button)\n\n        self.setLayout(layout_outer)\n\n    def init_log_textbox(self):\n        \"\"\"Initializes the text box where logs are presented.\"\"\"\n        log_textbox = QTextEditLogger(self)\n        log_textbox.setFormatter(CustomFormatter())\n        log.addHandler(log_textbox)\n        log.setLevel(logging.INFO)\n        return log_textbox\n\n    def show_image(self, image):\n        \"\"\"Shows the image in the image widget.\"\"\"\n        self.pixmap = image\n        self.image_widget.setPixmap(self.pixmap)\n\n    def init_buttons(self):\n        \"\"\"Initializes clear, save, continue, start and stop buttons.\"\"\"\n        button_size = QtCore.QSize(DEFAULT_BUTTON_WIDTH, DEFAULT_BUTTON_HEIGHT)  # QSize takes width first, then height\n        self.clear_button = QPushButton(\"Clear\")\n        self.clear_button.setStyleSheet(\"background-color: gray\")\n        self.clear_button.setFixedSize(button_size)\n\n        self.save_button = QPushButton(\"Save\")\n        self.save_button.setStyleSheet(\"background-color: violet\")\n        self.save_button.setFixedSize(button_size)\n\n        self.continue_button = QPushButton(\"Continue\")\n        self.continue_button.setStyleSheet(\"background-color: blue\")\n        self.continue_button.setFixedSize(button_size)\n\n        self.start_button = QPushButton(\"Start\")\n        self.start_button.setStyleSheet(\"background-color: green\")\n        self.start_button.setFixedSize(button_size)\n\n        self.stop_button = QPushButton(\"Stop\")\n        self.stop_button.setStyleSheet(\"background-color: red\")\n        self.stop_button.setFixedSize(button_size)\n        self.init_button_clicked()\n\n    def init_sliders(self):\n        \"\"\"Initializes brightness, contrast and gamma sliders.\"\"\"\n        self.brightness_slider = QSlider(QtCore.Qt.Orientation.Horizontal, self)\n        self.brightness_slider.setMaximum(10000)\n        self.brightness_slider.setMinimum(0)\n        self.brightness_slider.setTickInterval(100)\n        self.brightness_slider.sliderReleased.connect(self.change_brightness)\n\n        self.contrast_slider = QSlider(QtCore.Qt.Orientation.Horizontal, self)\n        self.contrast_slider.setMaximum(3)\n        self.contrast_slider.setMinimum(1)\n        self.contrast_slider.sliderReleased.connect(self.change_contrast)\n\n        self.gamma_slider = QSlider(QtCore.Qt.Orientation.Horizontal, self)\n        self.gamma_slider.setMaximum(110)\n        self.gamma_slider.setMinimum(90)\n        self.gamma_slider.setValue(100)\n        self.gamma_slider.sliderReleased.connect(self.change_gamma)\n\n    def init_button_clicked(self):\n        \"\"\"Connect button pushes to the corresponding methods.\"\"\"\n        self.clear_button.clicked.connect(self.clear_log)\n        self.save_button.clicked.connect(self.save_image)\n        self.continue_button.clicked.connect(self.do_continue)\n        self.start_button.clicked.connect(self.do_start)\n        self.stop_button.clicked.connect(self.do_stop)\n\n    def save_image(self):\n        \"\"\"Use a save-file dialog to get an image path and call the save event.\"\"\"\n        file_name = QFileDialog.getSaveFileName(self, \"Save File\", home_dir)\n        if file_name[0]:\n            self.save_image_path = file_name[0]\n            self.on_do_save()\n\n    def do_continue(self):\n        self.on_do_continue()\n\n    def do_start(self):\n        if self.verified:\n            self.on_do_start()\n        else:\n            QMessageBox.information(self, \"Data not verified\", \"The setup is not verified (yet)\")\n\n    def do_stop(self):\n        
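# fire the stop event; handlers registered through add_subscriber_for_stop_event run here\n        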
self.on_do_stop()\n\n    def change_brightness(self):\n        self.on_brightness_changed()\n\n    def change_contrast(self):\n        self.on_contrast_changed()\n\n    def change_gamma(self):\n        self.on_gamma_changed()\n\n    def clear_log(self):\n        self.log_textbox.widget.clear()\n\n    def get_pixmap(self):\n        return self.pixmap\n\n    def add_subscriber_for_start_event(self, obj_method):\n        self.on_do_start += obj_method\n\n    def remove_subscriber_for_start_event(self, obj_method):\n        self.on_do_start -= obj_method\n\n    def add_subscriber_for_stop_event(self, obj_method):\n        self.on_do_stop += obj_method\n\n    def remove_subscriber_for_stop_event(self, obj_method):\n        self.on_do_stop -= obj_method\n\n    def add_subscriber_for_continue_event(self, obj_method):\n        self.on_do_continue += obj_method\n\n    def remove_subscriber_for_continue_event(self, obj_method):\n        self.on_do_continue -= obj_method\n\n    def add_subscriber_for_save_event(self, obj_method):\n        self.on_do_save += obj_method\n\n    def add_subscriber_for_brightness_event(self, obj_method):\n        self.on_brightness_changed += obj_method\n\n    def add_subscriber_for_contrast_event(self, obj_method):\n        self.on_contrast_changed += obj_method\n\n    def add_subscriber_for_gamma_event(self, obj_method):\n        self.on_gamma_changed += obj_method\n","repo_name":"Me3ke/Theben--Synchronisation-for-Scanning-Lightsheet-Microscope","sub_path":"src/GUI/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"4276469613","text":"\"\"\"Guangxi\"\"\"\r\ndef read_file(filename, r_flag, grid_size_n):\r\n    \"\"\"\r\n    Input:\r\n    filename = \"./sample50_compact/50-1/placement_info.txt\"\r\n    r_flag = 1: pad every boundary out to its bounding rectangle\r\n    r_flag = 0: keep the original boundary\r\n    All areas and lengths are computed as if the modules were rectangles.\r\n\r\n    Output:\r\n    A list whose elements are one dict per module, holding the extracted information for that module.\r\n\r\n    \"\"\"\r\n    X_factor=0\r\n    Y_factor=0\r\n    origin_point=[]\r\n    canvas_x_max=0\r\n    canvas_x_min=0\r\n    canvas_y_max=0\r\n    canvas_y_min=0\r\n    canvas_range=[]\r\n    #whole_area=0\r\n    m_list = []\r\n    m_dict = {}\r\n    with open(filename) as f:\r\n        line = f.readline()\r\n        while 1:\r\n            flag = 0\r\n            if line.split(\":\")[0] == \"Area\":\r\n                canvas_b_list=[]\r\n                canvas_x_list=[]\r\n                canvas_y_list=[]\r\n                for element in line.split(\"(\")[1:]:\r\n                    canvas_x_list.append(float(element.split(\",\")[0]))\r\n                    canvas_y_list.append(float(element.split(\",\")[1].split(\")\")[0].strip()))\r\n                    canvas_b_list.append([float(element.split(\",\")[0]), float(element.split(\",\")[1].split(\")\")[0].strip())])\r\n                canvas_x_max = max(canvas_x_list)\r\n                \r\n                canvas_x_min = min(canvas_x_list)\r\n                \r\n                canvas_y_max = max(canvas_y_list)\r\n                \r\n                canvas_y_min = min(canvas_y_list)\r\n                \r\n                canvas_range.append(canvas_x_max)\r\n                canvas_range.append(canvas_x_min)\r\n                canvas_range.append(canvas_y_max)\r\n                canvas_range.append(canvas_y_min)\r\n                Canvas_Length_x = abs(canvas_x_max - canvas_x_min)\r\n                Canvas_Length_y = abs(canvas_y_max - canvas_y_min)\r\n                origin_point=canvas_b_list[0]\r\n                \"\"\"Guangxi\"\"\"\r\n                X_factor=Canvas_Length_x/grid_size_n\r\n                Y_factor=Canvas_Length_y/grid_size_n\r\n            if line.split(\":\")[0] == \"Module\":\r\n                id = line.split(\"M\")[-1]\r\n                m_dict[\"Module ID\"] = int(id)-1\r\n                p_id = 1\r\n                while 1:\r\n                    line = f.readline()\r\n\r\n                    if line.split(\":\")[0] == \"Boundary\":\r\n                        b_list = []\r\n                        x_list = []\r\n                        y_list = []\r\n                        \"\"\"Boundary Edge Extraction\"\"\"\r\n                        for element in line.split(\"(\")[1:]:\r\n                            x_list.append(float(element.split(\",\")[0]))\r\n                            
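# each element looks like \"x,y)...\": x precedes the comma, y precedes the closing parenthesis\r\n                            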
y_list.append(float(element.split(\",\")[1].split(\")\")[0].strip()))\r\n                            b_list.append([float(element.split(\",\")[0]), float(element.split(\",\")[1].split(\")\")[0].strip())])\r\n                        x_max = max(x_list)\r\n                        x_min = min(x_list)\r\n                        y_max = max(y_list)\r\n                        y_min = min(y_list) \r\n\r\n                        if r_flag == 1 and len(b_list) != 4:\r\n                            b_list = []\r\n                            b_list = [[x_min,y_min], [x_min,y_max], [x_max,y_max], [x_max,y_min]] \r\n\r\n                        \"\"\"Length_x, Length_y Calculation, regard as rectangular\"\"\"\r\n                        Length_x = abs(x_max - x_min)\r\n                        Length_y = abs(y_max - y_min)\r\n                        Central_x = (x_max + x_min) / 2\r\n                        Central_y = (y_max + y_min) / 2\r\n                        #M_area = Length_x * Length_y \r\n\r\n                        #m_dict[\"Boundary\"] = b_list\r\n                        #m_dict[\"Boundary_inf\"] = line.split(\";\")[-1].strip(\"\\n\")\r\n                        #m_dict[\"Module_Area\"] = M_area\r\n                        m_dict[\"Module_Lx\"] = Length_x\r\n                        m_dict[\"Module_Ly\"] = Length_y\r\n                        # m_dict[\"Module_Central_x\"] = Central_x\r\n                        # m_dict[\"Module_Central_y\"] = Central_y\r\n                        # m_dict[\"Left_down_x\"]=-0.5*Length_x\r\n                        # m_dict[\"Left_down_y\"]=-0.5*Length_y\r\n                        # m_dict[\"Left_up_x\"]=-0.5*Length_x\r\n                        # m_dict[\"Left_up_y\"]=0.5*Length_y\r\n                        # m_dict[\"Right_up_x\"]=0.5*Length_x\r\n                        # m_dict[\"Right_up_y\"]=0.5*Length_y\r\n                        # m_dict[\"Right_down_x\"]=0.5*Length_x\r\n                        # m_dict[\"Right_down_y\"]=-0.5*Length_y\r\n\r\n                    elif line.split(\":\")[0] == \"Port\":\r\n\r\n                        p_list = []\r\n                        px_list = []\r\n                        py_list = []\r\n                        \"\"\"Port Edge Extraction\"\"\"\r\n                        for element in line.split(\"(\")[1:]:\r\n                            p_list.append([float(element.split(\",\")[0]), float(element.split(\",\")[1].split(\")\")[0].strip())])\r\n                            px_list.append(float(element.split(\",\")[0]))\r\n                            py_list.append(float(element.split(\",\")[1].split(\")\")[0].strip()))\r\n\r\n\r\n                        \"\"\"Position Difference\"\"\"\r\n                        Central_px = (max(px_list) + min(px_list)) / 2\r\n                        Central_py = (max(py_list) + min(py_list)) / 2 \r\n                        #m_dict[f\"Port_{p_id}\"] = p_list\r\n                        #m_dict[f\"Port_{p_id}_inf\"] = line.split(\";\")[-1].strip(\"\\n\")\r\n                        #m_dict[f\"Port_{p_id}_Central_x\"] = Central_px\r\n                        #m_dict[f\"Port_{p_id}_Central_y\"] = Central_py\r\n                        m_dict[f\"Port_{p_id}_Delta_x\"] = Central_px - Central_x\r\n                        m_dict[f\"Port_{p_id}_Delta_y\"] = Central_py - Central_y\r\n                        p_id += 1\r\n\r\n\r\n                    elif line.split(\":\")[0] == \"Module\" or not line:\r\n                        m_list.append(m_dict)\r\n                        flag = 1\r\n                        m_dict = {}\r\n                        break\r\n                    \r\n            if not line:\r\n                break\r\n            if flag == 1:\r\n                continue\r\n            line = f.readline()\r\n\r\n    #calculate whole area of all the macros\r\n    # for i in range(len(m_list)): \r\n    #     whole_area+=m_list[i]['Module_Area']\r\n    return X_factor, Y_factor, origin_point, canvas_range, m_list\r\n\r\n\"\"\"Guangxi\"\"\"\r\ndef Coordinates_transform(result, grid_size_n):\r\n    \"\"\"\r\n    Input: self.result, the coordinates of macros 0 to 49 in the 32*32 learning space after placement finishes\r\n    Output:\r\n    A list whose elements are each macro's centre-point coordinates on the real canvas, used to build the txt handed to the Huada router\r\n    \"\"\"\r\n\r\n    \"\"\"Guangxi\"\"\"\r\n    X_factor, Y_factor, origin_point_canvas, canvas_range, m_list=read_file('./placement_info.txt',1, grid_size_n)\r\n    #Valid_final_list=[]\r\n    #Valid_macro_area=0\r\n    #In_Canvas_range_list=[]\r\n    Macro_center_point_list=[]\r\n    Ports_of_macro_list=[]\r\n    for i in range(len(result)):\r\n        #macro_point=[]\r\n        origin_point=[]\r\n        #left_up_point=[]\r\n        #right_up_point=[]\r\n        #right_down_point=[]\r\n        center_point=[]\r\n        port1_point=[]\r\n        port2_point=[]\r\n        port3_point=[]\r\n        for j in range(len(m_list)): \r\n            if(m_list[j]['Module ID']==i):\r\n                x_origin_old=result[i][0]\r\n                x_origin_new=origin_point_canvas[0]+X_factor*x_origin_old\r\n                
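# map the learned grid coordinate back onto the canvas: canvas = canvas_origin + scale_factor * grid_coordinate\r\n                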
y_origin_old=result[i][1]\r\n y_origin_new=origin_point_canvas[1]+Y_factor*y_origin_old \r\n x_center_new=x_origin_new+0.5*m_list[j]['Module_Lx']\r\n origin_point=[x_origin_new,y_origin_new]\r\n y_center_new=y_origin_new+0.5*m_list[j]['Module_Ly']\r\n center_point=[x_center_new,y_center_new]\r\n Macro_center_point_list.append(center_point)\r\n \r\n port1_point_raw_x=m_list[j]['Port_1_Delta_x']+x_center_new\r\n port1_point_raw_y=m_list[j]['Port_1_Delta_y']+y_center_new\r\n port1_point=[port1_point_raw_x, port1_point_raw_y]\r\n \r\n port2_point_raw_x=m_list[j]['Port_2_Delta_x']+x_center_new\r\n port2_point_raw_y=m_list[j]['Port_2_Delta_y']+y_center_new\r\n port2_point=[port2_point_raw_x, port2_point_raw_y]\r\n \r\n port3_point_raw_x=m_list[j]['Port_3_Delta_x']+x_center_new\r\n port3_point_raw_y=m_list[j]['Port_3_Delta_y']+y_center_new\r\n port3_point=[port3_point_raw_x, port3_point_raw_y]\r\n \r\n Ports_of_macro_list.append([port1_point,port2_point,port3_point])\r\n #left_up_point=[origin_point[0],origin_point[1]+m_list[j]['Module_Ly']]\r\n #right_up_point=[origin_point[0]+m_list[j]['Module_Lx'],origin_point[1]+m_list[j]['Module_Ly']]\r\n #right_down_point=[origin_point[0]+m_list[j]['Module_Lx'],origin_point[1]]\r\n # macro_point.append(i) #0\r\n # macro_point.append(origin_point) #1\r\n # macro_point.append(left_up_point) #2\r\n # macro_point.append(right_up_point) #3\r\n # macro_point.append(right_down_point)#4\r\n \r\n \"\"\"judge if a point is out of the canvas\"\"\"\r\n # if (0==((macro_point[1][0]>canvas_range[0])or(macro_point[3][0]<canvas_range[1])or(macro_point[1][1]>canvas_range[2])or(macro_point[3][1]<canvas_range[3]))):\r\n # In_Canvas_range_list.append(macro_point) \r\n \r\n \"\"\"judge if a macro is overlapped by others\"\"\"\r\n # for n in range(len(In_Canvas_range_list)):\r\n # overlap_flag=0\r\n # for u in range(len(In_Canvas_range_list)):\r\n # if(n!=u):\r\n # for t in range(1,5): #1:origin(left_down_point) 2:right_up_point \r\n # if((In_Canvas_range_list[n][t][0]>=In_Canvas_range_list[u][1][0])and(In_Canvas_range_list[n][t][0]<=In_Canvas_range_list[u][3][0])and(In_Canvas_range_list[n][t][1]>=In_Canvas_range_list[u][1][1])and(In_Canvas_range_list[n][t][1]<=In_Canvas_range_list[u][3][1])):\r\n # overlap_flag=1 #a macro's point in in another macro's area\r\n # if(0==overlap_flag):\r\n # #valid_point_struct=[]\r\n # #valid_point_struct.append(In_Canvas_range_list[n][0]) #valid macro id\r\n # #valid_point_struct.append(In_Canvas_range_list[n][1]) #valid macro central point\r\n # Valid_final_list.append(In_Canvas_range_list[n][0]+1)\r\n # #calculate Valid_macro_area\r\n # for i in range(len(Valid_final_list)):\r\n # valid_id=Valid_final_list[i]-1\r\n # for j in range(len(m_list)):\r\n # if(valid_id==m_list[j]['Module ID']):\r\n # Valid_macro_area+=m_list[j]['Module_Area']\r\n # Util_area_macro=Valid_macro_area/whole_area\r\n # Util_macro=len(Valid_final_list)/len(result)\r\n # #print(len(Valid_final_list))\r\n # print(Valid_final_list)\r\n #print(Util_macro)\r\n #print(Util_area_macro)\r\n #return Valid_final_list, Util_macro, Util_area_macro, Macro_center_point_list\r\n return Macro_center_point_list, Ports_of_macro_list\r\n #print(len(In_Canvas_range_list))\r\n #print(In_Canvas_range_list)\r\n \r\ndef write_best_result(Macro_center_point_list, n):\r\n print(n)\r\n #f=open('result_1.txt','w')\r\n f=open('result_'+str(n)+'.txt', 'w')\r\n for i in range(len(Macro_center_point_list)):\r\n x_center=Macro_center_point_list[i][0]\r\n y_center=Macro_center_point_list[i][1]\r\n 
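# one three-line block per module: name, orientation (fixed at R0) and centre position\r\n        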
line_first='Module M'+str(i+1)+'\\n'\r\n        line_second='Orient: R0'+'\\n' #temporarily ignore rotation, only R0\r\n        line_third='Position: '+'('+str(x_center)+','+str(y_center)+')'+'\\n'\r\n        f.write(line_first)\r\n        f.write(line_second)\r\n        f.write(line_third)\r\n    f.close()\r\n\r\n\"\"\"Guangxi\"\"\"\r\ndef get_range(number,n, grid_size_n):\r\n    D_x_y=[] #used to store each macro's dx and dy\r\n    \"\"\"Guangxi\"\"\"\r\n    # read the placement file once and unpack what is needed, instead of parsing it three times\r\n    X_factor, Y_factor, _, _, m_list = read_file('./placement_info.txt',1, grid_size_n)\r\n    for i in range(n):\r\n        for j in range(len(m_list)):\r\n            if(m_list[j]['Module ID']==i):\r\n                D_x_y.append([float(m_list[j]['Module_Lx']/X_factor),float(m_list[j]['Module_Ly']/Y_factor)])\r\n    return D_x_y[number]\r\n#############################Only for test#####################################\r\n","repo_name":"mtl2236/Place_project","sub_path":"EDA_AI-main/Valid_list_gen.py","file_name":"Valid_list_gen.py","file_ext":"py","file_size_in_byte":12550,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
{"seq_id":"18091037243","text":"def get_revNumber(review):\n    ## Rev Number\n    try:\n        revNumber = review.attrs['id']\n        return revNumber\n    # revNo.append(x.find('li',{'class':' empReview cf '}).attrs[' id'])\n    # for emp in x.find(':\n    #     print(emp.attrs[' id'])\n    except:\n        return 'ERR: No revNumber'\n\n\ndef get_overallRating(review):\n    ## overall rating\n    try:\n        overallRating = review.find('span',{'class':'value-title'})\\\n            .attrs['title']\n        return overallRating \n        \n    except:\n        return ('Null')\n\n\ndef get_date(review):\n    ## Date\n    try:\n        date = review.find('time',{'class':'date subtle small'}).text\n        return date\n    except:\n        return 'Null'\n\ndef get_summary(review):\n    # Summary\n    try:\n        summar = review.a.text\n        summar = summar.split('\"')\n        summary = summar[1]\n        return summary\n    except:\n        return 'Null'\n\ndef get_pros(review):\n    ## Pros\n    try:\n        pro = review.find('p',{'class':' pros mainText truncateThis wrapToggleStr'}).text\n        return pro \n    except:\n        return 'Null'\n\ndef get_cons(review):\n    ## Cons\n    try:\n        con = review.find('p',{'class':' cons mainText truncateThis wrapToggleStr'}).text\n        return con\n    except:\n        return 'Null'\n\ndef get_adviceToManagement(review):\n    ## Advice to Management\n    try:\n        advice = review.find('p',{'class':' adviceMgmt mainText truncateThis wrapToggleStr'}).text\n        return advice\n    except:\n        return 'Null'\n\ndef get_employeeType(review):\n    ## Employee Type\n    try:\n        employeeType = review.find('span',{'class':\"authorJobTitle\"}).text\n        return employeeType\n    except:\n        return 'Null'\n    \ndef get_positionAndLocation(review):\n    ## Position and Location\n    try:\n        positionAndLocation = review.find('p',{'class':' tightBot mainText'}).text\n        return positionAndLocation\n    except:\n        return 'Null'\n\ndef get_reviewLink(review):\n## Review Link\n    try:\n        reviewLink = review.find('a',{'class':'reviewLink'}).attrs['href']\n        return reviewLink\n    except:\n        return 'Null'\n\ndef get_subRatings(review):\n    subs = {}\n    # subRatings = review.find('div',{'class':'subRatings module'})\n    for subRating in review.findAll('li'):\n        try:\n            title = subRating.text\n            score = subRating.find('span',{'class':'gdBars gdRatings med '}).attrs['title']\n            subs[title] = score\n        except:\n            subs['missing'] = 'idk - missing'\n            continue\n    # drop the placeholder if it was ever added; the default prevents a KeyError\n    subs.pop('missing', None)\n    if len(subs.keys()) > 1:\n        return 
subs\n\n","repo_name":"ceewick/glassdoorScraper","sub_path":"2.0/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"12895019653","text":"import re\n\nfrom sympy import Symbol\nfrom sympy.solvers import solve\n\nvalues = {}\nfor line in open(\"input.txt\").read().splitlines():\n var, value = re.findall(r\"(\\w+): (.*)\", line)[0]\n value = value.split()\n if len(value) == 1:\n values[var] = int(value[0])\n else:\n if var == \"root\":\n values[var] = (value[0], \"-\", value[2])\n else:\n values[var] = tuple(value)\ndel values[\"humn\"]\n\n\ndef get(var):\n if var in values:\n val = values[var]\n if isinstance(val, int):\n return val\n else:\n left, op, right = val\n return (get(left), op, get(right))\n else:\n return var\n\n\nequation = str(get(\"root\")).replace(\",\", \"\").replace(\"'\", \"\")\n\nhumn = Symbol(\"humn\")\nres = solve(eval(equation), humn)\nprint(int(res[0]))\n","repo_name":"CodyAdam/aoc-2022","sub_path":"python/21.2.py","file_name":"21.2.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22521391776","text":"import json\nfrom urllib.parse import urlparse\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, reverse, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views import View\nfrom django.views.generic import UpdateView\nfrom jobs.models import JobItem\nfrom mysearch import word_count\n\n\nclass Job_API(UpdateView, View):\n @csrf_exempt\n def dispatch(self, request, *args, **kwargs):\n return super(Job_API, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n jobs = [job.to_dict() for job in JobItem.objects.all()]\n return JsonResponse({'jobs': jobs})\n\n def post(self, request, *args, **kwargs):\n params = json.loads(request.body)\n url = params['url']\n word = params['search_word']\n u = urlparse(url)\n url = url if u.scheme else 'http://' + url\n job = word_count.delay(url, word)\n JobItem.objects.create(\n uuid = job.id,\n status = job.status,\n url = params['url'],\n search_word = word\n )\n return HttpResponse(status=201)\n\n\ndef jobs_update(request):\n for entry in JobItem.objects.exclude(status='SUCCESS'):\n job = entry.get_job()\n entry.status = job.status\n entry.result = job.result\n entry.save()\n return HttpResponse(status=201)\n\ndef jobs_delete(request):\n JobItem.objects.all().delete()\n return HttpResponse(status=201)\n\n#test\n","repo_name":"geterodyn/search_spa","sub_path":"backend/jobs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20124058469","text":"import numpy as np\nfrom PIL import Image\nfrom pickle import load\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom utils.model import CNNModel, generate_caption_beam_search\nimport os\nimport cv2\n\nfrom config import config\n\n\"\"\"\n *Some simple checking\n\"\"\"\nassert type(config['max_length']) is int, 'Please provide an integer value for `max_length` parameter in config.py file'\nassert type(config['beam_search_k']) is int, 'Please provide an integer value for `beam_search_k` parameter in config.py file'\n\ndef 
central_crop(image, central_fraction):\n\t\"\"\"Crop the central region of the image.\n\tRemove the outer parts of an image but retain the central region of the image\n\talong each dimension. If we specify central_fraction = 0.5, this function\n\treturns the region marked with \"X\" in the below diagram.\n\t --------\n\t | |\n\t | XXXX |\n\t | XXXX |\n\t | | where \"X\" is the central 50% of the image.\n\t --------\n\tArgs:\n\timage: 3-D array of shape [height, width, depth]\n\tcentral_fraction: float (0, 1], fraction of size to crop\n\tRaises:\n\tValueError: if central_fraction is not within (0, 1].\n\tReturns:\n\t3-D array\n\t\"\"\"\n\tif central_fraction <= 0.0 or central_fraction > 1.0:\n\t\traise ValueError('central_fraction must be within (0, 1]')\n\tif central_fraction == 1.0:\n\t\treturn image\n\n\timg_shape = image.shape\n\tdepth = img_shape[2]\n\tfraction_offset = int(1 / ((1 - central_fraction) / 2.0))\n\tbbox_h_start = int(np.divide(img_shape[0], fraction_offset))\n\tbbox_w_start = int(np.divide(img_shape[1], fraction_offset))\n\n\tbbox_h_size = int(img_shape[0] - bbox_h_start * 2)\n\tbbox_w_size = int(img_shape[1] - bbox_w_start * 2)\n\n\timage = image[bbox_h_start:bbox_h_start+bbox_h_size, bbox_w_start:bbox_w_start+bbox_w_size]\n\treturn image\n\n# Extract features from each image in the directory\ndef extract_features(filename, model, model_type):\n\tif model_type == 'inceptionv3':\n\t\tfrom keras.applications.inception_v3 import preprocess_input\n\t\ttarget_size = (299, 299)\n\telif model_type == 'inceptionv4':\n\t\tfrom inception_v4 import preprocess_input\n\t\ttarget_size = (299, 299)\n\t# Load the image and convert from OpenCV's BGR channel order to RGB\n\timage = np.asarray(cv2.imread(filename))[:,:,::-1]\n\t# Keep the central 87.5% of the image\n\timage = central_crop(image, 0.875)\n\t# Resize to the network's input resolution\n\timage = cv2.resize(image, (299, 299))\n\t# Prepare the image for the CNN Model\n\timage = preprocess_input(image)\n\timage = image.reshape(-1,299,299,3)\n\tfeatures = model.predict(image, verbose=0)\n\treturn features\n\n# Load the tokenizer\ntokenizer_path = config['tokenizer_path']\ntokenizer = load(open(tokenizer_path, 'rb'))\n\n# Max sequence length (from training)\nmax_length = config['max_length']\n\n# Load the model\ncaption_model = load_model(config['model_load_path'])\n\nimage_model = CNNModel(config['model_type'])\n\n# Load and prepare the image\nfor image_file in os.listdir(config['test_data_path']):\n\tif(image_file.split('--')[0]=='output'):\n\t\tcontinue\n\tif(image_file.split('.')[1]=='jpg' or image_file.split('.')[1]=='jpeg'):\n\t\tprint('Generating caption for {}'.format(image_file))\n\t\t# Encode image using CNN Model\n\t\timage = extract_features(config['test_data_path']+image_file, image_model, config['model_type'])\n\t\t# Generate caption using Decoder RNN Model + BEAM search\n\t\tgenerated_caption = generate_caption_beam_search(caption_model, tokenizer, image, max_length, beam_index=config['beam_search_k'])\n\t\t# Remove startseq and endseq\n\t\tcaption = 'Caption: ' + generated_caption.split()[1].capitalize()\n\t\tfor x in 
generated_caption.split()[2:len(generated_caption.split())-1]:\n\t\t caption = caption + ' ' + x\n\t\tcaption += '.'\n\t\t# Show image and its caption\n\t\tpil_im = Image.open(config['test_data_path']+image_file, 'r')\n\t\tfig, ax = plt.subplots(figsize=(8, 8))\n\t\tax.get_xaxis().set_visible(False)\n\t\tax.get_yaxis().set_visible(False)\n\t\t_ = ax.imshow(np.asarray(pil_im), interpolation='nearest')\n\t\t_ = ax.set_title(\"BEAM Search with k={}\\n{}\".format(config['beam_search_k'],caption),fontdict={'fontsize': '20','fontweight' : '40'})\n\t\tplt.savefig(config['test_data_path']+'output--'+image_file)","repo_name":"HaoHuynh0301/image-features-extractor","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5765857987","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Category, Film\n\n# Create your views here.\ndef home(request, category_slug=None):\n category_page = None\n films = None\n if category_slug != None:\n category_page = get_object_or_404(Category, slug=category_slug)\n films = Film.objects.filter(category=category_page)\n else:\n films = Film.objects.all()\n\n return render(request, \"home.html\", dict(\n category=category_page,\n films=films\n ))\n\n\ndef about(request):\n return render(request, \"about.html\")\n\n\ndef film_detail(request, category_slug=None, film_slug=None):\n try:\n film = Film.objects.get(category__slug=category_slug, slug=film_slug)\n except Exception as ex:\n raise ex\n return render(request, \"film.html\", dict(\n film=film\n ))\n\n\ndef search(request):\n if request.method == \"POST\":\n searched = request.POST['searched']\n try:\n film = Film.objects.get(name=searched)\n return film_detail(request, film.category.slug, film.slug)\n except Exception as ex:\n print(ex)\n return home(request)\n\n","repo_name":"TrippyFrenemy/FilmLibrary","sub_path":"lib/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14246419235","text":"\"\"\"\nThe initialization of the context for the Canvas Integration Plugin\n\"\"\"\nimport pkg_resources\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom web_fragments.fragment import Fragment\n\n\ndef get_resource_bytes(path):\n \"\"\"\n Helper method to get the unicode contents of a resource in this repo.\n\n Args:\n path (str): The path of the resource\n\n Returns:\n unicode: The unicode contents of the resource at the given path\n \"\"\" # noqa: D401\n resource_contents = pkg_resources.resource_string(__name__, path)\n return resource_contents.decode(\"utf-8\")\n\n\ndef plugin_context(context):\n \"\"\"Provide context based data for Canvas Integration plugin (For Instructor Dashboard)\"\"\" # noqa: E501\n\n course = context.get(\"course\")\n\n # Don't add Canvas tab is the Instructor Dashboard if it doesn't have any associated\n # canvas_course_id set from Canvas Service\n if not course.canvas_course_id:\n return None\n\n fragment = Fragment()\n # Adding JS as bytes (Inspired by what we are doing with Rapid Response xBlock)\n fragment.add_javascript(get_resource_bytes(\"static/js/canvas_integration.js\"))\n\n canvas_context = {\n \"section_key\": \"canvas_integration\",\n \"section_display_name\": _(\"Canvas\"),\n \"course\": context[\"course\"],\n 
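# every URL below is reversed with the course id so the Canvas tab endpoints stay scoped to this course\n        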
\"add_canvas_enrollments_url\": reverse(\n \"add_canvas_enrollments\", kwargs={\"course_id\": course.id}\n ),\n \"list_canvas_enrollments_url\": reverse(\n \"list_canvas_enrollments\", kwargs={\"course_id\": course.id}\n ),\n \"list_canvas_assignments_url\": reverse(\n \"list_canvas_assignments\", kwargs={\"course_id\": course.id}\n ),\n \"list_canvas_grades_url\": reverse(\n \"list_canvas_grades\", kwargs={\"course_id\": course.id}\n ),\n \"list_instructor_tasks_url\": \"{}?include_canvas=true\".format(\n reverse(\"list_instructor_tasks\", kwargs={\"course_id\": course.id})\n ),\n \"push_edx_grades_url\": reverse(\n \"push_edx_grades\", kwargs={\"course_id\": course.id}\n ),\n \"fragment\": fragment,\n \"template_path_prefix\": \"/\",\n }\n\n sections = context.get(\"sections\", [])\n sections.append(canvas_context)\n context[\"sections\"] = sections\n\n return context\n","repo_name":"mitodl/open-edx-plugins","sub_path":"src/ol_openedx_canvas_integration/context_api.py","file_name":"context_api.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"26746779364","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport json\nimport os\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport keras\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\n\nfrom keras.layers import merge\nfrom keras.layers.core import *\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import *\n\nimport numpy\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\nimport gensim\nfrom gensim.scripts.glove2word2vec import glove2word2vec\nimport gensim.models.keyedvectors as word2vec\nfrom gensim.models.keyedvectors import KeyedVectors\n\nglove2word2vec(glove_input_file=\"glove.6B.50d.txt\", word2vec_output_file=\"gensim_glove_vectors.txt\")\nglove_model = KeyedVectors.load_word2vec_format(\"gensim_glove_vectors.txt\", binary=False)\n\n\n# In[2]:\n\n\ndef filter_sentence(text):\n #tokenizer = RegexpTokenizer(r'\\w+')\n tokenizer = RegexpTokenizer(r'[A-z]+')\n stop_words = set(stopwords.words('english'))\n word_tokens = [string.lower() for string in tokenizer.tokenize(text)]\n word_tokens = [w for w in word_tokens if not w in stop_words]\n #word_tokens = [w for w in word_tokens if len(w)>2]\n return word_tokens\n\n\n# In[3]:\n\n\nfolder_names = [\"charliehebdo\",\"ferguson\",\"germanwings-crash\",\"ottawashooting\",\"sydneysiege\"]\n\n\n# In[4]:\n\n\nnon_rumour_json_obj = []\nfor name in folder_names:\n non_rumours_paths = \"pheme-rnr-dataset/\"+name+\"/non-rumours/\"\n non_rumours_path = [x for x in os.listdir(non_rumours_paths)]\n for subfolder in non_rumours_path:\n subfolder_path = non_rumours_paths + subfolder + \"/source-tweet/\"\n json_files = [pos_json for pos_json in os.listdir(subfolder_path) if pos_json.endswith('.json')]\n for json_file in json_files:\n json_file_path = subfolder_path + json_file\n #print(json_file_path)\n with open(json_file_path) as json_data:\n d = json.load(json_data)\n non_rumour_json_obj.append(d)\nrumour_json_obj = []\nfor name in folder_names:\n rumours_paths = \"pheme-rnr-dataset/\"+name+\"/rumours/\"\n rumours_path = [x for x in 
os.listdir(rumours_paths)]\n    for subfolder in rumours_path:\n        subfolder_path = rumours_paths + subfolder + \"/source-tweet/\"\n        json_files = [pos_json for pos_json in os.listdir(subfolder_path) if pos_json.endswith('.json')]\n        for json_file in json_files:\n            json_file_path = subfolder_path + json_file\n            #print(json_file_path)\n            with open(json_file_path) as json_data:\n                d = json.load(json_data)\n                rumour_json_obj.append(d)\n\n\n# In[5]:\n\n\nretweet_count = []\ntext = []\ncreated = []\nfollowers_count = []\nverified = []\nstatuses_count = []\nuser_age = []\nrumour = []\n\nfor elem in non_rumour_json_obj:\n    retweet_count.append(elem['retweet_count'])\n    text.append(elem['text'])\n    created.append(elem['created_at'])\n    followers_count.append(elem['user']['followers_count'])\n    verified.append(elem['user']['verified'])\n    statuses_count.append(elem['user']['statuses_count'])\n    user_age.append(elem['user']['created_at'])\n    rumour.append('non_rumour')\nfor elem in rumour_json_obj:\n    retweet_count.append(elem['retweet_count'])\n    text.append(elem['text'])\n    created.append(elem['created_at'])\n    followers_count.append(elem['user']['followers_count'])\n    verified.append(elem['user']['verified'])\n    statuses_count.append(elem['user']['statuses_count'])\n    user_age.append(elem['user']['created_at'])\n    rumour.append('rumour')\n\n\n# In[6]:\n\n\ndf_temp = pd.DataFrame()\ndf = pd.DataFrame()\n\ndf_temp['retweet_count'] = retweet_count\ndf_temp['text'] = text\ndf_temp['created'] = created\ndf_temp['followers_count'] = followers_count\ndf_temp['verified'] = verified\ndf_temp['statuses_count'] = statuses_count\ndf_temp['user_age'] = user_age\ndf_temp['rumour'] = rumour\n\n\n# In[7]:\n\n\ndf.count()\ndf_temp.to_csv('/home/dell/rumour.csv',sep=',')\n\n\n# In[8]:\n\n\nle = preprocessing.LabelEncoder()\ndf_temp['verified_bool'] = le.fit_transform(df_temp['verified'])\ndf_temp['rumour_bool'] = le.fit_transform(df_temp['rumour'])\nactual_labels = df_temp['rumour_bool']\n\n\n# In[9]:\n\n\ndf = df_temp[['retweet_count','followers_count','verified_bool','statuses_count']]\n\n\n# In[10]:\n\n\nclf = RandomForestClassifier(max_depth=2)\nX_train, X_test, y_train, y_test = train_test_split(df, actual_labels, test_size=0.33, random_state=42)\n\n\n# In[11]:\n\n\nclf.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint(accuracy_score(pred,y_test))\n\n\n# In[12]:\n\n\n####next attempt\n\n\n# In[13]:\n\n\ntokens = []\nfor t in text:\n    token_line = filter_sentence(t)\n    tokens.append(token_line)\nword_features = []\n\nnot_found = 0\ntext_features = []\nfor token_line in tokens:\n    line = []\n    for token in token_line:\n        try: \n            word_vector = glove_model.get_vector(token)\n            line.append(word_vector)\n        except:\n            not_found = not_found + 1\n    text_features.append(line)\n\n\n# In[14]:\n\n\n##take average of features\navg_text_features = []\nfor lines in text_features:\n    avg = np.zeros(50)\n    count = 0\n    for line in lines:\n        avg = avg + line\n        count = count + 1\n    if count > 0:\n        avg = avg / count # divide by the token count so this is a true average, not a sum\n    avg_text_features.append(avg)\n\n\n# In[15]:\n\n\nword_avg_features = np.array(avg_text_features)\n# as_matrix() was removed from pandas; to_numpy() is the current equivalent\nconcat_features = np.concatenate((word_avg_features,df.to_numpy()),axis=1)\n\n\n# In[16]:\n\n\nclf = RandomForestClassifier(max_depth=2)\nX_train, X_test, y_train, y_test = train_test_split(word_avg_features, actual_labels, test_size=0.33, random_state=42)\nclf.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint(accuracy_score(pred,y_test))\n\n\n# In[17]:\n\n\nclf = RandomForestClassifier(max_depth=2)\nX_train, X_test, y_train, y_test = train_test_split(concat_features, 
actual_labels, test_size=0.33, random_state=42)\nclf.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint(accuracy_score(pred,y_test))\n\n\n# In[18]:\n\n\n####LSTM approach\n\n\n# In[19]:\n\n\n#x = Input(shape=(32,))\n#y = Dense(16, activation='softmax')(x)\n#model = Model(x, y)\nlengths = []\nfor line in text_features:\n    lengths.append(len(line))\nprint(max(lengths))\n\n\n# In[20]:\n\n\nlen(text_features)\n\n\n# In[21]:\n\n\nlengths = []\ntraining_list_embedded = np.zeros(shape=(len(text_features),50,15))\nfor i in range(len(text_features)):\n    sentence = text_features[i]\n    length = len(sentence)\n    padded_sequence = []\n    j = 1\n    \n    while(1):\n        #print(j)\n        if j>(15-length):\n            break\n        padded_sequence.append(np.zeros(50))\n        j = j + 1\n    sentence = padded_sequence + sentence \n    #print(length,15-length)\n    #print(np.array(sentence).transpose().shape)\n    training_list_embedded[i,:,:] = np.array(sentence).transpose()[:,:15]\n    #print(str(i) + \" \" + str(len(sentence)))\n\n\n# In[22]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(training_list_embedded, actual_labels, test_size=0.33, random_state=42)\n\n\n# In[23]:\n\n\n# create the model\nmodel = Sequential()\nmodel.add(LSTM(10,input_shape=(50,15,)))\nmodel.add(Dense(10))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n# fit on the training split only, so the later evaluation on X_test is honest;\n# nb_epoch is the deprecated spelling of epochs\nmodel.fit(X_train, np.array(y_train), epochs=30, batch_size=100)\n\n\n# In[24]:\n\n\nmodel.evaluate(X_test,y_test)\n\n\n# In[25]:\n\n\ndf.head()\n\n\n# In[26]:\n\n\ndf_temp\n\n","repo_name":"ParmeetSingh/fake-detection","sub_path":"rumour_detection_original.py","file_name":"rumour_detection_original.py","file_ext":"py","file_size_in_byte":7608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"31250414948","text":"font = \"Hiragino Kaku Gothic Pro\"\r\n\r\n# -------------Window--------------\r\nwindow_style = {\r\n    \"auto_size_text\": True,\r\n    \"auto_size_buttons\": False,\r\n    \"default_element_size\": (20, 1),\r\n    \"finalize\": True,\r\n    \"grab_anywhere\": True,\r\n    \"element_justification\": \"center\",\r\n    \"text_justification\": \"center\",\r\n    \"location\": (200, 200),\r\n}\r\n\r\n# Outer frame\r\ncolumn_left = {\r\n    \"size\": (1280, 1080),\r\n    \"pad\": 0,\r\n}\r\n\r\ncolumn_right = {\r\n    \"size\": (320, 1080),\r\n    \"pad\": 0,\r\n}\r\n\r\ncolumn_values = {\r\n    \"pad\": ((10, 10), (0, 10)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n}\r\n\r\ncolumn_setup = {\r\n    \"pad\": ((10, 10), (0, 10)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n}\r\n\r\ncolumn_save_item = {\r\n    \"pad\": ((10, 10), (0, 10)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n}\r\n\r\ncolumn_save = {\r\n    \"pad\": ((10, 10), (0, 10)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n}\r\n\r\ncolumn_reset = {\r\n    \"pad\": ((10, 10), (0, 10)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n}\r\n\r\ncolumn_base_frame = {\r\n    \"size\": (320, 300),\r\n    \"pad\": ((0, 0), (10, 0)),\r\n    \"element_justification\": \"center\",\r\n    \"expand_x\": True,\r\n    \"expand_y\": True,\r\n    \"font\": (font, 10),\r\n}\r\n\r\ncolumn_graph_l = {\r\n    \"size\": (640, 480 * 2),\r\n    \"pad\": 0,\r\n    \"element_justification\": \"center\",\r\n}\r\n\r\ncolumn_base = {\r\n    \"size\": (640, 300),\r\n    \"pad\": 0,\r\n    \"element_justification\": \"center\",\r\n}\r\n\r\n# Graph\r\nfig_style_l = {\"size\": (640, 480), \"pad\": ((0, 0), (1, 
0))}\r\n\r\n# Text\r\nvalues_text_title_style = {\r\n    \"pad\": ((1, 1), (5, 0)),\r\n    \"font\": (font, 10),\r\n    \"justification\": \"center\",\r\n    \"size\": (18, 1),\r\n}\r\nvalues_text_style = {\r\n    \"pad\": ((1, 1), (0, 5)),\r\n    \"font\": (font, 14),\r\n    \"justification\": \"center\",\r\n    \"size\": (14, 1),\r\n}\r\n\r\n# Connect\r\nport_selection_style = {\r\n    \"font\": (font, 8),\r\n    \"size\": (27, 2),\r\n}\r\ninput_button_style_refresh = {\r\n    \"font\": (font, 8),\r\n    \"size\": (7, 1),\r\n    \"button_color\": (\"#ffffff\", \"#000000\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n}\r\n\r\n# Buttons\r\ninput_button_style_l = {\r\n    \"pad\": ((5, 5), (5, 10)),\r\n    \"size\": (20, 2),\r\n    \"button_color\": (\"#ffffff\", \"#000000\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n    \"font\": (font, 11),\r\n}\r\n\r\ninput_button_style_m = {\r\n    \"pad\": ((10, 10), (7, 12)),\r\n    \"size\": (28, 2),\r\n    \"button_color\": (\"#ffffff\", \"#000000\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n    \"font\": (font, 10),\r\n}\r\n\r\ninput_button_style_s = {\r\n    \"pad\": (5, 5),\r\n    \"size\": (8, 1),\r\n    \"button_color\": (\"#ffffff\", \"#444444\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n    \"font\": (font, 10),\r\n}\r\n\r\ninput_button_style_sl = {\r\n    \"pad\": ((0, 0), (5, 10)),\r\n    \"size\": (18, 1),\r\n    \"button_color\": (\"#ffffff\", \"#444444\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n    \"font\": (font, 10),\r\n}\r\n\r\ninput_button_style_reset = {\r\n    \"pad\": ((5, 5), (5, 10)),\r\n    \"size\": (20, 2),\r\n    \"button_color\": (\"#ffffff\", \"#444444\"),\r\n    \"disabled_button_color\": (\"#999999\", \"#cccccc\"),\r\n    \"font\": (font, 10),\r\n}\r\n","repo_name":"gekkyo/dendai_ict","sub_path":"src/view/Style.py","file_name":"Style.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"32607805014","text":"import ast\nimport sys\n\nimport pytest\n\nfrom peval.core.reify import KnownValue, reify, reify_unwrapped\nfrom peval.core.gensym import GenSym\n\nfrom utils import assert_ast_equal\n\n\ndef check_reify(value, expected_ast, preferred_name=None, expected_binding=None):\n    kvalue = KnownValue(value, preferred_name=preferred_name)\n    gen_sym = GenSym()\n    node, gen_sym, binding = reify(kvalue, gen_sym)\n\n    assert_ast_equal(node, expected_ast)\n    if expected_binding is not None:\n        assert binding == expected_binding\n\n\ndef check_node_to_maybe_kvalue(node, bindings, expected_result, expected_preferred_name=None):\n    node_or_kvalue = node_to_maybe_kvalue(node, bindings)\n\n    if isinstance(node_or_kvalue, KnownValue):\n        assert node_or_kvalue.value == expected_result\n        assert node_or_kvalue.preferred_name == expected_preferred_name\n    else:\n        assert_ast_equal(node_or_kvalue, expected_result)\n\n\ndef test_simple_reify():\n    check_reify(True, ast.Constant(value=True, kind=None))\n    check_reify(False, ast.Constant(value=False, kind=None))\n    check_reify(None, ast.Constant(value=None, kind=None))\n\n    class Dummy:\n        pass\n\n    x = Dummy()\n    check_reify(\n        x,\n        ast.Name(id=\"__peval_temp_1\", ctx=ast.Load()),\n        expected_binding=dict(__peval_temp_1=x),\n    )\n    check_reify(\n        x,\n        ast.Name(id=\"y\", ctx=ast.Load()),\n        preferred_name=\"y\",\n        expected_binding=dict(y=x),\n    )\n\n    check_reify(1, ast.Constant(value=1, kind=None))\n    check_reify(2.3, ast.Constant(value=2.3, kind=None))\n    check_reify(3 + 4j, ast.Constant(value=3 + 4j, kind=None))\n    check_reify(\"abc\", ast.Constant(value=\"abc\", kind=None))\n\n    s = bytes(\"abc\", encoding=\"ascii\")\n    check_reify(s, 
ast.Constant(value=s, kind=None))\n\n\ndef test_reify_unwrapped():\n    class Dummy:\n        pass\n\n    x = Dummy()\n    gen_sym = GenSym()\n    node, gen_sym, binding = reify_unwrapped(x, gen_sym)\n    assert_ast_equal(node, ast.Name(id=\"__peval_temp_1\", ctx=ast.Load()))\n    assert binding == dict(__peval_temp_1=x)\n\n\ndef test_str_repr():\n    kv = KnownValue(1, preferred_name=\"x\")\n    s = str(kv)\n    nkv = eval(repr(kv))\n    assert nkv.value == kv.value\n    assert nkv.preferred_name == kv.preferred_name\n","repo_name":"fjarri/peval","sub_path":"tests/test_core/test_reify.py","file_name":"test_reify.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"}
{"seq_id":"72902448212","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated 04/2020\n@Author: Paulo https://github.com/alpdias\n\nforked from this example as a base: https://github.com/freenetwork/investing.com.economic-calendar\n'''\n\n# imported libraries\nfrom datetime import datetime\nfrom time import sleep\nimport datetime as DT\nimport arrow \n# import requests --> library blocked by CLOUDFLARE\nimport cfscrape # new scraping library\nfrom bs4 import BeautifulSoup\n\n'''\n\nComplete country list = ['Inglaterra', 'África do Sul', 'EUA', 'Turquia', 'Cingapura', 'Suécia', 'Rússia', 'Portugal', 'Nova Zelândia', 'Noruega', 'Países Baixos', 'México', 'Coreia do Sul', 'Coreia do Norte', 'Japão', 'Itália', 'Índia', 'Irlanda', 'Indonésia', 'Hong Kong', 'Reino Unido', 'França', 'Zona Euro', 'Espanha', 'Alemanha', 'China', 'Suíça', 'Canadá', 'Brasil', 'Austrália', 'Argentina']\n\n'''\n\nseleçao = ['Inglaterra', 'EUA', 'Reino Unido', 'Zona Euro', 'Espanha', 'Alemanha', 'Japão', 'França', 'Nova Zelândia'] # the countries whose news items are kept (titles on the site are in Portuguese)\n\ndef calendario(url): \n    \n    \"\"\"\n    -> Function that fetches the economic-calendar news by webscraping and parses the html\\\n    \\n:param url:\\\n    \\n:return:\\\n    \"\"\"\n\n    # requesting data from the site -->\n    \n    url = url # site used for the webscraping\n\n    '''\n    cabeçalho = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'} # header so the site accepts the request (the site only allows browser access)\n\n    requisiçao = requests.get(url, headers=cabeçalho) # request to the site\n    '''\n\n    scraper = cfscrape.create_scraper() # new request model using the 'cfscrape' library\n    requisiçao = scraper.get(url)\n    \n    # requesting data from the site <--\n\n\n    # html parsing -->\n\n    soup = BeautifulSoup(requisiçao.text, 'html.parser') # parse the html with the 'bs4' module\n\n    tabela = soup.find('table', {'id': 'economicCalendarData'}) # only the table with the specific id\n\n    corpo = tabela.find('tbody') # only the table body\n\n    linhas = corpo.findAll('tr', {'class': 'js-event-item'}) # only the table rows\n\n    calendario = [] # list for the news items\n\n    # 'tr' is a row inside the html table\n    # 'td' is a column inside the html table\n    \n    for tr in linhas:\n\n        horario = tr.attrs['data-event-datetime'] # extract the news time from the 'data-event-datetime' html attribute\n        horario = arrow.get(horario, 'YYYY/MM/DD HH:mm:ss') # convert the time string into a format python accepts\n        horario = horario.strftime('%H:%M')\n        calendario.append(horario)\n\n        horario = tr.attrs['data-event-datetime'] # extract the news time from the 'data-event-datetime' html attribute\n        horario = 
arrow.get(horario, 'YYYY/MM/DD HH:mm:ss') # convert the time string into a format python accepts\n        horas = (int(horario.strftime('%H')) * 60)\n        minutos = int(horario.strftime('%M'))\n        verificaçao = horas + minutos # the time expressed in minutes, used to check when the news item goes out\n        calendario.append(verificaçao) \n\n        coluna = tr.find('td', {'class': 'flagCur'}) # extract the news item's country via the 'flagCur' html class\n        bandeira = coluna.find('span')\n        calendario.append(bandeira.get('title'))\n\n        impacto = tr.find('td', {'class': 'sentiment'})\n        touro = impacto.findAll('i', {'class': 'grayFullBullishIcon'}) # the news impact is the number of 'grayFullBullishIcon' html icons\n        calendario.append(len(touro))\n\n        evento = tr.find('td', {'class': 'event'})\n        a = evento.find('a') # grab the 'a' tag to obtain the news name and url\n\n        calendario.append('{}{}'.format('https://br.investing.com', a['href'])) # build the news url from the site url plus the 'href' html reference\n\n        calendario.append(a.text.strip()) # extract the news headline from the 'a' tag (the text inside the tag)\n\n        if bandeira.get('title') not in seleçao: # check against the previously selected countries\n            \n            for item in range(0, 6):\n                calendario.pop() # remove entries for countries that were not selected\n            \n        else:\n\n            if len(touro) < 2: # filter by news impact\n\n                for i in range(0, 6):\n                    calendario.pop()\n\n    # html parsing <--\n\n    return calendario # return the list of news items\n\n\n# using the parsed html data in variables -->\n\ndados = (calendario('https://br.investing.com/economic-calendar/')) # data obtained from the html\n\n'''\nprint(dados) # list with the data\nprint('')\n'''\n\nquantidade = (len(dados) // 6) # number of news items; each one occupies 6 consecutive slots, so integer division is exact\n    \nresumo = []\n\nresumo.append(' ')\n\nwhile True:\n    \n    horario = dados[0] # the news item's time\n    verificaçao = dados[1]\n    pais = dados[2] # the news item's country\n    impacto = dados[3] # the news item's impact\n    link = dados[4] # the news item's link\n    chamada = dados[5] # the news item's headline\n\n    '''\n    noticia = (f'Local: {pais}\\\n    \\nHorário: {horario}\\\n    \\nImpacto da notícia: {impacto}\\\n    \\nNotícia: {chamada}\\\n    \\nPara ver mais acesse: {link}\\\n    \\n').strip() # formatted news item \n\n    print(noticia)\n    print('')\n    '''\n\n    noticia = (f'Local: {pais} %% Horário: {horario} %% Impacto da notícia: {impacto} %%').strip()\n    \n    resumo.append(noticia)\n\n    for item in range(0, 6):\n        del dados[0] \n\n    quantidade = quantidade - 1\n\n    if quantidade == 0:\n        break\n\n    else:\n        pass\n\n    '''\n    for item in range(0, 6):\n        del dados[0] # delete the information already used (the first 6 items in the list) to avoid repeats\n\n    quantidade = quantidade - 1\n\n    if quantidade == 0:\n        break\n\n    else:\n        pass\n    '''\n\n# using the parsed html data in variables <--\n\nresumoDia = f'Resumo do dia\\\n    \\n{resumo}'.replace('[','').replace(']','').replace(',','\\n').replace('%%','\\n').replace(\"'\",\"\")\n\nprint(resumoDia)\n","repo_name":"alpdias/economic-calendar","sub_path":"src/calendario.py","file_name":"calendario.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"pt","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"}
{"seq_id":"72264273815","text":"import base64\nfrom Cryptodome.Cipher import AES\n\npwd = input('Digite a senha:')\n\nmsg_text = 
bytes(pwd,'utf-8').rjust(32)\nsecret_key = b'Glock9mmGlock9mm'\n\ncipher = AES.new(secret_key, AES.MODE_ECB) # never use ECB in strong systems obviously\nencoded = base64.b64encode(cipher.encrypt(msg_text))\nprint('Aqui está sua senha criptografada:')\nprint('Senha:' + encoded.decode('utf-8'))\n","repo_name":"ewertonhm/VerificaCircuitos","sub_path":"cripto.py","file_name":"cripto.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6614109137","text":"import torch\nimport torch.nn.functional as F\nfrom resblock import ResBlock_G, ResBlock_D, ResBlock_D_opt\nfrom layers import Linear, Embedding\n\nclass Generator(torch.nn.Module):\n def __init__(self, num_features=16, n_z=128, bottom_width=4,\n activation=F.relu, num_classes=0):\n super(Generator, self).__init__()\n\n self.num_features = num_features\n self.n_z = n_z\n self.bottom_width = bottom_width\n self.activation = activation\n self.num_classes = num_classes\n\n self.fc = torch.nn.Linear(self.n_z, 16 * self.num_features * self.bottom_width * self.bottom_width)\n self.block1 = ResBlock_G(self.num_features * 16, self.num_features * 16,\n activation=self.activation, upsample=True, num_classes=self.num_classes)\n self.block2 = ResBlock_G(self.num_features * 16, self.num_features * 16,\n activation=self.activation, upsample=True, num_classes=self.num_classes)\n self.block3 = ResBlock_G(self.num_features * 16, self.num_features * 16,\n activation=self.activation, upsample=True, num_classes=self.num_classes)\n self.bn = torch.nn.BatchNorm2d(16 * self.num_features)\n self.conv = torch.nn.Conv2d(16 * self.num_features, 3, 1, 1)\n self.tanh = torch.nn.Tanh()\n self._initialize()\n \n def _initialize(self):\n torch.nn.init.xavier_uniform_(self.fc.weight)\n torch.nn.init.xavier_uniform_(self.conv.weight)\n\n def forward(self, x, y=None):\n h = x.size(0)\n x = self.fc(x)\n x = x.view(h, -1, self.bottom_width, self.bottom_width)\n x = self.block1(x, y)\n x = self.block2(x, y)\n x = self.block3(x, y)\n x = self.activation(self.bn(x))\n x = self.tanh(self.conv(x))\n return x\n\nclass Discriminator(torch.nn.Module):\n def __init__(self, num_features=128, num_classes=0, activation=F.relu):\n super(Discriminator, self).__init__()\n\n self.num_features = num_features\n self.num_classes = num_classes\n self.activation = activation\n\n self.block1 = ResBlock_D_opt(3, self.num_features)\n self.block2 = ResBlock_D(self.num_features, self.num_features,\n activation=self.activation, downsample=True)\n self.block3 = ResBlock_D(self.num_features, self.num_features,\n activation=self.activation, downsample=False)\n self.block4 = ResBlock_D(self.num_features, self.num_features,\n activation=self.activation, downsample=False)\n self.fc1 = Linear(self.num_features, 1) \n if self.num_classes > 0:\n self.emb1 = Embedding(self.num_classes, num_features)\n self.fc10 = Linear(self.num_features, self.num_classes)\n self._initialize()\n \n def _initialize(self):\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n if self.num_classes > 0:\n torch.nn.init.xavier_uniform_(self.emb1.weight)\n torch.nn.init.xavier_uniform_(self.fc10.weight)\n \n def forward(self, x, y=None):\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.activation(x)\n x = torch.sum(x, dim=(2,3))\n source = self.fc1(x)\n if y is not None:\n source += torch.sum(self.emb1(y) * x, dim=1, keepdim=True)\n if self.num_classes > 0:\n label = self.fc10(x)\n return source, 
label\n else:\n return source\n\nif __name__ == '__main__':\n a = Generator(num_classes=10)","repo_name":"Taeseven/SNGAN","sub_path":"models/models_resnet.py","file_name":"models_resnet.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3182782717","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport unicodedata\n\n\nclass AbstractScraper:\n def __init__(self, url):\n r = requests.get(url)\n c = r.content\n self.soup = BeautifulSoup(c, \"lxml\")\n\n def normalize_caseless(self, text):\n return unicodedata.normalize(\"NFKD\", text.casefold())\n\n def caseless_include(self, left, right):\n return self.normalize_caseless(left) in self.normalize_caseless(right) or self.normalize_caseless(right) in self.normalize_caseless(left)\n\n def is_stem_of_el(self, lst, el):\n for e in lst:\n if (e in el) or (el in e):\n return True\n return False\n\n def get_selector(self, tag):\n selector = ''\n\n if (tag.has_attr('id')):\n selector += '#' + tag['id']\n\n if (tag.has_attr('class')):\n cleanclass = []\n for c in tag['class']:\n if not self.is_stem_of_el(cleanclass, c):\n cleanclass.append(c)\n selector += '.' + c\n\n if selector == '': # compare with '==': 'is' relies on string interning and is unreliable\n selector = tag.name\n \n return selector\n\n # Build selector for user inputted text by order of most stable\n def lookup_selector(self, search_str, scope):\n\n reverse = scope.find(True, string=re.compile(search_str, re.IGNORECASE))\n\n if not reverse:\n return self.find_mixed_tags(scope, search_str)\n\n # First try - use reverse's id / classname\n if reverse.has_attr('id'):\n if len(scope.find_all(id=reverse['id'])) == 1:\n return '#'+reverse['id']\n\n # Second try - use parent's selector > tagname\n # TODO make sure have unique selector for parent\n parent = reverse.find_parent()\n\n selector = \"{} > {}\".format(self.get_selector(parent), self.get_selector(reverse))\n\n # TODO support returning multiple vs single based on if tag should be multiple or single\n return selector\n\n # Third try - use parent's selector:nth-child() - check for single elements vs lists like ingred/steps\n # NOTE: this block is unreachable (it follows the return above) and 'new_els' is never defined here\n for index, el in enumerate(new_els, start=1):\n if search_str in el.get_text():\n break\n\n selector += ':nth-of-type({})'.format(index)\n\n return selector\n\n def child_depth(self, parent, child):\n depth = 1\n\n while child.parent != parent:\n child = child.parent\n depth += 1\n\n return depth\n\n def find_mixed_tags(self, scope, string):\n words = string.split(' ')\n tags = scope.find_all(True, string=re.compile(words[0], re.IGNORECASE))\n\n #Loop through tags parents until find one with string that matches 'string'\n #This is not necessarily the most shallow tag, so add to depths list for later parsing\n depths = []\n for t in tags:\n d = 0\n for p in t.find_parents(True):\n d += 1\n if p is not None and self.caseless_include(string, p.get_text()):\n depths.append((d, self.get_selector(p)))\n\n # Loop through array of depths, find most shallow selector\n mindepth = -1\n sel = \"\"\n\n for d, s in depths:\n if mindepth == -1 or d < mindepth:\n sel = s\n mindepth = d\n\n return sel\n\n\n def find_scope(self, lookups):\n titles = self.soup.body.find_all(True, string=re.compile(lookups['title'], re.IGNORECASE))\n depth = []\n\n # compute the depth of titles parent relative to ingredients parent to find closest title to ingredient list\n for title in titles:\n scope = title.parent\n \n if scope is None:\n continue\n\n el = scope.find(True, 
string=re.compile(lookups['steps'], re.IGNORECASE))\n while el is None:\n scope = scope.parent\n\n if scope is None:\n break\n\n el = scope.find(True, string=re.compile(lookups['steps'], re.IGNORECASE))\n \n if scope != None:\n d = self.child_depth(scope, el)\n depth.append((d, scope))\n\n mindepth = 0\n scp = self.soup.body\n\n for d, s in depth:\n if mindepth == 0 or d < mindepth:\n scp = s\n mindepth = d\n\n return scp\n\n def grab_selectors(self, lookups):\n #TODO find scope by title/ingredient combo? Title first, then\n # try to find ingredient in parent classes, first success is scope\n\n scope = self.find_scope(lookups)\n\n sels = {}\n for key, val in lookups.items():\n sels[key] = self.lookup_selector(val, scope)\n\n scopesel = self.get_selector(scope)\n #TODO add scope back in\n for k, v in sels.items():\n if v != \"\" and scopesel not in v.split(' '):\n sels[k] = \"{} \".format(scopesel) + v\n\n print(sels)\n return sels\n\n def scrape_selector(self, sel):\n els = self.soup.select(sel)\n texts = []\n\n for e in els:\n texts.append(e.get_text(strip=True))\n\n return texts\n \n def grab_recipe(self, selectors):\n rec = {}\n for key, val in selectors.items():\n if val != \"\":\n rec[key] = self.scrape_selector(val)\n print(\"{} : {}\".format(key, rec[key]))\n\n return rec\n\nselectors = {\n 'title': 'h1.recipe-summary__h1',\n 'author': 'span.submitter__name',\n 'desc': 'div.submitter__description',\n 'ingred': 'span[itemprop=\"recipeIngredient\"]',\n 'preptime': 'time[itemprop=\"prepTime\"]',\n 'cooktime': 'time[itemprop=\"cookTime\"]',\n 'totaltime': 'time[itemprop=\"totalTime\"]',\n 'steps': 'li.step .recipe-directions__list--item',\n 'cals': '.calorie-count span:first-child'}\n\nlookups = {\n 'title': 'Zucchini Banana Bread',\n 'ingred': 'all-purpose or white whole wheat flour',\n 'preptime': '20 Minutes',\n 'cooktime': '55 Minutes',\n 'totaltime': '1 Hour 15 Minutes',\n 'steps': 'Preheat the oven to 350 degrees F. Lightly grease an 8 1/2-inch by 4 1/2-inch loaf pan. 
Set aside.',\n 'yield': '1 Loaf'\n}\n\ncookielookups = {\n 'title': 'BEST CHOCOLATE CHIP COOKIE RECIPE',\n 'desc': 'is easy to make and a great base for',\n 'ingred': '1 large egg',\n 'preptime': '15',\n 'cooktime': '15',\n 'totaltime': '30',\n 'steps': 'Note: This dough requires chilling.',\n 'yield': '24'\n}\n\nnyt = {\n 'title': 'The Only Ice Cream Recipe You’ll Ever Need',\n 'desc': 'This silky, luscious and very classic custard can be used as the base for any ice cream',\n 'ingred': 'cups heavy cream',\n 'totaltime': '20 minutes plus sev',\n 'steps': 'In a small pot, simme',\n 'yield': 'About 1 1/2 pints'\n}\n\nscraper = AbstractScraper('https://www.allrecipes.com/recipe/25037/best-big-fat-chewy-chocolate-chip-cookie/')\n#scraper = AbstractScraper('https://www.crazyforcrust.com/best-chocolate-chip-cookie-recipe/')\n#scraper = AbstractScraper('https://cooking.nytimes.com/recipes/1016605-the-only-ice-cream-recipe-youll-ever-need')\n#sels = scraper.grab_selectors(lookups)\n\nscraper.grab_recipe(selectors)\n\n#otherscraper = AbstractScraper('https://www.nourish-and-fete.com/easy-flour-tortillas-from-scratch/')\n\n#otherscraper.grab_recipe(sels)","repo_name":"calpoly-chefs/onlinemenu","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29976409501","text":"from math import sqrt\nfrom math import log2\n\ndef f(x):\n return x * x + sqrt(x)\n \ndef solve(rightPartOfEquationConstant):\n PRECISION = 1e-7\n left = 1.0\n right = rightPartOfEquationConstant\n iterationsCount = int(log2((right - left) / PRECISION))\n\n for i in range(iterationsCount):\n\n if (right - left) < PRECISION:\n return right\n \n m = (left + right) / 2.0\n if f(m) < rightPartOfEquationConstant:\n left = m\n else:\n right = m\n \n return (left + right) / 2.0\n \ndef solveProblem():\n rightPartOfEquationConstant = float(input())\n result = solve(rightPartOfEquationConstant)\n print('{:.6f}'.format(result))\n \nsolveProblem()","repo_name":"be-y-a/smth","sub_path":"MADE/ALGORITHMS/WEEK3_C.py","file_name":"WEEK3_C.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3751911473","text":"# Answer1 - Iterative\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n if x == 1:\n return 1\n \n base = x\n result = 1\n temp_n = n\n n = abs(n) if n < 0 else n\n while n > 0:\n if n%2 == 1:\n result*=base\n base*=base\n n//=2\n \n return result if temp_n > 0 else 1/result \n\n# Answer 2 - Recursive\nclass Solution:\n def binaryExp(self, x: float, n: int) -> float:\n # Base case, to stop recursive calls.\n if n == 0:\n return 1\n if n < 0:\n return 1.0 / self.binaryExp(x, -1 * n)\n \n # Perform Binary Exponentiation.\n # If 'n' is odd we perform Binary Exponentiation on 'n - 1' and multiply result with 'x'.\n if n % 2 == 1:\n return x * self.binaryExp(x * x, (n - 1) // 2)\n # Otherwise we calculate result by performing Binary Exponentiation on 'n'.\n else:\n return self.binaryExp(x * x, n // 2)\n\n def myPow(self, x: float, n: int) -> float:\n return self.binaryExp(x, n)\n","repo_name":"amankitsingh/Programming","sub_path":"powx-n/powx-n.py","file_name":"powx-n.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20009179403","text":"from marketorestpython.client import 
MarketoClient\r\nfrom pandas.io.json import json_normalize\r\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\r\nfrom sqlalchemy import create_engine\r\nimport csv\r\nimport json\r\nimport sys\r\nimport psycopg2\r\nimport datetime\r\nimport subprocess\r\nimport pandas as pd\r\nimport smtplib\r\nimport time\r\n\r\ndef sendErrorMail(variable, e):\r\n smtpserver = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n smtpserver.ehlo()\r\n smtpserver.starttls()\r\n smtpserver.ehlo()\r\n smtpserver.login(variable['sender'], variable['password'])\r\n subject = \"Subject: Error in Script\\n\"\r\n msg = 'Subject: {}\\n\\n{}'.format(subject, str(e))\r\n smtpserver.sendmail(variable['sender'], variable['reciever'], msg)\r\n print('Error Sent in email')\r\n smtpserver.close()\r\n\r\n\r\ndef creating_database(conn, postgresDatabaseName):\r\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\r\n cursor = conn.cursor()\r\n statement = \"SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname = '\" + postgresDatabaseName + \"';\"\r\n cursor.execute(statement)\r\n not_exists_row = cursor.fetchone()\r\n not_exists = not_exists_row[0]\r\n if not_exists == False:\r\n cursor = conn.cursor()\r\n cursor.execute('CREATE DATABASE ' + postgresDatabaseName + ';')\r\n conn.commit()\r\n else:\r\n print(\"Database Already Exists....\")\r\n\r\n\r\ndef main():\r\n # getting Marketo Credentials\r\n with open('config.json') as Variable_file:\r\n variable = json.load(Variable_file)\r\n\r\n munchkin_id = variable[\"Marketo_Credentials\"][\"Munchkin_Id\"]\r\n client_id = variable[\"Marketo_Credentials\"][\"Client_Id\"]\r\n client_secret = variable[\"Marketo_Credentials\"][\"Client_Secret\"]\r\n try:\r\n mc = MarketoClient(munchkin_id, client_id, client_secret)\r\n except Exception as e:\r\n # sendErrorMail(variable['email'], e)\r\n print(\"error sent in email\", e)\r\n\r\n postgresHost = variable[\"PostgreSQL\"][\"PostgreSQL_Host\"]\r\n postgresDatabaseName = variable[\"PostgreSQL\"][\"PostgreSQL_Database_Name\"]\r\n postgresSchemaName = variable[\"PostgreSQL\"][\"PostgreSQL_Schema_Name\"]\r\n postgresUserName = variable[\"PostgreSQL\"][\"PostgreSQL_User_Name\"]\r\n postgresPassword = variable[\"PostgreSQL\"][\"PostgreSQL_Password\"]\r\n postgresPort = variable[\"PostgreSQL\"][\"PostgreSQL_Port\"]\r\n tableProgramsInMarketo = variable[\"PostgreSQL\"][\"Table\"][\"Table_programs_in_marketo\"]\r\n tableProgramData = variable[\"PostgreSQL\"][\"Table\"][\"Table_programs_data\"]\r\n tableProgramsMembersData = variable[\"PostgreSQL\"][\"Table\"][\"Table_programs_members_data\"]\r\n\r\n\r\n ''' Creating Schema and Using it as default'''\r\n try:\r\n conn = psycopg2.connect(dbname=postgresDatabaseName, user=postgresUserName, password=postgresPassword,\r\n port=postgresPort)\r\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\r\n cursor = conn.cursor()\r\n cursor.execute('CREATE SCHEMA IF NOT EXISTS ' + postgresSchemaName + ';')\r\n cursor.execute(\"SET search_path TO \" + postgresSchemaName)\r\n\r\n except:\r\n print(\"Error! 
Connection Failure.\")\r\n # sendErrorMail(variable['email'], e)\r\n sys.exit()\r\n\r\n engine = create_engine(\r\n r'postgresql://' + postgresUserName + ':' + postgresPassword + '@' + postgresHost + ':' + str(\r\n postgresPort) + '/' + postgresDatabaseName)\r\n\r\n try:\r\n cursor.execute(\"Truncate Table \" + tableProgramData)\r\n cursor.execute(\"Truncate Table \" + tableProgramsInMarketo)\r\n cursor.execute(\"Truncate Table \" + tableProgramsMembersData)\r\n programIds = get_programIds(mc)\r\n check_new_programs(engine, postgresSchemaName, tableProgramsInMarketo, programIds)\r\n get_programsData(mc, cursor, engine, postgresSchemaName, tableProgramsInMarketo, tableProgramData)\r\n get_membersData(mc, cursor, engine, postgresSchemaName, tableProgramsInMarketo, tableProgramsMembersData)\r\n except Exception as e:\r\n # sendErrorMail(variable['email'], e)\r\n print(\"error sent in email\", e)\r\n\r\n # membersData = pd.DataFrame(mc.execute(method='get_multiple_leads_by_program_id', programId='1137'))\r\n\r\ndef get_programsData(mc, cursor, engine, postgresSchemaName, tableProgramsInMarketo, tableProgramData):\r\n query = \"SELECT program_id from \" + postgresSchemaName + \".\" + tableProgramsInMarketo + \" WHERE fetched_program_data is FALSE\"\r\n df = pd.read_sql_query(query, engine)\r\n pid = list(df['program_id'])\r\n print(\"New Programs Data for our database : \", len(pid))\r\n # programData = pd.DataFrame()\r\n\r\n for id in pid:\r\n cost = 0\r\n print(\"fetching {}'s data\".format(id))\r\n data = mc.execute(method='get_program_by_id', id=id)\r\n time.sleep(2)\r\n if (len(data[0]['costs']) < 1):\r\n cost = 0\r\n else:\r\n cost = []\r\n for each in data[0]['costs']:\r\n cost += each['costs']\r\n\r\n programData = pd.DataFrame(data)\r\n # programData = pd.concat([programData, df], axis=0, ignore_index=True)\r\n # df = pd.DataFrame(programData)\r\n temp = programData[['id', 'tags']]\r\n temp = unstack(temp, 'tags').dropna(subset=['tags']).to_json(orient='records')\r\n temp = json_normalize(json.loads(temp))\r\n if len(temp):\r\n temp = temp.drop('tags.tagType', axis=1)\r\n temp = temp.groupby('id')['tags.tagValue'].apply(', '.join).reset_index()\r\n programData = programData.merge(temp, left_on='id', right_on='id', how='left').drop('tags', axis=1).rename(\r\n index=str, columns={'tags.tagValue': 'tags'})\r\n else:\r\n programData['tags'] = ''\r\n programData = programData.rename(index=str, columns={'id': 'program_id'}).drop('folder', axis=1)\r\n # cost = pd.Series(costs)\r\n programData['costs'] = cost\r\n programData.columns = map(str.lower, programData.columns)\r\n programData.to_sql(tableProgramData, engine, if_exists='append', schema=postgresSchemaName, index=False)\r\n cursor.execute(\r\n \"Update \" + postgresSchemaName + \".\" + tableProgramsInMarketo + \" set fetched_program_data ='TRUE' where program_id = \" + str(\r\n id))\r\n print(\"Program Data is updated\")\r\n\r\ndef get_membersData(mc, cursor, engine, postgresSchemaName, tableProgramsInMarketo, tableProgramsMembersData):\r\n query = \"SELECT program_id from \" + postgresSchemaName + \".\" + tableProgramsInMarketo + \" WHERE fetched_members_data is FALSE\"\r\n df = pd.read_sql_query(query, engine)\r\n pid = list(df['program_id'])\r\n print(\"Members data need to update of {} programs : \".format(len(pid)))\r\n for id in pid:\r\n members_data = pd.DataFrame(mc.execute(method='get_multiple_leads_by_program_id', programId=id,\r\n fields=['createdAt', 'email', 'firstName', 'id', 'lastName',\r\n 'leadStatus', 'sfdcAccountId', 
'sfdcContactId', 'sfdcId',\r\n 'sfdcLeadId', 'updatedAt', 'mktoName', 'title', 'company',\r\n 'phone',\r\n 'leadSource', 'sfdcType', 'True_Market_Name__c',\r\n 'Market_Region__c', 'Lead_Source_Detail__c',\r\n 'Account_Owner_Email__c'], batchSize=None))\r\n members_data['program_id'] = id\r\n members_data = members_data.to_json(orient='records')\r\n members_data = json_normalize(json.loads(members_data)).rename(index=str, columns={'id': 'leadid'}).rename(\r\n index=str, columns=lambda x: x.replace('.', '_'))\r\n members_data.columns = map(str.lower, members_data.columns)\r\n members_data.to_sql(tableProgramsMembersData, engine, if_exists='append', schema=postgresSchemaName,\r\n index=False)\r\n cursor.execute(\r\n \"Update \" + postgresSchemaName + \".\" + tableProgramsInMarketo + \" set fetched_members_data ='TRUE' where program_id = \" + str(\r\n id))\r\n print(\"Member inserted of program no. \", id)\r\n print(\"Program Data is updated\")\r\n\r\ndef get_programIds(mc):\r\n subprocess.call(['pythonw.exe', 'check_API_calls.py'])\r\n programs = pd.DataFrame(mc.execute(method='browse_programs', maxReturn=200))\r\n print(\"Total Program in Marketo : \", len(programs))\r\n programs = list(programs['id'].drop_duplicates())\r\n return programs\r\n\r\ndef unstack(dataframe, colum):\r\n temp = dataframe[colum].apply(pd.Series).unstack().reset_index(level=0, drop=True)\r\n temp.name = colum\r\n dataframe = dataframe.drop(colum, axis=1).join(temp)\r\n del temp\r\n return dataframe\r\n\r\ndef check_new_programs(engine, postgresSchemaName, table, pid):\r\n query = \"SELECT program_id from \" + postgresSchemaName + \".\" + table\r\n df = pd.read_sql_query(query, engine)\r\n programsInTable = list(df['program_id'])\r\n programsInTable = set(sorted(programsInTable))\r\n programsFromMarketo = set(sorted(pid))\r\n new_pid = list(programsFromMarketo.difference(programsInTable))\r\n print(\"new_pid :\", new_pid)\r\n print(\"New programs for our database : \", len(new_pid))\r\n df = pd.DataFrame(new_pid).rename(index=str, columns={0: 'program_id'})\r\n df['fetched_program_data'] = False\r\n df['fetched_members_data'] = False\r\n df.to_sql(table, engine, if_exists='append', schema=postgresSchemaName, index=False)\r\n return new_pid\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"JaswantD/MarketoRest","sub_path":"Programs_Marketo.py","file_name":"Programs_Marketo.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1375958280","text":"from json.decoder import JSONDecodeError\nimport requests\n\ndef merriam_webster(w):\n key_used = 0\n keys = ['2181863b-1231-465b-84f2-db16c8853fa1', 'b1366550-3d40-4f7c-b53d-46c13eb246ab']\n dictionary_types = ['collegiate', 'sd3']\n url = \"https://dictionaryapi.com/api/v3/references/\" + dictionary_types[key_used] + '/json/' + w + '?key=' + keys[key_used]\n r = requests.get(url)\n try:\n r.raise_for_status()\n except:\n return 'No Definitions Found'\n try:\n a = r.json()\n except:\n raise Exception(r.text)\n\n if a == []: \n return 'No Definitions Found'\n if type(a[0]) == str: \n return 'No Definitions Found'\n else:\n return a[0]['fl']\n \ndef free_dictionary(w):\n\n w = w.replace(' ', '%20') # assign the result: str.replace returns a new string (the original call discarded it)\n r = requests.get('https://api.dictionaryapi.dev/api/v2/entries/en/' + w)\n try:\n r.raise_for_status()\n except:\n return 'No Definitions Found'\n \n a = r.json()\n try:\n output = a[0]['meanings'][0]['partOfSpeech']\n except:\n output = 'No Definitions Found'\n \n return 
output\n\nif '__main__' == __name__:\n word = 'services industry?'\n print(merriam_webster(word))\n print(free_dictionary(word))\n","repo_name":"feedur/job","sub_path":"seek/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14107504872","text":"#######################\n# Packages ############\n#######################\n\nimport tweepy\nimport pytumblr\nimport os\nimport pandas as pd\nimport pymongo\n\nfrom dotenv import load_dotenv\n# Load variables found in .env as environment variables\ndirname = os.getcwd()\nenvfile = os.path.join(dirname, '.env')\nload_dotenv(envfile)\n\n#######################\n# Authentication ######\n#######################\n\ntumb_client = pytumblr.TumblrRestClient(\n os.getenv(\"CONSUMER_KEY_TUMBLR\"),\n os.getenv(\"CONSUMER_SECRET_TUMBLR\"),\n os.getenv(\"ACCESS_TOKEN_TUMBLR\"),\n os.getenv(\"ACCESS_TOKEN_SECRET_TUMBLR\"),\n)\n\ntumb_client.info() # Grabs the current user information\n\n\n###########################\n# read posts with wandsbek#\n# and store inside MongoDB#\n###########################\ncollection_tag = 'billstedt'\ntagged_return = tumb_client.tagged(collection_tag)\ntumblr_posts = [item for item in tagged_return]\nclient = pymongo.MongoClient('localhost:27017')\ndb = client[\"sma\"]\ncollection = db[collection_tag]\n\nfor post in tumblr_posts:\n id_post = post\n id_post['_id'] = id_post.pop('id')\n try:\n collection.insert_one(post)\n except pymongo.errors.DuplicateKeyError:\n continue\n\ncollection.count_documents({})\nfor post in collection.find():\n print(post)","repo_name":"jaebjgh/application-project","sub_path":"API_Tests/tumblrAPI_test.py","file_name":"tumblrAPI_test.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74089623252","text":"# Generic responds for SVO structure\ngSVO = [\n \"I remember you told me that %1. \",\n \"Oh, I know that %1. \",\n]\n# Generic respond for if a certain object doesn't have adj or adv\ngS = [\n \"Oh, your %1. \",\n \"You mean %1. 
\"\n]\n\ngReflections = {\n \"am\" : \"are\",\n \"was\" : \"were\",\n \"i\" : \"you\",\n \"i'd\" : \"you would\",\n \"i've\" : \"you have\",\n \"i'll\" : \"you will\",\n \"my\" : \"your\",\n \"are\" : \"am\",\n \"you've\": \"I have\",\n \"you'll\": \"I will\",\n \"your\" : \"my\",\n \"yours\" : \"mine\",\n \"you\" : \"me\",\n \"me\" : \"you\"\n}\n\ndef generateResp(input):\n result = []\n for item in input:\n # change to list format\n words = list(item)\n words = ' '.join(words)\n words = words.split()\n dict = gReflections\n keys = dict.keys();\n for i in range(0, len(words)):\n if words[i] in keys:\n words[i] = dict[words[i]]\n fact = (' '.join(words))\n for resp in gSVO:\n result.append(resp.replace(\"%1\", fact))\n return result\n","repo_name":"JerryHwung/ELIZA-long-term-memory","sub_path":"resp.py","file_name":"resp.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26184792488","text":"from typing import List\n\nimport numpy as np\n\nfrom lambda_cps.envs.wrapped import WrapperBase\n\n\nclass RandomShootingController:\n def __init__(self, env: WrapperBase):\n self.env = env\n\n def next_actions(self, n_step: int, n_traj: int) -> List[np.ndarray]:\n current_state = self.env.get_state()\n\n best_reward = -np.inf\n best_action_seq = None\n for i in range(n_traj):\n self.env.set_state(current_state)\n rollout_reward = 0\n action_seq = []\n for _ in range(n_step):\n action = self.env.action_space.sample()\n _, reward, _, _ = self.env.step(action)\n action_seq.append(action)\n rollout_reward += reward\n if rollout_reward > best_reward:\n best_reward = rollout_reward\n best_action_seq = action_seq\n\n self.env.set_state(current_state)\n\n return best_action_seq\n","repo_name":"Wencybiubiubiu/lambdaCPS","sub_path":"lambda_cps/evaluation/control/random_shooting.py","file_name":"random_shooting.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22461759930","text":"from typing import List\nimport unittest\n\nfrom tracetools_launch.action import Trace\nfrom tracetools_launch.actions.ld_preload import LdPreload\n\n\nclass TestTraceAction(unittest.TestCase):\n\n def __init__(self, *args) -> None:\n super().__init__(\n *args,\n )\n\n def test_has_profiling_events(self) -> None:\n events_lists_match: List[List[str]] = [\n [\n 'lttng_ust_cyg_profile_fast:func_entry',\n 'hashtag:yopo',\n ],\n [\n 'lttng_ust_cyg_profile:func_entry',\n 'some_other_event',\n 'lttng_ust_cyg_profile:func_exit',\n ],\n ]\n events_lists_no_match: List[List[str]] = [\n [\n 'lttng_ust_statedump:bin_info',\n 'ros2:event',\n ],\n [],\n ]\n for events in events_lists_match:\n self.assertTrue(Trace.has_profiling_events(events))\n for events in events_lists_no_match:\n self.assertFalse(Trace.has_profiling_events(events))\n\n def test_has_ust_memory_events(self) -> None:\n events_lists_match: List[List[str]] = [\n [\n 'hashtag:yopo',\n 'lttng_ust_libc:malloc',\n 'lttng_ust_libc:realloc',\n ],\n [\n 'lttng_ust_libc:still_a_match',\n ],\n ]\n events_lists_no_match: List[List[str]] = [\n [],\n [\n 'my_random:event',\n 'lttng_ust_whatever'\n ]\n ]\n for events in events_lists_match:\n self.assertTrue(Trace.has_ust_memory_events(events))\n for events in events_lists_no_match:\n self.assertFalse(Trace.has_ust_memory_events(events))\n\n def test_get_shared_lib_path(self) -> None:\n # Only test not finding a lib for now\n 
self.assertIsNone(\n LdPreload.get_shared_lib_path('random_lib_that_does_not_exist_I_hope.so')\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"paulbovbel/ros2_tracing","sub_path":"tracetools_launch/test/tracetools_launch/test_trace_action.py","file_name":"test_trace_action.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"9446157268","text":"import os \nimport time\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup, NavigableString\n\nfrom libs.logging_process import Logging_process\nfrom libs.selenium_process import Selenium_process\n\nlogger = Logging_process('scrap_macro_economics_test')\n\nclass Naver_finance_crawler(object):\n \"\"\"Used to crawl the items on naver finance;\n crawls without using Selenium \"\"\"\n\n def __init__(self, df, url):\n\n self.df = df\n self.url = url\n\n def get_page_html(self, page_num):\n\n url = self.url + \"&page={}\".format(page_num)\n logger.info(url)\n\n try:\n html = urlopen(url)\n\n except Exception as error:\n logger.info(error)\n return None, None \n\n soup = BeautifulSoup(html.read(), 'html.parser')\n\n thead = soup.find('thead')\n tbody = soup.find('tbody')\n\n return thead, tbody\n\n def tbody_data(self, tbody):\n\n out_list = []\n\n for tr in tbody.children:\n\n if isinstance(tr, NavigableString):\n continue\n\n date = tr.find('td' , {'class' : 'date'})\n num = tr.find('td', {'class' : 'num'})\n\n date = date.get_text().strip()\n date = date.replace('.', '-')\n #date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n num = num.get_text().strip()\n num = num.replace(',', '') \n num = float(num)\n \n\n out_list.append((date, num)) \n\n return out_list \n\n def data_to_df(self, df, data_list):\n\n #print(tbody)\n\n index = len(df)\n \n for i in range(len(data_list)):\n\n day = data_list[i][0]\n num = data_list[i][1]\n\n print(day)\n\n #print(day in df[self.df.columns[0]].values)\n if day in df[self.df.columns[0]].values:\n\n return df\n\n df.at[index, self.df.columns[0]] = day\n df.at[index, self.df.columns[1]] = num\n\n index += 1\n\n return df\n\n def run(self, page):\n \"\"\"Crawl repeatedly, once per page, for the given page count\"\"\"\n\n df = self.df \n\n\n for page_num in range(1, page + 1):\n # page numbering starts at 1 \n\n try:\n _, tbody = self.get_page_html(page_num)\n except Exception as error:\n logger.info(error)\n return None \n\n out_list = self.tbody_data(tbody)\n\n df = self.data_to_df(df ,out_list)\n\n # rest a moment to avoid a possible ip ban\n time.sleep(1)\n \n df = df.sort_values(by = self.df.columns[0], ascending = False)\n\n return df\n\nclass Crawler_BS4(object):\n \"\"\"Stock index crawler part, driven by a Selenium driver\"\"\"\n\n def __init__(self, driver):\n \"\"\"Takes a driver and creates a BeautifulSoup object from it.\"\"\"\n\n self.html = driver.page_source\n self.soup = BeautifulSoup(self.html, \"html.parser\")\n logger.info(\"soup 생성\")\n\n def get_page_html(self):\n\n try:\n soup = self.soup\n\n thead = soup.find('thead')\n tbody = soup.findAll('tbody')[1]\n except Exception as error:\n logger.info(error)\n return None, None\n\n return thead, tbody\n\n def get_data(self, tbody):\n \n out_list = []\n\n for tr in tbody.children:\n \n if isinstance(tr, NavigableString):\n continue\n\n day = tr.find('td', {'class' : 'tb_td'}) \n day = day.get_text().strip()\n day = day.replace('.', '-')\n\n close_point = tr.find('td', {'class' : 'tb_td2'})\n close_point = close_point.get_text().strip()\n close_point = 
close_point.replace(',', '')\n\n open_point = tr.find('td', {'class' : 'tb_td4'})\n open_point = open_point.get_text().strip()\n open_point = open_point.replace(',', '')\n open_point = float(open_point)\n\n high_point = tr.find('td', {'class' : 'tb_td5'})\n high_point = high_point.get_text().strip()\n high_point = high_point.replace(',', '')\n high_point = float(high_point)\n\n low_point = tr.find('td', {'class' : 'tb_td6'})\n low_point = low_point.get_text().strip()\n low_point = low_point.replace(',', '')\n low_point = float(low_point)\n\n out_list.append((day, close_point, open_point, high_point, low_point))\n\n return out_list\n \n\n\n def data_to_df(self, df, data_list):\n\n index = len(df)\n \n for i in range(len(data_list)):\n\n day = data_list[i][0]\n close_point = data_list[i][1]\n open_point = data_list[i][2]\n high_point = data_list[i][3]\n low_point = data_list[i][4]\n\n if day in df[df.columns[0]].values:\n\n return df, False\n\n df.at[index, df.columns[0]] = day\n df.at[index, df.columns[1]] = close_point\n df.at[index, df.columns[2]] = open_point\n df.at[index, df.columns[3]] = high_point\n df.at[index, df.columns[4]] = low_point\n\n index += 1\n\n return df, True\n\n def process(self, df):\n\n _, tbody = self.get_page_html()\n #print(tbody)\n\n out_list = self.get_data(tbody)\n #print(out_list)\n\n df = self.data_to_df(df, out_list)\n\n return df \n\nclass World_Stock_Market(object):\n \"\"\"Uses Selenium when crawling the stock index items\n from naver finance\"\"\"\n\n def __init__(self, df, url):\n logger.info(\"Creating Selenium Object\")\n\n self.sp = Selenium_process(url)\n\n logger.info(\"Selenium driver loading\")\n self.driver = self.sp.run_chromedriver()\n\n self.driver.implicitly_wait(5)\n self.main_page = self.driver.current_window_handle\n\n self.df = df\n\n def run(self, page):\n\n # number of pages to scrape\n max_count_page = page\n # number of pages scraped so far\n count_page = 0\n\n stop = False \n \n #on the stock page, pages come grouped 10 at a time. 
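(e.g. below, page_num = view_page * 10 + num, so view_page=1, num=3 -> page 13)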
\n #at first pages 1-10 are shown; 'next' must be pressed for pages 11-20 to appear.\n # number used to build the page xpath\n view_page = 0\n\n while(stop == False):\n\n for num in range(1,11):\n\n page_num = view_page * 10 + num\n\n page_xpath = '//*[@id=\"dayLink{}\"]'.format(page_num)\n page = self.driver.find_element_by_xpath(page_xpath) \n\n try:\n page.click()\n except Exception as error:\n logger.info(error)\n stop = True\n break \n \n self.main_page = self.driver.current_window_handle\n\n try:\n cbs4 = Crawler_BS4(self.driver)\n\n #time.sleep(3)\n except Exception as error:\n logger.info(error)\n stop = True\n break\n\n try:\n self.df, conti = cbs4.process(self.df)\n logger.info('id=\"dayLink{}\" 의 데이터를 DataFrame 으로 저장함'.format(page_num))\n except Exception as error:\n logger.info(error)\n stop = True\n break\n\n logger.info('id=\"dayLink{}\" 처리 완료'.format(page_num))\n\n # this page is scraped, so count it \n count_page += 1\n\n # rest a moment to avoid a possible ip ban \n time.sleep(1)\n\n # stop once the requested number of pages has been scraped \n if count_page > max_count_page:\n stop = True\n break\n\n #the for loop is done, so move on to the next view_page\n \n class_name = 'next'\n next_page = self.driver.find_element_by_class_name(class_name)\n\n try:\n next_page.click()\n except Exception as error:\n logger.info(error)\n break \n\n self.main_page = self.driver.current_window_handle\n\n view_page += 1 \n\n self.df = self.df.sort_values(by = self.df.columns[0], ascending = False)\n\n self.sp.down_chromedriver(self.driver)\n\n return self.df\n\n\nclass scrap_macro_economics(object):\n \"\"\"One DB-table-creation & crawling function per item\"\"\"\n\n def __init__(self):\n pass\n\n def scrap_exchange_rate(self, name, url, page):\n \"Exchange-rate crawling function\"\n df = DataFrame(columns = ['날짜', '매매기준율'])\n\n # create table (name) \n\n # select max_date from table (name)\n\n self.nfc = Naver_finance_crawler(df, url)\n\n df = self.nfc.run(page) \n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"exchange_rate\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name)\n\n def scrap_interest_rate(self, name, url, page):\n \"Interest-rate crawling function\"\n df = DataFrame(columns = ['날짜', '종가'])\n\n # create table (name) \n\n # select max_date from table (name)\n\n self.nfc = Naver_finance_crawler(df, url)\n\n df = self.nfc.run(page) \n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"interest_rate\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name)\n\n def scrap_gold_oil(self, name, url, page):\n \"Gold & oil crawling function\"\n df = DataFrame(columns = ['날짜', '종가'])\n\n # create table (name) \n\n # select max_date from table (name)\n\n self.nfc = Naver_finance_crawler(df, url)\n\n df = self.nfc.run(page) \n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"gold_oil\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name)\n\n def scrap_non_ferrous_metal(self, name, url, page):\n \"Non-ferrous-metal crawling function\"\n df = DataFrame(columns = ['날짜', '종가'])\n\n # create table (name) \n\n # select max_date from table (name)\n\n self.nfc = Naver_finance_crawler(df, url)\n\n df = self.nfc.run(page) \n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"non_ferrous_metal\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name) \n\n def scrap_agricultural_product(self, name, url, page):\n \"Agricultural-product crawling 
함수\"\n df = DataFrame(columns = ['날짜', '종가'])\n\n # create table (name) \n\n # select max_date from table (name)\n\n self.nfc = Naver_finance_crawler(df, url)\n\n df = self.nfc.run(page) \n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"agricultural_product\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name)\n \n\n def scrap_global_stock(self, name, url, page):\n '''외국 주요 주가지수 크롤링 함수'''\n\n df = DataFrame(columns = ['일자', '종가', '시가', '고가', '저가'])\n\n self.wsm = World_Stock_Market(df, url)\n\n # create table (name) \n\n # select max_date from table (name)\n\n df = self.wsm.run(page)\n\n print(df)\n\n save_file_path = os.path.join(\"data\", \"csv_file\", \"global_stock\")\n os.makedirs(save_file_path, exist_ok = True)\n\n df.to_csv(os.path.join(save_file_path, name + \".csv\"))\n\n # update table (name) \n\n\n\n\nif __name__==\"__main__\":\n\n pass ","repo_name":"9033/web_crawler2","sub_path":"naver_finance_ver2/scrap_macro_economics_test.py","file_name":"scrap_macro_economics_test.py","file_ext":"py","file_size_in_byte":12255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14424074902","text":"import matplotlib.pyplot as plt\nimport networkx as nx\n\n\ndef draw_graph(G, colors, pos):\n default_axes = plt.axes(frameon=True)\n nx.draw_networkx(G, node_color=colors, node_size=600,\n alpha=0.8, ax=default_axes, pos=pos)\n edge_labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n # Write this figure to a file\n plt.savefig(\"graph.png\")\n\n\ndef draw_tsp_solution(G, order, colors, pos):\n G2 = nx.DiGraph()\n G2.add_nodes_from(G)\n n = len(order)\n for i in range(n):\n j = (i + 1) % n\n G2.add_edge(order[i], order[j], weight=G[order[i]][order[j]][\"weight\"])\n default_axes = plt.axes(frameon=True)\n nx.draw_networkx(\n G2, node_color=colors, edge_color=\"b\", node_size=600, alpha=0.8, ax=default_axes, pos=pos\n )\n edge_labels = nx.get_edge_attributes(G2, \"weight\")\n nx.draw_networkx_edge_labels(\n G2, pos, font_color=\"b\", edge_labels=edge_labels)\n\n # Write this figure to a file\n plt.savefig(\"solution.png\")\n","repo_name":"adpadillar/quantum-hack","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6709778913","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\nimport matplotlib.ticker as ticker\nimport os\nfrom matplotlib.font_manager import FontProperties\nfrom collections import defaultdict\nfrom statistics import *\nfrom subprocess import call\n\ndef init(args = None):\n return\n\ndef do_work(tracer, tracer_name, args = None):\n loops = str(args['loop'])\n tp_sizes = args['tp_sizes']\n nprocess = args['nprocess']\n\n for tp_size in tp_sizes:\n for i in nprocess:\n args['tp_size'] = tp_size\n tracer.start_tracing('session-test', args)\n if tracer_name == 'perf':\n call(\"perf record -e 'empty_tp:empty_ioctl_\" + tp_size + \"b' /home/mogeb/git/benchtrace/all-calls/allcalls -t \"\n + tracer_name + \" -n \" + loops + \" -p \" + str(i) + \" -o \" + tracer_name + \".out\" + \" -s \" + tp_size, shell=True)\n else:\n call(\"/home/mogeb/git/benchtrace/all-calls/allcalls -t \"\n + tracer_name + \" -n \" + loops + \" -p \" + str(i) + \" -o \" + tracer_name + \".out\" + \" -s \" 
+ tp_size, shell=True)\n tracer.stop_tracing('session-test')\n\ndef cleanup(args = None):\n return\n\n\ndef compile_results(args):\n # compile_bars(args)\n compile_graphs(args)\n # compile_percentiles(args)\n return\n\n\ndef compile_graphs(args):\n str(args['buf_size_kb'])\n numbers_of_threads = ['1', '2', '4', '8']\n tracers = ['none', 'lttng', 'ftrace', 'perf']\n values = defaultdict(list)\n\n \"\"\"\n Fix thread size = 1\n \"\"\"\n for tracer in tracers:\n for number_of_threads in numbers_of_threads:\n fname = tracer + '_128bytes_' + number_of_threads + 'process.hist'\n if not os.path.isfile(fname):\n continue\n with open(fname, 'rb') as file:\n for line in file:\n pass\n values[tracer].append(line)\n print(values)\n for tracer in tracers:\n if not len(values[tracer]) == 0:\n plt.plot(numbers_of_threads, values[tracer], 'o-', label=tracer)\n plt.title('Time taken to finish workload according to number of threads')\n plt.xlabel('Number of threads')\n plt.ylabel('Time in ns')\n plt.legend()\n plt.show()\n\n return\n\n\n\"\"\"\nWill analyze the .hist files.\n\"\"\"\ndef compile_percentiles(args):\n res_dir = '/home/mogeb/git/benchtrace/trace-client/'\n byte_sizes = ['4', '32', '64', '128', '192', '256']\n nprocesses = ['1']\n tracers = ['none', 'lttng', 'ftrace', 'perf']\n perc = 0.90\n\n for nprocess in nprocesses:\n for tracer in tracers:\n percentiles = []\n for bytes in byte_sizes:\n fname = res_dir + tracer + '_' + bytes + 'bytes_' + nprocess + 'process.hist'\n values = np.genfromtxt(fname, delimiter=',', skip_header=0,\n names=['min', 'max', 'num'], dtype=None, skip_footer=1)\n percentiles.append(getPercentile(values['max'], values['num'], perc))\n plt.plot(byte_sizes, percentiles, 'o-', label=tracer)\n\n plt.axis([0, 310, 50, 210])\n plt.title(str(int(perc * 100)) + 'th percentiles for the cost of a tracepoint')\n plt.xlabel('Payload size in bytes')\n plt.ylabel('Time in ns')\n fontP = FontProperties()\n fontP.set_size('small')\n\n imgname = 'results/' + nprocess + 'proc_' + str(args['buf_size_kb']) + 'subbuf_kb'\n # plt.savefig(imgname + '.png')\n # plt.savefig(imgname + '.pdf')\n plt.show()\n\n return values['max'], values['num'], percentiles\n\ndef compile_bars(args):\n res_dir = '/home/mogeb/git/benchtrace/trace-client/'\n width = 0.2 # the width of the bars\n\n none_percentiles = []\n lttng_percentiles = []\n ftrace_percentiles = []\n perf_percentiles = []\n byte_sizes = ['4', '64', '128', '192', '256']\n tracers = ['none', 'lttng', 'ftrace', 'perf']\n nprocesses = ['1']\n\n for nprocess in nprocesses:\n for bytes in byte_sizes:\n lttng_values = np.genfromtxt(res_dir + 'lttng_' + bytes + 'bytes_1process.hist', delimiter=',', skip_header=0,\n names=['min', 'max', 'num'], dtype=None, skip_footer=1)\n lttng_percentiles.append(getPercentile(lttng_values['max'], lttng_values['num'], 0.9))\n\n N = len(lttng_values['num'])\n ind = np.arange(N) # the x locations for the groups\n\n fig, ax = plt.subplots()\n\n none_values = np.genfromtxt(res_dir + 'none_' + bytes + 'bytes_1process.hist', delimiter=',', skip_header=0,\n names=['min', 'max', 'num'], dtype=None, skip_footer=1)\n none_percentiles.append(getPercentile(none_values['max'], none_values['num'], 0.9))\n\n ftrace_values = np.genfromtxt(res_dir + 'ftrace_' + bytes + 'bytes_1process.hist', delimiter=',', skip_header=0,\n names=['min', 'max', 'num'], dtype=None, skip_footer=1)\n ftrace_percentiles.append(getPercentile(ftrace_values['max'], ftrace_values['num'], 0.9))\n\n perf_values = np.genfromtxt(res_dir + 'perf_' + bytes + 
'bytes_1process.hist', delimiter=',', skip_header=0,\n names=['min', 'max', 'num'], dtype=None, skip_footer=1)\n perf_percentiles.append(getPercentile(perf_values['max'], perf_values['num'], 0.9))\n # rect_none = ax.bar(ind, none_values['totaltime'], width, color='b')\n\n rect_none = ax.bar(ind, none_values['num'], width, color='r')\n rect_lttng = ax.bar(ind + width, lttng_values['num'], width, color='b')\n rect_ftrace = ax.bar(ind + 2 * width, ftrace_values['num'], width, color='y')\n rect_perf = ax.bar(ind + 3 * width, perf_values['num'], width, color='m')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Time in ns')\n ax.set_title('Time taken to do N calls')\n ax.set_xticks(ind + width)\n ax.set_xticklabels(lttng_values['max'])\n\n ax.legend((rect_none[0], rect_lttng[0], rect_ftrace[0], rect_perf[0]), ('None', 'LTTng', 'Ftrace', 'Perf'))\n\n fontP = FontProperties()\n fontP.set_size('small')\n plt.axis([10, 40, 0, int(args['loop'])])\n\n imgname = 'results/hist_' + nprocess + 'proc_' + str(args['buf_size_kb']) + 'subbuf_kb'\n fig = plt.gcf()\n fig.set_size_inches(12, 7)\n # fig.savefig('test2png.png', dpi=100)\n # plt.savefig(imgname + '.png')\n # plt.savefig(imgname + '.pdf')\n\n return\n\n\ndef compile_bars_old():\n res_dir = '/home/mogeb/git/benchtrace/trace-client/'\n width = 0.15 # the width of the bars\n\n lttng_values = np.genfromtxt(res_dir + 'lttng.out', delimiter=',', skip_header=0,\n names=['nthreads', 'totaltime', 'loop'], dtype=None)\n\n N = len(lttng_values['totaltime'])\n ind = np.arange(N) # the x locations for the groups\n\n fig, ax = plt.subplots()\n\n none_values = np.genfromtxt(res_dir + 'none.out', delimiter=',', skip_header=0,\n names=['nthreads', 'totaltime', 'loop'], dtype=None)\n rect_none = ax.bar(ind, none_values['totaltime'], width, color='b')\n\n rect_lttng = ax.bar(ind + width, lttng_values['totaltime'], width, color='r')\n\n ftrace_values = np.genfromtxt(res_dir + 'ftrace.out', delimiter=',', skip_header=0,\n names=['nthreads', 'totaltime', 'loop'], dtype=None)\n rect_ftrace = ax.bar(ind + 2 * width, ftrace_values['totaltime'], width, color='y')\n\n perf_values = np.genfromtxt(res_dir + 'perf.out', delimiter=',', skip_header=0,\n names=['nthreads', 'totaltime', 'loop'], dtype=None)\n rect_perf = ax.bar(ind + 3 * width, perf_values['totaltime'], width, color='m')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Time in ns')\n ax.set_title('Time taken to do N calls')\n ax.set_xticks(ind + width)\n ax.set_xticklabels(lttng_values['nthreads'])\n\n ax.legend((rect_none[0], rect_lttng[0], rect_ftrace[0], rect_perf[0]), ('None', 'LTTng', 'Ftrace', 'Perf'))\n\n plt.show()\n return\n\ndef compile_histograms():\n res_dir = '/home/mogeb/git/benchtrace/trace-client/'\n # none_values = np.genfromtxt(res_dir + 'none.out', delimiter=',', skip_header=2,\n # names=['latency'], dtype=None)\n\n none_values = [ ( pylab.loadtxt(filename) ) for filename in [(res_dir + 'none.hist')] ] [0]\n lttng_values = [ ( pylab.loadtxt(filename) ) for filename in [(res_dir + 'lttng.hist')] ] [0]\n ftrace_values = [ ( pylab.loadtxt(filename) ) for filename in [(res_dir + 'ftrace.hist')] ][0]\n perf_values = [ ( pylab.loadtxt(filename) ) for filename in [(res_dir + 'perf.hist')] ][0]\n\n print('Mean none: ' + str(mean(none_values.tolist())))\n print('Median none: ' + str(median(none_values.tolist())))\n print('90th per none: ' + str(np.percentile(none_values.tolist(), 90)))\n print('95th per none: ' + str(np.percentile(none_values.tolist(), 95)))\n 
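# note: np.percentile above ranks the raw per-call samples loaded with pylab.loadtxt;\n # the binned getPercentile() helper further down serves the (min, max, count)\n # histogram rows used by compile_percentiles and compile_bars\n 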
print()\n print('Mean lttng: ' + str(mean(lttng_values.tolist())))\n print('Median lttng: ' + str(median(lttng_values.tolist())))\n print('90th per lttng: ' + str(np.percentile(lttng_values.tolist(), 90)))\n print('95th per lttng: ' + str(np.percentile(lttng_values.tolist(), 95)))\n print()\n print('Mean ftrace: ' + str(mean(ftrace_values.tolist())))\n print('Median ftrace: ' + str(median(ftrace_values.tolist())))\n print('90th per ftrace: ' + str(np.percentile(ftrace_values.tolist(), 90)))\n print('95th per ftrace: ' + str(np.percentile(ftrace_values.tolist(), 95)))\n print()\n print('Mean perf: ' + str(mean(perf_values.tolist())))\n print('Median perf: ' + str(median(perf_values.tolist())))\n print('90th per perf: ' + str(np.percentile(perf_values.tolist(), 90)))\n print('95th per perf: ' + str(np.percentile(perf_values.tolist(), 95)))\n\n nbins=1000\n isnormed=False\n iscumul=False\n if isnormed == True:\n plt.axis([30, 400, 0, 1])\n else:\n plt.axis([30, 400, 0, 11000])\n lttng_filtered = lttng_values[~is_outlier(lttng_values)]\n ftrace_filtered = ftrace_values[~is_outlier(ftrace_values)]\n none_filtered = none_values[~is_outlier(none_values)]\n perf_filtered = perf_values[~is_outlier(perf_values)]\n plt.hist(none_filtered.tolist(), normed=isnormed, cumulative=iscumul, bins=nbins, color='y', alpha=0.5, label='none')\n plt.hist(lttng_filtered.tolist(), normed=isnormed, cumulative=iscumul, bins=nbins, color='b', label='lttng')\n plt.hist(ftrace_filtered.tolist(), normed=isnormed, cumulative=iscumul, bins=nbins, color='r', alpha=0.5, label='ftrace')\n plt.hist(perf_filtered.tolist(), normed=isnormed, cumulative=iscumul, bins=nbins, color='g', alpha=0.5, label='perf')\n plt.title(\"Gaussian Histogram\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.legend()\n\n print('none: ' + str(len(none_filtered.tolist())))\n print('lttng: ' + str(len(lttng_filtered.tolist())))\n print('ftrace: ' + str(len(ftrace_filtered.tolist())))\n print('perf: ' + str(len(perf_filtered.tolist())))\n\n\n plt.show()\n\n\n # plt.xlabel('Smarts')\n # plt.ylabel('Probability')\n # plt.grid(True)\n #\n # plt.show()\n\n\n\ndef is_outlier(points, thresh=500000000):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh\n\n\n\"\"\"\nGet the nth percentile from values\n\"\"\"\ndef getPercentile(values, density, n):\n population = 0\n count = 0\n\n if not n < 1:\n print('N must be smaller than 1')\n return -1\n\n if len(values) != len(density):\n print('Values and density should be the same size')\n return -1\n\n for i in density:\n population += i\n\n for i in range(1, len(density)):\n count += density[i]\n if count > population * n:\n return values[i]\n\n return values[-1] # fall back to the last bin; 'density[len(density)]' would raise an IndexError\n\n\n\n\n\n","repo_name":"mogeb/benchtrace","sub_path":"trace-client/cost-for-workload.py","file_name":"cost-for-workload.py","file_ext":"py","file_size_in_byte":12168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37510956857","text":"\"\"\"<tt077.py\r\n\r\nIn this program we explore some ...\r\n\r\nMORE ADVANCED FEATURES OF COMMAND BINDING\r\n\r\nIn our program tt075.py, we used the \"command\" option to bind an event handler \r\nto a widget. 
For example, in that program the statement\r\n\r\n self.button1 = Button(self.myContainer1, command=self.button1Click)\r\n\r\nbound the button1Click function to the button1 widget.\r\n\r\nAnd we used event binding to bind our buttons to the <Return> keyboard event.\r\n\r\n self.button1.bind(\"<Return>\", self.button1Click_a)\r\n\r\nIn our earlier program, the event handlers for the two buttons performed quite \r\ndifferent functions. \r\n\r\nBut suppose that the situation was different. Suppose that we have several \r\nbuttons, all of which should trigger essentially the *same* type of action. The \r\nbest way to handle such a situation is to bind the events for all of the buttons \r\nto a single event handler. Each button would invoke the same handler routine, \r\nbut pass it different arguments telling it what to do. \r\n\r\nThat is what we are doing in this program. \r\n\r\n\r\nCOMMAND BINDING\r\n\r\nIn this program, as you can see, we have two buttons, and we use the \"command\" \r\noption to bind them all to the same event handler -- the \"buttonHandler\" routine. \r\nWe pass the buttonHandler routine three arguments: the name of the button (in \r\nthe button_name variable), a number, and a string. \r\n\r\n self.button1 = Button(self.myContainer1,\r\n command=self.buttonHandler(button_name, 1, \"Good stuff!\")\r\n )\r\n\r\nIn a serious application, the buttonHandler routine would of course do serious \r\nwork, but in this program it merely prints the arguments that it receives.\r\n\r\n\r\nEVENT BINDING\r\n\r\nSo much for command binding. What about event binding? \r\n\r\nYou will note that we have commented out the two lines that do event binding on \r\nthe <Return> event.\r\n\r\n # self.button1.bind(\"<Return>\", self.buttonHandler_a(event, button_name, 1, \"Good stuff!\"))\r\n\r\nThis is the first sign of a problem. Event binding automatically passes an\r\nevent argument -- but there is simply no way to include that event argument in\r\nour list of arguments. \r\n\r\nWe'll have to come back to this problem later. For now, let's simply run the \r\nprogram and see what happens.\r\n\r\n\r\nPROGRAM BEHAVIOR\r\n\r\nWhen you look at the code, this program looks quite reasonable. But when you \r\nrun it, you will see that it doesn't work right. The buttonHandler routine is \r\ninvoked even before the GUI is displayed. In fact, it is invoked TWO times! \r\n\r\nAnd if you left-mouse-click on any of the buttons, you will find that nothing \r\nhappens -- the \"eventHandler\" routine is *not* being invoked.\r\n\r\nNote that the only way to close this program is to click the \"close\" icon (the \r\n\"X\" in a box) on the right side of the title bar.\r\n\r\nSo run the program now, and see what happens. 
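\r\n\r\n(A hint, offered here only as a sketch and not as part of this lesson's code: the\r\nusual cure is to give \"command\" a callable instead of a call, e.g.\r\n\r\n    command=lambda name=button_name: self.buttonHandler(name, 1, \"Good stuff!\")\r\n\r\nThe lambda is stored and invoked only when the button is clicked, and the default\r\nargument freezes the current value of button_name for each button.)\r\n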
Then, in our next program, we \r\nwill see why it happens.\r\n\r\n[revised: 2003-02-23]\r\n>\"\"\"\r\nfrom tkinter import *\r\n\r\nclass MyApp:\r\n def __init__(self, parent):\r\n self.myParent = parent \r\n self.myContainer1 = Frame(parent)\r\n self.myContainer1.pack()\r\n \r\n button_name = \"OK\"\r\n self.button1 = Button(self.myContainer1,\r\n command=self.buttonHandler(button_name, 1, \"Good stuff!\"))\r\n \r\n # self.button1.bind(\"<Return>\", self.buttonHandler_a(event, button_name, 1, \"Good stuff!\"))\r\n self.button1.configure(text=button_name, background=\"green\") \r\n self.button1.pack(side=LEFT)\r\n self.button1.focus_force() # Put keyboard focus on button1 \r\n \r\n button_name = \"Cancel\"\r\n self.button2 = Button(self.myContainer1, \r\n command=self.buttonHandler(button_name, 2, \"Bad stuff!\")) \r\n \r\n # self.button2.bind(\"<Return>\", self.buttonHandler_a(event, button_name, 2, \"Bad stuff!\")) \r\n self.button2.configure(text=button_name, background=\"red\")\r\n self.button2.pack(side=LEFT) \r\n \r\n \r\n def buttonHandler(self, arg1, arg2, arg3): \r\n print(\" buttonHandler routine received arguments:\", arg1.ljust(8), arg2, arg3 )\r\n \r\n def buttonHandler_a(self, event, arg1, arg2, arg3):\r\n print(\"buttonHandler_a received event\", event)\r\n self.buttonHandler(arg1, arg2, arg3)\r\n \r\nprint(\"\\n\"*100 ) # clear the screen\r\nprint(\"Starting program tt077.\" ) \r\nroot = Tk()\r\nmyapp = MyApp(root)\r\nprint(\"Ready to start executing the event loop.\" )\r\nroot.mainloop()\r\nprint(\"Finished executing the event loop.\" )\r\n","repo_name":"daleathan/thinkingintkinter","sub_path":"tt077.py","file_name":"tt077.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23544331604","text":"#! 
/usr/bin/python3\n\nimport sys\nimport os\nimport time\nfrom typing import Dict, Iterable, List, Tuple\nfrom collections import defaultdict\n\n\nDIRECTIONS = {\n \"N\": 1j,\n \"S\": -1j,\n \"E\": 1,\n \"W\": -1\n}\n\n\ndef get_distances(routes: str) -> Iterable[int]:\n distances: Dict[complex, int] = defaultdict(lambda: sys.maxsize)\n distances[0j] = 0\n group_ends: List[complex] = []\n head = 0j\n for c in routes[1:-1]:\n if c == \"(\":\n group_ends.append(head)\n elif c == \")\":\n head = group_ends.pop()\n elif c == \"|\":\n head = group_ends[-1]\n else:\n previous = head\n head += DIRECTIONS[c]\n distances[head] = min(distances[head], distances[previous] + 1)\n return distances.values()\n\n\ndef solve(routes: str) -> Tuple[int, int]:\n distances = get_distances(routes)\n return (\n max(distances),\n sum(1 for distance in distances if distance >= 1000)\n )\n\n\ndef get_input(file_path: str) -> str:\n if not os.path.isfile(file_path):\n raise FileNotFoundError(file_path)\n\n with open(file_path, \"r\") as file:\n return file.read().strip()\n\n\ndef main():\n if len(sys.argv) != 2:\n raise Exception(\"Please, add input file path as parameter\")\n\n start = time.perf_counter()\n part1_result, part2_result = solve(get_input(sys.argv[1]))\n end = time.perf_counter()\n print(\"P1:\", part1_result)\n print(\"P2:\", part2_result)\n print()\n print(f\"Time: {end - start:.7f}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bigsby/aoc","sub_path":"2018/20/py/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"10545713606","text":"#Game of Hangman\n\nimport random\nfrom words import words\n#import string to use string.ascii_uppercase constants\nimport string\n\n#check if the random word choice is valid, if it contains ' ' or '-' then \n#computer has to choose again\ndef get_valid_word(words):\n word = random.choice(words)\n while '-' in word or ' ' in word:\n word = random.choice(words)\n return word.upper()\n\n#hangman processes\ndef hangman():\n #using get_valid_word to check if word from words is valid and stores in\n word = get_valid_word(words)\n #make all the letters in the word a set\n word_letters = set(word)\n #creates the string of uppercase ascii letters into a set and stores in alphabet\n alphabet = set(string.ascii_uppercase)\n #letters already guessed by the user\n used_letters = set()\n\n lives = 10\n\n #gets user input\n #while the length of the word and lives is greater than zero\n while len(word_letters) > 0 and lives > 0:\n #each loop, updates lives and used letters\n print('You have ', lives, 'lives left and You have guessed these letters: ', ' '.join(used_letters))\n #what the current word is, replace * with guessed letter (ie: W * R D)\n #list of every letter users guessed, is shown, otherwise is a star\n word_list = [letter if letter in used_letters else '*' for letter in word]\n #put togther all letter with space using join \n #' '.join(['a', 'b', 'cd']) --> 'a b cd'\n print('Current word: ',' '.join(word_list))\n\n #gets user input and convert to upper case\n user_letter = input('Guess a letter: ').upper()\n #if user_letter is a valid character in the alphabet that haven't been used\n if user_letter in alphabet - used_letters:\n #add user_letter to used letters\n used_letters.add(user_letter)\n #and if user guess correctly\n if user_letter in word_letters:\n #remove user letter in word letter\n #which keeps track of all the letters 
in word\n                #word letter will decrease in size\n                word_letters.remove(user_letter)\n                #print('')\n            else:\n                #if wrong, lose a life\n                lives -= 1\n                print(\"\\nYour letter,\", user_letter, 'is not in the word. Guess again!')\n\n        #checks if letter has already been entered\n        elif user_letter in used_letters:\n            print(\"You have already used this letter! Guess again\")\n\n        #means character just entered is not in the alphabet\n        else:\n            print(\"Invalid character! Guess again\")\n\n    if lives == 0:\n        print('Sorry, you lost! The word was: ', word)\n    else:\n        print('Yay! You won! The word was: ', word)\n\nif __name__ == '__main__':\n    hangman()","repo_name":"smei08/FreeCodeCampProject","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23214741965","text":"from versionedobj import VersionedObject, Serializer\n\nclass NestedObject(VersionedObject):\n    var1 = 28\n    var2 = 99\n\nclass TopObject(VersionedObject):\n    var1 = 77.7\n    var2 = NestedObject()\n\n\n\n# Create object instance, and serialize it\nobj = TopObject()\nserializer = Serializer(obj)\nprint(serializer.to_json(indent=4))\n\n# Now, set all attributes to 0\nfor attr_name in obj:\n    obj[attr_name] = 0\n\n# Serialize it again\nprint(serializer.to_json(indent=4))\n\n","repo_name":"eriknyquist/versionedobj","sub_path":"examples/iter_and_set_attributes.py","file_name":"iter_and_set_attributes.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"16031344446","text":"#!/bin/python\n# This is a simple chat client used to test the chat server.\n# You can open as many instances of it as you want.\n\nimport sys\nimport socket\nfrom threading import Thread\n\n\nclass Client(Thread):\n\n    def __init__(self, port=888, host=socket.gethostname(), name=raw_input, output=sys.stdout.write):\n        \"\"\"\n        Sets default values: the port the socket will connect to (888),\n        and the host (the machine currently running the script).\n        Both can be changed on class instantiation.\n\n        @type port: int\n        @type host: str\n        \"\"\"\n\n        # Initiates the Thread class when the Server class is instantiated.\n        super(Client, self).__init__()\n\n        self.input = name\n        self.output = output\n\n        # Variable used by loops to check if it should continue (client is\n        # connected).\n        self.running = True\n\n        self.host = host\n        self.port = port\n\n        # Creates the socket with default parameters.\n        self.sock = socket.socket()\n\n    def run(self):\n\n        # Connect the socket to the defined port and host.\n        self.sock.connect((self.host, self.port))\n\n        # Checks if the user name isn't defined yet.\n\n        # Listen for the standard input to the user's nickname.\n        name = self.input(\"Please enter your nickname: \")\n\n        # Send the server a message with the command name which sets the name\n        # for the socket.\n        self.sock.send('!name: '+name)\n\n        # Sets a dedicated thread to writing the output to the screen.\n        thread1 = Thread(target=self.loop_output)\n\n        # Sets a dedicated thread to listening to user input.\n        thread2 = Thread(target=self.loop_input)\n\n        # Starts output thread first, so that we can read the server response\n        # to user connection.\n        thread1.start()\n\n        # Starts the input thread and execute the function.\n        thread2.start()\n\n    def loop_input(self):\n        \"\"\"\n        It handles user input to the server.\n        \"\"\"\n\n        # Loops the input so that the user can constantly type.\n        while 
self.running:\n\n # Reads the user input to the console.\n message = self.input()\n\n # Send the server the entered message without any filtering.\n self.sock.send(message)\n\n def loop_output(self):\n \"\"\"\n It handles user output to the screen.\n \"\"\"\n\n # Loops forever until no data is received.\n while self.running:\n\n # Blocking method that listen for incoming data, it listens\n # for at most 1024 bytes at once.\n data = self.sock.recv(1024)\n\n # If no data is received.\n if not data:\n\n # Exits the loop.\n break\n\n # Print the received data to the screen.\n self.output(data+'\\n')\n\n def __close__(self):\n # Sets the running condition to false so that a loop knows\n # that the client is disconnected.\n self.running = False\n\n # Close the socket so it can't be used anymore.\n self.sock.close()\n\n# If running the script by itself the block gets executed.\nif '__main__' == __name__:\n import doctest\n doctest.testmod()\n\n # Instantiates the server class.\n client = Client()\n\n # Run the client.\n client.run()","repo_name":"frederichtig/python-chat-server","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14722048891","text":"import pyttsx3\r\nimport datetime\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\nmyName = 'hannah'\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice',voices[1].id)\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n\r\n\r\n\r\ndef wishme():\r\n hour = datetime.datetime.now().hour\r\n if hour>=0 and hour<=12:\r\n speak('good morning, Madhu')\r\n elif hour>12 and hour<16:\r\n speak('good afternoon,madhu')\r\n else:\r\n speak('good evening,Madhu')\r\n speak(f'I am {myName},how may i help you?')\r\n\r\n\r\n\r\ndef hearMe():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print('Listening...')\r\n audio = r.listen(source)\r\n try:\r\n print('recognizing....')\r\n query = r.recognize_google(audio,language='en-in')\r\n print('you said',query)\r\n except Exception:\r\n print('say that again, please')\r\n return 'none'\r\n return query\r\n\r\nif __name__ == \"__main__\":\r\n wishme()\r\n while True:\r\n query = hearMe().lower()\r\n\r\n\r\n if 'wikipedia' in query:\r\n speak(\"searching wikipedia....\")\r\n query = query.replace('wikipedia','')\r\n result = wikipedia.summary(query,sentences=2)\r\n speak('According to wikipedia')\r\n print(result)\r\n speak(result)\r\n elif 'open google' in query:\r\n webbrowser.open('www.google.com')\r\n elif 'open youtube' in query:\r\n webbrowser.open('www.youtube.com')\r\n elif 'open my python code' in query:\r\n os.startfile('C:\\\\Users\\\\acer\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"madhushree7498/Virtual-assisstant-using-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19001561751","text":"import keras\r\n\r\n#Part 1: Building the CNN\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\n\r\nclassifier = Sequential()\r\n\r\n#Step 1 : 
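# The chat client record above is Python 2 (raw_input, str passed straight to
# sock.send). A hedged Python 3 sketch of the same text send/receive pattern;
# these helper names are illustrative, not from the original client:
import socket

def send_text(sock: socket.socket, text: str) -> None:
    sock.sendall(text.encode("utf-8"))  # Python 3 sockets carry bytes

def recv_text(sock: socket.socket) -> str:
    return sock.recv(1024).decode("utf-8", errors="replace")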
Convolution\r\n#Convolution step uses the stride of 1\r\n#creating 32 feature detectors of 3*3 dimension whch means our CNN will have 32 feature maps.We can increase the size to 64,128,...\r\n#We can add more more convolution layers containing 64,128 feature maps to improve the results.\r\n#input shape define the format of the image. Colored images(RGB = 3) of 256 * 256 dim.But we will use small format\r\n# In tensorflow backend , the order is 2D dim and input channel\r\nclassifier.add(Convolution2D(32,3,3, input_shape = (64,64,3), activation='relu'))\r\n\r\n#Step2: pooling\r\n#pooling step uses stride of 2.\r\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\n\r\n#Adding the second Convolution layer to improve the accuracy.\r\n#Since the input shape is pooled featured maps from previous step, we don't need to specify them.\r\nclassifier.add(Convolution2D(32,3,3, activation='relu'))\r\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\n\r\n##We can add many convolution layers with increased number of feature detectors.\r\n\r\n\r\n#Step3: Flattening\r\nclassifier.add(Flatten())\r\n\r\n#Step 4 : Full connection\r\n#Flatten step previously will act as an input layer.\r\nclassifier.add(Dense(output_dim=128,activation='relu')) #1st hidden layer\r\nclassifier.add(Dense(output_dim=1,activation='sigmoid')) #output layer\r\n\r\nclassifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\r\n\r\n# Part2: Fitting the CNN to images.\r\n\r\n#performating data augmentation. This to to enrich our dataset to get more diverse images by transforming, rotating and other.\r\n#From the small amount of datasset, we take batches of images geneate multiple images. This helps to avoid overfitting.\r\n\r\n#This code wiill preprocess the images, apply data augmentation and fit CNN.\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n#target size should be same as input_shape parameter choosen earlier.\r\ntraining_set = train_datagen.flow_from_directory(\r\n 'training_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary')\r\n\r\ntest_set = test_datagen.flow_from_directory(\r\n 'test_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary')\r\n#steps_per_epoch and validation_steps are training and test sizes we have in our dataset.\r\nclassifier.fit_generator(\r\n training_set,\r\n steps_per_epoch=8000, \r\n epochs=25,\r\n validation_data=test_set,\r\n validation_steps=2000)\r\n\r\n","repo_name":"Aqumar/Deep-Learning","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18620660438","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nREGULARIZER_COF = 0.01\r\n\r\n\r\ndef _norm(x,name=\"BN\",isTraining=True):\r\n bs, h, w, c = x.get_shape().as_list()\r\n s = tf.get_variable(name+\"s\", c,\r\n initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32))\r\n o = tf.get_variable(name+\"o\", c,\r\n initializer=tf.constant_initializer(0.0))\r\n mean, var = tf.nn.moments(x, axes=[1,2], keep_dims=True)\r\n eps = 10e-10\r\n normalized = (x - mean) / (tf.sqrt(var) + eps)\r\n return s * normalized + o\r\n\r\ndef _fc_variable(weight_shape,name):\r\n with tf.variable_scope(name):\r\n # check weight_shape\r\n 
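# The CNN.py record above uses the legacy Keras 1 API (Convolution2D(32,3,3),
# Dense(output_dim=...), fit_generator). A minimal sketch of the same model in
# Keras 2 style, assuming keras>=2 is installed:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])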
input_channels = int(weight_shape[0])\r\n output_channels = int(weight_shape[1])\r\n weight_shape = (input_channels, output_channels)\r\n regularizer = tf.contrib.layers.l2_regularizer(scale=REGULARIZER_COF)\r\n\r\n # define variables\r\n weight = tf.get_variable(\"w\", weight_shape ,\r\n initializer=tf.contrib.layers.xavier_initializer(),\r\n regularizer =regularizer)\r\n bias = tf.get_variable(\"b\", [weight_shape[1]],\r\n initializer=tf.constant_initializer(0.0))\r\n return weight, bias\r\n\r\ndef _conv_variable(weight_shape,name=\"conv\"):\r\n with tf.variable_scope(name):\r\n # check weight_shape\r\n w = int(weight_shape[0])\r\n h = int(weight_shape[1])\r\n input_channels = int(weight_shape[2])\r\n output_channels = int(weight_shape[3])\r\n weight_shape = (w,h,input_channels, output_channels)\r\n regularizer = tf.contrib.layers.l2_regularizer(scale=REGULARIZER_COF)\r\n # define variables\r\n weight = tf.get_variable(\"w\", weight_shape ,\r\n initializer=tf.contrib.layers.xavier_initializer_conv2d(),\r\n regularizer=regularizer)\r\n bias = tf.get_variable(\"b\", [output_channels],\r\n initializer=tf.constant_initializer(0.0))\r\n return weight, bias\r\n\r\ndef _deconv_variable(weight_shape,name=\"deconv\"):\r\n with tf.variable_scope(name):\r\n # check weight_shape\r\n w = int(weight_shape[0])\r\n h = int(weight_shape[1])\r\n output_channels = int(weight_shape[2])\r\n input_channels = int(weight_shape[3])\r\n weight_shape = (w,h,input_channels, output_channels)\r\n regularizer = tf.contrib.layers.l2_regularizer(scale=REGULARIZER_COF)\r\n # define variables\r\n weight = tf.get_variable(\"w\", weight_shape ,\r\n initializer=tf.contrib.layers.xavier_initializer_conv2d(),\r\n regularizer=regularizer)\r\n bias = tf.get_variable(\"b\", [input_channels],\r\n initializer=tf.constant_initializer(0.0))\r\n return weight, bias\r\n\r\ndef _conv2d(x, W, stride):\r\n return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\r\n\r\ndef _deconv2d(x, W, output_shape, stride=1):\r\n # x : [nBatch, height, width, in_channels]\r\n # output_shape: [nBatch, height, width, out_channels]\r\n return tf.nn.conv2d_transpose(x, W, output_shape=output_shape, strides=[1,stride,stride,1], padding = \"SAME\",data_format=\"NHWC\")\r\n\r\ndef MinibatchstateConcat(input, averaging='all'):\r\n\r\n adjusted_std = lambda x, **kwargs: tf.sqrt(tf.reduce_mean((x - tf.reduce_mean(x, **kwargs)) **2, **kwargs) + 1e-8)\r\n vals = adjusted_std(input, axis=0, keep_dims=True)\r\n if averaging == 'all':\r\n vals = tf.reduce_mean(vals, keep_dims=True)\r\n else:\r\n print (\"nothing\")\r\n vals = tf.tile(vals, multiples=[tf.shape(input)[0], 4, 4, 1])\r\n return tf.concat([input, vals], axis=3)\r\n\r\ndef _deconv(x,input_layer, output_layer, stride=2, filter_size=3, name=\"deconv\", isTraining=True):\r\n bs, h, w, c = x.get_shape().as_list()\r\n deconv_w, deconv_b = _deconv_variable([filter_size,filter_size,input_layer,output_layer],name=\"deconv\"+name )\r\n h = _deconv2d(x,deconv_w, output_shape=[bs,h*stride,w*stride,output_layer], stride=stride) + deconv_b\r\n return h\r\n\r\ndef _conv(x, input_layer, output_layer, stride, filter_size=5, name=\"conv\", isTraining=True):\r\n conv_w, conv_b = _conv_variable([filter_size,filter_size,input_layer,output_layer],name=\"conv\"+name)\r\n h = _conv2d(x,conv_w,stride=stride) + conv_b\r\n return h\r\n\r\ndef resBlock_g(x,input_layer, output_layer,filter_size=3, name=\"deconv\", isTraining=True):\r\n h = _norm(x,name=\"BN1_\"+name,isTraining=isTraining)\r\n h = 
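# A NumPy stand-in (shapes made up) for the minibatch-stddev idea behind
# MinibatchstateConcat above: take the per-feature std across the batch,
# reduce it to one scalar, and tile it as an extra channel so the
# discriminator can sense how varied the batch is (a mode-collapse signal).
import numpy as np

x = np.random.rand(8, 4, 4, 3)                   # (batch, h, w, channels)
std = np.sqrt(np.mean((x - x.mean(axis=0)) ** 2, axis=0) + 1e-8)
extra = np.full((8, 4, 4, 1), std.mean())        # one tiled scalar channel
out = np.concatenate([x, extra], axis=3)         # (8, 4, 4, 4)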
tf.nn.leaky_relu(h)\r\n    h = _deconv(h,input_layer,output_layer,stride=2,filter_size=filter_size,name=name+\"_1\")\r\n    h = _norm(h,name=\"BN2_\"+name,isTraining=isTraining)\r\n    h = tf.nn.leaky_relu(h)\r\n    h = _deconv(h,output_layer,output_layer,stride=1,filter_size=filter_size,name=name+\"_2\")\r\n\r\n    x = _deconv(x,input_layer,output_layer,stride=2,filter_size=filter_size,name=name+\"_skip\")\r\n    return h+x\r\n\r\ndef resBlock_d(x,input_layer, output_layer,filter_size=3, name=\"conv\", isTraining=True):\r\n    #h = tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=isTraining, scope=\"BN1_\"+name)\r\n    h = tf.nn.leaky_relu(x)\r\n    h = _conv(h,input_layer,output_layer,stride=2,filter_size=filter_size,name=name+\"_1\")\r\n    #h = tf.contrib.layers.batch_norm(h, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=isTraining, scope=\"BN2_\"+name)\r\n    h = tf.nn.leaky_relu(h)\r\n    h = _conv(h,output_layer,output_layer,stride=1,filter_size=filter_size,name=name+\"_2\")\r\n\r\n    x = _conv(x,input_layer,output_layer,stride=2,filter_size=filter_size,name=name+\"_skip\")\r\n    return h+x\r\n\r\ndef buildGenerator(z, z_dim,reuse=False, isTraining=True):\r\n    with tf.variable_scope(\"Generator\") as scope:\r\n        if reuse: scope.reuse_variables()\r\n        h = z\r\n        # fc1\r\n        g_fc1_w, g_fc1_b = _fc_variable([z_dim,1024*4*4],name=\"fc1\")\r\n        h = tf.matmul(h, g_fc1_w) + g_fc1_b\r\n        h = tf.nn.relu(h)\r\n        #\r\n        h = tf.reshape(h,(-1,4,4,1024))\r\n\r\n        h = resBlock_g(h,1024,512,name=\"g5\")\r\n        h = resBlock_g(h,512,256,name=\"g4\")\r\n        h = resBlock_g(h,256,128,name=\"g3\")\r\n        h = resBlock_g(h,128,64,name=\"g2\")\r\n        h = resBlock_g(h,64,64,name=\"g1\")\r\n\r\n        h = tf.contrib.layers.batch_norm(h, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=isTraining, scope=\"BN_out\")\r\n        h = tf.nn.leaky_relu(h)\r\n        g_deconv1_w, g_deconv1_b = _conv_variable([1,1,64,3],name=\"deconv1\")\r\n        h = _conv2d(h,g_deconv1_w, stride=1) + g_deconv1_b\r\n        y = tf.tanh(h)\r\n\r\n        return y\r\n\r\ndef buildDiscriminator(y, nBatch, reuse=False, isTraining=True):\r\n    with tf.variable_scope(\"Discriminator\") as scope:\r\n        if reuse: scope.reuse_variables()\r\n        h = y\r\n        # conv1\r\n        h = resBlock_d(h,3,64,name=\"d1\")\r\n        # conv2\r\n        h = resBlock_d(h,64,128,name=\"d2\")\r\n        h = resBlock_d(h,128,256,name=\"d3\")\r\n        h = resBlock_d(h,256,512,name=\"d4\")\r\n        h = resBlock_d(h,512,512,name=\"d5\")\r\n        h = MinibatchstateConcat(h)\r\n        print(h)\r\n        # fc1\r\n        n_b, n_h, n_w, n_f = [int(x) for x in h.get_shape()]\r\n        h = tf.reshape(h,[nBatch,n_h*n_w*n_f])\r\n        #print(h)\r\n        d_fc1_w, d_fc1_b = _fc_variable([n_h*n_w*n_f,1],name=\"fc1\")\r\n        h = tf.matmul(h, d_fc1_w) + d_fc1_b\r\n\r\n        ### summary\r\n        return h\r\n","repo_name":"itsuki8914/WGAN-GP-ResBlock-TensorFlow","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"6132262881","text":"# -*- coding: utf-8 -*-\n# Processing steps\n\"\"\"\n1. Combine the kegg_enrich file with the diff_anno table and extract every KO of each pathway\n2. For each KO, collect all annotated genes and their regulation; all genes up = red, all genes down = green, mixed = blue\n3. For each KO, get its position in the pathway map\n4. Build the URL request and download the image\n5. Scrape the map section of the page and swap in the annotation info\n6. Generate the html following this structure and save it\n7. Add hyperlinks to the local files in the result table\n\"\"\"\nimport re\nimport os\nimport sys\nimport random\nimport urllib\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\n\n\ndef splitKO(KOinfo: str):\n    if not str(KOinfo) == \"nan\":\n        return KOinfo.split(',')[0]\n    return KOinfo\n
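# A quick check of splitKO above (not in the original script): it keeps the
# first KO id of a comma-separated annotation and passes the "nan"
# placeholder through unchanged.
assert splitKO("K00001,K00002") == "K00001"
assert splitKO("nan") == "nan"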
\n\ndef getblockWeb(ko: str, KO_list: list) -> dict:\n    \"\"\"\n    Fetch the uncolored pathway page and map each KO to its point label (like 1.14.16.1 ...).\n    Returns a dict of KO -> point.\n    \"\"\"\n    KO_Point_dict = {}\n    original_url = \"https://www.kegg.jp/kegg-bin/show_pathway?\"\n    myid = re.sub(r'^ko','map',ko)\n    # print(myid)\n    blockurl = original_url+myid\n    # print(blockurl)\n    user_agent=['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.87 Safari/537.36',\n            'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',\n            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',\n            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',\n            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)',\n            ]\n    headers={\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n        'Accept-Encoding': 'gzip, deflate, sdch',\n        'Accept-Language': 'zh-CN,zh;q=0.8',\n        'User-Agent': user_agent[random.randint(0,5)]\n    }\n\n    response = requests.get(blockurl,headers=headers)\n    if response.text == '<html><body>'+myid+' not found.</body></html>':\n        print(\"There is no {} ({})\".format(ko,blockurl))\n        return 'no kegg web'\n    soup=BeautifulSoup(response.text,'lxml')\n\n    # Parse every tag in both the pathway map and the module map sections\n    for map_id in ('mapdata', 'module_mapdata'):\n        section = soup.find(id=map_id)\n        if section is None:\n            continue\n        for content in section.contents:\n            content = str(content)\n\n            if content == \"\\n\" or content == \"\\r\" or content == \"\":\n                continue\n\n            try:\n                title = re.search(r\".*title=\\\"(?P<title>.*)\\\"\",content)\n                title = title.group('title')\n\n                ID = re.search(r\".*\\shref=\\\"(?P<ID>[\\S]+)\\\"\",content)\n                ID = ID.group('ID')\n\n                coords = re.search(r\".*data-coords=\\\"(?P<coords>[\\d,]+)\\\"\",content)\n                coords = coords.group('coords')\n\n                refind_KOlist = re.findall(r'(K\\d+)',title)\n                refind_KOnumber = re.search(r',\\s(?P<number>\\d.\\d.+), ',title)\n\n                if len(refind_KOlist) == 0:\n                    continue\n                for reKO in refind_KOlist:\n                    # keep the matched point string rather than the raw Match object\n                    KO_Point_dict[reKO] = refind_KOnumber.group('number') if refind_KOnumber else None\n            except Exception:\n                pass\n                # print(\"Error on {} !\".format(content))\n\n    return KO_Point_dict\n\n\ndef getko_url(KO_Point_dict: dict, ko: str, KO_list: list, KO_dict: dict) -> str:\n    \"\"\"\n    Build the kegg database url that colors each KO by its diff level.\n    Returns the url, e.g.\n    https://www.kegg.jp/kegg-bin/show_pathway?map=map00100&multi_query=K07436+red,blue\n    \"\"\"\n    #map00100&multi_query=K07436+red,blue\n    
part_url = 'https://www.kegg.jp/kegg-bin/show_pathway?map='\n ko_url = part_url + ko + '&multi_query='\n for KO in KO_list:\n level = set(KO_dict[KO]['level'].tolist()) \n# print(\"level:\")\n# print(level)\n level_color = 'blue,black'\n if level == set(['Increased']) or level == set(['up']) or level == set(['Up']):\n level_color = 'red,black'\n elif level == set([\"Decreased\"]) or level == set(['down']) or level == set(['Down']):\n level_color = 'green,black'\n \n ko_url = ko_url+KO+\"+\"+level_color+\"%0d%0a\"\n return ko_url\n \ndef getWeb(ko: str,KO_url: str,result_dir: str) -> list:\n \"\"\"\n get the passway picture and passway html of kegg database. \n return the ko_html,ko_pic path\n \n \"\"\"\n ko_html,ko_pic = 0,0\n work_dir = Path(result_dir)\n if not work_dir.exists():\n work_dir.mkdir(exist_ok=True)\n \n user_agent=['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.87 Safari/537.36',\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)',\n ]\n headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'User-Agent': user_agent[random.randint(0,5)]\n } \n \n \n \n response = requests.get(KO_url,headers=headers)\n \n # ------ 获取帖子内所有图片地址的方法 ------\n \n soup=BeautifulSoup(response.text,'lxml')\n imginfo = soup.find('img',id=\"pathwayimage\")\n imgsrc = str(re.search(r\"src=\\\"(?P<src>.*\\S*\\.png)\\\"\", str(imginfo)))\n\n imgsrc_path = Path(Path(imgsrc).name).stem\n imgName = imgsrc_path\n\n # ------ 这里最好使用异常处理及多线程编程方式 ------\n pattern = r'src=\"(.*\\S*\\.png)\"'\n # try: \n imgsrc_path = re.search(pattern, str(imginfo)).group(1)\n # catch AttributeError:\n \n try:\n\n\n img_data = urllib.request.urlopen('https://www.kegg.jp'+imgsrc_path).read()\n ko_pic = os.path.join(result_dir , str(imgName)+\".png\")\n\n f = open(ko_pic, 'wb')\n f.write(img_data)\n f.close()\n except Exception as e:\n print('https://www.kegg.jp/'+imgsrc_path+ \"error\")\n \n htl = open(os.path.join(result_dir , str(imgName)+\"_old.html\"),'w')\n htl.write(soup.prettify())\n htl.close()\n ko_html = os.path.join(result_dir ,str(imgName)+\"_old.html\")\n\n return ko_html,ko_pic\n\"\"\"\n# 测试重构web\n1.Beautiful 提取全部area ,并转化,过滤 \n 1.提取样式信息 \n 2.将title结合KO_dict[KOID],结合level以及logFC信息,转化成info \n2.重建web code \n\"\"\"\ndef creatNew(ko:str ,ko_html: str,ko_pic: str,KO_list: list,KO_dict: dict)-> str:\n \"\"\"\n read the html , build a new html \n return the path of new html\n \n \"\"\"\n new_dir = os.path.dirname(ko_html)\n path_html = os.path.join(new_dir,ko+'_new.html')\n print(ko,ko_html,ko_pic,KO_list)\n \n # 解析ko_html\n ko_html_file = open(ko_html,'r')\n ko_html_text = ko_html_file.read()\n #print(ko_html_text)\n soup = BeautifulSoup(ko_html_text)\n #print(soup)\n area_list = soup.find_all('area')\n \n # html 框架\n html_head = 
\"<html>\\n<head>\\n<meta http-equiv=\\\"content-type\\\" content=\\\"text/html; charset=utf-8\\\">\\n<title>\"\n html_head = html_head + ko\n html_head = html_head +\"\"\"\n\n\n\n\n\\n\"\n html_tail = \"\"\"\n\n\n\"\"\"\n \n \n #整理全部\n new_html = open(path_html,'w') \n new_html.write(html_head)\n \n for area in area_list:\n# print('Before...')\n# print(area)\n try:\n area['href'] = \"http://www.kegg.jp/\"+ area['href']\n except:\n error_info = \"Cant create {}\".format(path_html)\n continue\n\n try:\n area['onmouseover'] = \"\\n\" + Title2JsInfo(area['title'],KO_dict)\n del area['data-coords']\n del area['data-entry']\n del area['title']\n del area['class']\n except:\n area['onmouseover'] = 'None'\n del area['data-coords']\n del area['data-entry']\n del area['class']\n# print('After...')\n# print(area)\n if area['onmouseover'] == \"\\n\"+\"None informations\" or area['onmouseover'] == 'None':\n del area['onmouseover']\n continue\n new_html.write(area.prettify()+'\\n')\n \n# print(area.prettify())\n #print(area.encode(\"utf-8\"))\n new_html.write(html_tail)\n new_html.close()\n return path_html\n\ndef Title2JsInfo(title_info:str,KO_dict: dict)-> str:\n \"\"\"\n get info about diff genes from title \n return to the code of js ,to show some infos (Up/Down,geneID,logFC..)\n \n \"\"\"\n# print(title_info)\n ID_list = title_info.split(',')\n ID_list2 = []\n for temp_id in ID_list:\n temp_id = temp_id.strip()\n# print(temp_id)\n ID_list2.append(temp_id.split(' ')[0])\n# print(ID_list2)\n \n Up_info = ''\n Down_info = ''\n Up_KO = []\n Down_KO = []\n \n for ID in ID_list2:\n \n if ID in KO_dict.keys():\n myKO_dict = KO_dict[ID]\n UP_df = myKO_dict[myKO_dict['level'] == 'Increased'] #or myKO_dict['level'] == 'up' or myKO_dict['level'] == 'UP']]\n Down_df = myKO_dict[myKO_dict['level'] == 'Decreased']# or myKO_dict['level'] == 'down' or myKO_dict['level'] == 'Down' ]\n\n if len(UP_df) >= 1:\n Up_info = KODictDF2str(UP_df)\n Up_KO_list = list(set(UP_df['KOID'].tolist()))\n if Up_KO == []:\n Up_KO = Up_KO_list\n else:\n Up_KO = Up_KO +Up_KO_list\n if len(Down_df) >= 1:\n Down_info = KODictDF2str(Down_df)\n Down_KO_list = list(set(Down_df['KOID'].tolist()))\n if Down_KO == []:\n Down_KO = Down_KO_list\n else:\n Down_KO = Down_KO +Down_KO_list\n \n if Up_KO != [] and Down_KO != []:\n jscode = \"javascript: showInfo(\\\"
  • \"+ title_info + ':'+\"Up regulated
    • \"+Up_info+\"
  • Down regulated
    • \"+Down_info+\"
\\\");\"#.encode(\"utf-8\")\n elif Up_KO == [] and Down_KO == []:\n jscode = \"None informations\"\n elif Up_KO != [] and Down_KO == []:\n jscode = \"javascript: showInfo(\\\"
  • \"+ title_info + ':'+\"Up regulated
    • \"+Up_info+\"
\\\");\"#.encode(\"utf-8\")\n elif Up_KO == [] and Down_KO != []:\n jscode = \"javascript: showInfo(\\\"
  • \"+ title_info + ':'+\"Down regulated
    • \"+Down_info+\"
\\\");\"#.encode(\"utf-8\")\n\n return jscode\n\ndef KODictDF2str(df)-> str:\n '''df:\n gene_id KOID level logFC\n670 PH01000295G0710 K00006 Increased 2.368777\n3547 PH01001609G0120 K00006 Decreased -1.593535\n4282 PH01000393G0300 K00006 Decreased -2.086649\n '''\n info_all = ''\n for indexs in df.index:\n info = df.loc[indexs,['gene_id','logFC']].values\n# print(info.tolist())\n\n info = str(info[0])+'('+str(info[1])+')'\n if info_all == '':\n info_all = info\n else:\n info_all = info_all+','+info\n return info_all\n\ndef main(diff_anno_file,kegg_anno,result_dir):\n #1.kegg_enrich文件结合diff_anno表格 提取每一条通路的全部KO\n # diff_anno = pd.read_csv('B2_vs_B1_diff_anno.xls',sep=\"\\t\") \n # KO_Group = gene2KO.groupby(\"KOID\")\n\n diff_anno = pd.read_csv(diff_anno_file,sep=\"\\t\") \n \n\n diff_anno['KOID'] = diff_anno['KO'].apply(splitKO)\n diff_anno = diff_anno.dropna(axis=0)\n\n gene2KO = diff_anno.loc[:,['gene_id','KOID','level','logFC']]\n gene2KO = gene2KO[gene2KO['level'] != 'nonsignificant']\n\n KO_Group = gene2KO.groupby(\"KOID\")\n # 2.对每个KO,获得全部注释的gene以及上下调信息,全部基因上调为红色,全部基因下调为绿色,都有为蓝色\n KO_dict = {}\n for OneKO in KO_Group:\n KO_dict[OneKO[0]] = OneKO[1]\n\n # enrich_df = pd.read_csv('kegg_enrich_all.xls',sep=\"\\t\") \n enrich_df = pd.read_csv(kegg_anno,sep=\"\\t\")\n\n for ko in enrich_df['ID'].tolist():\n genes = enrich_df[enrich_df['ID'] == ko]['geneID'].tolist()\n try:\n genestr = genes[0]\n except IndexError:\n genes = str(genes)\n if \"'\" in genes:\n genestr = genes.split(\"'\")\n else:\n genestr = genes.split(\"\\\"\")\n print(genestr)\n gene_list = genestr.split('/')\n gene_list = list(set(gene_list))\n KO_list = []\n for gene in gene_list:\n mylist = gene2KO[gene2KO['gene_id'] == gene][\"KOID\"].tolist()\n if(len(mylist)!=0):\n for i in mylist:\n KO_list.append(i)\n KO_list = list(set(KO_list))\n # print(KO_list)\n # 1,获得无颜色界面,然后解析html页面,根据标注找到关键KO的点(类似1.14.16.1)\n KO_Point_dict = getblockWeb(ko, KO_list)\n if KO_Point_dict == 'no kegg web':\n continue\n # 2,根据KO-点-上下调,获得颜色注释界面,并��载html以及图片\n ko_url = getko_url(KO_Point_dict, ko, KO_list, KO_dict)\n # html_path,img_path = getWeb(ko, ko_url, 'test')\n try:\n html_path,img_path = getWeb(ko, ko_url, result_dir)\n except AttributeError:\n continue\n print(html_path,img_path)\n # 3,根据上下调,重构html,添加注释效果\n # 获得图片相对于页面的相对路径\n try:\n img_dir ,img_name = os.path.split(img_path)\n img_relative_path = './'+img_name\n getnew_html = creatNew(ko,html_path, img_relative_path, KO_list, KO_dict)\n except TypeError:\n print(\"Error HTML: \" + ko_url)\n # print(getnew_html)\n\n\nif __name__ == '__main__':\n args_list = sys.argv[1:]\n\n diff_anno = args_list[0]\n kegg_anno = args_list[1]\n result_dir = args_list[2]\n\n main(diff_anno,kegg_anno,result_dir)\n\n","repo_name":"guowenbo1/deg_rfc","sub_path":"scripts/diff/KEGGAnnoAuto.py","file_name":"KEGGAnnoAuto.py","file_ext":"py","file_size_in_byte":17434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40282679126","text":"import cv2\nimport numpy as np\n\nimg=cv2.imread(\"test.jpg\")\n\ngray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ngray_img=np.float32(gray_img)\n\ndst=cv2.cornerHarris(gray_img,2,3,0.04)\ndst=cv2.dilate(dst,None)\n\nimg[dst>0.01*dst.max()]=[0,0,255]\ncv2.imshow(\"output\",img)\ncv2.waitKey(1)\ncv2.imwrite(\"corner10.1_output,jpg\",img)\n","repo_name":"topG-123/pract","sub_path":"ASIP/ASIP 10.1 blob.py","file_name":"ASIP 10.1 
blob.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13591415058","text":"__author__ = 'rutger'\n\ndef gc(s):\n if len(s) <= 0:\n return 0\n return (s.count(\"C\") + s.count(\"G\")) / len(s)\n\na = open(\"file_005.txt\").readlines()\nlabel = \"\"\nsequence = \"\"\nsolutions = []\nfor line in a:\n if line[0] == \">\":\n solutions.append((label, gc(sequence)))\n label = line\n sequence = \"\"\n else:\n sequence += line.replace(\"\\n\", \"\")\nsolutions.append((label, gc(sequence)))\n\nmax = solutions[0]\nfor i in range(1, len(solutions)):\n if solutions[i][1] > max[1]:\n max = solutions[i]\n\nprint(\"%s%f\" % (max[0].replace(\">\", \"\"), max[1] * 100))\n","repo_name":"RutgerMoons/Rosalind","sub_path":"1-25/Problem_005.py","file_name":"Problem_005.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21233917574","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom layers import GraphConvolution, GraphAggregation, MultiGraphConvolutionLayers, MultiDenseLayer\n\n\nclass Generator(nn.Module):\n \"\"\"Generator network.\"\"\"\n\n def __init__(self, conv_dims, z_dim, vertexes, edges, nodes, dropout_rate):\n super(Generator, self).__init__()\n self.multi_dense_layer = MultiDenseLayer(z_dim, conv_dims, torch.nn.Tanh())\n\n self.vertexes = vertexes\n self.edges = edges\n self.nodes = nodes\n\n self.edges_layer = nn.Linear(conv_dims[-1], edges * vertexes * vertexes)\n self.nodes_layer = nn.Linear(conv_dims[-1], vertexes * nodes)\n self.dropoout = nn.Dropout(p=dropout_rate)\n\n def forward(self, x):\n output = self.multi_dense_layer(x)\n edges_logits = self.edges_layer(output).view(-1, self.edges, self.vertexes, self.vertexes)\n edges_logits = (edges_logits + edges_logits.permute(0, 1, 3, 2)) / 2\n edges_logits = self.dropoout(edges_logits.permute(0, 2, 3, 1))\n\n nodes_logits = self.nodes_layer(output)\n nodes_logits = self.dropoout(nodes_logits.view(-1, self.vertexes, self.nodes))\n\n return edges_logits, nodes_logits\n\n\nclass EncoderVAE(nn.Module):\n \"\"\"VAE encoder sharing part.\"\"\"\n def __init__(self, conv_dim, m_dim, b_dim, z_dim, with_features=False, f_dim=0, dropout_rate=0.):\n super(EncoderVAE, self).__init__()\n\n graph_conv_dim, aux_dim, linear_dim = conv_dim\n # discriminator\n self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim, with_features, f_dim, dropout_rate)\n self.agg_layer = GraphAggregation(graph_conv_dim[-1]+m_dim, aux_dim, torch.nn.Tanh(), with_features, f_dim,\n dropout_rate)\n self.multi_dense_layer = MultiDenseLayer(aux_dim, linear_dim, torch.nn.Tanh(), dropout_rate=dropout_rate)\n self.emb_mean = nn.Linear(linear_dim[-1], z_dim)\n self.emb_logvar = nn.Linear(linear_dim[-1], z_dim)\n\n @staticmethod\n def reparameterize(mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, adj, hidden, node, activation=None):\n adj = adj[:, :, :, 1:].permute(0, 3, 1, 2)\n h = self.gcn_layer(node, adj, hidden)\n h = self.agg_layer(h, node, hidden)\n h = self.multi_dense_layer(h)\n h_mu = self.emb_mean(h)\n h_logvar = self.emb_logvar(h)\n h = self.reparameterize(h_mu, h_logvar)\n return h, h_mu, h_logvar\n\n\nclass Discriminator(nn.Module):\n \"\"\"Discriminator network with PatchGAN.\"\"\"\n\n def __init__(self, conv_dim, m_dim, b_dim, dropout):\n 
super(Discriminator, self).__init__()\n\n graph_conv_dim, aux_dim, linear_dim = conv_dim\n # discriminator\n self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim)\n self.agg_layer = GraphAggregation(graph_conv_dim[-1]+m_dim, aux_dim, torch.nn.Tanh())\n self.multi_dense_layer = MultiDenseLayer(aux_dim, linear_dim, torch.nn.Tanh())\n\n self.output_layer = nn.Linear(linear_dim[-1], 1)\n\n def forward(self, adj, hidden, node, activation=None):\n adj = adj[:, :, :, 1:].permute(0, 3, 1, 2)\n h = self.gcn_layer(node, adj, hidden)\n h = self.agg_layer(h, node, hidden)\n h = self.multi_dense_layer(h)\n\n output = self.output_layer(h)\n output = activation(output) if activation is not None else output\n\n return output, h\n","repo_name":"ZhenyueQin/Implementation-MolGAN-PyTorch","sub_path":"models_vae.py","file_name":"models_vae.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"67"} +{"seq_id":"29081675007","text":"class Solution:\n def canBeEqual(self, target: List[int], arr: List[int]) -> bool:\n # 因为不统计翻转次数,所以只要排序后顺序一样就行了\n target.sort()\n arr.sort()\n if target == arr:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n target = [1,2,3,4]\n arr = [2,4,1,3]\n target = [3, 7, 9]\n arr = [3, 7, 11]\n ret = Solution().canBeEqual(target, arr)\n print(ret)","repo_name":"freesan44/LeetCode","sub_path":"LeetCode_1460.py","file_name":"LeetCode_1460.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41718197626","text":"import mysql.connector\nimport pandas as pd\nimport streamlit as st\n\nconn = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\",\n database=\"db_fp_daa\",\n)\n\nmycursor = conn.cursor()\n\ndef showTable():\n query = \"SELECT * FROM data\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['ID Buku', 'Judul', 'Jumlah Halaman', 'Tanggal Publikasi', 'Penulis', 'Penerbit', 'Bahasa', 'Rating', 'Stok'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByTitle(order):\n if order == 'asc':\n query = \"SELECT judul FROM data ORDER BY judul ASC\"\n elif order == 'desc':\n query = \"SELECT judul FROM data ORDER BY judul DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByPages(order):\n if order == 'asc':\n query = \"SELECT judul, jml_halaman FROM data ORDER BY jml_halaman ASC\"\n elif order == 'desc':\n query = \"SELECT judul, jml_halaman FROM data ORDER BY jml_halaman DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Jumlah Halaman'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByDate(order):\n if order == 'asc':\n query = \"SELECT judul, tgl_publikasi FROM data ORDER BY tgl_publikasi ASC\"\n elif order == 'desc':\n query = \"SELECT judul, tgl_publikasi FROM data ORDER BY tgl_publikasi DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Tanggal Publikasi'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByAuth(order):\n if order == 'asc':\n query = \"SELECT judul, penulis FROM data ORDER BY penulis ASC\"\n elif order == 'desc':\n query = \"SELECT judul, penulis FROM data ORDER BY penulis DESC\"\n mycursor.execute(query)\n result = 
mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Penulis'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByPub(order):\n if order == 'asc':\n query = \"SELECT judul, penerbit FROM data ORDER BY penerbit ASC\"\n elif order == 'desc':\n query = \"SELECT judul, penerbit FROM data ORDER BY penerbit DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Penerbit'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByLang(order):\n if order == 'asc':\n query = \"SELECT judul, bahasa FROM data ORDER BY bahasa ASC\"\n elif order == 'desc':\n query = \"SELECT judul, bahasa FROM data ORDER BY bahasa DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Bahasa'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByRating(order):\n if order == 'asc':\n query = \"SELECT judul, rating FROM data ORDER BY rating ASC\"\n elif order == 'desc':\n query = \"SELECT judul, rating FROM data ORDER BY rating DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Rating'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef showByStock(order):\n if order == 'asc':\n query = \"SELECT judul, stok FROM data ORDER BY stok ASC\"\n elif order == 'desc':\n query = \"SELECT judul, stok FROM data ORDER BY stok DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n df = pd.DataFrame(columns=['Judul', 'Stok'])\n for i in result:\n df.loc[len(df)+1] = i\n st.table(df)\n\ndef cekPeminjam(nama):\n query = \"SELECT * FROM peminjam WHERE nama = '\" + nama + \"'\"\n mycursor.execute(query)\n result = mycursor.fetchone()\n if result == None:\n return False\n else:\n return result[0]\n\ndef addPeminjam(nama):\n query = \"INSERT INTO `peminjam` (`nama`) VALUES ('\" + nama + \"')\"\n mycursor.execute(query)\n conn.commit()\n\ndef cekBuku(id):\n query = \"SELECT id_buku FROM data WHERE id_buku = \" + id\n mycursor.execute(query)\n result = mycursor.fetchone()\n return result[0]\n\ndef cekStok(id):\n query = \"SELECT stok FROM data WHERE id_buku = \" + id\n mycursor.execute(query)\n result = mycursor.fetchone()\n return result[0]\n\ndef updateStok(id, jumlah):\n jumlah = str(jumlah)\n query = \"UPDATE data SET stok = stok-\" + jumlah + \" WHERE id_buku = \" + id\n mycursor.execute(query)\n conn.commit()\n\ndef addTransaksi(nama, buku, jml, tgl, tgl_kembali):\n nama = str(cekPeminjam(nama))\n jml = str(jml)\n tgl = str(tgl)\n tgl_kembali = str(tgl_kembali)\n awal = str(cekStok(buku))\n akhir = str(cekStok(buku) - int(jml))\n updateStok(buku, jml)\n query = \"INSERT INTO `transaksi` (`id_peminjam`, `id_buku`, `jml_pinjaman`, `stok_awal`, `stok_akhir`, `tgl_transaksi`, `tgl_pengembalian`) VALUES ('\" + nama + \"', '\" + buku + \"', '\" + jml + \"', '\" + awal + \"', '\" + akhir + \"', '\" + tgl + \"', '\" + tgl_kembali + \"')\"\n mycursor.execute(query)\n conn.commit()","repo_name":"agungmahadana/Perpustakaan-Wibu-Jaya","sub_path":"Logic.py","file_name":"Logic.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13955877643","text":"from transformers.modeling_bert import *\nimport torch.nn.functional as F\nfrom torch.nn import MarginRankingLoss\n\nclass BertHingeForSequenceClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = 
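# The Logic.py record above repeats one query pattern per column and builds
# SQL by string concatenation, so user input ends up inside the statement. A
# minimal sketch of a safer consolidated form, assuming mysql.connector's %s
# placeholders; these names are illustrative, not from the original file:
ALLOWED_COLUMNS = {"judul", "jml_halaman", "tgl_publikasi", "penulis",
                   "penerbit", "bahasa", "rating", "stok"}

def show_by(cursor, column, order="asc"):
    # identifiers cannot be bound as parameters, so whitelist them instead
    if column not in ALLOWED_COLUMNS or order.lower() not in ("asc", "desc"):
        raise ValueError("unexpected column or order")
    cursor.execute("SELECT judul, {} FROM data ORDER BY {} {}".format(
        column, column, order.upper()))
    return cursor.fetchall()

def cek_peminjam(cursor, nama):
    # literal values go through bound parameters, never concatenation
    cursor.execute("SELECT * FROM peminjam WHERE nama = %s", (nama,))
    return cursor.fetchone()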
config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.num_labels)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids_1=None,\n attention_mask_1=None,\n token_type_ids_1=None,\n input_ids_2=None,\n attention_mask_2=None,\n token_type_ids_2=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs_1 = self.bert(\n input_ids_1,\n attention_mask=attention_mask_1,\n token_type_ids=token_type_ids_1,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output_1 = outputs_1[1]\n pooled_output_1 = self.dropout(pooled_output_1)\n logits_1 = self.classifier(pooled_output_1)\n prob_1 = F.softmax(logits_1)[:, 1]\n\n\n outputs_2 = self.bert(\n input_ids_2,\n attention_mask=attention_mask_2,\n token_type_ids=token_type_ids_2,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output_2 = outputs_2[1]\n pooled_output_2 = self.dropout(pooled_output_2)\n logits_2 = self.classifier(pooled_output_2)\n prob_2 = F.softmax(logits_2)[:, 1]\n\n loss = None\n if labels is not None:\n loss_fct = MarginRankingLoss(margin=0.5)\n loss = loss_fct(prob_1.view(-1), prob_2.view(-1), labels.view(-1))\n # print('logit_1 shape:', logits_1.shape)\n # print('prob_1 shape:', prob_1.shape)\n if not return_dict:\n # output = (torch.cat([logits_1[:, 1], logits_2[:, 1]], -1),) + (outputs_1[2:], outputs_2[2:])\n output = (F.softmax(logits_1) - F.softmax(logits_2),) + outputs_1[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits= torch.cat([logits_1[:, 1], logits_2[:, 1]], -1),\n hidden_states=[outputs_1.hidden_states, outputs_2.hidden_states],\n attentions=[outputs_1.attentions, outputs_2.attentions],\n )","repo_name":"luciusssss/VGaokao","sub_path":"src/utils_bert_hinge.py","file_name":"utils_bert_hinge.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"25193298977","text":"import json\n\n\ndef get_params_string(filename):\n \"\"\"\n A simple function that takes in a configuration file path ``file_path``\n and parses it to convert the keys to command line tags and the values to\n their respective command line tag values.\n\n :param file_path: Path to JSON file.\n :return: String representing arguments in a command line.\n \"\"\"\n with open(filename) as f:\n data = json.load(f)\n return \" \".join([f\"-{k} {data[k]}\" for k in data.keys()])\n","repo_name":"IntelAI/openseismic","sub_path":"core/python/utils/jsonparse_util.py","file_name":"jsonparse_util.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"42349255419","text":"from unittest import TestCase\nfrom edge import Edge\nfrom node import Node\n\n\nclass TestEdge(TestCase):\n def test_get_parent(self):\n a = Node('A')\n b = Node('B')\n\n e = Edge(a, b)\n\n self.assertTrue('A' == 
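# A tiny worked example (values my own) of the ranking objective in the
# BertHinge record above: MarginRankingLoss(0.5) wants prob_1 to beat prob_2
# by the margin whenever the target is +1. Note also that modern PyTorch
# expects an explicit dim= in F.softmax, e.g. F.softmax(logits, dim=-1).
import torch
from torch.nn import MarginRankingLoss

loss_fct = MarginRankingLoss(margin=0.5)
p1 = torch.tensor([0.9, 0.4])
p2 = torch.tensor([0.2, 0.6])
y = torch.tensor([1.0, 1.0])   # +1 means p1 should rank higher than p2
# per-pair: max(0, -y*(p1-p2) + 0.5) -> [0.0, 0.7]; the mean is 0.35
print(loss_fct(p1, p2, y))     # tensor(0.3500)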
e.get_parent().get_name())\n\n def test_get_destination(self):\n a = Node('A')\n b = Node('B')\n\n e = Edge(a, b)\n\n self.assertTrue('B' == e.get_child().get_name())\n","repo_name":"eross-uwp/Server","sub_path":"bayesian_network/bayesian_network/graph/tests/test_edge.py","file_name":"test_edge.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"17637002898","text":"import itertools\nimport os\nimport time\nfrom typing import List\n\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom src.neural_recon.losses import loss2\n\n# sys.path.append(\"thirdparty/sdf_computer/build/\")\n# import pysdf\n\nimport torch\nfrom torch import nn\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nfrom torch.distributions.utils import _standard_normal\nimport networkx as nx\nfrom torch_scatter import scatter_min\nimport faiss\n\nimport cv2\nimport numpy as np\nimport open3d as o3d\n\nfrom tqdm import tqdm\nimport ray\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer, seed_everything\n\nimport hydra\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom src.neural_recon.optimize_segment import sample_img\nfrom shared.common_utils import save_line_cloud, normalize_tensor, \\\n to_homogeneous_tensor, caculate_align_mat, normalize_vector, \\\n refresh_timer, get_line_mesh\n\nfrom src.neural_recon.colmap_io import read_dataset, Image\n\n\nclass Singel_node_dataset(torch.utils.data.Dataset):\n def __init__(self, v_only_train_target, v_id_batched_points, v_batched_total_points, v_id_target_face,\n v_training_mode):\n super(Singel_node_dataset, self).__init__()\n self.only_train_target = v_only_train_target\n self.training_mode = v_training_mode\n\n id_target_points = np.unique(np.concatenate([v_id_batched_points[item][::4] for item in v_id_target_face]))\n self.validation_data = np.where([points[0, 0] in id_target_points for points in v_batched_total_points])[0]\n if self.training_mode == \"validation\" or self.only_train_target:\n self.length = len(self.validation_data)\n else:\n self.length = len(v_batched_total_points)\n\n def __getitem__(self, index):\n if self.training_mode == \"validation\" or self.only_train_target:\n return self.validation_data[index]\n else:\n return index\n\n def __len__(self):\n return self.length\n\n\nclass Multi_node_single_img_dataset(torch.utils.data.Dataset):\n def __init__(self, v_data, v_is_one_target, v_id_target_face, v_training_mode, mul_number=100):\n super(Multi_node_single_img_dataset, self).__init__()\n self.img_database: List[Image] = v_data[0]\n self.graphs = v_data[1]\n self.camera_pairs = v_data[2]\n self.target_img = 0\n self.training_vertices = self.graphs[self.target_img].graph[\"training_vertices\"]\n self.training_mode = v_training_mode\n if self.training_mode == \"validation\" or v_is_one_target:\n id_nodes = np.unique(list(itertools.chain(*[\n self.graphs[self.target_img].graph[\"faces\"][id_face] for id_face in v_id_target_face])))\n self.training_vertices = [item for item in self.training_vertices if item[0][0] in id_nodes]\n self.mul_number = mul_number\n pass\n\n def __len__(self):\n return len(self.training_vertices) * self.mul_number\n\n def __getitem__(self, idx):\n idx = idx % len(self.training_vertices)\n id_src_imgs = [int(id_img) for id_img in self.camera_pairs[self.target_img][:, 0]]\n projection2 = np.stack([self.img_database[id_img].projection for id_img in id_src_imgs], axis=0)\n intrinsic = 
self.img_database[self.target_img].intrinsic\n transformation = projection2 @ np.linalg.inv(self.img_database[self.target_img].extrinsic)\n src_imgs = np.stack([\n cv2.imread(self.img_database[id_img].img_path, cv2.IMREAD_GRAYSCALE) for id_img in id_src_imgs], axis=0)\n ref_img = cv2.imread(self.img_database[self.target_img].img_path, cv2.IMREAD_GRAYSCALE)\n imgs = np.concatenate((ref_img[None, :], src_imgs), axis=0)\n ray_c = np.stack([self.graphs[self.target_img].nodes[id_node][\"ray_c\"]\n for id_node in self.training_vertices[idx][:, :2].reshape(-1)], axis=0)\n valid_flags = np.stack([self.graphs[self.target_img].edges[edge][\"valid_flag\"]\n for edge in self.training_vertices[idx][:, :2]], axis=0)\n # assert valid_flags.shape[0]==3 # Currently we only support 3 edges per point\n return torch.tensor(self.target_img, dtype=torch.long), \\\n torch.from_numpy(self.training_vertices[idx]).to(torch.long), \\\n torch.from_numpy(transformation).to(torch.float32), \\\n torch.from_numpy(intrinsic).to(torch.float32), \\\n torch.from_numpy(imgs).to(torch.float32) / 255., \\\n torch.from_numpy(ray_c).to(torch.float32), \\\n torch.from_numpy(valid_flags).to(torch.bool)\n\n @staticmethod\n def collate_fn(items):\n id_cur_imgs = -1\n batched_indexes = []\n batched_transformations = None\n batched_intrinsic = None\n batched_imgs = None\n batched_ray_c = []\n batched_valid_flags = []\n for item in items:\n id_cur_imgs = item[0]\n batched_indexes.append(item[1])\n batched_transformations = item[2]\n batched_intrinsic = item[3]\n batched_imgs = item[4]\n batched_ray_c.append(item[5])\n batched_valid_flags.append(item[6])\n batched_indexes = pad_sequence(batched_indexes, batch_first=True)\n batched_ray_c = pad_sequence(batched_ray_c, batch_first=True)\n batched_valid_flags = pad_sequence(batched_valid_flags, batch_first=True)\n return id_cur_imgs, batched_indexes, batched_transformations, batched_intrinsic, \\\n batched_imgs, batched_ray_c, batched_valid_flags\n\n\nclass LModel20(nn.Module):\n def __init__(self, v_data, v_is_regress_normal, v_viz_patch, v_log_root):\n super(LModel20, self).__init__()\n self.log_root = v_log_root\n self.is_regress_normal = v_is_regress_normal\n\n self.init_regular_variables(v_data)\n self.distances = nn.ParameterList()\n self.distance_normalizer = 10.\n for id_graph, graph in enumerate(self.graphs):\n distances = np.asarray([graph.nodes[item][\"distance\"] for item in graph])\n distances = torch.from_numpy(distances).to(torch.float32) / self.distance_normalizer\n self.distances.append(nn.Parameter(distances))\n\n # Debug\n self.id_viz_face = v_viz_patch\n\n # Init-related methods\n def init_regular_variables(self, v_data):\n # Graph related\n self.graphs = v_data[1]\n self.img_database = v_data[0]\n\n def sample_points_based_on_up(self, start_point, end_point, edge_up_c):\n time_profile = [0 for _ in range(10)]\n timer = time.time()\n device = self.ray_c.device\n\n cur_dir = end_point - start_point\n cur_length = torch.linalg.norm(cur_dir + 1e-6, dim=1)\n cur_dir = cur_dir / cur_length[:, None]\n\n # cur_up = normalize_tensor(torch.cross(edge_up_c[:, 0], cur_dir))\n # cur_up = normalize_tensor(edge_up_c)\n\n # 1-7: compute_roi\n half_window_size_meter_horizontal = cur_length # m\n half_window_size_meter_vertical = torch.tensor(0.2).to(device) # m\n half_window_size_step = 0.01\n\n # Compute interpolated point\n num_horizontal = torch.clamp((half_window_size_meter_horizontal // half_window_size_step).to(torch.long), 2,\n 1000) # (M,)\n num_vertical = 
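# A short illustration (shapes my own) of the padding done in collate_fn
# above: pad_sequence stacks variable-length tensors into one batch tensor,
# zero-filling the shorter ones along the first dimension.
import torch
from torch.nn.utils.rnn import pad_sequence

a = torch.ones(2, 3)
b = torch.ones(4, 3)
batch = pad_sequence([a, b], batch_first=True)
print(batch.shape)  # torch.Size([2, 4, 3]); a's last two rows are zeros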
torch.clamp((half_window_size_meter_vertical // half_window_size_step).to(torch.long), 2,\n 1000) # (9,); fixed\n num_coordinates_per_edge = num_horizontal * num_vertical\n\n begin_idxes = num_horizontal.cumsum(dim=0)\n total_num_x_coords = begin_idxes[-1]\n begin_idxes = begin_idxes.roll(1) # Used to calculate the value\n begin_idxes[0] = 0 # (M,)\n dx = torch.arange(num_horizontal.sum()).to(begin_idxes.device) - \\\n begin_idxes.repeat_interleave(num_horizontal) # (total_num_x_coords,)\n dx = dx / (num_horizontal - 1).repeat_interleave(num_horizontal) * \\\n half_window_size_meter_horizontal.repeat_interleave(num_horizontal) # (total_num_x_coords,)\n dy = torch.arange(num_vertical).to(begin_idxes.device) / (num_vertical - 1) * half_window_size_meter_vertical\n time_profile[1], timer = refresh_timer(timer)\n\n # Meshgrid\n total_num_coords = total_num_x_coords * dy.shape[0]\n coords_x = dx.repeat_interleave(torch.ones_like(dx, dtype=torch.long) * num_vertical) # (total_num_coords,)\n coords_y = torch.tile(dy, (total_num_x_coords,)) # (total_num_coords,)\n coords = torch.stack((coords_x, coords_y), dim=1)\n time_profile[2], timer = refresh_timer(timer)\n\n interpolated_coordinates_camera = \\\n cur_dir.repeat_interleave(num_coordinates_per_edge, dim=0) * coords_x[:, None] + \\\n edge_up_c.repeat_interleave(num_coordinates_per_edge, dim=0) * coords_y[:, None] + \\\n start_point.repeat_interleave(num_coordinates_per_edge, dim=0)\n time_profile[3], timer = refresh_timer(timer)\n\n return total_num_coords, interpolated_coordinates_camera\n\n def sample_triangles(self, num_per_m, p1, p2, p3, num_max_sample=500):\n d1 = p2 - p1\n d2 = p3 - p2\n area = torch.linalg.norm(torch.cross(d1, d2) + 1e-6, dim=1).abs() / 2\n\n num_edge_points, edge_points = self.sample_edge(num_per_m,\n # torch.stack((d1, d2, p1 - p3), dim=1).reshape(-1, 3),\n torch.stack((d1,), dim=1).reshape(-1, 3),\n # torch.stack((p1, p2, p3), dim=1).reshape(-1, 3),\n torch.stack((p1,), dim=1).reshape(-1, 3),\n num_max_sample=num_max_sample)\n # num_edge_points = num_edge_points.reshape(-1, 3).sum(dim=1)\n num_edge_points = num_edge_points.reshape(-1, 1).sum(dim=1)\n\n if not self.is_regress_normal:\n return num_edge_points, edge_points # Debug only\n\n num_per_m2 = num_per_m * num_per_m\n num_tri_samples = torch.clamp((area * num_per_m2).to(torch.long), 1, num_max_sample * 4)\n samples = torch.rand(num_tri_samples.sum(), 2, device=p1.device)\n _t1 = torch.sqrt(samples[:, 0:1] + 1e-6)\n sampled_polygon_points = (1 - _t1) * p1.repeat_interleave(num_tri_samples, dim=0) + \\\n _t1 * (1 - samples[:, 1:2]) * p2.repeat_interleave(num_tri_samples, dim=0) + \\\n _t1 * samples[:, 1:2] * p3.repeat_interleave(num_tri_samples, dim=0)\n\n # Only use the code below for debug\n if True:\n num_total_points = num_edge_points + num_tri_samples\n num_total_points_cumsum = num_total_points.cumsum(0).roll(1)\n num_total_points_cumsum[0] = 0\n sampled_total_points = torch.zeros((num_total_points.sum(), 3), device=p1.device, dtype=torch.float32)\n num_edge_points_ = num_edge_points.cumsum(0).roll(1)\n num_edge_points_[0] = 0\n num_tri_points_ = num_tri_samples.cumsum(0).roll(1)\n num_tri_points_[0] = 0\n edge_index = torch.arange(num_edge_points.sum(), device=p1.device) \\\n - (num_edge_points_ - num_total_points_cumsum).repeat_interleave(num_edge_points)\n tri_index = torch.arange(num_tri_samples.sum(), device=p1.device) \\\n - (num_tri_points_ - num_total_points_cumsum - num_edge_points).repeat_interleave(\n num_tri_samples)\n 
sampled_total_points[edge_index] = edge_points\n sampled_total_points[tri_index] = sampled_polygon_points\n return num_total_points, sampled_total_points\n return None, torch.cat((edge_points, sampled_polygon_points), dim=0)\n\n def sample_points_2d(self, v_edge_points, v_num_horizontal):\n device = v_edge_points.device\n cur_dir = v_edge_points[:, 1] - v_edge_points[:, 0]\n cur_length = torch.linalg.norm(cur_dir, dim=-1) + 1e-6\n\n cur_dir_h = torch.cat((cur_dir, torch.zeros_like(cur_dir[:, 0:1])), dim=1)\n z_axis = torch.zeros_like(cur_dir_h)\n z_axis[:, 2] = 1\n edge_up = normalize_tensor(torch.cross(cur_dir_h, z_axis, dim=1)[:, :2]) * 0.00167\n # The vertical length is 10 -> 10/6000 = 0.00167\n\n # Compute interpolated point\n num_horizontal = v_num_horizontal\n num_half_vertical = 10\n num_coordinates_per_edge = num_horizontal * num_half_vertical * 2\n\n begin_idxes = num_horizontal.cumsum(dim=0)\n total_num_x_coords = begin_idxes[-1]\n begin_idxes = begin_idxes.roll(1) # Used to calculate the value\n begin_idxes[0] = 0 # (M,)\n dx = torch.arange(num_horizontal.sum(), device=device) - \\\n begin_idxes.repeat_interleave(num_horizontal) # (total_num_x_coords,)\n dx = dx / (num_horizontal - 1).repeat_interleave(num_horizontal)\n dy = torch.arange(num_half_vertical, device=device) / (num_half_vertical - 1)\n dy = torch.cat((torch.flip(-dy, dims=[0]), dy))\n\n # Meshgrid\n total_num_coords = total_num_x_coords * dy.shape[0]\n coords_x = dx.repeat_interleave(torch.ones_like(dx, dtype=torch.long) * dy.shape[0]) # (total_num_coords,)\n coords_y = torch.tile(dy, (total_num_x_coords,)) # (total_num_coords,)\n coords = torch.stack((coords_x, coords_y), dim=1)\n\n interpolated_coordinates = \\\n cur_dir.repeat_interleave(num_coordinates_per_edge, dim=0) * coords_x[:, None] + \\\n edge_up.repeat_interleave(num_coordinates_per_edge, dim=0) * coords_y[:, None] + \\\n v_edge_points[:, 0].repeat_interleave(num_coordinates_per_edge, dim=0)\n\n return num_coordinates_per_edge, interpolated_coordinates\n\n def sample_edge(self, num_per_edge_m, cur_dir, start_point, num_max_sample=2000):\n times = [0 for _ in range(10)]\n cur_time = time.time()\n length = torch.linalg.norm(cur_dir + 1e-6, dim=1)\n num_edge_points = torch.clamp((length * num_per_edge_m).to(torch.long), 1, 2000)\n num_edge_points_ = num_edge_points.roll(1)\n num_edge_points_[0] = 0\n times[1] += time.time() - cur_time\n cur_time = time.time()\n sampled_edge_points = torch.arange(num_edge_points.sum(), device=cur_dir.device) - num_edge_points_.cumsum(\n dim=0).repeat_interleave(num_edge_points)\n times[2] += time.time() - cur_time\n cur_time = time.time()\n sampled_edge_points = sampled_edge_points / ((num_edge_points - 1 + 1e-8).repeat_interleave(num_edge_points))\n times[3] += time.time() - cur_time\n cur_time = time.time()\n sampled_edge_points = cur_dir.repeat_interleave(num_edge_points, dim=0) * sampled_edge_points[:, None] \\\n + start_point.repeat_interleave(num_edge_points, dim=0)\n times[4] += time.time() - cur_time\n cur_time = time.time()\n return num_edge_points, sampled_edge_points\n\n def compute_similarity_wrapper(self, start_points, end_points,\n imgs, transformations, intrinsic):\n times = [0 for _ in range(10)]\n cur_time = time.time()\n num_src_imgs = imgs.shape[0] - 1\n points_c = torch.stack([start_points, end_points], dim=1).reshape(-1, 3)\n times[0] += time.time() - cur_time\n cur_time = time.time()\n\n edge_points = (intrinsic @ points_c.T).T\n edge_points = edge_points[:, :2] / (edge_points[:, 2:3] + 1e-6)\n 
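# A standalone sketch of the square-root trick used in sample_triangles
# above: with u, v ~ U(0,1), the point
#   p = (1 - sqrt(u)) * A + sqrt(u) * (1 - v) * B + sqrt(u) * v * C
# is uniformly distributed over triangle ABC (the sqrt corrects the area bias).
import torch

u = torch.rand(1000, 1)
v = torch.rand(1000, 1)
A = torch.tensor([0.0, 0.0])
B = torch.tensor([1.0, 0.0])
C = torch.tensor([0.0, 1.0])
t = torch.sqrt(u)
p = (1 - t) * A + t * (1 - v) * B + t * v * C
assert (p.sum(dim=1) <= 1.0 + 1e-5).all()  # every sample lies inside ABC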
edge_points = edge_points.reshape(-1, 2, 2)\n\n # sample step=0.01\n num_horizontal = torch.clamp((torch.linalg.norm(end_points - start_points, dim=-1) / 0.01).to(torch.long),\n 2, 1000)\n\n num_per_edge1, points_2d1 = self.sample_points_2d(edge_points, num_horizontal)\n\n valid_mask1 = torch.logical_and(points_2d1 > 0, points_2d1 < 1)\n valid_mask1 = torch.logical_and(valid_mask1[:, 0], valid_mask1[:, 1])\n points_2d1 = torch.clamp(points_2d1, 0, 0.999999)\n\n edge_points = (transformations @ to_homogeneous_tensor(points_c).T).transpose(1, 2)\n edge_points = edge_points[:, :, :2] / (edge_points[:, :, 2:3] + 1e-6)\n edge_points = edge_points.reshape(num_src_imgs, -1, 2, 2)\n\n num_per_edge2, points_2d2 = self.sample_points_2d(edge_points.reshape(-1, 2, 2),\n num_horizontal.tile(num_src_imgs))\n num_per_edge2 = num_per_edge2.reshape(num_src_imgs, -1)\n points_2d2 = points_2d2.reshape(num_src_imgs, -1, 2)\n\n valid_mask2 = torch.logical_and(points_2d2 > 0, points_2d2 < 1)\n valid_mask2 = torch.logical_and(valid_mask2[:, :, 0], valid_mask2[:, :, 1])\n points_2d2 = torch.clamp(points_2d2, 0, 0.999999)\n\n # 4. Sample pixel color\n sample_imgs1 = sample_img(imgs[0:1, None, :, :], points_2d1[None, :, :])[0]\n sample_imgs2 = sample_img(imgs[1:, None], points_2d2)\n times[1] += time.time() - cur_time\n cur_time = time.time()\n\n similarity_loss = loss2(sample_imgs1, sample_imgs2, num_per_edge1)\n similarity_mask = torch.logical_and(valid_mask1[None, :].tile([valid_mask2.shape[0], 1]), valid_mask2)\n similarity_mask = scatter_min(similarity_mask.to(torch.long), torch.arange(\n num_per_edge1.shape[0], device=similarity_loss.device).repeat_interleave(num_per_edge1), dim=1)[0]\n times[3] += time.time() - cur_time\n cur_time = time.time()\n\n is_log = False\n if is_log:\n start_length = 0\n for idx, length in enumerate(num_per_edge1):\n img1 = (sample_imgs1[start_length:start_length + length].reshape(-1,\n 20).detach().cpu().numpy() * 255).astype(\n np.uint8).T\n img2 = (sample_imgs2[start_length:start_length + length].reshape(-1,\n 20).detach().cpu().numpy() * 255).astype(\n np.uint8).T\n cv2.imwrite(os.path.join(self.log_root, \"{}.png\".format(idx)),\n np.concatenate((img1, np.zeros_like(img1[0:1, :]), img2), axis=0))\n start_length += length\n\n return similarity_loss, similarity_mask.to(torch.bool), [points_2d1, points_2d2]\n\n #\n # start_rays: (B, E, 3)\n # end_points_c: (B, E, 3)\n # v_new_distances: (B, S)\n # valid_flags: (B, S)\n #\n def random_search(self, start_rays, end_points_c, v_new_distances,\n imgs, transformations, intrinsic, valid_flags, num_max_edges_per_vertice):\n batch_size = 10\n num_points = v_new_distances.shape[0]\n num_sampled = v_new_distances.shape[1] # Sample from normal distribution + 1\n num_imgs = imgs.shape[0] - 1\n\n repeated_start_points_c = start_rays[:, :, None].tile([1, 1, num_sampled, 1]) \\\n * v_new_distances[:, None, :, None] * self.distance_normalizer\n repeated_end_points_c = end_points_c[:, :, None].tile([1, 1, num_sampled, 1])\n\n losses = []\n masks = []\n for id_batch in range(num_sampled // batch_size + 1):\n id_batch_start = min(num_sampled, id_batch * batch_size)\n id_batch_end = min(num_sampled, (id_batch + 1) * batch_size)\n if id_batch_start >= id_batch_end:\n continue\n num_batch = id_batch_end - id_batch_start\n\n similarity_loss, similarity_mask, _ = self.compute_similarity_wrapper(\n # The layout is like p0_s0, p0_s1, p0_s2, ..., p1_s0, p1_s1\n repeated_start_points_c[:, :, id_batch_start:id_batch_end].reshape(-1, 3),\n 
repeated_end_points_c[:, :, id_batch_start:id_batch_end].reshape(-1, 3),\n imgs, transformations, intrinsic\n )\n losses.append(similarity_loss.reshape(num_imgs, num_points, -1, num_batch))\n masks.append(similarity_mask.reshape(num_imgs, num_points, -1, num_batch))\n\n similarity_loss_ = torch.cat(losses, dim=3)\n similarity_mask_ = torch.cat(masks, dim=3)\n similarity_loss_[~similarity_mask_] = torch.inf\n similarity_loss = similarity_loss_.permute(1, 2, 3, 0)\n\n # Some vertices are along the border, discard them\n similarity_loss[~valid_flags] = torch.inf\n\n is_single_img = False\n if is_single_img:\n similarity_loss_avg = similarity_loss[:, :, :, 0].mean(dim=1)\n else:\n inf_mask = torch.isinf(similarity_loss)\n # Set inf to a large value in order to calculate mean\n similarity_loss[inf_mask] = 5.\n similarity_loss_avg = torch.mean(similarity_loss, dim=3).mean(dim=1)\n\n id_best = similarity_loss_avg.argmin(dim=1)\n id_best[similarity_loss_avg[\n torch.arange(similarity_loss_avg.shape[0], device=similarity_loss.device), id_best] == 5] = 0\n return id_best\n\n def forward(self, idxs, v_id_epoch, is_log):\n # 0: Unpack data\n v_id_epoch += 1\n # (1,)\n id_cur_imgs = idxs[0]\n # (B, E, 4)\n batched_ids = idxs[1]\n # (N, 4, 4)\n transformations = idxs[2]\n # (3, 3)\n intrinsic = idxs[3]\n # (N+1, h, w)\n imgs = idxs[4]\n # (B, E * 2, 3)\n ray_c = idxs[5]\n # (B, E * 2)\n valid_flags = idxs[6]\n batch_size = batched_ids.shape[0]\n num_vertices = batch_size\n num_max_edges_per_vertice = batched_ids.shape[1]\n device = id_cur_imgs.device\n times = [0 for _ in range(10)]\n cur_time = time.time()\n\n # (B * E)\n id_start_point = batched_ids[:, :, 0].reshape(-1)\n # (B, E)\n id_end_point = batched_ids[:, :, 1]\n # (B, E, 3)\n start_ray_c = ray_c[:, ::2]\n # (B, E, 3)\n end_ray_c = ray_c[:, 1::2]\n # (B,)\n vertices_distances = self.distances[id_cur_imgs][batched_ids[:, 0, 0]]\n # (B * E,)\n end_point_distances = self.distances[id_cur_imgs][id_end_point]\n # (B * E,)\n end_points_c = end_ray_c * end_point_distances[:, :, None] * self.distance_normalizer\n\n times[1] += time.time() - cur_time\n cur_time = time.time()\n\n # Random search\n if self.training:\n with torch.no_grad():\n num_sample = 100\n scale_factor = 0.16\n # (B * S,)\n new_distance = -torch.ones((num_vertices * num_sample,), device=device, dtype=torch.float32)\n sample_distance_mask = torch.logical_and(new_distance > 0, new_distance < 1)\n # (B * S)\n repeated_vertices_distances = vertices_distances.repeat_interleave(num_sample)\n while not torch.all(sample_distance_mask):\n t_ = new_distance[~sample_distance_mask]\n a = repeated_vertices_distances[~sample_distance_mask] + \\\n scale_factor * torch.distributions.utils._standard_normal(\n t_.shape[0],\n device=device,\n dtype=t_.dtype)\n new_distance[~sample_distance_mask] = a\n sample_distance_mask = torch.logical_and(new_distance > 0, new_distance < 1)\n new_distance = new_distance.reshape(-1, num_sample)\n # (B, (S + 1))\n new_distance = torch.cat((vertices_distances[:, None], new_distance), dim=1)\n id_best_distance = self.random_search(\n start_ray_c, end_points_c, new_distance,\n imgs, transformations, intrinsic, valid_flags, num_max_edges_per_vertice\n )\n self.distances[id_cur_imgs][batched_ids[:, 0, 0]] = new_distance[\n torch.arange(new_distance.shape[0], dtype=torch.long, device=new_distance.device),\n id_best_distance]\n times[3] += time.time() - cur_time\n cur_time = time.time()\n\n start_points_c = start_ray_c * self.distances[id_cur_imgs][batched_ids[:, 0, 0]][:, 
None,\n None] * self.distance_normalizer\n similarity_loss, similarity_mask, [points_2d1, points_2d2] = self.compute_similarity_wrapper(\n start_points_c.reshape(-1, 3),\n end_points_c.reshape(-1, 3),\n imgs, transformations, intrinsic\n )\n similarity_loss[~similarity_mask] = 0\n is_single_img = True\n if is_single_img:\n similarity_loss = similarity_loss[0]\n else:\n raise\n similarity_loss[~valid_flags.reshape(-1)] = 0\n times[4] += time.time() - cur_time\n cur_time = time.time()\n if is_log:\n with torch.no_grad():\n rgb1 = cv2.cvtColor((imgs[0].cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)\n shape1 = rgb1.shape[:2][::-1]\n\n start_points_c = start_points_c.reshape(-1, 3)\n end_points_c = end_points_c.reshape(-1, 3)\n start_points_c = start_points_c[(start_points_c != 0).all(axis=1)]\n end_points_c = end_points_c[(end_points_c != 0).all(axis=1)]\n start_points_2d1 = (intrinsic @ start_points_c.T).T\n start_points_2d1 = (start_points_2d1[:, :2] / start_points_2d1[:, 2:3]).cpu().numpy()\n start_points_2d1 = (np.clip(start_points_2d1, 0, 0.99999) * shape1).astype(int)\n end_points_2d1 = (intrinsic @ end_points_c.T).T\n end_points_2d1 = (end_points_2d1[:, :2] / end_points_2d1[:, 2:3]).cpu().numpy()\n end_points_2d1 = (np.clip(end_points_2d1, 0, 0.99999) * shape1).astype(int)\n\n line_img1 = rgb1.copy()\n\n line_thickness = 1\n point_thickness = 2\n point_radius = 1\n\n for id_ver, _ in enumerate(end_points_2d1):\n cv2.line(line_img1, start_points_2d1[id_ver], end_points_2d1[id_ver], (0, 0, 255),\n thickness=line_thickness)\n for id_ver, _ in enumerate(end_points_2d1):\n cv2.circle(line_img1, start_points_2d1[id_ver], radius=point_radius,\n color=(0, 255, 255), thickness=point_thickness)\n cv2.circle(line_img1, end_points_2d1[id_ver], radius=point_radius,\n color=(0, 255, 255), thickness=point_thickness)\n\n line_imgs2 = []\n for i_img in range(imgs[1:].shape[0]):\n rgb2 = cv2.cvtColor((imgs[1 + i_img].cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)\n line_img2 = rgb2.copy()\n shape2 = rgb2.shape[:2][::-1]\n start_points_2d2 = (transformations[i_img] @ to_homogeneous_tensor(start_points_c).T).T\n start_points_2d2 = (start_points_2d2[:, :2] / start_points_2d2[:, 2:3]).cpu().numpy()\n start_points_2d2 = (np.clip(start_points_2d2, 0, 0.99999) * shape2).astype(int)\n end_points_2d2 = (transformations[i_img] @ to_homogeneous_tensor(end_points_c).T).T\n end_points_2d2 = (end_points_2d2[:, :2] / end_points_2d2[:, 2:3]).cpu().numpy()\n end_points_2d2 = (np.clip(end_points_2d2, 0, 0.99999) * shape2).astype(int)\n\n for id_ver, _ in enumerate(end_points_2d2):\n cv2.line(line_img2, start_points_2d2[id_ver], end_points_2d2[id_ver], (0, 0, 255),\n thickness=line_thickness)\n for id_ver, _ in enumerate(end_points_2d2):\n cv2.circle(line_img2, start_points_2d2[id_ver], radius=point_radius,\n color=(0, 255, 255), thickness=point_thickness)\n cv2.circle(line_img2, end_points_2d2[id_ver], radius=point_radius,\n color=(0, 255, 255), thickness=point_thickness)\n line_imgs2.append(line_img2)\n\n big_imgs = np.concatenate(\n (np.concatenate(\n (line_img1, line_imgs2[0], line_imgs2[1], line_imgs2[2]), axis=1),\n np.concatenate(\n (line_imgs2[3], line_imgs2[4], line_imgs2[5], line_imgs2[6]), axis=1),\n np.concatenate(\n (line_imgs2[7], line_imgs2[8], line_imgs2[9], line_imgs2[9]), axis=1),\n )\n , axis=0)\n\n cv2.imwrite(os.path.join(self.log_root, \"2d_{:05d}.jpg\".format(v_id_epoch)),\n big_imgs)\n\n polygon_points_2d_1 = points_2d1.detach().cpu().numpy()\n polygon_points_2d_2 = 
points_2d2[0].detach().cpu().numpy()\n\n line_img1 = rgb1.copy()\n line_img2 = cv2.cvtColor((imgs[1].cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)\n\n roi_coor_2d1_numpy = np.clip(polygon_points_2d_1, 0, 0.99999)\n viz_coords = (roi_coor_2d1_numpy * shape1).astype(np.int32)\n line_img1[viz_coords[:, 1], viz_coords[:, 0]] = (0, 0, 255)\n\n # Image 2\n roi_coor_2d2_numpy = np.clip(polygon_points_2d_2, 0, 0.99999)\n viz_coords = (roi_coor_2d2_numpy * shape2).astype(np.int32)\n line_img2[viz_coords[:, 1], viz_coords[:, 0]] = (0, 0, 255)\n cv2.imwrite(os.path.join(self.log_root, \"3d_{:05d}.jpg\".format(v_id_epoch)),\n np.concatenate((line_img1, line_img2), axis=1))\n\n return torch.mean(similarity_loss), [None, None, None]\n\n def forwardb(self, id_points, v_id_epoch, is_log):\n # 0: Unpack data\n v_id_epoch += 1\n\n similarity_losses = []\n for idx in id_points:\n id_start_point = self.id_point_to_id_up_and_face[idx][0, 0]\n id_end_point = self.id_point_to_id_up_and_face[idx][:, 1]\n\n id_up = self.id_point_to_id_up_and_face[idx][:, 2:4]\n id_face = self.id_point_to_id_up_and_face[idx][:, 4:6]\n\n start_ray = self.ray_c[id_start_point].repeat(id_end_point.shape[0]).reshape(id_end_point.shape[0], 3)\n end_ray = self.ray_c[id_end_point]\n\n start_points = self.seg_distance[id_start_point] * self.seg_distance_normalizer * start_ray\n end_points = self.seg_distance[id_end_point][:, None] * self.seg_distance_normalizer * end_ray\n v_up = self.get_up_vector2(id_up, start_points, end_points)\n\n centroid_ray1 = self.center_ray_c[id_face[:, 0]]\n centroid_ray2 = self.center_ray_c[id_face[:, 1]]\n\n mask1 = torch.tensor(id_face[:, 0] != -1, device=centroid_ray1.device)\n mask2 = torch.tensor(id_face[:, 1] != -1, device=centroid_ray1.device)\n # Random search\n if self.training:\n with torch.no_grad():\n self.seg_distance.data[id_start_point] = self.random_search(\n start_ray, end_ray, id_start_point, id_end_point, v_up, centroid_ray1, centroid_ray2, mask1,\n mask2,\n self.scale[id_start_point]\n )\n similarity_loss, batched_mask, similarity_mask = self.compute_similarity_wrapper(\n start_ray, end_ray, self.seg_distance[id_start_point], self.seg_distance[id_end_point],\n v_up, centroid_ray1, centroid_ray2, mask1, mask2\n )\n # similarity_loss[~batched_mask] = 0\n # similarity_loss[~similarity_mask] = 0\n similarity_losses.append(similarity_loss.mean())\n\n return torch.mean(torch.stack(similarity_losses)), [None, None, None]\n\n if is_log and self.id_viz_edge in id_point:\n with torch.no_grad():\n line_thickness = 1\n point_thickness = 2\n point_radius = 1\n\n polygon_points_2d_1 = (self.intrinsic1 @ coords_per_edge.T).T\n polygon_points_2d_1 = (polygon_points_2d_1[:, :2] / polygon_points_2d_1[:, 2:3]).detach().cpu().numpy()\n polygon_points_2d_2 = (self.transformation @ to_homogeneous_tensor(coords_per_edge).T).T\n polygon_points_2d_2 = (polygon_points_2d_2[:, :2] / polygon_points_2d_2[:, 2:3]).detach().cpu().numpy()\n\n line_img1 = self.rgb1.copy()\n line_img1 = cv2.cvtColor(line_img1, cv2.COLOR_GRAY2BGR)\n shape = line_img1.shape[:2][::-1]\n\n roi_coor_2d1_numpy = np.clip(polygon_points_2d_1, 0, 0.99999)\n viz_coords = (roi_coor_2d1_numpy * shape).astype(np.int32)\n line_img1[viz_coords[:, 1], viz_coords[:, 0]] = (0, 0, 255)\n\n polygon_2d1 = (self.intrinsic1 @ edge_points[0].T).T\n polygon_2d1 = polygon_2d1[:, :2] / polygon_2d1[:, 2:3]\n polygon_2d1 = (polygon_2d1.detach().cpu().numpy() * shape).astype(np.int32)\n cv2.line(line_img1, polygon_2d1[0], polygon_2d1[1],\n color=(0, 255, 0), 
thickness=line_thickness)\n cv2.circle(line_img1, polygon_2d1[0], radius=point_radius, color=(0, 255, 255),\n thickness=point_thickness)\n cv2.circle(line_img1, polygon_2d1[1], radius=point_radius, color=(0, 255, 255),\n thickness=point_thickness)\n\n # Image 2\n line_img2 = self.rgb2.copy()\n line_img2 = cv2.cvtColor(line_img2, cv2.COLOR_GRAY2BGR)\n shape = line_img2.shape[:2][::-1]\n\n roi_coor_2d2_numpy = np.clip(polygon_points_2d_2, 0, 0.99999)\n viz_coords = (roi_coor_2d2_numpy * shape).astype(np.int32)\n line_img2[viz_coords[:, 1], viz_coords[:, 0]] = (0, 0, 255)\n\n polygon_2d2 = (self.transformation @ to_homogeneous_tensor(edge_points[0]).T).T\n polygon_2d2 = polygon_2d2[:, :2] / polygon_2d2[:, 2:3]\n polygon_2d2 = (polygon_2d2.detach().cpu().numpy() * shape).astype(np.int32)\n cv2.line(line_img2, polygon_2d2[0], polygon_2d2[1],\n color=(0, 255, 0), thickness=line_thickness)\n cv2.circle(line_img2, polygon_2d2[0], radius=point_radius, color=(0, 255, 255),\n thickness=point_thickness)\n cv2.circle(line_img2, polygon_2d2[1], radius=point_radius, color=(0, 255, 255),\n thickness=point_thickness)\n\n cv2.imwrite(os.path.join(self.log_root, \"{:05d}.jpg\".format(v_id_epoch)),\n np.concatenate((line_img1, line_img2), axis=0))\n return total_loss, [None, None, None]\n\n def debug_save(self, v_index):\n id_epoch = v_index + 1\n seg_distance = self.seg_distance * self.seg_distance_normalizer\n point_pos_c = self.ray_c * seg_distance[:, None]\n\n id_points = torch.from_numpy(np.concatenate([\n self.id_point_to_id_up_and_face[idx] for idx in np.arange(len(self.id_point_to_id_up_and_face))], axis=0),\n ).to(device=seg_distance.device).to(torch.long)\n\n id_start_point = id_points[:, 0]\n id_end_point = id_points[:, 1]\n\n id_up = id_points[:, 2:4]\n id_face = id_points[:, 4:6]\n\n start_ray = self.ray_c[id_start_point]\n end_ray = self.ray_c[id_end_point]\n\n start_points = self.seg_distance[id_start_point][:, None] * self.seg_distance_normalizer * start_ray\n end_points = self.seg_distance[id_end_point][:, None] * self.seg_distance_normalizer * end_ray\n v_up = self.get_up_vector2(id_up, start_points, end_points)\n\n centroid_ray1 = self.center_ray_c[id_face[:, 0]]\n centroid_ray2 = self.center_ray_c[id_face[:, 1]]\n\n mask1 = id_face[:, 0] != -1\n mask2 = id_face[:, 1] != -1\n\n def get_arrow(v_edge_points, v_up_c):\n total_edge_points = v_edge_points\n\n center_point_c = (total_edge_points[:, 0] + total_edge_points[:, 1]) / 2\n up_point = center_point_c + v_up_c\n\n center_point_w = ((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(center_point_c).T).T)[:,\n :3].cpu().numpy()\n up_vector_w = normalize_vector(((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(up_point).T).T)[:,\n :3].cpu().numpy() - center_point_w)\n\n arrows = o3d.geometry.TriangleMesh()\n for i in range(center_point_w.shape[0]):\n arrow = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=0.0001, cone_radius=0.00015,\n cylinder_height=0.0005, cone_height=0.0005,\n resolution=3, cylinder_split=1)\n arrow.rotate(caculate_align_mat(up_vector_w[i]), center=(0, 0, 0))\n arrow.translate(center_point_w[i])\n arrows += arrow\n colors = np.zeros_like(np.asarray(arrows.vertices))\n colors[:, 0] = 1\n arrows.vertex_colors = o3d.utility.Vector3dVector(colors)\n return arrows\n\n arrows = get_arrow(torch.stack((start_points, end_points), dim=1), v_up[:, 0])\n o3d.io.write_triangle_mesh(os.path.join(self.log_root, \"total_{}_arrow.obj\".format(id_epoch)), arrows)\n start_point_w = ((torch.inverse(self.extrinsic1) @ 
to_homogeneous_tensor(start_points).T).T)[:, :3] \\\n .cpu().numpy()\n end_point_w = ((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(end_points).T).T)[:, :3] \\\n .cpu().numpy()\n edge_index = np.stack((\n np.arange(start_point_w.shape[0]), np.arange(start_point_w.shape[0]) + start_point_w.shape[0]\n ), axis=1)\n get_line_mesh(os.path.join(self.log_root, \"total_{}_line.obj\".format(id_epoch)),\n np.concatenate((start_point_w, end_point_w), axis=0), edge_index)\n return\n\n def debug_save_(self, v_index):\n id_epoch = v_index + 1\n seg_distance = self.seg_distance * self.seg_distance_normalizer\n point_pos_c = self.ray_c * seg_distance[:, None]\n\n def get_arrow(v_edge_points, v_up_c):\n total_edge_points = v_edge_points\n\n center_point_c = (total_edge_points[:, 0] + total_edge_points[:, 1]) / 2\n up_point = center_point_c + v_up_c\n\n center_point_w = ((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(center_point_c).T).T)[:,\n :3].cpu().numpy()\n up_vector_w = normalize_vector(((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(up_point).T).T)[:,\n :3].cpu().numpy() - center_point_w)\n\n arrows = o3d.geometry.TriangleMesh()\n for i in range(center_point_w.shape[0]):\n arrow = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=0.0001, cone_radius=0.00015,\n cylinder_height=0.0005, cone_height=0.0005,\n resolution=3, cylinder_split=1)\n arrow.rotate(caculate_align_mat(up_vector_w[i]), center=(0, 0, 0))\n arrow.translate(center_point_w[i])\n arrows += arrow\n colors = np.zeros_like(np.asarray(arrows.vertices))\n colors[:, 0] = 1\n arrows.vertex_colors = o3d.utility.Vector3dVector(colors)\n return arrows\n\n id_patch = torch.tensor((self.id_viz_face,), dtype=torch.long, device=point_pos_c.device)\n # Visualize target patch\n edge_points = point_pos_c[self.batched_points_per_patch[id_patch]].reshape(-1, 4, 3)\n up_c = self.get_up_vector2(np.arange(self.id_viz_edge, self.id_viz_edge + edge_points.shape[0]),\n edge_points[:, 0], edge_points[:, 1])\n arrows = get_arrow(edge_points, up_c[:, 0])\n o3d.io.write_triangle_mesh(os.path.join(self.log_root, \"target_{}_arrow.obj\".format(id_epoch)), arrows)\n id_points = np.asarray(self.batched_points_per_patch[id_patch]).reshape(-1, 4)[:, 0]\n start_point_w = ((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(point_pos_c[id_points]).T).T)[:, :3] \\\n .cpu().numpy()\n edge_index = np.stack((\n np.arange(start_point_w.shape[0]), (np.arange(start_point_w.shape[0]) + 1) % start_point_w.shape[0]\n ), axis=1)\n get_line_mesh(os.path.join(self.log_root, \"target_{}_line.obj\".format(id_epoch)), start_point_w, edge_index)\n return 0\n\n # Visualize whole patch\n edge_points = point_pos_c[list(itertools.chain(*self.batched_points_per_patch))].reshape(-1, 4, 3)\n up_c = self.get_up_vector2(np.arange(np.sum([len(item) // 4 for item in self.batched_points_per_patch])),\n edge_points[:, 0], edge_points[:, 1])\n arrows = get_arrow(edge_points, up_c[:, 0])\n o3d.io.write_triangle_mesh(os.path.join(self.log_root, \"total_{}_arrow.obj\".format(id_epoch)), arrows)\n start_point_w = ((torch.inverse(self.extrinsic1) @ to_homogeneous_tensor(point_pos_c).T).T)[:, :3] \\\n .cpu().numpy()\n edge_index = np.asarray(list(self.graph1.edges()))\n get_line_mesh(os.path.join(self.log_root, \"total_{}_line.obj\".format(id_epoch)), start_point_w, edge_index)\n pass\n\n return 0\n\n def len(self):\n return len(self.graph1.graph[\"faces\"])\n\n\nclass Phase4(pl.LightningModule):\n def __init__(self, hparams, v_data):\n super(Phase4, self).__init__()\n 
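debug_save above keeps re-deriving world coordinates by appending a homogeneous 1, multiplying by the inverse extrinsic, and dropping the last component. to_homogeneous_tensor is defined elsewhere in this repository; a plausible standalone equivalent of the whole round trip (a sketch, not the repo's own helper):

import torch

def camera_to_world(points_c, extrinsic):
    # points_c: (N, 3); extrinsic: (4, 4) world-to-camera matrix
    homo = torch.cat((points_c, torch.ones_like(points_c[:, :1])), dim=1)  # (N, 4)
    world = (torch.inverse(extrinsic) @ homo.T).T                          # camera -> world
    return world[:, :3] / world[:, 3:4]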
self.hydra_conf = hparams\n self.learning_rate = self.hydra_conf[\"trainer\"][\"learning_rate\"]\n self.batch_size = self.hydra_conf[\"trainer\"][\"batch_size\"]\n self.num_worker = self.hydra_conf[\"trainer\"][\"num_worker\"]\n self.save_hyperparameters(hparams)\n\n if not os.path.exists(self.hydra_conf[\"trainer\"][\"output\"]):\n os.makedirs(self.hydra_conf[\"trainer\"][\"output\"])\n\n self.data = v_data\n self.model = LModel20(self.data,\n self.hydra_conf[\"model\"][\"regress_normal\"],\n self.hydra_conf[\"dataset\"][\"id_viz_face\"],\n self.hydra_conf[\"trainer\"][\"output\"]\n )\n # self.model = LModel31(self.data, self.hydra_conf[\"trainer\"][\"loss_weight\"], self.hydra_conf[\"trainer\"][\"img_model\"])\n # self.model = LModel12(self.data, self.hydra_conf[\"trainer\"][\"loss_weight\"], self.hydra_conf[\"trainer\"][\"img_model\"])\n\n def prepare_data(self) -> None:\n graphs = self.data[1]\n img_database = self.data[0]\n for id_img, img in enumerate(graphs):\n training_vertices = []\n for id_start_node in graphs[id_img].nodes():\n training_vertices.append(np.asarray([(\n id_start_node, id_end_node,\n graphs[id_img][id_start_node][id_end_node][\"valid_flag\"],\n graphs[id_img][id_start_node][id_end_node][\"id_face\"],\n ) for id_end_node in graphs[id_img][id_start_node]], dtype=np.int32))\n # Trim the last 4 vertices. They are boundaries\n training_vertices = training_vertices[:-4]\n graphs[id_img].graph[\"training_vertices\"] = training_vertices\n\n def train_dataloader(self):\n is_one_target = self.hydra_conf[\"dataset\"][\"only_train_target\"]\n id_face = self.hydra_conf[\"dataset\"][\"id_viz_face\"]\n self.train_dataset = Multi_node_single_img_dataset(\n self.data,\n is_one_target,\n id_face,\n \"training\",\n )\n # self.train_dataset = Node_dataset(self.model.id_point_to_id_up_and_face, \"training\")\n # self.train_dataset = Edge_dataset(self.model.batched_points_per_patch, is_one_target, id_edge, \"training\")\n return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True,\n collate_fn=Multi_node_single_img_dataset.collate_fn,\n num_workers=self.hydra_conf[\"trainer\"][\"num_worker\"],\n pin_memory=True,\n persistent_workers=True if self.hydra_conf[\"trainer\"][\"num_worker\"] > 0 else False)\n\n def val_dataloader(self):\n is_one_target = self.hydra_conf[\"dataset\"][\"only_train_target\"]\n id_face = self.hydra_conf[\"dataset\"][\"id_viz_face\"]\n self.valid_dataset = Multi_node_single_img_dataset(\n self.data,\n is_one_target,\n id_face,\n \"validation\"\n )\n return DataLoader(self.valid_dataset, batch_size=64,\n collate_fn=Multi_node_single_img_dataset.collate_fn,\n num_workers=0)\n\n def configure_optimizers(self):\n # grouped_parameters = [\n # {\"params\": [self.model.seg_distance], 'lr': self.learning_rate},\n # {\"params\": [self.model.v_up], 'lr': 1e-2},\n # ]\n\n optimizer = SGD(filter(lambda p: p.requires_grad, self.parameters()), lr=self.learning_rate, )\n # optimizer = SGD(grouped_parameters, lr=self.learning_rate, )\n\n return {\n 'optimizer': optimizer,\n 'monitor': 'Validation_Loss'\n }\n\n def training_step(self, batch, batch_idx):\n total_loss, losses = self.model(batch, self.current_epoch, False)\n\n self.log(\"Training_Loss\", total_loss.detach(), prog_bar=True, logger=True, on_step=True, on_epoch=True,\n sync_dist=True,\n batch_size=1)\n return total_loss\n\n def validation_step(self, batch, batch_idx):\n total_loss, losses = self.model(batch, self.current_epoch if not self.trainer.sanity_checking else -1, True)\n\n 
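Phase4 above follows the standard PyTorch Lightning layout: the LightningModule owns the model, training_step returns the loss it logs, and configure_optimizers wires up SGD plus a monitored metric. A stripped-down skeleton of the same structure (the linear layer and data are placeholders):

import torch
import pytorch_lightning as pl

class MiniModule(pl.LightningModule):
    def __init__(self, lr=1e-3):
        super().__init__()
        self.net = torch.nn.Linear(8, 1)
        self.lr = lr

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.net(x), y)
        self.log("Training_Loss", loss, prog_bar=True)  # same logging call as above
        return loss

    def configure_optimizers(self):
        # returning a dict mirrors the {'optimizer': ..., 'monitor': ...} shape above
        return {"optimizer": torch.optim.SGD(self.parameters(), lr=self.lr),
                "monitor": "Validation_Loss"}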
self.log(\"Validation_Loss\", total_loss.detach(), prog_bar=True, logger=True, on_step=True, on_epoch=True,\n sync_dist=True,\n batch_size=1)\n\n return total_loss\n\n def validation_epoch_end(self, result) -> None:\n if self.global_rank != 0:\n return\n\n # self.model.debug_save(self.current_epoch if not self.trainer.sanity_checking else -1)\n\n if self.trainer.sanity_checking:\n return\n\n # def on_after_backward(self) -> None:\n # \"\"\"\n # Skipping updates in case of unstable gradients\n # https://github.com/Lightning-AI/lightning/issues/4956\n # \"\"\"\n # valid_gradients = True\n # for name, param in self.named_parameters():\n # if param.grad is not None:\n # valid_gradients = not (torch.isnan(param.grad).any() or torch.isinf(param.grad).any())\n # if not valid_gradients:\n # break\n # if not valid_gradients:\n # print(f'detected inf or nan values in gradients. not updating model parameters')\n # self.zero_grad()\n\n\n@ray.remote\ndef read_graph(v_filename, img_size):\n data = [item for item in open(v_filename).readlines()]\n vertices = [item.strip().split(\" \")[1:-1] for item in data if item[0] == \"v\"]\n vertices = np.asarray(vertices).astype(np.float32) / img_size\n faces = [item.strip().split(\" \")[1:] for item in data if item[0] == \"f\"]\n graph = nx.Graph()\n graph.add_nodes_from([(idx, {\"pos_2d\": item}) for idx, item in enumerate(vertices)])\n new_faces = [] # equal faces - 1 because of the obj format\n\n for id_face, id_edge_per_face in enumerate(faces):\n id_edge_per_face = (np.asarray(id_edge_per_face).astype(np.int32) - 1).tolist()\n new_faces.append(id_edge_per_face)\n id_edge_per_face = [(id_edge_per_face[idx], id_edge_per_face[idx + 1]) for idx in\n range(len(id_edge_per_face) - 1)] + [(id_edge_per_face[-1], id_edge_per_face[0])]\n graph.add_edges_from(id_edge_per_face)\n\n graph.graph[\"faces\"] = new_faces\n\n # Mark boundary nodes, lines and faces\n for node in graph.nodes():\n graph.nodes[node][\"valid_flag\"] = graph.nodes[node][\"pos_2d\"][0] != 0 and \\\n graph.nodes[node][\"pos_2d\"][1] != 0 and \\\n graph.nodes[node][\"pos_2d\"][0] != 1 and \\\n graph.nodes[node][\"pos_2d\"][1] != 1\n for node1, node2 in graph.edges():\n graph.edges[(node1, node2)][\"valid_flag\"] = graph.nodes[node1][\"valid_flag\"] and \\\n graph.nodes[node1][\"valid_flag\"]\n face_flags = []\n for id_face, face in enumerate(graph.graph[\"faces\"]):\n face_flags.append(min([graph.nodes[point][\"valid_flag\"] for point in face]))\n for id_point in range(len(face)):\n id_start = id_point\n id_end = (id_start + 1) % len(face)\n graph[face[id_start]][face[id_end]][\"id_face\"] = id_face\n\n graph.graph[\"face_flags\"] = face_flags\n return graph\n\n\ndef prepare_dataset_and_model(v_colmap_dir, v_viz_face, v_bounds):\n print(\"Start to prepare dataset\")\n print(\"1. Read imgs\")\n\n img_cache_name = \"output/img_field_test/img_cache.npy\"\n if os.path.exists(img_cache_name):\n print(\"Found cache \", img_cache_name)\n img_database, points_3d = np.load(img_cache_name, allow_pickle=True)\n else:\n print(\"Dosen't find cache, read raw img data\")\n bound_min = np.array((v_bounds[0], v_bounds[1], v_bounds[2]))\n bound_max = np.array((v_bounds[3], v_bounds[4], v_bounds[5]))\n img_database, points_3d = read_dataset(v_colmap_dir,\n [bound_min,\n bound_max]\n )\n np.save(img_cache_name[:-4], np.asarray([img_database, points_3d], dtype=object))\n print(\"Save cache to \", img_cache_name)\n\n graph_cache_name = \"output/img_field_test/graph_cache.npy\"\n print(\"2. 
Build graph\")\n    if os.path.exists(graph_cache_name):\n        graphs = np.load(graph_cache_name, allow_pickle=True)\n    else:\n        ray.init()\n        tasks = [read_graph.remote(\n            os.path.join(v_colmap_dir, \"wireframe/{}.obj\".format(img_database[i_img].img_name)),\n            img_database[i_img].img_size\n        ) for i_img in range(len(img_database))]\n        graphs = ray.get(tasks)\n        print(\"Read {} graphs\".format(len(graphs)))\n        graphs = np.asarray(graphs, dtype=object)\n        np.save(graph_cache_name, graphs, allow_pickle=True)\n\n    points_cache_name = \"output/img_field_test/points_cache.npy\"\n    if os.path.exists(points_cache_name):\n        points_from_sfm = np.load(points_cache_name)\n    else:\n        preserved_points = []\n        for point in tqdm(points_3d):\n            for track in point.tracks:\n                if track[0] in [1, 2]:\n                    preserved_points.append(point)\n        if len(preserved_points) == 0:\n            points_from_sfm = np.array([[0.5, 0.5, 0.5]], dtype=np.float32)\n        else:\n            points_from_sfm = np.stack([item.pos for item in preserved_points])\n        np.save(points_cache_name, points_from_sfm)\n\n    print(\"Start to calculate initial wireframe for each image\")\n\n    def project_points(v_projection_matrix, points_3d_pos):\n        projected_points = np.transpose(v_projection_matrix @ np.transpose(np.insert(points_3d_pos, 3, 1, axis=1)))\n        projected_points = projected_points[:, :2] / projected_points[:, 2:3]\n        projected_points_mask = np.logical_and(projected_points[:, 0] > 0, projected_points[:, 1] > 0)\n        projected_points_mask = 
np.logical_and(projected_points_mask, projected_points[:, 0] < 1)\n projected_points_mask = np.logical_and(projected_points_mask, projected_points[:, 1] < 1)\n points_3d_pos = points_3d_pos[projected_points_mask]\n projected_points = projected_points[projected_points_mask]\n return points_3d_pos, projected_points\n\n def draw_initial(img, v_graph):\n # cv2.namedWindow(\"1\", cv2.WINDOW_NORMAL)\n # cv2.resizeWindow(\"1\", 1600, 900)\n # cv2.moveWindow(\"1\", 5, 5)\n v_rgb = cv2.imread(img.img_path, cv2.IMREAD_UNCHANGED)\n point_img = v_rgb.copy()\n for point in points_from_sfm_2d:\n cv2.circle(point_img, (point * img.img_size).astype(np.int32), 2, (0, 0, 255), thickness=4)\n print(\"Draw lines on img1\")\n line_img1 = v_rgb.copy()\n\n # Draw first img\n for idx, face in enumerate(v_graph.graph[\"faces\"]):\n # print(idx)\n vertices = [v_graph.nodes[id_node][\"pos_2d\"] for id_node in face]\n cv2.polylines(line_img1, [(np.asarray(vertices) * img.img_size).astype(np.int32)], True, (0, 0, 255),\n thickness=1)\n # cv2.imshow(\"1\", line_img1)\n # cv2.waitKey()\n\n # Draw target patch\n for id_patch in v_viz_face:\n vertices_t = [v_graph.nodes[id_node][\"pos_2d\"] for id_node in v_graph.graph[\"faces\"][id_patch]]\n cv2.polylines(line_img1, [(np.asarray(vertices_t) * img.img_size).astype(np.int32)], True, (0, 255, 0),\n thickness=1)\n for item in vertices_t:\n cv2.circle(line_img1, (item * img.img_size).astype(np.int32), 1, (0, 255, 255), 2)\n viz_img = np.concatenate((point_img, line_img1), axis=0)\n cv2.imwrite(\"output/img_field_test/input_img.jpg\", viz_img)\n\n def compute_initial(v_graph, v_points_3d, v_points_2d, v_extrinsic, v_intrinsic):\n distance_threshold = 5 # 5m; not used\n\n v_graph.graph[\"face_center\"] = np.zeros((len(v_graph.graph[\"faces\"]), 2), dtype=np.float32)\n v_graph.graph[\"ray_c\"] = np.zeros((len(v_graph.graph[\"faces\"]), 3), dtype=np.float32)\n for id_face, id_edge_per_face in enumerate(v_graph.graph[\"faces\"]):\n # Convex assumption\n center_point = np.stack(\n [v_graph.nodes[id_vertex][\"pos_2d\"] for id_vertex in id_edge_per_face], axis=0).mean(axis=0)\n v_graph.graph[\"face_center\"][id_face] = center_point\n\n # Query points: (M, 2)\n # points from sfm: (N, 2)\n kd_tree = faiss.IndexFlatL2(2)\n kd_tree.add(v_points_2d.astype(np.float32))\n vertices_2d = np.asarray([v_graph.nodes[id_node][\"pos_2d\"] for id_node in v_graph.nodes()]) # (M, 2)\n centroids_2d = v_graph.graph[\"face_center\"]\n query_points = np.concatenate([vertices_2d, centroids_2d], axis=0)\n shortest_distance, index_shortest_distance = kd_tree.search(query_points, 32) # (M, K)\n\n points_from_sfm_camera = (v_extrinsic @ np.insert(v_points_3d, 3, 1, axis=1).T).T[:, :3] # (N, 3)\n\n # Select the point which is nearest to the actual ray for each endpoints\n # 1. 
Construct the ray\n # (M, 2); points in camera coordinates\n ray_c = (np.linalg.inv(v_intrinsic) @ np.insert(query_points, 2, 1, axis=1).T).T\n ray_c = ray_c / np.linalg.norm(ray_c + 1e-6, axis=1, keepdims=True) # Normalize the points\n nearest_candidates = points_from_sfm_camera[index_shortest_distance] # (M, K, 3)\n # Compute the shortest distance from the candidate point to the ray for each query point\n # (M, K, 1): K projected distance of the candidate point along each ray\n distance_of_projection = nearest_candidates @ ray_c[:, :, np.newaxis]\n # (M, K, 3): K projected points along the ray\n projected_points_on_ray = distance_of_projection * ray_c[:, np.newaxis, :]\n distance_from_candidate_points_to_ray = np.linalg.norm(\n nearest_candidates - projected_points_on_ray + 1e-6, axis=2) # (M, 1)\n # (M, 1): Index of the best projected points along the ray\n index_best_projected = distance_from_candidate_points_to_ray.argmin(axis=1)\n\n chosen_distances = distance_of_projection[np.arange(projected_points_on_ray.shape[0]), index_best_projected]\n valid_mask = distance_from_candidate_points_to_ray[np.arange(\n projected_points_on_ray.shape[0]), index_best_projected] < distance_threshold # (M, 1)\n # (M, 3): The best projected points along the ray\n initial_points_camera = projected_points_on_ray[\n np.arange(projected_points_on_ray.shape[0]), index_best_projected]\n initial_points_world = (np.linalg.inv(v_extrinsic) @ np.insert(initial_points_camera, 3, 1, axis=1).T).T\n initial_points_world = initial_points_world[:, :3] / initial_points_world[:, 3:4]\n\n for idx, id_node in enumerate(v_graph.nodes):\n v_graph.nodes[id_node][\"pos_world\"] = initial_points_world[idx]\n v_graph.nodes[id_node][\"distance\"] = chosen_distances[idx, 0]\n v_graph.nodes[id_node][\"ray_c\"] = ray_c[idx]\n\n for id_face in range(v_graph.graph[\"face_center\"].shape[0]):\n idx = id_face + len(v_graph.nodes)\n v_graph.graph[\"ray_c\"][id_face] = ray_c[idx]\n\n line_coordinates = []\n for edge in v_graph.edges():\n line_coordinates.append(np.concatenate((initial_points_world[edge[0]], initial_points_world[edge[1]])))\n save_line_cloud(\"output/img_field_test/initial_segments.obj\", np.stack(line_coordinates, axis=0))\n pc = o3d.geometry.PointCloud()\n pc.points = o3d.utility.Vector3dVector(initial_points_world[len(v_graph.nodes):])\n o3d.io.write_point_cloud(\"output/img_field_test/initial_face_centroid.ply\", pc)\n return\n\n for id_img, img in enumerate(img_database):\n points_from_sfm, points_from_sfm_2d = project_points(img.projection, points_from_sfm)\n rgb = cv2.imread(img.img_path, cv2.IMREAD_UNCHANGED)[:, :, :3]\n rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)[:, :, None]\n # draw_initial(rgb, graphs[id_img], img)\n compute_initial(graphs[id_img], points_from_sfm, points_from_sfm_2d, img.extrinsic, img.intrinsic)\n draw_initial(img_database[0], graphs[0])\n\n # Read camera pairs\n camera_pair_txt = open(os.path.join(v_colmap_dir, \"pairs.txt\")).readlines()\n assert (len(img_database) == int(camera_pair_txt[0]))\n camera_pair_txt.pop(0)\n camera_pair_data = [np.asarray(item.strip().split(\" \")[1:], dtype=np.float32).reshape(-1, 2) for item in\n camera_pair_txt[1::2]]\n\n return img_database, graphs, camera_pair_data\n\n\n@hydra.main(config_name=\"phase4_abc.yaml\", config_path=\"../../../configs/neural_recon/\", version_base=\"1.1\")\ndef main(v_cfg: DictConfig):\n seed_everything(0)\n print(OmegaConf.to_yaml(v_cfg))\n data = prepare_dataset_and_model(\n v_cfg[\"dataset\"][\"colmap_dir\"],\n 
v_cfg[\"dataset\"][\"id_viz_face\"],\n v_cfg[\"dataset\"][\"scene_boundary\"],\n )\n\n hydra_cfg = hydra.core.hydra_config.HydraConfig.get()\n log_dir = hydra_cfg['runtime']['output_dir']\n v_cfg[\"trainer\"][\"output\"] = os.path.join(log_dir, v_cfg[\"trainer\"][\"output\"])\n\n model = Phase4(v_cfg, data)\n\n trainer = Trainer(\n accelerator='gpu' if v_cfg[\"trainer\"].gpu != 0 else None,\n # strategy = \"ddp\",\n devices=v_cfg[\"trainer\"].gpu, enable_model_summary=False,\n max_epochs=int(1e8),\n num_sanity_val_steps=2,\n check_val_every_n_epoch=v_cfg[\"trainer\"][\"check_val_every_n_epoch\"],\n default_root_dir=log_dir,\n # precision=16,\n # gradient_clip_val=0.5\n )\n\n if v_cfg[\"trainer\"].resume_from_checkpoint is not None and v_cfg[\"trainer\"].resume_from_checkpoint != \"none\":\n state_dict = torch.load(v_cfg[\"trainer\"].resume_from_checkpoint)[\"state_dict\"]\n model.load_state_dict(state_dict, strict=False)\n\n if v_cfg[\"trainer\"].evaluate:\n trainer.test(model)\n else:\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yilinliu77/python","sub_path":"src/neural_recon/bak/phase4.py","file_name":"phase4.py","file_ext":"py","file_size_in_byte":62880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71544680212","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 31 10:23:56 2019\n\n@author: christiancasey\n\nThis script processes the scraped data from the ISAW New Titles pages\nand creates a new set of files with only book entries grouped by category.\n\"\"\"\n\nimport os\nimport glob\nimport re\nimport pandas\n\n#%% Load categories data files\n\n# Basic categories (index corresponds to category number - 1)\nwith open('categories.dat', 'r') as f:\n\tvCategories = f.read().split('\\n')\n\tf.close()\nnCategories = len(vCategories)\n\n# Messy categories used in HTML and mapping to category number\ndfCategoryMap = pandas.read_csv('category_map.csv')\ndCategoryMap = dict(zip(dfCategoryMap['Category'], dfCategoryMap['Number']))\n\n\n#%% Get a list of unique categories as they appear in the HTML\n## There is no need to do this again, all categories are now loaded from files above\n#vCategoriesMessy = []\n#vFiles = glob.glob('pages/*')\n#vFiles.sort()\n#\n#for strFilename in vFiles:\n#\twith open(strFilename, 'r') as f:\n#\t\tstrPage = f.read()\n#\t\tf.close()\n#\t\t\n#\tstrRegex = re.compile('.*?)\" id=\"(?P.*?)\">')\n#\tvMatches = re.findall(strRegex, strPage)\n#\t\n#\t# Deal with the fact that some files have links without a name attribute\n#\t# Some have only one link with a name attribute\n#\t# Any file with fewer than 4 matches needs to be researched\n#\tif len(vMatches) <= 3:\n#\t\tstrRegex = re.compile('.*?)\">')\n#\t\tvMatches = re.findall(strRegex, strPage)\n#\t\n#\t\n#\tprint('%i - %s' % (len(vMatches), strFilename) )\n#\t\n#\tfor vMatch in vMatches:\n#\t\tif isinstance(vMatch, str):\n#\t\t\tvMatch = [vMatch];\n#\t\tvCategoriesMessy.append(vMatch[0])\n#\t\t\n#print()\n#vCategoriesMessy = list(set(vCategoriesMessy))\n#vCategoriesMessy.sort()\n\n#%% Get list of files and mark categories\n# Replace the category HTML string with a clear formatted marker, \n# which maps all the various category names to a single unique identifier\nvFiles = glob.glob('pages/*')\nvFiles.sort()\n\n\nvBooksInCat = [''] * nCategories \t\t# Empty container for all book lists grouped by category\n\t\nfor strFilename in vFiles:\n\twith open(strFilename, 'r') as f:\n\t\tstrPage = 
f.read()\n\t\tf.close()\n\t\n\t# Replace with for consistency across files\n\tstrPage = re.sub( r'<([\\/]*)strong>', r'<\\1b>', strPage )\n\t\n\tstrRegex = re.compile('')\n\tvMatches = re.findall(strRegex, strPage)\n#\tprint('• %i - %s' % (len(vMatches), strFilename) )\n\t\n\t# Go through the matching category ids and map them to an set integer value\n\tiPrev = -1;\n\tfor strMatch in vMatches:\n\t\t# Some matched strings in this looser search are not in the dict, use .get()\n\t\t# 0 refers to no match or an erroneous one (e.g. \"Top\")\n\t\tiCatID = dCategoryMap.get(strMatch, 0)\n\t\t\n\t\t# Only encode the category when a match is found\n\t\tif not (iCatID == 0):\n\t\t\t# Replace the match with a unambiguous string containing category number\n\t\t\tstrPage = re.sub( re.compile('' % strMatch), '\\n\\n•Category¶%i§\\n\\n' % (iCatID), strPage )\n\t\t\n\t\t\n\t\t# Use iPrev to ensure that category numbers are always ascending\n\t\tif iCatID > 0 and iCatID < iPrev:\n\t\t\traise Warning('Categories are not in ascending order: %s', strFilename)\n\t\tiPrev = iCatID\n\t\n\t# Loop through the categories and clean up the page\n\tfor iCatID in range(1,nCategories+1):\n\t\t\n\t\t# Keep only the last category identifier\n\t\t# The last category heading is the one preceeding the books\n\t\tstrCategoryLabel = '•Category¶%i§' % (iCatID)\n\t\tiLastCategoryInstance = strPage.rfind( strCategoryLabel )\n\t\t# Delete all category labels except the last one\n\t\tstrPage = strPage[:iLastCategoryInstance].replace( strCategoryLabel, '' ) + strPage[iLastCategoryInstance:]\n\t\n\t# Get the locations of all category labels\n\tvMatches = re.finditer('•Category¶\\d',strPage)\n\tvMatchStart = [ reMatch.start() for reMatch in vMatches ]\n\t\n\t# Use this random comment in the HTML to mark the end of the last category\n\t# It appears in every file in the same place, right after the last category\n\tiEOF = strPage.find('')\n\t\n\t# Raise a warning if the EOF signal is not found\n\tif iEOF == -1:\n\t\traise Warning('No EOF signal found in file: %s' % strFilename)\n\t\n\t# Put the end of file index for the last category\n\tvMatchStart.append(iEOF)\n\t\t\n\t# Loop through categories in file and extract content\n\tvMatches = re.finditer('•Category¶\\d',strPage)\n\tfor iMatch, reMatch in enumerate(vMatches):\n\t\tiCatID = int(reMatch.group()[-1])\n\t\tstrBooksInCat = strPage[vMatchStart[iMatch]:vMatchStart[iMatch+1]]\n\t\tstrBooksInCat = '%s\\n\\n%s\\n\\n\\n\\n%s\\n' % (strFilename,strBooksInCat, '~'*80) \t\t\t\t\t# Add some whitespace\n\t\tvBooksInCat[iCatID-1] = vBooksInCat[iCatID-1] + strBooksInCat\n\t\n#%% Save books in category to text files\nfor iCatID in range(1,nCategories+1):\n\tstrFilename = 'books_in_categories/%i – %s.txt' % (iCatID, vCategories[iCatID-1])\n\twith open(strFilename, 'w') as f:\n\t\tf.write(vBooksInCat[iCatID-1])\n\t\tf.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"christiancasey/nt-categorize","sub_path":"02_extract_categories_from_pages.py","file_name":"02_extract_categories_from_pages.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7972007754","text":"import numpy as np\nfrom azua.datasets.variables import Variable\nfrom azua.preprocessing.transforms import IdentityTransform, UnitScaler\n\n\ndef test_identity_transform():\n transform = IdentityTransform()\n data = np.random.rand(100, 6)\n transformed = transform.fit_transform(data)\n np.testing.assert_allclose(data, 
transformed)\n restored = transform.inverse_transform(transformed)\n np.testing.assert_allclose(data, restored)\n\n\ndef test_unit_scaler():\n transform = UnitScaler([Variable(\"numeric_input\", True, \"continuous\", 0, 9)])\n data = np.arange(10, dtype=np.float32).reshape((10, 1))\n transformed = transform.fit_transform(data)\n np.testing.assert_almost_equal(transformed.min(), 0.0)\n np.testing.assert_almost_equal(transformed.max(), 1.0)\n restored = transform.inverse_transform(transformed)\n np.testing.assert_allclose(data, restored)\n","repo_name":"microsoft/project-azua","sub_path":"tests/unit_tests/preprocessing/test_transforms.py","file_name":"test_transforms.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"67"} +{"seq_id":"2808985699","text":"\r\nfrom .member import Member\r\nfrom .curve import *\r\n\r\nfool = Member('fool', 0, 0)\r\n\r\nclass Party():\r\n def __init__(self, _min: int, _max: int):\r\n self.people = [fool]\r\n self.signable = False\r\n self.now_size = 1\r\n self.max_size = 1 + _max\r\n self.min_size = 0 + _min\r\n self.storage = []\r\n\r\n\r\n def create_user(self, _name: str):\r\n if self.now_size == self.max_size: return False\r\n user = Member(_name, self.min_size, self.max_size)\r\n self.people.append(user)\r\n self.now_size = len(self.people)\r\n return self.people[self.now_size - 1].gen_private_poly()\r\n\r\n\r\n def remove_user(self, _name: str):\r\n if self.now_size == 1: return []\r\n _people = [user for user in self.people if user.name != _name]\r\n if len(_people) < len(self.people): \r\n self.signable = False\r\n self.people = _people\r\n self.now_size = len(self.people)\r\n return self.list_member()\r\n\r\n\r\n def list_member(self) -> list[str]:\r\n _lname = [user.name for user in self.people if user.name != 'fool']\r\n return _lname\r\n\r\n\r\n def reset_member(self):\r\n self.people = [fool]\r\n self.now_size = 1\r\n\r\n \r\n def set_user_status(self, _name: str, _stat: str):\r\n for user in self.people:\r\n if user.name == _name:\r\n return user.set_status(_stat)\r\n return False\r\n\r\n\r\n def broadcast_obfused_poly(self):\r\n self.storage = [[]]\r\n for i, user in enumerate(self.people):\r\n if i == 0: continue\r\n obs_poly = user.obfuscate_poly()\r\n self.storage.append(obs_poly)\r\n\r\n\r\n def get_all_address(self) -> list[Point]:\r\n result = []\r\n for i in range(1, self.max_size):\r\n addr = self.people[i].p_key\r\n result.append(addr)\r\n return result\r\n\r\n\r\n def set_private_chall(self):\r\n group_addr = self.get_all_address()\r\n group_hash = hash_multiset(group_addr)\r\n for i in range(1, self.max_size):\r\n self.people[i].set_master_key(group_hash)\r\n\r\n\r\n def each_share_secret(self):\r\n for j in range(1, self.max_size):\r\n for i in range(1, self.max_size):\r\n y_value = self.people[i].gen_secret_share(j)\r\n self.people[j].recv_secret_share(y_value, i)\r\n\r\n\r\n def check_share_secret(self):\r\n for i, user in enumerate(self.people):\r\n if i == 0: continue\r\n if True != user.verify_obfuscation(i, self.storage):\r\n return False\r\n user.sum_secret_share()\r\n return True\r\n\r\n\r\n def setup_group(self):\r\n if self.now_size != self.max_size: \r\n return False\r\n self.set_private_chall()\r\n self.broadcast_obfused_poly()\r\n self.each_share_secret()\r\n self.signable = self.check_share_secret()\r\n return self.signable\r\n\r\n\r\n def view_secret(self, _name: str):\r\n for user in self.people:\r\n if user.name == _name:\r\n return 
user.get_pair_secret()\r\n return False\r\n\r\n\r\n def view_nonce(self, _name: str):\r\n for user in self.people:\r\n if user.name == _name:\r\n return user.gen_pair_nonce()\r\n return False\r\n\r\n\r\n def count_signers(self) -> list[int]:\r\n if self.signable == False: return []\r\n result = []\r\n for i, user in enumerate(self.people):\r\n if user.p_once != E.INF and user.online == True:\r\n result.append(i)\r\n return result\r\n\r\n\r\n def rebuild_group_pkey(self, subset: list[int]) -> Point:\r\n pk_group = E.INF\r\n for signer in subset:\r\n pi_piece = self.people[signer].get_pkey_piece(signer, subset)\r\n pk_group = pk_group + pi_piece\r\n return pk_group\r\n\r\n \r\n def true_group_pkey(self) -> Point:\r\n pk_group = E.INF\r\n for user in self.people:\r\n pk_group = pk_group + user.p_key * user.chall\r\n return pk_group\r\n\r\n\r\n def public_group_nonce(self, subset: list[int]) -> Point:\r\n r_nonce = E.INF\r\n for signer in subset:\r\n r_nonce += self.people[signer].p_once\r\n return r_nonce\r\n\r\n\r\n def create_signature(self, e: bytes, subset: list[int]):\r\n v_sign = 0\r\n for signer in subset:\r\n v_sign += self.people[signer].sign_challenge(e, signer, subset)\r\n v_sign %= N\r\n return v_sign\r\n\r\n\r\n def clear_all_nonce(self):\r\n for user in self.people:\r\n user.clear_nonce()\r\n return True\r\n\r\n\r\n def sign_message(self, message: str):\r\n subset = self.count_signers()\r\n if len(subset) < self.min_size: return False\r\n subset = subset[:self.min_size]\r\n\r\n p = self.rebuild_group_pkey(subset)\r\n assert p == self.true_group_pkey()\r\n\r\n r = self.public_group_nonce(subset)\r\n e = hash_signature(p, r, message)\r\n s = self.create_signature(e, subset)\r\n\r\n assert True == self.clear_all_nonce()\r\n return (p, r, s)","repo_name":"voanhkhoa0712/multisig","sub_path":"demo/party.py","file_name":"party.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20577003434","text":"import os\nfrom datetime import datetime\nimport argparse\nimport torch.multiprocessing as mp\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\n\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes=10):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.fc = nn.Linear(7 * 7 * 32, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\ndef train(args):\n print(args.arg1, args.arg2)\n\n local_rank = int(os.environ['LOCAL_RANK'])\n # local_rank = int(args.local_rank)\n rank = int(os.environ['RANK'])\n world_size = int(os.environ['WORLD_SIZE'])\n gpu = local_rank\n\n dist.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)\n\n dist_rank = dist.get_rank()\n print(local_rank, rank, dist_rank, world_size)\n\n torch.manual_seed(0)\n\n model = ConvNet()\n torch.cuda.set_device(gpu)\n model.cuda(gpu)\n batch_size = 100\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(gpu)\n optimizer = 
torch.optim.SGD(model.parameters(), 1e-4)\n # Wrap the model\n model = nn.parallel.DistributedDataParallel(model, device_ids=[gpu])\n # Data loading code\n if not rank:\n print('I am root process loading dataset first')\n train_dataset = torchvision.datasets.MNIST(root='./data',\n train=True,\n transform=transforms.ToTensor(),\n download=True)\n dist.barrier()\n if rank:\n print('I am root process loading dataset first')\n train_dataset = torchvision.datasets.MNIST(root='./data',\n train=True,\n transform=transforms.ToTensor(),\n download=True)\n dist.barrier()\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,\n num_replicas=world_size,\n rank=rank)\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n sampler=train_sampler)\n\n start = datetime.now()\n total_step = len(train_loader)\n epochs = 200\n if rank == 0:\n print('started straining')\n print('I am', rank)\n for epoch in range(epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.cuda(non_blocking=True)\n labels = labels.cuda(non_blocking=True)\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss_value = loss.detach()\n\n dist.reduce(loss_value, 0, dist.ReduceOp.SUM)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 10 == 0 and rank == 0:\n print(loss_value.cpu().item())\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, epochs, i + 1, total_step,\n loss.item()))\n if rank == 0:\n print(\"Training complete in: \" + str(datetime.now() - start))\n dist.barrier()\n print('I am done', rank)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--arg1\", type=str)\n parser.add_argument(\"--arg2\", type=str)\n parser.add_argument(\"--local_rank\", type=int)\n args = parser.parse_args()\n\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mbiparva/mitorch","sub_path":"tools/train_mpi_practice.py","file_name":"train_mpi_practice.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"42386078899","text":"from mctspy.tree.nodes import TwoPlayersGameMonteCarloTreeSearchNode\nimport numpy as np\nimport time\n\nclass NeuralNetNode(TwoPlayersGameMonteCarloTreeSearchNode):\n\n def __init__(self, state, clf, parent=None, oneSided=None):\n super().__init__(state, parent)\n self.clf = clf\n self.oneSided = oneSided\n # def best_child(self, c_param=1.4):\n # choices_weights = [\n # (c.q / c.n) + c_param * np.sqrt((2 * np.log(self.n) / c.n))\n # for c in self.children\n # ]\n # return self.children[np.argmax(choices_weights)]\n\n def expand(self):\n action = self.untried_actions.pop()\n next_state = self.state.move(action)\n child_node = NeuralNetNode(\n next_state, self.clf, parent=self, oneSided=self.oneSided\n )\n self.children.append(child_node)\n return child_node\n\n def rollout_policy(self, possible_moves): \n if self.clf is None or self.oneSided is None or self.oneSided != self.state.next_to_move:\n # initial random for learning\n # start = time.time()\n index = np.random.randint(len(possible_moves))\n # end = time.time()\n # print (\"rando time: \", end - start)\n return possible_moves[index]\n else:\n \n #start = time.time()\n \n p = [ self.state.raw(possible_move) for possible_move in possible_moves]\n \n playerIndex = 
list(self.clf.classes_).index(self.state.next_to_move)\n \n \n predictions = [ v[playerIndex] for v in self.clf.predict_proba(p)]\n \n index = np.argmax(predictions)\n \n #end = time.time()\n #print (\"prediction time: \", end - start)\n #print ('index: ', index, 'pred_prob: ', predictions[index], 'move: ' ,possible_moves[index])\n \n return possible_moves[index]","repo_name":"ni3po42/Cosc6368-Project","sub_path":"neuralnetnode.py","file_name":"neuralnetnode.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31553696314","text":"import pytesseract\r\nimport os\r\nfrom PIL import Image\r\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\r\ninputpath = r'C:\\Users\\sarth\\OneDrive\\Desktop\\CS Question Bank\\\\'\r\noutputpath = r'C:\\Users\\sarth\\OneDrive\\Desktop\\scanned1\\\\'\r\noutputname = 0\r\nfor filename in os.listdir(inputpath):\r\n img = Image.open(inputpath + filename)\r\n text = pytesseract.image_to_string(img)\r\n outputname += 1\r\n file = open(outputpath + str(outputname),\"w\")\r\n file.write(text)\r\n\r\n","repo_name":"sarthak812/OCR","sub_path":"imgtotext.py","file_name":"imgtotext.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27655821681","text":"import streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nbf = pd.read_csv('BlackFriday.csv')\r\nprint('Número de linhas: ', bf.shape[0])\r\nprint('Número de colunas: ', bf.shape[1])\r\n\r\n#inicio do app\r\nst.title('Data Analysis')\r\n\r\nmarital_true = bf.Age.loc[bf.Marital_Status == 1].value_counts()\r\nmarital_false = bf.Age.loc[bf.Marital_Status == 0].value_counts()\r\n\r\nx1 = marital_true.index\r\ny1 = marital_true.values\r\n\r\nx2 = marital_false.index\r\ny2 = marital_false.values\r\n\r\nplt.bar(x1, y1, label='Casados', width=0.4, align='edge')\r\nplt.bar(x2, y2, label='Não casados', width=0.4, align='edge')\r\nplt.legend()\r\nplt.title('Casados e não casados por idade')\r\n\r\nst.pyplot(plt)\r\nplt.clf()\r\n\r\nporc_gender = bf.Gender.value_counts(normalize=True)\r\nx = porc_gender.values\r\nplt.pie(x, labels=['Homens', 'Mulheres'], autopct='%1.1f%%')\r\nst.pyplot(plt)","repo_name":"RyanOliveira10/Data-Science","sub_path":"Streamlit/streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32264517977","text":"import re\r\n\r\npfr_articles = []\r\nwith open(\"PFRdemoData.txt\", \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n line = line.strip('\\n')\r\n line = line.strip()\r\n line = re.sub(\"[ ]+\", \" \", line)\r\n line_list = line.split(' ')\r\n word_tag_list = []\r\n nt = False\r\n temp_word = \"\"\r\n if line_list[0] != '':\r\n for temp_str in line_list:\r\n temp_list = temp_str.split('/')\r\n if '[' in temp_list[0]:\r\n nt = True\r\n temp_list[0] = temp_list[0].replace('[', '')\r\n if ']' in temp_list[1]:\r\n nt = False\r\n temp_list[1] = temp_list[1].split(']')[0]\r\n temp_word += temp_list[0]\r\n word_tag_list += [[temp_word, 'nt']]\r\n temp_word = \"\"\r\n if nt == True:\r\n temp_word += temp_list[0]\r\n word_tag_list += [temp_list]\r\n pfr_articles += [word_tag_list]\r\nprint(pfr_articles)\r\nfor line in pfr_articles:\r\n 
print(line)\r\n","repo_name":"wzifan/NLP","sub_path":"文字处理/PFR1.py","file_name":"PFR1.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19595581002","text":"# -*- coding: utf-8 -*-\n\n\"\"\" \nCreated on 2018-01-04 \n@author: lbrein\n 从各网站读取symbol信息 \n ----- \n\"\"\"\n \nfrom com.object.obj_entity import warInfo, warrant_his, wave , warrant_online\nfrom com.data.data_base import fytuObject\nfrom com.data.data_interface import TensentInfo\nimport time \nfrom com.base.public import public , logger\n\nimport pandas as pd\nimport math \n\ndef erfcc(x):\n \"\"\"Complementary error function.\"\"\"\n z = abs(x)\n t = 1. / (1. + 0.5*z)\n r = t * math.exp(-z*z-1.26551223+t*(1.00002368+t*(.37409196+\n t*(.09678418+t*(-.18628806+t*(.27886807+\n t*(-1.13520398+t*(1.48851587+t*(-.82215223+\n t*.17087277)))))))))\n \n if (x >= 0.):\n return r\n else:\n return 2. - r\n\ndef Normsdist(x):\n return 1. - 0.5*erfcc(x/(2**0.5))\n\n\n# 港股涡轮对比模型 - 正股每日波动率计算 \nclass warModel:\n \"\"\"\n 涡轮计算模型,\n 1、获取涡轮基础信息\n 2、统计正股波动率 和收盘价 (在data_waveCompare)\n 3、查询涡轮历史数据,并根据s_d公式计算每日的预测价格\n\n \"\"\" \n None_list = ['lot_size','stockid']\n WarInfo_map = {} \n \n def __init__(self):\n self.Futu = fytuObject() # \n self.Tensent = TensentInfo() # \n self.War = warInfo() # 涡轮基础信息表 \n self.Wh = warrant_his() # \n self.Wave = wave() # 波动率结果表 \n \n # 查询并存储war基础信息\n def initWar(self):\n df = self.Futu.getStockInfo(\"HK\",\"WARRANT\")\n self.add(df) \n \n # 添加到warInfo表基础数据\n def add(self,df):\n time0 = time.time()\n pc = 50 #页码\n c = df[\"code\"].count()\n pgs = c//pc +1\n for i in range(pgs):\n s, e = i*pc, (i+1)*pc \n if e > c: e = c \n # 截取pandas\n docs = df[s:e].to_dict(orient='records')\n \n # 查询获得腾讯其他信息\n maps = self.Tensent.getWar(df[s:e][\"code\"].tolist())\n for doc in docs:\n if doc[\"code\"] in maps.keys():\n doc.update(maps[doc[\"code\"]])\n else:\n doc.update({'exercise_price':0,'exchange_ratio':0,'due_date': ''})\n doc = self.alter(doc)\n \n res = self.War.insertAll(docs)\n print(i, time.time()-time0)\n if not res :\n break \n \n self.Futu.Quant.close() \n \n def check(self):\n # 检查并更新 \n df = self.Futu.getStockInfo(\"HK\",\"WARRANT\")\n # 查询已有code\n sql = \"select DISTINCT 'HK.'+code from warInfo\"\n rs = self.War.execSql(sql,isJson=False)\n es = [r[0] for r in rs]\n \n # 过滤\n nf = df[-(df[\"code\"].isin(es))]\n print(nf[\"code\"].count())\n \n self.add(nf) \n \n # 调整 \n def alter(self,doc):\n for key in self.None_list:\n del doc[key]\n if doc['due_date'].find(\"-\")==-1:\n doc['due_date']= '2000-01-01'\n \n s = doc[\"code\"].split(\".\")\n doc[\"code\"] = s[1]\n doc[\"market\"] = s[0]\n return doc\n \n #每日更新涡轮股价计算\n def update(self):\n # 甲岸村最新日 \n res = self.Wh.execOne(\"select max(sdate) from warrant_his\", isJson=False)\n start = public.getDate(1,res[0])\n self.price(start)\n\n # 计算价格 \n def price(self,start = None):\n # 正股中间表\n owner_maps = {}\n time0 = time.time()\n # 查询已有code\n k = 1 \n for doc in self.War.getWars():\n # 查询正股波动率\n owner = doc[\"owner_stock_code\"][3:]\n if not owner in owner_maps.keys():\n Ow = [n for n in self.Wave.getByCode(owner)] \n if len(Ow)==0: continue \n owner_maps[owner] = Ow \n else:\n Ow = owner_maps[owner]\n \n df = pd.DataFrame(Ow)\n \n if not start:\n start = doc[\"listing_date\"] \n end = df.values[df[\"sdate\"].count()-1, 3] \n end = str(end)[:10]\n \n # 查询涡轮历史每日价格 \n df_w = self.Futu.get_history_kline( \"HK.\"+doc[\"code\"],start=start, end=end)\n \n if type(df_w) == str :\n 
continue \n \n if df_w[\"code\"].count()>0:\n recs = [] \n for index, row in df_w.iterrows():\n doc1 = row.to_dict()\n rec ,doc2 = None, {} \n try:\n # 计算结果\n doc2 = df[(df['sdate']== doc1[\"time_key\"][:10])].to_dict(orient='records')[0]\n rec = self.calcPrice(doc, doc1, doc2)\n except:\n continue \n \n if rec:\n recs.append(rec)\n # 保存 \n if len(recs) > 0 :\n self.Wh.insertAll(recs)\n \n if k % 50==0:\n print(k, doc[\"code\"],time.time()-time0)\n #break \n k+=1 \n \n self.Futu.Quant.close() \n \n def calcPrice(self, doc_info, doc_war, doc_wave):\n # doc_info 涡轮基本信息 包括代码-\n # doc_war 涡轮K线数据\n # doc_wave 正股波动率\n #print(doc_info)\n res = {\n 'code': doc_info[\"code\"], # \n 'sdate': doc_wave[\"sdate\"], # \n 'sclose': doc_war[\"close\"] , # 当期收盘价\n 'owner': doc_wave[\"sclose\"], # 正股价\n 'ep': doc_info[\"exercise_price\"], # 行权价\n 'sd_Price':0.0,\n 'd1':0.0,\n 'd2':0.0,\n } \n \n params_d = {\n \"type\": doc_info[\"stock_child_type\"],\n \"S\": doc_wave[\"sclose\"], # 正股价 \n \"L\": doc_info[\"exercise_price\"], # 行权价 \n \"E\": 1 if doc_info[\"exchange_ratio\"]== 0 else doc_info[\"exchange_ratio\"], # 换股比例\n \"T\": self.timeDiff(doc_info[\"due_date\"], doc_wave[\"sdate\"]), # 到期日\n \"b\": doc_wave[\"wave\"] * math.sqrt(250), # 波动率\n \"r\": 0.01\n }\n # 计算 d1, d2 \n #print(code, params_d)\n d1, d2 = self.calc_d(params_d)\n \n # 计算实际价格 \n params_d.update({\"d1\":d1,\"d2\":d2}) \n \n # 计算目标价\n c = self.calc_c(params_d)\n \n res.update({\"d1\":d1,\"d2\":d2,\"sd_Price\":c})\n return res \n \n def calc_d(self, pm):\n s1 = math.log(pm['S']/pm['L'])+ pm['r'] * pm[\"T\"] \n s2 = 0.5 * pm[\"T\"] * pm['b']**2 \n s3 = pm['b'] * math.sqrt(pm [\"T\"])\n\n d1, d2 = (s1+s2)/s3, (s1- s2)/s3\n return d1, d2 \n\n\n def calc_c(self,pm):\n s1 = pm[\"S\"] * Normsdist(pm[\"d1\"])\n s2 = pm[\"L\"] * math.exp((-1) * pm[\"r\"] * pm[\"T\"]) * Normsdist( pm[\"d2\"])\n s3 = 0 \n \n if pm[\"type\"]==\"PUT\":\n s3 = pm[\"L\"] * math.exp((-1) * pm[\"r\"] * pm[\"T\"]) - pm[\"S\"]\n\n\n c = (s1-s2+s3) / float(pm[\"E\"]) \n #print(c) \n return c\n \n # 计算时间间隔 \n def timeDiff(self,t1,t2):\n diff = public.timeDiff(t1 + \" 00:00:00\", str(t2) + \" 00:00:00\") / 60.0 / 60.0 / 24.0 / 365.0 \n return 1 if diff==0 else diff\n \n \"\"\"\n 在线监控程序\n \n \"\"\"\n # 查询正股收盘价和前一日波动率 \n def getOwnerMap(self, owners = None):\n owners_map = {}\n res = self.Wave.getLastWave()\n # 存储对象\n # 查询获得正股波动率\n oDf = pd.DataFrame([doc for doc in res if (owners is None or doc['code'] in owners)])\n # 正股列表\n lists = ('HK.'+oDf['code']).tolist()\n \n # 查询获得正股当前股价\n closes, volumes, rates = [],[],[]\n for ls in public.eachPage(lists,pc=200):\n df = self.Futu.getSnap(ls)\n #print(df)\n if not df.empty:\n closes += df['last_price'].tolist()\n volumes += df['volume'].tolist()\n rates += df['turnover_rate'].tolist()\n \n time.sleep(5)\n \n #添加正股收盘价\n oDf['sclose'] = closes\n oDf['volume'] = volumes\n oDf['turnover_rate'] = rates\n oDf['sdate'] = public.getDate()\n \n docs = oDf.to_dict(orient='records')\n for doc in docs:\n owners_map[doc['code']] = doc\n \n return owners_map \n \n # 在线监控\n def online(self):\n # 查询正股波动率和当前正股价\n time0 =time.time()\n Obj = warrant_online() # sql数据表\n Obj.empty()\n \n # 检查链接状态\n\n\n # 查询已有code\n wars ,owners = [],[] \n for doc in self.War.getOnline():\n wars.append(doc) \n if doc['owner_stock_code'][3:] not in owners:\n owners.append(doc['owner_stock_code'][3:]) # 正股ID\n \n owner_maps = self.getOwnerMap(owners)\n \n # 查询涡轮历史每日价格 \n k ,total = 0, 0 \n for docs in public.eachPage(wars,pc=200):\n #每批次200个读取当前价格\n res = [] #结果集\n lst = 
['HK.'+doc['code'] for doc in docs] \n df = self.Futu.getSnap(lst)\n \n i = 0 \n for index,row in df.iterrows():\n doc = {\"code\": row['code'] ,\n \"close\": row['last_price']\n }\n \n pcode = docs[i]['owner_stock_code'][3:]\n if pcode in owner_maps.keys(): \n try:\n rec = self.calcPrice(docs[i], doc, owner_maps[pcode])\n #更改时间为当前时间\n if rec:\n # 添加交易量\n rec.update({\n 'sdate':public.getDatetime(), \n 'w_volume':row['volume'],\n 'w_turnover_rate':row['turnover_rate'],\n 'o_volume':owner_maps[pcode]['volume'],\n 'o_turnover_rate':owner_maps[pcode]['turnover_rate'],\n })\n \n res.append(rec)\n except:\n i+=1\n continue\n i += 1 \n k+=1\n total += len(res)\n print(k,len(res), time.time()-time0)\n Obj.insertAll(res) \n time.sleep(5) \n #break \n \n logger.info(\"窝轮波动率检测结束,共 %s\" % total) \n self.Futu.Quant.close() \n\ndef main():\n actionMap = {\n \"new\":0, #历史数据初始化\n \"check\":0, \n \"price\":0, # 计算每日涡轮实际价和预测价格\n \"update\":0,\n \"online\":1,\n }\n \n obj = warModel()\n if actionMap[\"new\"]==1:\n obj.initWar()\n \n if actionMap[\"check\"]==1:\n obj.check()\n \n if actionMap[\"price\"]==1:\n obj.price()\n \n if actionMap[\"update\"]==1:\n obj.update()\n \n if actionMap[\"online\"]==1:\n obj.online()\n \n \n \nif __name__ == '__main__':\n main()\n","repo_name":"lbrein/stock","sub_path":"com/option/model_warrant.py","file_name":"model_warrant.py","file_ext":"py","file_size_in_byte":11833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13502943578","text":"import os\nimport pickle\nimport bwipp_encoders\n\n\nclass Barcode:\n\tdef __init__(self,\n\t encoder_name,\n\t code,\n\t more_option='',\n\t position=(50, 50),\n\t bwipp=r'data/barcode',\n\t header=r'data/header',\n\t footer=r'data/footer',\n\t encoders=r'data/encoders'):\n\t\tself.moveto = position\n\t\tself.bwipp = bwipp\n\t\tself.header = header\n\t\tself.footer = footer\n\t\tassert all(map(os.path.exists, (self.bwipp, self.header, self.footer)))\n\t\tself.encoders = encoders\n\t\tif os.path.exists(self.encoders):\n\t\t\twith open(self.encoders, 'rb') as info:\n\t\t\t\tself.encoders_info = pickle.load(info)\n\t\telse:\n\t\t\tself.encoders_info = bwipp_encoders.gen_encoders(self.bwipp, self.encoders)\n\t\tself.full_template = '{bwipp}\\n\\n{header}\\n\\n{pos_x} {pos_y} moveto ({code}) ({exop}{more_option})\\n/{encoder_name} /uk.co.terryburton.bwipp findresource exec\\n\\n{footer}'\n\t\tself.encoder_name = encoder_name.lower()\n\t\tself.code = code\n\t\tself.more_option = ' ' + more_option if more_option else ''\n\n\tdef render(self):\n\t\treturn self.full_template.format(bwipp=open(self.bwipp).read(),\n\t\t header=open(self.header).read(),\n\t\t pos_x=self.moveto[0],\n\t\t pos_y=self.moveto[1],\n\t\t code=self.code,\n\t\t exop=self.encoders_info[encoder_name]['exop'],\n\t\t more_option=self.more_option,\n\t\t encoder_name=self.encoder_name,\n\t\t footer=open(self.footer).read())\n\n\nimport sys\n\nencoder_name, code, *more_option = sys.argv[1:]\nbCode = Barcode(encoder_name, code, more_option=' '.join(more_option))\nsys.stdout.write(bCode.render())\nsys.stdout.flush()\n\n\n\n\n","repo_name":"Vinalex/pyBarcode","sub_path":"barcode.py","file_name":"barcode.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28079050281","text":"import os\nimport random\n\nimg_path = 'dataset/images/'\nlabel_path = 'dataset/labels/'\ntrain_val_ratio = 0.9 #\n\n\ndef check_exist_txt(jpg_name: 
str):\n txt_path = os.path.join(label_path, jpg_name.split('.')[-2]) + '.txt'\n if os.path.exists(txt_path):\n return True\n else:\n print(txt_path)\n return False\n\n\nfile_list = [jpg_file for jpg_file in os.listdir(img_path)\n if jpg_file.split('.')[-1] == 'jpg']\n\nindices = list(range(len(file_list)))\nrandom.shuffle(indices)\n\ntrain_indices = indices[:int(len(file_list) * train_val_ratio)]\nval_indices = indices[int(len(file_list) * train_val_ratio):]\n\nf = open('train.txt', 'w')\nf2 = open('test.txt', 'w')\n\nfor train_idx in train_indices:\n if not check_exist_txt(file_list[train_idx]):\n continue\n print(os.path.abspath(img_path + file_list[train_idx]), file=f)\n\nfor val_idx in val_indices:\n if not check_exist_txt(file_list[val_idx]):\n continue\n print(os.path.abspath(img_path + file_list[val_idx]), file=f2)\n\nf.close()\nf2.close()\n","repo_name":"SCUCnSoftBei2020/SmartTraffic","sub_path":"tools/get_yolov5_train_txt.py","file_name":"get_yolov5_train_txt.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29723831947","text":"#in the name of Allah\nprint(\"in the name of Allah\")\n\ndef func(n, p):\n s=0\n for i in range(0, n+1):\n s += i**p\n return s\n\nnum = int(input(\"entrer un nombre: \"))\npnum = int(input(\"entrer un nombre: \"))\nprint(\"S\", pnum, \"(\", num, \")= \", func(num, pnum), sep=\"\")\n","repo_name":"IbrahimOuhamou/ofppt","sub_path":"algo-python/Serie7/Exercice4.py","file_name":"Exercice4.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71500177813","text":"import mimetypes\nfrom pathlib import Path\n\nimport pytest\nimport json\nfrom unittest.mock import patch, Mock\nfrom urllib.parse import urlencode\nimport responses\nfrom cloudbackup.exceptions import (\n ApiResponseException,\n FileIsNotDownloadableException\n)\nfrom cloudbackup.yadisk import YaDisk\nfrom cloudbackup.file_objects import YaDiskFile\nfrom cloudbackup.tests._yadisk_api_responses import (\n LSDIR_RESPONSE,\n LIST_FILES_RESPONSE\n)\n\n\n@pytest.fixture()\ndef yadisk():\n with patch(\"cloudbackup.yadisk.Authenticator\") as MockAuth:\n auth = MockAuth.return_value\n auth.get_yadisk_token.return_value = str(Mock())\n yield YaDisk()\n\n\n@responses.activate\ndef test_lsdir_def_args(yadisk):\n url_params = {\n \"path\": \"/\",\n \"sort\": \"modified\",\n \"limit\": \"20\",\n \"offset\": \"0\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/?\"\n f\"{urlencode(url_params)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LSDIR_RESPONSE),\n )\n yadisk.lsdir(\"/\")\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_params\n\n\n@responses.activate\ndef test_lsdir_diff_args(yadisk):\n url_params = {\n \"path\": \"/\",\n \"sort\": \"path\",\n \"limit\": \"10\",\n \"offset\": \"5\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/?\"\n f\"{urlencode(url_params)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LSDIR_RESPONSE),\n )\n yadisk.lsdir(\n path=\"/\",\n sort=\"path\",\n limit=10,\n offset=5\n )\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert 
responses.calls[0].request.params == url_params\n\n\n@responses.activate\ndef test_lsdir_returns_page(yadisk):\n url_params = {\n \"path\": \"/\",\n \"sort\": \"modified\",\n \"limit\": \"20\",\n \"offset\": \"0\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/?\"\n f\"{urlencode(url_params)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LSDIR_RESPONSE),\n )\n dir_files = yadisk.lsdir(\"/\")\n files = [\n YaDiskFile(LSDIR_RESPONSE[\"_embedded\"][\"items\"][0]),\n YaDiskFile(LSDIR_RESPONSE[\"_embedded\"][\"items\"][1]),\n ]\n assert dir_files == files\n url_params[\"path\"] = \"/second_file.pdf\"\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/?\"\n f\"{urlencode(url_params)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LSDIR_RESPONSE[\"_embedded\"][\"items\"][1]),\n )\n single_file_list = yadisk.lsdir(\"/second_file.pdf\")\n assert single_file_list == []\n\n\n@responses.activate\ndef test_list_files_def_args(yadisk):\n url_keys = {\n \"sort\": \"name\",\n \"limit\": \"20\",\n \"offset\": \"0\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/files?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LIST_FILES_RESPONSE)\n )\n yadisk.list_files()\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n\n\n@responses.activate\ndef test_list_files_diff_args(yadisk):\n url_keys = {\n \"sort\": \"created\",\n \"limit\": \"10\",\n \"offset\": \"4\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/files?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LIST_FILES_RESPONSE)\n )\n yadisk.list_files(\n sort=\"created\",\n limit=10,\n offset=4,\n )\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n\n\n@responses.activate\ndef test_list_files_returns_correct_list(yadisk):\n url_keys = {\n \"sort\": \"name\",\n \"limit\": \"20\",\n \"offset\": \"0\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/files?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n match_querystring=True,\n body=json.dumps(LIST_FILES_RESPONSE)\n )\n listed_files = yadisk.list_files()\n test_files = [\n YaDiskFile(LIST_FILES_RESPONSE[\"items\"][0]),\n YaDiskFile(LIST_FILES_RESPONSE[\"items\"][1])\n ] # this list is sorted\n assert listed_files == test_files\n\n\n@responses.activate\ndef test_get_download_link(yadisk):\n path = \"/tests.txt\"\n url_keys = {\"path\": path}\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/download?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n match_querystring=True,\n json={\"href\": \"https://very_secret_download_ref\"}\n )\n download_link = yadisk.get_download_link(path)\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n assert \"href\" in responses.calls[0].response.json()\n assert responses.calls[0].response.json()[\"href\"] == download_link\n\n\n@responses.activate\ndef 
test_download(yadisk):\n download_link = \"https://download_link\"\n responses.add(\n responses.GET,\n url=download_link,\n body=\"raz dva tri\\n\"\n )\n file_bytes = yadisk.download(download_link)\n assert len(responses.calls) == 1\n assert responses.calls[0].response.content == file_bytes\n\n\n@responses.activate\ndef test_move_to_trash(yadisk):\n path = \"/remove.txt\"\n url_keys = {\n \"path\": path,\n \"permanently\": \"False\"\n }\n responses.add(\n method=\"DELETE\",\n url=f\"https://cloud-api.yandex.net/v1/disk/resources?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n status=204,\n body=\"\",\n )\n yadisk.remove(path)\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n\n\n@responses.activate\ndef test_remove_permanently(yadisk):\n path = \"/remove.txt\"\n url_keys = {\n \"path\": path,\n \"permanently\": \"True\"\n }\n responses.add(\n method=\"DELETE\",\n url=f\"https://cloud-api.yandex.net/v1/disk/resources?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n status=204,\n body=\"\",\n )\n yadisk.remove(path, True)\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n\n\n@responses.activate\ndef test_mkdir(yadisk):\n path = \"/test_dir\"\n url_keys = {\"path\": path}\n responses.add(\n responses.PUT,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources?\"\n f\"{urlencode(url_keys)}\",\n content_type=\"application/json\",\n status=201,\n body=\"\"\n )\n yadisk.mkdir(path)\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == url_keys\n\n\n@responses.activate\ndef test_get_upload_link(yadisk):\n file_path = Path(\"_yadisk_api_responses.py\")\n name = f'\"name\": \"{file_path.name}\"'\n mime_type = f'\"mime_type\": \"{mimetypes.guess_type(file_path)[0]}\"'\n req_params = {\n \"path\": \"/\",\n \"fields\": \"{\" + name + \", \" + mime_type + \"}\"\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/upload?\"\n f\"{urlencode(req_params)}\",\n content_type=\"application/json\",\n json={\"href\": \"some_upload_link\"}\n )\n yadisk.get_upload_link(file_path, \"/\")\n assert len(responses.calls) == 1\n assert \"Authorization\" in responses.calls[0].request.headers\n assert responses.calls[0].request.params == req_params\n\n\n@responses.activate\ndef test_upload_file(yadisk):\n upload_link = \"https://cool_upload_link\"\n responses.add(\n responses.PUT,\n url=upload_link,\n content_type=\"application/json\",\n body=\"\",\n status=201,\n )\n yadisk.upload_file(upload_link, b\"test_bytes\")\n assert len(responses.calls) == 1\n assert responses.calls[0].request.body == b\"test_bytes\"\n\n\n@responses.activate\ndef test_lsdir_exception(yadisk):\n path = \"/tests\"\n url_keys = {\n \"path\": path,\n \"sort\": \"modified\",\n \"limit\": \"20\",\n \"offset\": \"0\",\n }\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/?\"\n f\"{urlencode(url_keys)}\",\n json={\n \"message\": \"Не удалось найти запрошенный ресурс.\",\n \"description\": \"Resource not found.\",\n \"error\": \"DiskNotFoundError\"\n },\n status=404\n )\n with pytest.raises(ApiResponseException) as api_exc:\n yadisk.lsdir(path)\n assert str(api_exc.value) == \"Resource not found.\"\n assert 
api_exc.value.status_code == 404\n\n\n@responses.activate\ndef test_get_download_link_for_not_existing_file(yadisk):\n path = \"/not_existing_file.txt\"\n url_keys = {\"path\": path}\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/download?\"\n f\"{urlencode(url_keys)}\",\n json={\n \"message\": \"Не удалось найти запрошенный ресурс.\",\n \"description\": \"Resource not found.\",\n \"error\": \"DiskNotFoundError\"\n },\n status=404\n )\n with pytest.raises(ApiResponseException) as api_exc:\n yadisk.get_download_link(path)\n assert str(api_exc.value) == \"Resource not found.\"\n assert api_exc.value.status_code == 404\n\n\n@responses.activate\ndef test_get_download_link_for_not_downloadable_file(yadisk):\n path = \"/\"\n url_keys = {\"path\": path}\n responses.add(\n responses.GET,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources/download?\"\n f\"{urlencode(url_keys)}\",\n json={\"href\": \"\"},\n status=200\n )\n with pytest.raises(FileIsNotDownloadableException) as api_exc:\n yadisk.get_download_link(path)\n assert str(api_exc.value) == f\"File: `{path}` isn't downloadable.\"\n\n\n@responses.activate\ndef test_make_existing_dir(yadisk):\n path = \"/existing_dir\"\n url_keys = {\"path\": path}\n responses.add(\n responses.PUT,\n url=f\"https://cloud-api.yandex.net/v1/disk/resources?\"\n f\"{urlencode(url_keys)}\",\n json={\n \"message\": \"По указанному пути \\\"/existing_dir\\\"\"\n \" уже существует папка с таким именем.\",\n \"description\": \"Specified path \\\"/existing_dir\\\"\"\n \" points to existent directory.\",\n \"error\": \"DiskPathPointsToExistentDirectoryError\"\n },\n status=409\n )\n with pytest.raises(ApiResponseException) as api_exc:\n yadisk.mkdir(path)\n assert str(api_exc.value) == (\"Specified path \\\"/existing_dir\\\"\"\n \" points to existent directory.\")\n assert api_exc.value.status_code == 409\n","repo_name":"ddqof/cloud_backup","sub_path":"cloudbackup/tests/test_yadisk.py","file_name":"test_yadisk.py","file_ext":"py","file_size_in_byte":11830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42565526046","text":"# ----------------------------------\n# German Flashcards by Patt Martin -\n# ----------------------------------\nfrom utilities import *\nimport game_translate_one_word\nimport game_which_one\nimport game_number_translator\nimport game_translate_time\n\n# ---------------------------------------------------------------------\n# --- Starter Method ---\n# ----------------------\n\ndef __init__():\n clear_console()\n show_data_menu()\n\n\n\n# ---------------------------------------------------------------------\n# --- Main Methods ---\n# --------------------\n\n# Get the vocabulary words from a spreadsheet thats been converted into a JSON file\ndef show_data_menu():\n print(\"[0] Load vocab JSON via default path\")\n print(\"[1] Load vocab JSON via custom path\")\n try:\n dataLoadID = int(input(\"Select [#] to execute: \"))\n except:\n print(\"Invalid selection was made... 
try again\")\n show_data_menu()\n\n filepath = \"Deutsch Vokabeln.json\"\n\n if(dataLoadID == 1):\n filepath = input(\"customDataPath: \")\n\n germanData = GermanData()\n germanData.load_vocabulary_dict(load_json_file(filepath))\n\n show_loading_text(\"Loading\")\n print(len(germanData.vocab_words), \"vocabulary cards loaded!\")\n\n show_main_menu(germanData)\n\ndef show_main_menu(germanData:GermanData):\n while True:\n print_seperator()\n print(\"[0] Play simple flashcards\")\n print(\"[1] Play pick the correct one out of three\")\n print(\"[2] Translate that number!\")\n print(\"[3] Translate a random time!\")\n print(\"[9] Output specific card by index\")\n\n try:\n selectionID = input(\"Select [#] to execute: \")\n except:\n print(\"Invalid selection was made... try again\")\n\n if(selectionID == \"0\"):\n game_translate_one_word.play_game(germanData)\n elif(selectionID == \"1\"):\n game_which_one.play_game(germanData)\n elif(selectionID == \"2\"):\n game_number_translator.play_game(germanData)\n elif(selectionID == \"3\"):\n game_translate_time.play_game(germanData)\n elif(selectionID == \"9\"):\n print_specific_vocab_word(germanData)\n elif(selectionID == \"/exit\"):\n print(\"You're already on the main menu\")\n print_seperator(germanData)\n else:\n print(\"Invalid selection was made... try again\")\n\n\n# Remove later since this is just for debugging specific words\ndef print_specific_vocab_word(germanData:GermanData):\n count = len(germanData.vocab_words)\n\n while True:\n try:\n print_seperator()\n choice = input(\"Enter '/exit' to return or a value between 0-\" + str(count) + \": \")\n # Return to main menu\n if (choice == \"/exit\"):\n return\n\n germanData.get_flashcard(int(choice)).print_card()\n \n except:\n print(\"Invalid data entry\")\n\n\n# ---------------------------------------------------------------------\n\n# Run program\n__init__()","repo_name":"MaddHatt-PM/German-Flashcards","sub_path":"GermanFlashcards.py","file_name":"GermanFlashcards.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31416000595","text":"from io import open #libreria para leer el archivo \ndef nota(puntaje):\n\tnota = int(puntaje)\n\tif nota < 60: \n\t\tcalificacion = 1\n\telif nota >= 60 and nota <=69:\n\t\tcalificacion = 2\n\telif nota >= 70 and nota <=79: \n\t\tcalificacion = 3\n\telif nota >= 80 and nota <=89:\n\t\tcalificacion = 4\n\telse:\n\t\tcalificacion = 5\n\treturn calificacion\n#variables para la escritura y lectura de los archivos requeridos \nsalida = open(\"calificacion.txt\", \"w\", encoding= \"utf-8\")\narchivo = open (\"notas.txt\", \"r\", encoding = \"utf-8\")\ncount = 0 \nsalida.write(\"Apellido, Nombre \\t\\t Puntaje Nota\\n\")\nmejor = 0\naprobados = 0\n#recorremos el archivo de lectura\nfor linea in archivo:\n\tcount = count + 1\n\t[apellido,nombre,puntaje]= linea.strip().split(':')\n\tsalida.write(apellido + \", \" + nombre + \"\\t\\t\\t\"+ puntaje +\"\\t\"+ str(nota(puntaje)) + \"\\n\")\t\n\t#verificamos lo que pasaron la materia\n\tif nota(puntaje) > 2:\n\t\taprobados = aprobados +1\n\t#la mejor nota vemos aqui \n\tif nota(puntaje)>mejor:\n\t\tmejor = nota(puntaje);\n\t\talumno = nombre + \" \" + apellido\n\t\tpuntaje_mayor = puntaje\nsalida.write(\"Cantidad de aprobados \" + str(aprobados)+ \"\\n\")\nsalida.write(\"Mayor nota: \" + str(mejor) + \" Mejor Puntaje: \" + str(puntaje_mayor) + \" \\nAlumno con mejor puntaje: \" + alumno 
)\t\narchivo.close()\nsalida.close()","repo_name":"ricklegac/ayudas","sub_path":"2022/tareas 2022/fatima_segundo_parcial.py","file_name":"fatima_segundo_parcial.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37897819216","text":"import random\n\nfrom utils import dlib_detect, draw_pts, show_detection\nimport tensorflow as tf\nimport numpy as np\nimport collections\nimport pickle, time\nimport vgg_face\nimport argparse\nimport cv2\nimport dlib\n\n#For ipcam\nimport urllib\nimport time\n\nimport threading\n\nclass Env(object):\n def __init__(self, ip, port, total_steps, emo_model):\n self.ip = ip\n self.port = port\n\n self.action_dim = 15\n self.epi_num = 1\n self.state_num = 0\n\n #Emotion recognition module\n self.emo_list = ['angry','contemptuous','disgusted','fearful',\\\n 'happy','neutral','sad','surprised']\n self.face_det = dlib.get_frontal_face_detector()\n self.lm_det = dlib.shape_predictor('../model/shape_predictor_68_face_landmarks.dat')\n self.frames = 10\n\n self.lm_ph = tf.placeholder(tf.float32, [None, 51*2])\n self.img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])\n self.keep_prob = tf.placeholder(tf.float32)\n\n self.sess = tf.Session()\n dgn = vgg_face.DGN()\n dgn.build(self.lm_ph, self.keep_prob)\n self.pred_dgn = tf.argmax(dgn.prob, 1)\n\n dan = vgg_face.Vgg_face()\n dan.build(self.img_ph, self.keep_prob)\n self.pred_dan = tf.argmax(dan.prob, 1)\n\n self.emo_model = emo_model\n\n if self.emo_model == 'weighted-sum':\n prob_sum = tf.nn.softmax(dan.fc8+dgn.fc3)\n self.pred_sum = tf.argmax(prob_sum, 1)\n elif self.emo_model == 'joint-fine-tune':\n saver = tf.train.Saver()\n saver.restore(self.sess, '../model/dgan.ckpt')\n prob_joint = tf.nn.softmax(dan.fc8+dgn.fc3)\n self.pred_joint = tf.argmax(prob_joint, 1)\n\n self.sess.run(tf.global_variables_initializer())\n\n self.total_steps = total_steps\n random.seed()\n\n ipcam_url = 'http://admin:@'+ self.ip + ':' + str(self.port) + '/MJPEG.CGI'\n self.stream=urllib.urlopen(ipcam_url)\n self.stream.close()\n self.ipCamStart = True\n ipCamThread = threading.Thread(target=self._ipCamThread)\n ipCamThread.start()\n\n def StartIpCam(self):\n ipcam_url = 'http://admin:@'+ self.ip + ':' + str(self.port) + '/MJPEG.CGI'\n self.stream=urllib.urlopen(ipcam_url)\n self.bytes = ''\n self.emo_record = (np.ones(self.frames, dtype=int)*5).tolist()\n self.emo_buffer = collections.deque(maxlen=10)\n self.state_record = []\n\n def GetInitState(self):\n #return self._getEmotion()\n self.stream.close()\n state_ctr = collections.Counter(self.state_record)\n print(state_ctr.most_common()[0][0]) #TODO: test\n return state_ctr.most_common()[0][0]\n\n def Step(self, state):\n #n_state = self._getEmotion()\n self.stream.close()\n state_ctr = collections.Counter(self.state_record)\n n_state = state_ctr.most_common()[0][0]\n print(n_state) #TODO: test\n\n reward = self._reward(state, n_state)\n t = False\n\n self.state_num += 1\n if self.state_num == self.total_steps:\n t = True\n\n return reward, n_state, t\n\n def _ipCamThread(self):\n while self.ipCamStart:\n self._getEmotion()\n\n def _getEmotion(self):\n time.sleep(1.0)\n while not self.stream.fp == None:\n self.bytes += self.stream.read(1024)\n a = self.bytes.find('\\xff\\xd8')\n b = self.bytes.find('\\xff\\xd9')\n if a != -1 and b != -1:\n frame = cv2.imdecode(np.fromstring(self.bytes[a:b+2], dtype=np.uint8), 1)\n self.bytes = self.bytes[b+2:]\n\n num, face, shape, shape_origin = dlib_detect(\\\n frame, 3, 
self.face_det, self.lm_det, 224, 224)\n if num == 1:\n shape_norm = shape[17:]-shape[30]\n shape_norm = shape_norm.reshape([1,51*2])\n if self.emo_model == 'dan':\n pred = self.sess.run(self.pred_dan, feed_dict={\\\n self.img_ph: face.reshape([1,224,224,3]), self.keep_prob: 1.0})\n elif self.emo_model == 'dgn':\n pred = self.sess.run(self.pred_dgn, feed_dict={\\\n self.lm_ph: shape_norm, self.keep_prob: 1.0})\n elif self.emo_model == 'weighted-sum':\n pred = self.sess.run(self.pred_sum, feed_dict={\\\n self.img_ph: face.reshape([1,224,224,3]), \\\n self.lm_ph: shape_norm, self.keep_prob: 1.0})\n elif self.emo_model == 'joint-fine-tune':\n pred = self.sess.run(self.pred_joint, feed_dict={\\\n self.img_ph: face.reshape([1,224,224,3]), \\\n self.lm_ph: shape_norm, self.keep_prob: 1.0})\n \n self.emo_record.append(int(pred))\n del self.emo_record[0]\n ctr = collections.Counter(self.emo_record)\n\n #TODO\n self.emo_buffer.append(ctr)\n emo_his = collections.Counter()\n emo_his_table = np.zeros(len(self.emo_list))\n emo_now_table = np.zeros(len(self.emo_list))\n for c in self.emo_buffer:\n emo_his += c\n emo_his_avg = [v/float(len(self.emo_buffer)) for v in emo_his.values()]\n emo_his = emo_his.items()\n emo_his = np.array([np.array([list(emo_his[i])[0], emo_his_avg[i]]) \\\n for i in range(len(emo_his))])\n emo_his_table[emo_his[:,0].astype(int)] = emo_his[:,1]\n emo_now = ctr.items()\n emo_now = np.array([np.array([list(e)[0], list(e)[1]]) for e in emo_now])\n emo_now_table[emo_now[:,0].astype(int)] = emo_now[:,1] \n state = np.array([(emo_now_table[i]>=3 and \\\n (emo_now_table[i]-emo_his_table[i])>=0.0) \\\n for i in range(len(self.emo_list))]).astype(int)\n\n state_int = 0\n for i, j in enumerate(state):\n state_int += j<<i\n self.state_record.append(state_int)\n\n def _reward(self, state, n_state):\n #Good -> Good \n if (self.goodState(state) == True) and (self.goodState(n_state) == True):\n reward = +0.1\n #Good -> Neutral\n elif (self.goodState(state) == True) and (n_state == 32):\n reward = 0.0\n #Good -> Bad\n elif (self.goodState(state) == True) and (self.goodState(n_state) == False):\n reward = -1.0\n #Bad -> Bad\n elif (self.goodState(state) == False) and (self.goodState(n_state) == False):\n reward = -0.1\n #Bad -> Good\n elif (self.goodState(state) == False) and (self.goodState(n_state) == True):\n reward = +1.0\n #Bad -> Neutral\n elif (self.goodState(state) == False) and (n_state == 32):\n reward = +0.1\n #Neutral -> Neutral\n elif (state == 32) and (n_state == 32):\n reward = 0.0\n #Neutral -> Good\n elif (state == 32) and (self.goodState(n_state) == True):\n reward = 0.1\n #Neutral -> Bad\n else:\n reward = -0.1\n\n return reward\n","repo_name":"gamborino/RoBoHoN","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":7879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20769793038","text":"from vec import Vec\nfrom mat import Mat\nfrom bitutil import noise\nfrom GF2 import one\nfrom matutil import listlist2mat\nfrom matutil import mat2coldict\nfrom matutil import coldict2mat\nfrom bitutil import str2bits,bits2mat\n## Task 1 part 1\n\"\"\" Create an instance of Mat representing the generator matrix G. You can use\nthe procedure listlist2mat in the matutil module (be sure to import first).\nSince we are working over GF (2), you should use the value one from the\nGF2 module to represent 1\"\"\"\nG = None\nG = listlist2mat([[one,0,one,one],[one,one,0,one],[0,0,0,one],[one,one,one,0],[0,0,one,0],[0,one,0,0],[one,0,0,0]])\n## Task 1 part 2\n# Please write your answer as a list. 
Use one from GF2 and 0 as the elements.\nencoding_1001 = [0, 0, one, one, 0, 0, one]\n\n\n## Task 2\n# Express your answer as an instance of the Mat class.\nR = listlist2mat([[0,0,0,0,0,0,one],[0,0,0,0,0,one,0],[0,0,0,0,one,0,0],[0,0,one,0,0,0,0]])\n\n## Task 3\n# Create an instance of Mat representing the check matrix H.\nH = listlist2mat([[0, 0, 0, one, one, one, one], [0, one, one, 0, 0, one, one], [one, 0, one, 0, one, 0, one]])\n\n## Task 4 part 1\ndef find_error(e):\n \"\"\"\n Input: an error syndrome as an instance of Vec\n Output: the corresponding error vector e\n Examples:\n >>> find_error(Vec({0,1,2}, {0:one}))\n Vec({0, 1, 2, 3, 4, 5, 6},{3: one})\n >>> find_error(Vec({0,1,2}, {2:one}))\n Vec({0, 1, 2, 3, 4, 5, 6},{0: one})\n >>> find_error(Vec({0,1,2}, {1:one, 2:one}))\n Vec({0, 1, 2, 3, 4, 5, 6},{2: one}) \n \"\"\"\n #v=Vec(H.D[1],{x:0 for x in H.D[1]})\n v=Vec({0,1,2,3,4,5,6},{})\n colMat = mat2coldict(H)\n for column in range(len(H.D[1])):\n if colMat[column]==e:\n v[column]=one \n return v\n\n## Task 4 part 2\n# Use the Vec class for your answers.\nnon_codeword = Vec({0,1,2,3,4,5,6}, {0: one, 1:0, 2:one, 3:one, 4:0, 5:one, 6:one})\nerror_vector = Vec({0, 1, 2, 3, 4, 5, 6},{6: one})\ncode_word = Vec({0,1,2,3,4,5,6}, {0: one, 2:one, 3:one, 5:one})\noriginal = R*code_word # R * code_word\n\n\n## Task 5\ndef find_error_matrix(S):\n \"\"\"\n Input: a matrix S whose columns are error syndromes\n Output: a matrix whose cth column is the error corresponding to the cth column of S.\n Example:\n >>> S = listlist2mat([[0,one,one,one],[0,one,0,0],[0,0,0,one]])\n >>> find_error_matrix(S)\n Mat(({0, 1, 2, 3, 4, 5, 6}, {0, 1, 2, 3}), {(1, 2): 0, (3, 2): one, (0, 0): 0, (4, 3): one, (3, 0): 0, (6, 0): 0, (2, 1): 0, (6, 2): 0, (2, 3): 0, (5, 1): one, (4, 2): 0, (1, 0): 0, (0, 3): 0, (4, 0): 0, (0, 1): 0, (3, 3): 0, (4, 1): 0, (6, 1): 0, (3, 1): 0, (1, 1): 0, (6, 3): 0, (2, 0): 0, (5, 0): 0, (2, 2): 0, (1, 3): 0, (5, 3): 0, (5, 2): 0, (0, 2): 0})\n \"\"\"\n return coldict2mat({c:find_error(mat2coldict(S)[c]) for c in S.D[1]})\n\n## Task 6\ns = \"I'm trying to free your mind, Neo. But I can only show you the door. 
You’re the one that has to walk through it.\"\nP = bits2mat(str2bits(s))\n\n## Task 7\nC = None\nbits_before = None\nbits_after = None\n\n\n## Ungraded Task\nCTILDE = None\n\n## Task 8\ndef correct(A):\n \"\"\"\n Input: a matrix A each column of which differs from a codeword in at most one bit\n Output: a matrix whose columns are the corresponding valid codewords.\n Example:\n >>> A = Mat(({0,1,2,3,4,5,6}, {1,2,3}), {(0,3):one, (2, 1): one, (5, 2):one, (5,3):one, (0,2): one})\n >>> correct(A)\n Mat(({0, 1, 2, 3, 4, 5, 6}, {1, 2, 3}), {(0, 1): 0, (1, 2): 0, (3, 2): 0, (1, 3): 0, (3, 3): 0, (5, 2): one, (6, 1): 0, (3, 1): 0, (2, 1): 0, (0, 2): one, (6, 3): one, (4, 2): 0, (6, 2): one, (2, 3): 0, (4, 3): 0, (2, 2): 0, (5, 1): 0, (0, 3): one, (4, 1): 0, (1, 1): 0, (5, 3): one})\n \"\"\"\n pass\n","repo_name":"vineetyadav/coding_the_matrix","sub_path":"ecc_lab/ecc_lab.py","file_name":"ecc_lab.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41128148823","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sn_app', '0004_auto_20150521_1420'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ContactMe',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('sender_name', models.CharField(max_length=100)),\n ('sender_email', models.EmailField(max_length=254)),\n ('message', models.TextField(max_length=2000)),\n ('time_sent', models.TimeField()),\n ],\n options={\n 'verbose_name': 'ContactMe message',\n 'verbose_name_plural': 'ContactMe messages',\n },\n ),\n ]\n","repo_name":"sergeynikiforov/sergey-nikiforov.com","sub_path":"sn_app/migrations/0005_contactme.py","file_name":"0005_contactme.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9013909240","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom telethon.sync import TelegramClient\nimport config\n\napi_id = config.api_id\napi_hash = config.api_hash\n\ndef send():\n with TelegramClient('Session', api_id, api_hash) as client:\n with open(\"id.txt\", \"r\", encoding='utf-8') as file:\n u_id = int(file.readline())\n with open(\"text.txt\", \"r\", encoding='utf-8') as file2:\n text = str(file2.readline())\n client.send_message(u_id, text)\nif __name__ == '__main__':\n send()\n","repo_name":"R0d17N/1029","sub_path":"1029/bin/Debug/netcoreapp3.1/доп.файлы/bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27368599689","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nsys.path.append(os.getcwd() + '/../../')\nsys.path.append(os.getcwd() + '/../../third/vadlstd')\n\nfrom lnasr.utils import read_pcm\nfrom VadLstd import VadLtsd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef on_key(event:mpl.backend_bases.KeyEvent):\n if event.key == 'escape':\n plt.close()\n\nwd = read_pcm(\"data-vad.raw\")\nwd = np.array(wd / (65536.0/2), dtype=np.double)\nvad = VadLtsd(\n freq=16000,\n winsize=1024,\n stepsize=512,\n order=4,\n threshold=-6,\n alpha=0.4)\nltsd = vad.detect(wd)\n# 修改order可以改LTSE的包络宽度(order增大,会提前检测到Activity位置,也会延后Non-Activity位置)\nres_points 
= np.arange(ltsd.shape[0]) * vad.stepsize\nres_nor = ltsd / np.max(ltsd)\nres_bin = (res_nor > 0.25) * np.max(wd)\nplt.figure('Plot')\nplt.connect('key_press_event', on_key)\nplt.plot(wd, linewidth=0.5)\nplt.plot(res_points, res_nor, 'g')\nplt.plot(res_points, res_bin, 'r')\nplt.show()\n","repo_name":"yehuohan/ln-asr","sub_path":"test/third/vadlstd-test.py","file_name":"vadlstd-test.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"3920031093","text":"# lab on variables\nx = \"Isaac\" # x is now type of str\nprint(x)\n\n# A variable can have a short name (like x and y) or a more descriptive name (age, carname, total_volume). Rules for Python variables:\n# A variable name must start with a letter or the underscore character\n# A variable name cannot start with a number\n# A variable name can only contain alpha-numeric characters and underscores (A-z, 0-9, and _ )\n# Variable names are case-sensitive (age, Age and AGE are three different variables)\n\n# String variables can be declared either by using single or double quotes:\nx = \"Isaac\"\ny = 'Name'\nname = 'Helen'\nname = \"Helen\"\nflower = 'tulip'\nflower = \"tulip\"\n\n# variables can be called anything EXCEPT for keywords: https://www.w3schools.com/python/python_ref_keywords.asp\n# successfully installed Homebrew !\n# failure to set Python 3.8 as default\n\n# snake case\nname_flower = \"tulip\"\nname_son = \"Isaac\"\n# variables should be lower case; the only time things are upper case is when it's a Class or when it's a boolean value\n\n# Legal variable names:\nmyvar = \"John\"\nmy_var = \"John\"\n_my_var = \"John\"\nmyVar = \"John\"\nMYVAR = \"John\"\nmyvar2 = \"John\"\n\n# Illegal variable names:\n# 2myvar = \"John\"\nmyvar = \"John\"\nmyvar = \"John\"\n\n# Python allows you to assign values to multiple variables in one line:\n\nx, y, z = \"Orange\", \"Banana\", \"Cherry\"\nprint(x)\nprint(y)\nprint(z)\n\nprint(\"Hello World\") \n\nprint(\"Hello World\")\n\n#this is an error message for wrong print\n# print(Hello World)\n# ^\n# SyntaxError: invalid syntax\n\n","repo_name":"isaacamend/Python-exercises","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29345101333","text":"import heapq\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def networkDelayTimeSlow(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(set)\n for s, d, t in times:\n graph[d].add(((s, t)))\n\n def dfs(self, source, depth, visited):\n visited.add(source)\n if source == k:\n return depth\n res = []\n for s, t in graph[source]:\n if s not in visited:\n res.append(dfs(self, s, depth + t, set(visited)))\n if res and -1 not in res: return min(res)\n return -1\n\n res = []\n for i in range(1, n + 1):\n if i != k:\n res.append(dfs(self, i, 0, set()))\n return -1 if -1 in res else max(res)\n\n def networkDelayTimeDFS(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(list)\n for u, v, w in times:\n graph[u].append((w, v))\n dist = {node: float('inf') for node in range(1, n + 1)}\n\n def dfs(node, elapsed):\n if elapsed >= dist[node]: return\n dist[node] = elapsed\n for time, nei in sorted(graph[node]):\n dfs(nei, elapsed + time)\n\n dfs(k, 0)\n ans = max(dist.values())\n return ans if ans < float('inf') else -1\n\n def 
networkDelayTimeDijkstra(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(list)\n for u, v, w in times:\n graph[u].append((v, w))\n\n dist = {node: float('inf') for node in range(1, n + 1)}\n seen = [False] * (n + 1)\n dist[k] = 0\n\n while True:\n cand_node = -1\n cand_dist = float('inf')\n for i in range(1, n + 1):\n if not seen[i] and dist[i] < cand_dist:\n cand_dist = dist[i]\n cand_node = i\n if cand_node < 0: break\n seen[cand_node] = True\n for nei, d in graph[cand_node]:\n dist[nei] = min(dist[nei], dist[cand_node] + d)\n ans = max(dist.values())\n return ans if ans < float('inf') else -1\n\n def networkDelayTimeHeap(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(list)\n for u, v, w in times:\n graph[u].append((v, w))\n pq = [(0, k)]\n dist = {}\n while pq:\n d, node = heapq.heappop(pq)\n if node in dist: continue\n dist[node] = d\n for nei, d2 in graph[node]:\n if nei not in dist:\n heapq.heappush(pq, (d+d2, nei))\n return max(dist.values()) if len(dist) == n else -1\n\nif __name__ == '__main__':\n # times = [[2,1,1],[2,3,1],[3,4,1]]\n times = [[1, 2, 1], [2, 3, 2], [1, 3, 2]]\n # n = 4\n # k = 2\n n = 3\n k = 1\n print(Solution().networkDelayTimeHeap(times, n, k))\n","repo_name":"replcloud/interview_py","sub_path":"us/matthey/coco/algorithm/leetcode/network_delay_time.py","file_name":"network_delay_time.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7476512298","text":"from flask_caching import Cache\nfrom quart import Quart, render_template_string, ResponseReturnValue\n\napp = Quart(__name__)\ncache = Cache(app, config={\"CACHE_TYPE\": \"simple\"})\n\n\n@app.route(\"/\")\n@cache.cached(timeout=50)\nasync def index() -> ResponseReturnValue:\n return await render_template_string(\"Hello\")\n\n\nasync def test_flask_caching() -> None:\n test_client = app.test_client()\n response = await test_client.get(\"/\")\n assert (await response.get_data(as_text=True)) == \"Hello\"\n","repo_name":"pgjones/quart-flask-patch","sub_path":"tests/test_flask_caching.py","file_name":"test_flask_caching.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"39354610297","text":"import io\nimport math\nimport typing\n\n\nfrom settings import settings\n\n\ndef write_image(state, filename):\n image = Image(filename)\n write_header(state, image)\n write_x_axis(state, image)\n write_y_axis(state, image)\n write_boxes(state, image)\n write_footer(image)\n\n\nclass Image:\n def __init__(self, filename):\n self.file = io.open(filename, \"w\")\n self.indentation = 0\n\n def push(self, header: str):\n self.append(header)\n self.indentation += 1\n\n def pop(self, footer: str):\n assert self.indentation > 0\n self.indentation -= 1\n self.append(footer)\n\n def append(self, line: str):\n self.write_indent()\n print(line, file=self.file)\n\n def write_indent(self):\n self.file.write(\" \" * self.indentation)\n\n def __del__(self):\n self.file.close()\n\n\ndef write_line(x1: int, y1: int, x2: int, y2: int, image: Image):\n image.append(\n f'<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" stroke=\"black\" />')\n\n\ndef write_color_line(x1: int, y1: int, x2: int, y2: int, color, image: Image):\n image.append(\n f'<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" stroke=\"rgb({color})\" />')\n\n\ndef write_text(\n x: int, y: int, rot: int,\n text: str, eid: str, align: str, image: typing.TextIO):\n image.append(\n f'<text x=\"{x}\" y=\"{y}\" transform=\"rotate({rot} {x} {y})\" text-anchor=\"{align}\" id=\"{eid}\">{text}</text>')\n\n\ndef write_text_row(row: int, text: str, eid: str, image: typing.TextIO):\n # A bit of 
fudging to make the text appear centered on the row.\n text_height_fudge = 8\n x = settings.left_gap - 1\n y = settings.grid_top_gap + (row * settings.row_height) - text_height_fudge\n write_text(x, y, 0, text, eid, \"end\", image)\n\n\ndef write_box(\n x: int, y: int, width: int, height: int, title: str,\n image: typing.TextIO, **kwargs):\n line = f'{title}')\n image.pop('')\n\n\ndef write_box_color(\n x: int, y: int,\n width: int, height: int,\n title: str, color: str, image: typing.TextIO):\n\n image.push(\n f'')\n image.append(f'{title}')\n image.pop('')\n\n\ndef write_box_value(\n x: int, y: int,\n width: int, height: int,\n title: str, value: float, image: typing.TextIO):\n color = settings.color_range(value)\n color = f\"{color[0]}, {color[1]}, {color[2]}\"\n write_box_color(x, y, width, height, title, color, image)\n\n\ndef write_box_grid(\n row: int, column: int, title: str, value: float, image: typing.TextIO):\n col_width = settings.column_width\n row_height = settings.row_height\n\n fill_rate = 0.8\n box_width = col_width * fill_rate\n box_height = row_height * fill_rate\n\n width_gap = (1.0 - fill_rate) * col_width * 0.5\n height_gap = (1.0 - fill_rate) * row_height * 0.5\n\n x = settings.left_gap + column * col_width + width_gap\n y = settings.grid_top_gap + (row-1) * row_height + height_gap\n write_box_value(x, y, box_width, box_height, title, value, image)\n\n\ndef write_box_row(\n row: int, end_x: int, title: str, color, image: typing.TextIO):\n x = settings.left_gap\n y = settings.grid_top_gap + (row-1) * settings.row_height\n box_width = end_x - settings.left_gap\n box_height = settings.row_height\n write_box(\n x, y, box_width, box_height, title, image, color=color, opacity=0.5)\n\n\ndef write_x_axis(state, image):\n num_columns = state.gpus[-1].column + 1\n x1 = settings.left_gap\n x2 = settings.left_gap + (num_columns * settings.column_width)\n y = settings.timeline_gap\n write_line(x1, y, x2, y, image)\n\n text_height_fudge = 20\n\n for year in state.years:\n column = state.annums[year].column\n x = settings.left_gap + (column * settings.column_width)\n write_line(\n x, settings.timeline_gap,\n x, settings.timeline_gap - (0.7 * settings.row_height), image)\n write_text(\n x,\n settings.timeline_gap + text_height_fudge,\n 0,\n f\"{year}\", f\"tick-{year}\", \"middle\", image)\n\n for gpu in state.gpus:\n column = gpu.column\n x = settings.left_gap + (column * settings.column_width) \\\n + text_height_fudge\n y = settings.timeline_gap - settings.row_height\n write_text(x, y, -90, f\"{gpu.label}\", f\"{gpu.label}\", \"start\", image)\n last_row_y = \\\n settings.timeline_gap + (len(state.gpus) + 1) * settings.row_height\n x -= (0.3 * text_height_fudge)\n color = gpu.id[:3] == \"amd\" and \"100, 0, 0\" or \"0, 100, 0\"\n write_color_line(x, y, x, last_row_y, color, image)\n\n\ndef write_y_axis(state, image):\n num_columns = state.gpus[-1].column + 1\n end_x = settings.left_gap + (num_columns * settings.column_width)\n\n for i, gpu in enumerate(state.gpus):\n write_text_row(gpu.row, gpu.label, gpu.label, image)\n color = gpu.id[:3] == \"amd\" and \"255, 230, 230\" or \"230, 255, 230\"\n write_box_row(gpu.row, end_x, gpu.label, color, image)\n\n\ndef find_exponential_growth(v_i: float, v_j: float, dt: float) -> float:\n if dt == 0:\n # Instantaneous change doesn't have a rate.\n return math.nan\n return math.log(v_j / v_i) / dt\n\n\n# TODO: Consider using number of days instead of number of months.\ndef num_months_between(old, new):\n \"\"\"Return the number of month 
transitions between the two dates.\"\"\"\n return (new.year - old.year) * 12 + (new.month - old.month)\n\n\ndef write_boxes(state, image):\n for old_index, old_gpu in enumerate(state.gpus):\n row = old_gpu.row\n for new_index, new_gpu in enumerate(state.gpus[old_index + 1:]):\n column = new_gpu.column\n num_months = num_months_between(old_gpu.date, new_gpu.date)\n num_years = num_months / 12\n rate = find_exponential_growth(\n old_gpu.rating, new_gpu.rating, num_years)\n rate_percent = rate * 100\n title = (\n f\"{old_gpu.name} → {new_gpu.name}\\n\"\n f\"{old_gpu.rating} → {new_gpu.rating} = \"\n f\"{new_gpu.rating - old_gpu.rating} increase\\n\"\n f\"{num_years:.1f} years\\n\"\n f\"{rate_percent:.2f}% / year\"\n )\n write_box_grid(row, column, title, rate_percent, image)\n\n\ndef write_header(state, image: Image):\n num_columns = state.gpus[-1].column + 1\n width = settings.left_gap + (num_columns * settings.column_width)\n height = settings.timeline_gap + (len(state.gpus) + 1) * settings.row_height\n image.append('')\n image.push(\n f'')\n\n\ndef write_footer(image: Image):\n image.pop('')\n","repo_name":"ibbles/gpu-performance-rate","sub_path":"svg_writer.py","file_name":"svg_writer.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26761714170","text":"import datetime\nfrom configparser import ConfigParser\nfrom datetime import date\nfrom openpyxl import Workbook, load_workbook\nfrom dateutil.relativedelta import relativedelta, MO\n\n\nclass ReportAutomation():\n\n\tdef config_setup(self):\n\t\tfile = 'config.ini'\n\t\tself.config = ConfigParser()\n\t\tself.config.read(file)\n\n\n\tdef generate_dates(self):\n\t\tday = datetime.timedelta(1)\n\n\t\tself.mon = date.today() + relativedelta(weekday=MO(0))\n\t\tself.tue = self.mon + day\n\t\tself.wed = self.mon + day*2\n\t\tself.thur = self.mon + day*3\n\t\tself.fri = self.mon + day*4\n\t\tself.sat = self.mon + day*5\n\t\tself.sun = self.mon + day*6\n\n\n\tdef format_dates(self):\n\t\t# Format: \"26-Apr-22\"\n\t\tself.mon_date = self.mon.strftime(\"%d-%b-%y\") \n\t\tself.tue_date = self.tue.strftime(\"%d-%b-%y\")\n\t\tself.wed_date = self.wed.strftime(\"%d-%b-%y\")\n\t\tself.thur_date = self.thur.strftime(\"%d-%b-%y\")\n\t\tself.fri_date = self.fri.strftime(\"%d-%b-%y\")\n\t\tself.sat_date = self.sat.strftime(\"%d-%b-%y\")\n\t\tself.sun_date = self.sun.strftime(\"%d-%b-%y\")\n\t\t# Format: \"April 26 - April 27\"\n\t\tself.mon_wkst_title_date = self.mon.strftime(\"%B %d\")\n\t\tself.fri_wkst_title_date = self.fri.strftime(\"%B %d\")\n\t\t# Format: \"4/26/22 - 4/27/22\"\n\t\tself.mon_title_date = self.mon.strftime(\"%#m/%#d/%y\")\n\t\tself.fri_title_date = self.fri.strftime(\"%#m/%#d/%y\")\n\n\n\tdef setup_excel(self):\n\t\tself.wb = load_workbook(self.config['location']['address'])\n\t\tself.wb.active = self.wb['TEMPLATE']\n\t\tself.ws = self.wb.active\n\t\ttarget = self.wb.copy_worksheet(self.ws)\n\t\ttarget.sheet_view.zoomScale = 80\n\t\ttarget.title = f\"{self.mon_wkst_title_date} - {self.fri_wkst_title_date}\"\n\t\tself.ws = self.wb[f\"{self.mon_wkst_title_date} - {self.fri_wkst_title_date}\"]\n\n\n\tdef populate_cells(self):\n\t\tself.ws['A1'].value = f\"Weekly Report ({self.mon_title_date} - {self.fri_title_date})\"\n\t\tself.ws['B2'].value = self.mon_date\n\t\tself.ws['D2'].value = self.tue_date\n\t\tself.ws['F2'].value = self.wed_date\n\t\tself.ws['H2'].value = self.thur_date\n\t\tself.ws['J2'].value = 
self.fri_date\n\t\tself.ws['L2'].value = self.sat_date\n\t\tself.ws['N2'].value = self.sun_date\n\n\n\tdef save_workbook(self):\n\t\tself.wb.save(self.config['location']['address'])","repo_name":"EEgithubUser/reportgenerator","sub_path":"reportAutomation.py","file_name":"reportAutomation.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28329467688","text":"# identify, document, and remove rows with an Na value in any column in lake change input data\n# and save the cleaned files to a new directory\n# using conda env perm_ground\n\nimport geopandas as gpd\nimport pandas as pd\nfrom pathlib import Path\nimport os\n\n# collect all lake_change.gpkg filepaths in Ingmar's data\nbase_dir = Path('/home/pdg/data/nitze_lake_change/data_2022-11-04/lake_change_GD/')\nfilename = 'lake_change.gpkg'\n# To define each .gpkg file within each subdir as a string representation with forward slashes,\n# use as_posix()\n# The ** represents that any subdir string can be present between the base_dir and the filename\ninput = [p.as_posix() for p in base_dir.glob('**/' + filename)]\nprint(f\"Collected {len(input)} lake_change.gpkg filepaths.\")\n\n# import each filepath as a gdf\n# document which rows have Na (as a separate csv for each input gpkg)\n# drop any row with an Na value is any column\n# next step: also drop any rows with inf values!\n# for test runs, try 1 file:\ninput_subset = input[0:1]\n\nfor path in input_subset:\n print(f\"Checking file {path}.\")\n gdf = gpd.read_file(path)\n\n # first identify any rows with NA to document which are dropped\n drop_na_rows = []\n for index, row in gdf.iterrows():\n if row.isnull().any():\n drop_na_rows.append(row)\n # convert the list of rows to a dataframe\n drop_na_df = pd.DataFrame(drop_na_rows)\n\n # hard-code the start of the path to directory for the cleaned data\n filepath_start = \"/home/jcohen/lake_change_GD_workflow/workflow_cleaned/invalid_data_documentation/\"\n # next, pull the last couple parts of filepath to ID which lake_change.gpkg\n # is being processed, following Ingmar's directory hierarchy\n directory, filename = os.path.split(path)\n filepath_sections = directory.split(os.sep)\n relevant_sections = filepath_sections[-2:]\n partial_filepath = relevant_sections[0] + \"/\" + relevant_sections[1]\n full_filepath = filepath_start + partial_filepath + \"/drop_na_rows.csv\"\n # make the subdirectories if they do not yet exist\n directory_path = os.path.dirname(full_filepath)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)\n # save the df of rows with NA values as a csv\n drop_na_df.to_csv(full_filepath, index = False)\n print(f\"Saved rows with NA for lake change GDF:\\n{path}\\nto file:\\n{full_filepath}\")\n\n # drop the rows with Na in any column\n gdf.dropna(axis = 0, inplace = True)\n\n # save cleaned lake change file to new directory\n # (we are not overwriting the original lake change file)\n # hard-code the start of the path to directory for the cleaned data\n filepath_start = \"/home/jcohen/lake_change_GD_workflow/workflow_cleaned/cleaned_files/\"\n # next, pull the last couple parts of filepath to ID which lake_change.gpkg\n # is being processed, following Ingmar's directory hierarchy\n directory, filename = os.path.split(path)\n filepath_sections = directory.split(os.sep)\n relevant_sections = filepath_sections[-2:] + ['lake_change_cleaned_na.gpkg']\n filepath_end = relevant_sections[0] + \"/\" + 
relevant_sections[1] + \"/\" + relevant_sections[2]\n full_filepath = filepath_start + filepath_end\n print(f\"Saving file to {full_filepath}\")\n # make the subdirectories if they do not yet exist\n directory_path = os.path.dirname(full_filepath)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)\n gdf.to_file(full_filepath, driver = \"GPKG\") \n\nprint(f\"Cleaning complete.\")\n\n\n","repo_name":"julietcohen/lake_change_sample","sub_path":"clean_na_values.py","file_name":"clean_na_values.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27377014084","text":"#!/usr/bin/env python\n\nimport os\n\n#===============================================================================\nclass AlbumInfo(dict):\n def __init__(self):\n dict.__init__(self)\n self._musicFileExts = ['.mp3', '.mpc', '.wma', 'wav']\n\n #--------------------------------------------------------------------------\n def load(self, baseDir):\n self._info = {}\n albumFileName = os.path.join(baseDir, '.album')\n\n if not os.path.isfile(albumFileName):\n return False\n\n self['baseDir'] = baseDir\n\n albumFile = open(albumFileName, 'rt')\n for line in albumFile:\n line = line.split('#', 1)[0]\n line = line.rstrip()\n if not line:\n continue\n s = line.split('=', 2)\n name = s[0].rstrip()\n value = None\n if len(s) > 1:\n value = s[1].strip()\n self[name] = value\n\n s = baseDir.split(os.sep)\n if not 'artist' in self:\n self['artist'] = s[-2]\n if not 'album' in self:\n self['album'] = s[-1]\n \n if self['album'].lower().startswith(self['artist'].lower()) and len(self['album']) > len(self['artist']):\n self['album'] = self['album'][len(self['artist']):]\n self['album'] = self['album'].lstrip('-._ ')\n\n self._loadMusicFiles()\n return True\n\n #--------------------------------------------------------------------------\n def _loadMusicFiles(self):\n\n musicFiles = []\n for root, dirs, files in os.walk(self['baseDir'], topdown=False):\n for file in files:\n ext = os.path.splitext(file)[1].lower()\n if ext in self._musicFileExts:\n musicFiles.append(os.path.join(root, file))\n musicFiles.sort()\n self['musicFiles'] = musicFiles\n\n #--------------------------------------------------------------------------\n def getAlbumSize(self):\n totalSize = 0\n for musicFile in self['musicFiles']:\n totalSize += os.path.getsize(musicFile)\n return totalSize\n\n #--------------------------------------------------------------------------\n def __getitem__(self, key):\n if dict.has_key(self, key):\n return dict.__getitem__(self, key)\n return ''\n\n #--------------------------------------------------------------------------\n @staticmethod\n def sort(a, b):\n if a['artist'] == b['artist']:\n return cmp(a['album'], b['album'])\n return cmp(a['artist'], b['artist'])\n \n#===============================================================================\n\n","repo_name":"sourcesimian/wayBack","sub_path":"bin/syncMusicAlbums/AlbumInfo.py","file_name":"AlbumInfo.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4552867040","text":"# pywin32(win32com) 라이브러리 불러오기 \n# Excel이 설치되어야 하고, pip으로 pywin32 모듈 설치 필요\nimport win32com.client as com \n\n# 엑셀 실행하기 \napp = com.Dispatch(\"Excel.Application\")\napp.Visible = True\napp.DisplayAlerts = False \n\n# 엑셀에 신규 문서 생성\nbook = app.Workbooks.Add() \n# 활성 시트 가져오기 \nsheet = book.ActiveSheet 
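# (Editor's note: the Korean comments in this pywin32 snippet translate roughly as
# "import the pywin32 (win32com) library", "Excel must be installed, and the
# pywin32 module must be installed via pip", "launch Excel", "create a new workbook
# in Excel", "get the active sheet", and, just below, "write a value to the sheet".)
# A hedged sketch of how such a COM session is typically finished. SaveAs and Quit
# are standard Excel COM methods, but the target path and the cleanup step are this
# editor's illustration rather than part of the original file:
#
#   book.SaveAs(r"C:\temp\pywin32_hello.xlsx")  # persist the new workbook to disk
#   app.Quit()                                  # close Excel so no orphan process lingers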
\n\n# 시트에 값 쓰기 \nsheet.Range(\"B2\").Value = \"안녕하세요. 이 예제는 win32 COM 객체를 이용했습니다.\"","repo_name":"KhanKMS/CODEX","sub_path":"4.python/ch03/pywin32_hello.py","file_name":"pywin32_hello.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35543835182","text":"import json\nimport unittest\nfrom unittest.mock import patch\nfrom datetime import datetime, timezone\nfrom pytz.exceptions import UnknownTimeZoneError\nfrom decimal import Decimal\n\nfrom yappa.utils import current_local_time\nfrom yappa.utils import decimal_default\n\n\nclass UtilsTestCase(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n @patch('yappa.utils.datetime')\n def test_current_local_time_taipei(self, mock_datetime):\n mock_now = datetime(2016, 5, 27, 16, 33, 0, 0, tzinfo=timezone.utc)\n mock_datetime.now.return_value = mock_now\n\n local_now = current_local_time('Asia/Taipei')\n\n self.assertEqual(local_now.isoformat(), '2016-05-28T00:33:00+08:00')\n\n @patch('yappa.utils.datetime')\n def test_current_local_time_canada(self, mock_datetime):\n mock_now = datetime(2016, 5, 27, 16, 33, 0, 0, tzinfo=timezone.utc)\n mock_datetime.now.return_value = mock_now\n\n local_now = current_local_time('Canada/Central')\n\n self.assertEqual(local_now.isoformat(), '2016-05-27T11:33:00-05:00')\n\n @patch('yappa.utils.datetime')\n def test_current_local_time_with_invalid_zone(self, mock_datetime):\n mock_now = datetime(2016, 5, 27, 16, 33, 0, 0, tzinfo=timezone.utc)\n mock_datetime.now.return_value = mock_now\n\n with self.assertRaises(UnknownTimeZoneError) as e:\n current_local_time('invalid/timezone')\n\n def test_decimal_default(self):\n product = {\n 'price': Decimal('55.12')\n }\n\n result = json.dumps(product, default=decimal_default)\n self.assertEqual(result, '{\"price\": 55.12}')\n\n def test_decimal_default_with_non_decimal(self):\n product = {\n 'name': 'Product 1'\n }\n\n result = json.dumps(product, default=decimal_default)\n self.assertEqual(result, '{\"name\": \"Product 1\"}')\n","repo_name":"spin/yet-another-python-paypal-adaptive","sub_path":"yappa/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34013214348","text":"from django.urls import path\nfrom . import views\n\napp_name = 'home'\n\nurlpatterns = [\n path('dashboard', views.home, name='home'),\n path('recommendations', views.recommendations, name='recommendations'),\n path('clustering', views.clustering, name='clustering'),\n path('aprOnClustering', views.aprOnClustering, name='aprOnClustering'),\n path('api', views.api, name='api'),\n\n]","repo_name":"paramSonawane/ClusteringApriori","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71730049174","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for crab_soccer project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'crab_soccer'\n\nSPIDER_MODULES = ['crab_soccer.spiders']\nNEWSPIDER_MODULE = 'crab_soccer.spiders'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'crab_soccer (+http://www.yourdomain.com)'\nUSER_AGENT = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.70 Safari/537.17'\n\nITEM_PIPELINES=[\n 'crab_soccer.pipelines.JsonWriterPipeline',\n# 'crab_soccer.pipelines.DBPipeline',\n ]\n\nimport sys\n\nreload(sys) \nsys.setdefaultencoding('utf8') # @UndefinedVariable\nimport os\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),\"../..\")))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'soccer.settings'\n","repo_name":"acche/fb_doc","sub_path":"soccer/crab_soccer/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16607572181","text":"import re\nfrom collections import defaultdict\n\ndef get_sizes():\n sizes = defaultdict(int)\n sizes[\"/root\"] = 0\n with open('input.txt' if True else 'sample.txt', 'r') as i:\n output = list(map(lambda l: l.replace(\"\\n\", \"\"), i.readlines()))\n curr_folder = \"/root\"\n for line in output:\n cmd = re.search(r'\\$ (cd|ls) (.+)', line)\n if cmd:\n op, folder = cmd.groups()\n if op in [\"dir\", \"ls\"]: continue\n if op == \"cd\":\n if folder == \"/\":\n curr_folder = \"/root\"\n elif folder == \"..\":\n curr_folder = curr_folder[0:curr_folder.rfind(\"/\")]\n else:\n curr_folder += f'/{folder}'\n\n file_size = re.search(r'(\\d+) (.+)', line)\n if file_size:\n size, file = file_size.groups()\n folder = curr_folder\n for _ in range(curr_folder.count(\"/\")):\n sizes[folder] += int(size)\n folder = folder[:folder.rfind(\"/\")]\n return sizes\n \ndef part_01():\n sizes = get_sizes()\n total = 0\n for size in sizes.values():\n if size <= 100000:\n total += size\n print(f\"Part 1: {total}\")\n\ndef part_02():\n sizes = get_sizes()\n target = sizes[\"/root\"] - 39999999\n folders = []\n for size in sizes.values():\n if target <= size:\n folders.append(size)\n print(f\"Part 2: {min(folders)}\")\n\n\npart_01()\npart_02()","repo_name":"JJStoker/AOC","sub_path":"2022/07/no_space_left_on_device.py","file_name":"no_space_left_on_device.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"4154168697","text":"import typing as ty\nfrom collections.abc import Callable, Iterable\n\nimport ablator.utils.base as butils\nfrom ablator.modules.metrics.stores import MovingAverage, PredictionStore\n\n\nclass LossDivergedError(Exception):\n pass\n\n\nclass TrainMetrics:\n \"\"\"\n Stores and manages predictions and calculates metrics given some custom evaluation functions.\n Makes batch-updates\n Manages memory limits\n applies evaluation functions.\n provides cached or online updates on the train loss\n \"\"\"\n\n def __init__(\n self,\n *args,\n batch_limit=30,\n memory_limit=1e8,\n evaluation_functions: dict[str, Callable] | None = None,\n moving_average_limit=3000,\n tags: list[str] | None = None,\n # metrics with their initial value that are updated manually, i.e. learning rate\n static_aux_metrics: dict[str, ty.Any] | None = None,\n # metrics for which we update with their moving average, i.e. 
loss\n moving_aux_metrics: Iterable[str] | None = None,\n ):\n \"\"\"\n Initialize the train metrics settings\n\n Parameters\n ----------\n batch_limit : int, optional\n Maximum number of batches to keep for every category of data (specified by ``tags``), so only `batch_limit`\n number of latest batches is stored for each of the categories. Default is 30.\n memory_limit : int, optional\n Maximum memory (in bytes) of batches to keep for every category of data (specified by ``tags``). Every time\n this limit is exceeded, ``batch_limit`` will be reduced by 1. Default is 1e8.\n evaluation_functions : dict[str, Callable], optional\n A dictionary of key-value pairs, keys are evaluation function names, values are\n callable evaluation functions, e.g mean, sum. Note that arguments to this Callable\n must match with names of prediction batches that the model returns. So if model prediction over\n a batch looks like this: {\"preds\": , \"labels\": },\n then callable's arguments should be ``preds`` and ``labels``, e.g ``evaluation_functions=\n {\"mean\": lambda preds, labels: np.mean(preads) + np.mean(labels)}``. Default is None.\n moving_average_limit : int, optional\n The maximum number of values allowed to store moving average metrics. Default is 3000.\n tags : list[str], optional\n A list of tags to specify predictions results from different categories, a sample use case is to\n categorize different sets of data (train, evaluation, test sets), e.g: ``tags=[\"train\", \"val\"]``\n This will be combined with evaluation function names and moving auxiliary metrics names to create metrics.\n For example, if ``evaluation_functions.keys() = [\"mean\"]``, ``moving_aux_metrics = [\"loss\"]``, then metrics\n that will be tracked are: ``train_mean``, ``train_loss``, ``val_mean``, ``val_loss``.\n Default is ``[\"train\"]``.\n static_aux_metrics : dict[str, ty.Any], optional\n A dictionary of static metrics, those with their initial value that are updated manually,\n such as learning rate, best loss, total steps, etc. Keys of this dictionary are static metric names,\n while values is a proper initial value. Default is None.\n moving_aux_metrics : Iterable[str], optional\n A list of metrics, those we update with their moving average, such as loss. Default is None.\n\n Examples\n --------\n Initialize an object of TrainMetrics:\n\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"mean\": lambda x: np.mean(x)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... 
)\n >>> train_metrics.to_dict() # metrics are set to np.nan if it's not updated yet\n {\n \"train_mean\": np.nan, \"train_loss\": np.nan,\n \"val_mean\": np.nan, \"val_loss\": np.nan,\n \"lr\": 1.0\n }\n \"\"\"\n if tags is None:\n tags = [\"train\"]\n assert len(args) == 0, \"Metrics takes no positional arguments.\"\n\n _static_aux_metrics = {} if static_aux_metrics is None else static_aux_metrics\n _moving_aux_metrics = (\n set({}) if moving_aux_metrics is None else set(moving_aux_metrics)\n )\n\n _evaluation_functions = (\n {} if evaluation_functions is None else evaluation_functions\n )\n self.__batch_limit__ = batch_limit\n self.__memory_limit__ = memory_limit\n self.__moving_average_limit__ = moving_average_limit\n self.__evaluation_functions__ = _evaluation_functions\n self.__static_aux_attributes__: list[str] = sorted(\n list(_static_aux_metrics.keys())\n )\n self.__tags__: list[str] = sorted(list(tags))\n self.__moving_aux_attributes__: list[str] = sorted(\n list(\n f\"{tag}_{eval_metric}\"\n for tag in self.__tags__\n for eval_metric in list(set(_moving_aux_metrics))\n )\n )\n self.__moving_eval_attributes__: list[str] = sorted(\n list(\n f\"{tag}_{eval_fn}\"\n for tag in self.__tags__\n for eval_fn in self.__evaluation_functions__\n )\n )\n _all_attr_names = (\n self.__moving_aux_attributes__\n + self.__moving_eval_attributes__\n + self.__static_aux_attributes__\n )\n duplicates = {x for x in _all_attr_names if _all_attr_names.count(x) > 1}\n\n assert (\n len(duplicates) == 0\n ), f\"Duplicate metric names with built-ins {duplicates}\"\n\n for tag, v in _static_aux_metrics.items():\n setattr(self, tag, v)\n for tag in set(self.__moving_aux_attributes__).union(\n self.__moving_eval_attributes__\n ):\n self._init_ma(tag)\n\n for tag in tags:\n self._init_preds(tag)\n\n def update_static_metrics(self, metric_dict: dict[str, ty.Any]):\n \"\"\"\n Update static metrics with the values in metric_dict.\n\n Parameters\n ----------\n metric_dict : dict[str, ty.Any]\n A dictionary containing the static metrics values to update.\n\n Raises\n ------\n AssertionError:\n If metric_dict has metrics that are not in static_aux_attributes.\n\n Notes\n -----\n Not all metric_dict items must be preset from static_aux_attributes.\n i.e. metric_dict.items - static_aux_attributes =/= static_aux_attributes - metric_dict.items\n\n Examples\n --------\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"mean\": lambda x: np.mean(x)},\n ... moving_average_limit=100,\n ... tags=[\"train\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... 
)\n >>> train_metrics.to_dict()\n {\n \"train_mean\": np.nan, \"train_loss\": np.nan,\n \"lr\": 1.0\n }\n >>> train_metrics.update_static_metrics({\"lr\": 0.3})\n >>> train_metrics.to_dict()\n {\n \"train_mean\": np.nan, \"train_loss\": np.nan,\n \"lr\": 0.3\n }\n\n \"\"\"\n diff_metrics = set(metric_dict.keys()).difference(\n self.__static_aux_attributes__\n )\n metric_keys = sorted(list(metric_dict.keys()))\n assert len(diff_metrics) == 0, (\n \"There are difference in the class metrics: \"\n f\"{self.__static_aux_attributes__} and updated metrics {metric_keys}\"\n )\n metric_dict = butils.iter_to_numpy(metric_dict)\n for k, v in metric_dict.items():\n setattr(self, k, v)\n\n def update_ma_metrics(self, metric_dict: dict[str, ty.Any], tag: str):\n \"\"\"\n Keep the moving average aux metrics updated with new values from metric_dict.\n This method will append the new metric values to its collection of metric results.\n A sample use case for this method is when we finish a training iteration, we\n can add the training loss to ``loss`` moving average metric collection on tag ``train``,\n aka the train set.\n\n Parameters\n ----------\n metric_dict : dict[str, ty.Any]\n A dictionary containing the moving average metric values to update.\n tag : str\n A tag that specifies which set of predictions to update metric values.\n\n Raises\n ------\n AssertionError:\n If metric_dict has metrics that are not in moving_aux_metrics.\n\n Examples\n --------\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"sum\": lambda x: np.mean(x)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... )\n >>> train_metrics.to_dict()\n {\n \"train_sum\": np.nan, \"train_loss\": np.nan,\n \"val_sum\": np.nan, \"val_loss\": np.nan,\n \"lr\": 1.0\n }\n >>> train_metrics.update_ma_metrics({\"loss\": 0.35}, tag=\"val\")\n >>> train_metrics.to_dict()\n {\n \"train_sum\": np.nan, \"train_loss\": np.nan,\n \"val_sum\": np.nan, \"val_loss\": 0.35,\n \"lr\": 1.0\n }\n \"\"\"\n metric_keys = {f\"{tag}_{k}\" for k in metric_dict}\n diff_metrics = metric_keys.difference(set(self.__moving_aux_attributes__))\n assert len(diff_metrics) == 0, (\n \"There are difference in the class metrics: \"\n f\"{self.__moving_aux_attributes__} and parsed metrics {sorted(list(metric_keys))}\"\n )\n self._update_ma_metrics(metric_dict, tag)\n\n def _update_ma_metrics(self, metric_dict: dict[str, ty.Any], tag=None):\n # metric dict should contain scalars\n metric_dict = butils.iter_to_numpy(metric_dict)\n for k, v in metric_dict.items():\n attr_name = f\"{tag}_{k}\" if tag is not None else k\n self._get_ma(attr_name).append(v)\n\n def reset(self, tag: str):\n \"\"\"\n Reset to empty all prediction sequences (e.g predictions, labels)\n in a set of predictions specified by ``tag`` argument.\n\n Parameters\n ----------\n tag : str\n A tag that specifies which set of predictions to be reset.\n\n Examples\n --------\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"sum\": lambda pred: np.mean(pred)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... 
)\n >>> train_metrics.append_batch(pred=np.array([1] * 3), tag=\"train\") # e.g add 3 predictions all of class 1\n >>> train_metrics.reset(tag=\"train\")\n \"\"\"\n preds = self._get_preds(tag)\n preds.reset()\n\n def evaluate(self, tag, reset=True, update_ma=True):\n \"\"\"\n Apply evaluation_functions to a set of predictions specified by ``tag`` argument. Possibly update the\n moving averages (only those associated with evaluation functions, not moving auxiliary metrics) with\n the evaluated results, or reset the predictions.\n\n Parameters\n ----------\n tag : str\n A tag that specifies which set of predictions to evaluate.\n reset : bool, optional\n A flag that indicates whether to reset the predictions to empty after evaluation. Default is True.\n update_ma : bool, optional\n A flag that indicates whether to update the moving averages after evaluation. Default is True.\n\n Returns\n -------\n metrics : dict\n A dictionary of metric values calculated from the predictions.\n\n Examples\n --------\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"mean\": lambda pred: np.mean(pred)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... )\n >>> train_metrics.append_batch(pred=np.array([100]), tag=\"val\")\n >>> train_metrics.evaluate(\"val\", reset=False, update=True) # val_mean is updated to\n mean among batch mean values: (100 / 1) / 1 = 100.0\n >>> train_metrics.append_batch(pred=np.array([0] * 3), tag=\"val\")\n\n For the following examples, the current evaluation result is: ``(100 + 0 + 0 + 0) / 4 = 25`` (which is returned\n by evaluate() function), and since update=True, val_mean is updated to: ``(100.0 + 25) / 2 = 62.5`` (we can\n see this if we use .to_dict())\n\n >>> train_metrics.evaluate(\"val\", reset=True, update=True)\n {'mean': 25.0}\n >>> train_metrics.to_dict()\n {'val_mean': 62.5}\n \"\"\"\n preds = self._get_preds(tag)\n metrics = preds.evaluate()\n if update_ma:\n self._update_ma_metrics(metrics, tag)\n if reset:\n preds.reset()\n return metrics\n\n def append_batch(self, *args, tag, **kwargs):\n \"\"\"\n Appends a batch of predictions to a specific set.\n\n Parameters\n ----------\n tag : str\n A tag that specifies which set of predictions to evaluate.\n **kwargs : dict\n A dictionary of key-value pairs, where key is type of prediction (e.g predictions, labels),\n and value is a batch of prediction values. Note that the passed keys in ``**kwrags`` must match arguments in\n evaluation functions arguments in Callable in evaluation_functions when we initialize TrainMetrics object.\n\n Raises\n ------\n AssertionError\n If any positional arguments are passed, or if the provided tag is not a defined metric category.\n\n Notes\n -----\n this is because it is easy to mix up the order of pred, labels and tags\n\n Examples\n --------\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"mean\": lambda labels: np.mean(labels)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 1.0},\n ... moving_aux_metrics={\"loss\"},\n ... 
)\n >>> train_metrics.append_batch(labels=np.array([100]), tag=\"train\")\n >>> train_metrics.append_batch(labels=np.array([0] * 3), tag=\"train\")\n >>> train_metrics.append_batch(labels=np.array([50]), tag=\"val\")\n\n \"\"\"\n # NOTE this is because it is easy to mix up the order of pred, labels and tags\n assert len(args) == 0, \"Metrics.append_batch takes no positional arguments.\"\n assert (\n tag in self.__tags__\n ), f\"Undefined tag '{tag}'. Metric tags {self.__tags__}\"\n self._get_preds(tag).append(**kwargs)\n\n def _init_preds(self, tag) -> PredictionStore:\n attr_name = f\"__{tag}_preds__\"\n _preds = PredictionStore(\n batch_limit=self.__batch_limit__,\n memory_limit=self.__memory_limit__,\n evaluation_functions=self.__evaluation_functions__,\n )\n setattr(self, attr_name, _preds)\n return getattr(self, attr_name)\n\n def _get_preds(self, tag) -> PredictionStore:\n attr_name = f\"__{tag}_preds__\"\n preds = getattr(self, attr_name)\n return preds\n\n def _init_ma(self, tag) -> MovingAverage:\n attr_name = f\"__{tag}_ma__\"\n _ma = MovingAverage(\n batch_limit=self.__moving_average_limit__,\n memory_limit=self.__memory_limit__,\n )\n setattr(self, attr_name, _ma)\n return getattr(self, attr_name)\n\n def _get_ma(self, tag) -> MovingAverage:\n attr_name = f\"__{tag}_ma__\"\n preds = getattr(self, attr_name)\n return preds\n\n def to_dict(self):\n \"\"\"\n Get all metrics, i.e moving aux metrics, moving evaluation metrics, and static aux metrics.\n Note that moving attributes will be an averaged value of all previous batches. Metrics are\n set to np.nan if it's never updated before\n\n Examples\n --------\n >>> from ablator.modules.metrics.main import TrainMetrics\n >>> train_metrics = TrainMetrics(\n ... batch_limit=30,\n ... memory_limit=None,\n ... evaluation_functions={\"mean\": lambda preds: np.mean(preds)},\n ... moving_average_limit=100,\n ... tags=[\"train\", \"val\"],\n ... static_aux_metrics={\"lr\": 0.75},\n ... moving_aux_metrics={\"loss\"},\n ... 
)\n >>> train_metrics.append_batch(preds=np.array([100]), tag=\"val\")\n >>> train_metrics.evaluate(\"val\", reset=False, update=True)\n >>> train_metrics.to_dict()\n {\n 'train_mean': np.nan, 'train_loss': np.nan,\n 'val_mean': 100.0, 'val_loss': np.nan,\n 'lr': 0.75\n }\n >>> train_metrics.append_batch(preds=np.array([0] * 3), tag=\"val\")\n >>> train_metrics.evaluate(\"val\", reset=True, update=True)\n >>> train_metrics.to_dict()\n {\n 'train_mean': np.nan, 'train_loss': np.nan,\n 'val_mean': 62.5, 'val_loss': np.nan,\n 'lr': 0.75\n }\n \"\"\"\n attrs = self.__moving_aux_attributes__ + self.__moving_eval_attributes__\n ma_attrs = {k: self._get_ma(k).value for k in attrs}\n static_attrs = {k: getattr(self, k) for k in self.__static_aux_attributes__}\n return {**ma_attrs, **static_attrs}\n","repo_name":"fostiropoulos/ablator","sub_path":"ablator/modules/metrics/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18609,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"67"} +{"seq_id":"34017790038","text":"import os\nfrom datetime import datetime\nfrom typing import Dict, List\n\nimport psutil\nimport slack\n\n\ndef running_processes() -> Dict[int, List[str]]:\n processes: Dict[int, List[str]] = dict()\n\n for process in psutil.process_iter(['pid', 'cmdline']):\n processes[process.info['pid']] = process.info['cmdline']\n\n return processes\n\n\ndef format_processes(processes: Dict[int, List[str]]) -> List[str]:\n lines = list()\n for pid, cmd in processes.items():\n lines.append(\"{}\\t{}\\n\".format(pid, cmd))\n\n return lines\n\n\ndef persist_processes(processes: List[str]) -> str:\n filename = \"{}.txt\".format(datetime.today())\n\n with open(file=filename, mode=\"a+\") as file:\n file.writelines(processes)\n file.close()\n\n return filename\n\n\ndef send_to_slack(processes: str):\n client = slack.WebClient(token=os.environ[\"SLACK_API_TOKEN\"])\n client.files_upload(channels=os.environ[\"SLACK_CHANNEL\"], file=processes)\n\n\nif __name__ == \"__main__\":\n send_to_slack(persist_processes(format_processes(running_processes())))\n","repo_name":"lucasvalenteds/training-devops","sub_path":"python/running-programs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72713840533","text":"import datetime\nimport glob\nimport os\n\nfrom django.http import FileResponse\nfrom django.http.response import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, parser_classes\nfrom rest_framework.parsers import FileUploadParser\nfrom rest_framework.response import Response\n\nfrom requesthandler.models import MapMetaData, MapFile\nfrom requesthandler.serializers import MapMetaDataSerializer\n\nEXTENSION = \".png\" # TODO CHANGE\n\n\n@api_view(['GET', 'DELETE'])\ndef meta_list(request):\n if request.method == 'GET':\n metadata = MapMetaData.objects.using('metadata').all()\n title = request.GET.get('title', None)\n if title is not None:\n metadata = metadata.filter(title__icontains=title)\n metadata_serializer = MapMetaDataSerializer(metadata, many=True)\n return JsonResponse(metadata_serializer.data, safe=False)\n elif request.method == 'DELETE':\n files = glob.glob('files/*') # Select all files in folder\n for f in files:\n os.remove(f)\n MapFile.objects.using('filestorage').all().delete()\n count = MapMetaData.objects.using('metadata').all().delete()\n if count[0] > 0:\n 
return JsonResponse({'message': '{} map(s) were deleted successfully!'.format(count[0])},\n status=status.HTTP_204_NO_CONTENT)\n else:\n return JsonResponse({'message': 'There are already no maps.'},\n status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'DELETE'])\ndef meta_by_pk(request, pk):\n try:\n meta = MapMetaData.objects.using('metadata').get(pk=pk)\n return single_entity_access(request.method, meta)\n except:\n return JsonResponse({'message': 'Map does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['GET', 'DELETE'])\ndef meta_by_title(request, title):\n try:\n meta = MapMetaData.objects.using('metadata').get(title=title)\n return single_entity_access(request.method, meta)\n except:\n return JsonResponse({'message': 'Map does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\ndef single_entity_access(requestMethod, meta):\n if requestMethod == 'GET':\n metadata_serializer = MapMetaDataSerializer(meta)\n return JsonResponse(metadata_serializer.data)\n elif requestMethod == 'DELETE':\n mapFileInstance = MapFile.objects.using('filestorage').get(pk=meta.mapStorageID)\n filepath = mapFileInstance.file.name\n os.remove(filepath)\n mapFileInstance.delete()\n meta.delete()\n return JsonResponse({'message': 'Map was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['POST'])\n@parser_classes([FileUploadParser])\ndef upload(request, filename):\n f = request.data['file']\n newFile = MapFile.create(f)\n newFile.save(using='filestorage')\n print(f\"new file with id:{newFile.id} and name:{newFile.file.name} saved.\")\n\n filename = str(filename).split('.')[0]\n duration = request.query_params.get('duration')\n duration = datetime.datetime.strptime(duration, '%M:%S').time()\n difficulty = request.query_params.get('difficulty')\n newMeta = MapMetaData.create(filename, duration, difficulty, newFile.id)\n newMeta.save(using='metadata')\n\n return Response({'message': 'Map uploaded'}, status=status.HTTP_201_CREATED)\n\n\n@api_view(['GET'])\ndef download(request, pk):\n try:\n meta = MapMetaData.objects.using('metadata').get(mapStorageID=pk)\n meta.downloads += 1\n meta.save()\n mapFileInstance = MapFile.objects.using('filestorage').get(pk=pk)\n fileName = mapFileInstance.file\n return FileResponse(open(str(fileName), 'rb'))\n except:\n return JsonResponse({'message': 'Map does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\n\n@api_view(['POST'])\ndef update_by_pk(request, pk):\n try:\n meta = MapMetaData.objects.using('metadata').get(pk=pk)\n success = request.query_params.get('success')\n success = str(success).lower() in ['true', '1', 't', 'yes']\n return update_meta(meta, success)\n except:\n return JsonResponse({'message': 'Map does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['POST'])\ndef update_by_title(request, title):\n try:\n meta = MapMetaData.objects.using('metadata').get(title=title)\n success = request.query_params.get('success')\n success = str(success).lower() in ['true', '1', 't', 'yes']\n return update_meta(meta, success)\n except:\n return JsonResponse({'message': 'Map does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\ndef update_meta(metadata, success: bool):\n try:\n if success:\n metadata.successPlays += 1\n metadata.plays += 1\n metadata.save(using='metadata')\n metadata_serializer = MapMetaDataSerializer(metadata)\n return JsonResponse(metadata_serializer.data)\n except:\n return JsonResponse({'message': 'Update data failed'}, 
status=status.HTTP_404_NOT_FOUND)","repo_name":"supersloy/StorageServer","sub_path":"requesthandler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37208479881","text":"from django.shortcuts import *\nfrom django.http import *\nfrom .models import *\n# Create your views here.\n\n\ndef login(request):\n\n return render(request, \"proj.html\")\n\n\ndef page1(request,redirect_field_name=None):\n\n student = Students.objects.all()\n return render(request, 'Managestudent.html', {'student': student})\n\n\n\ndef page2(request):\n\n course = Course.objects.all()\n return render(request, 'Managecourse.html', {'course': course})\n\n\ndef page3(request):\n\n staff = Staff.objects.all()\n return render(request, 'Managestaff.html', {'staff': staff})\n\n\ndef page4(request):\n\n student = Students.objects.all()\n\n if request.method == 'POST':\n\n Name = request.POST['namee']\n Id = request.POST['id']\n mobile = request.POST['mobo']\n email = request.POST['email']\n birthday = request.POST['birthday']\n gender = request.POST['gender']\n address = request.POST['address']\n \n new_student = Students.objects.create(FullName = Name, ID = Id , MobNumber = mobile , Email = email , Birthday = birthday , Gender = gender, Address= address)\n return redirect('index1')\n\n return render(request, 'addstudent.html',{'student' : student})\n\ndef page5(request):\n\n staff = Staff.objects.all()\n\n if request.method == 'POST':\n\n name = request.POST['namee']\n Id = request.POST['id']\n mobile = request.POST['mobo']\n email = request.POST['email']\n birthday = request.POST['birthday']\n gender = request.POST['gender']\n address = request.POST['address']\n\n new_staff = Staff.objects.create(\n FullName=name, ID=Id, MobNumber=mobile, Email=email, Birthday=birthday, Gender=gender, Address=address)\n return redirect('index5')\n\n return render(request, 'addstaff.html',{'staff' : staff})\n\n\ndef page6(request):\n course = Course.objects.all()\n\n if request.method == 'POST':\n\n Name = request.POST['NAME']\n Id = request.POST['ID']\n instructor = request.POST['INS']\n NumOfHou = request.POST['NOH']\n ReqCourse = request.POST['ARC']\n code = request.POST['code']\n\n new_course = Course.objects.create(\n CourseName=Name, CourseID=Id, CourseInstructor=instructor, CreditHours=NumOfHou, RequiredCourse=ReqCourse, CourseCode=code)\n return redirect('index6')\n return render(request, 'addcourse.html', {'course': course})\n\n\ndef page0(request):\n tot_students = Students.objects.count()\n tot_staff = Staff.objects.count()\n return render(request, 'home.html',{'total_students': tot_students,'total_staff' : tot_staff})","repo_name":"EbGazar/University-Management-System","sub_path":"unisite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1666373307","text":"import os\nimport glob\nfrom operator import itemgetter\nimport numpy as np\nimport json\nimport re\nimport random\n\nclass MakeImageHMDB51():\n def __init__(self, root, annotation_path, fold, train):\n self.root = root\n self.annotation_path = annotation_path\n self.fold = fold\n self.train = train\n self.TRAIN_TAG = 1\n self.TEST_TAG = 2\n \n def __select_fold__(self, video_list):\n target_tag = self.TRAIN_TAG if self.train else self.TEST_TAG\n split_pattern_name = \"*test_split{}.txt\".format(self.fold)\n 
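        # (Editor's note, hedged: HMDB51's official split files are named like
        # "brush_hair_test_split1.txt", one file per action class and fold. Each line
        # reads "<video_name>.avi <tag>" with tag 1 = train, 2 = test, 0 = unused;
        # that is what TRAIN_TAG/TEST_TAG select on, and why video_filename[:-4]
        # strips the ".avi" suffix before the names are matched against directories.)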
split_pattern_path = os.path.join(self.annotation_path, split_pattern_name)\n annotation_paths = glob.glob(split_pattern_path)\n # for a in annotation_paths:\n # print(a)\n selected_files = []\n for filepath in annotation_paths:\n with open(filepath) as fid:\n lines = fid.readlines()\n for line in lines:\n video_filename, tag_string = line.split()\n tag = int(tag_string)\n if tag == target_tag:\n selected_files.append(video_filename[:-4])\n selected_files = set(selected_files)\n\n # print(selected_files, len(selected_files))\n indices = []\n for video_index, video_path in enumerate(video_list):\n # print(os.path.basename(video_path))\n if os.path.basename(video_path) in selected_files:\n indices.append(video_index)\n\n return indices\n\n def __call__(self):\n # classes = sorted(os.listdir(self.root))\n classes = [d.name for d in os.scandir(self.root) if d.is_dir()]\n classes.sort()\n class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}\n # print(\"classes:\",classes)\n # print(\"class_to_idx:\",class_to_idx)\n paths = []\n labels = []\n for c in classes:\n class_path = os.path.join(self.root, c)\n for v in os.scandir(class_path):\n if v.is_dir():\n video_path = os.path.join(class_path,v.name)\n paths.append(video_path)\n labels.append(class_to_idx[c])\n\n # print(paths, len(paths))\n indices = self.__select_fold__(paths)\n paths = list(itemgetter(*indices)(paths))\n labels = list(itemgetter(*indices)(labels))\n # print(paths, len(paths))\n # print(labels, len(labels))\n return paths, labels\n\nclass MakeHockeyDataset():\n def __init__(self, root, train, cv_split_annotation_path, path_annotations=None):\n self.root = root\n self.train = train\n self.path_annotations = path_annotations\n self.cv_split_annotation_path = cv_split_annotation_path\n # self.split = split\n # self.F_TAG = \"Fight\"\n # self.NF_TAG = \"NonFight\"\n self.classes = [\"nonviolence\",\"violence\"]\n \n def split(self):\n split = \"training\" if self.train else \"validation\"\n return split\n \n def load_annotation_data(self):\n with open(self.cv_split_annotation_path, 'r') as data_file:\n return json.load(data_file)\n \n def get_video_names_and_labels(self, data, split):\n video_names = []\n video_labels = []\n annotations = []\n\n for key, val in data['database'].items():\n if val['subset'] == split:\n label = val['annotations']['label']\n cl = 'violence' if label=='fi' else 'nonviolence'\n\n label = 0 if label=='no' else 1\n v_name = re.findall(r'\\d+', key)[0]\n folder = os.path.join(self.root, cl, v_name)\n assert os.path.isdir(folder), \"Folder:{} does not exist!!!\".format(folder)\n video_names.append(folder)\n video_labels.append(label)\n if self.path_annotations:\n ann_file = os.path.join(self.path_annotations, cl, v_name+'.json')\n assert os.path.isfile(ann_file), \"Annotation file:{} does not exist!!!\".format(ann_file)\n annotations.append(ann_file)\n\n return video_names, video_labels, annotations\n \n def __call__(self):\n data = self.load_annotation_data()\n split = self.split()\n paths, labels, annotations = self.get_video_names_and_labels(data, split)\n return paths, labels, annotations\n\nclass MakeRLVDDataset():\n def __init__(self, root, train, cv_split_annotation_path, path_annotations=None):\n self.root = root\n self.train = train\n self.path_annotations = path_annotations\n self.cv_split_annotation_path = cv_split_annotation_path\n # self.split = split\n # self.F_TAG = \"Fight\"\n # self.NF_TAG = \"NonFight\"\n self.classes = [\"NonViolence\",\"Violence\"]\n \n def split(self):\n 
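        # (Editor's sketch, assumptions flagged: the cross-validation split JSON that
        # load_annotation_data() reads is assumed to follow the ActivityNet-style
        # layout that get_video_names_and_labels() indexes into, e.g.
        #     {"database": {"<video_name>": {"subset": "training",
        #                                    "annotations": {"label": "fi"}}}}
        # where "subset" is compared against the "training"/"validation" string this
        # method returns, and label "fi" / "no" selects the violence / nonviolence
        # class folder.)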
split = \"training\" if self.train else \"validation\"\n return split\n \n def load_annotation_data(self):\n with open(self.cv_split_annotation_path, 'r') as data_file:\n return json.load(data_file)\n \n def get_video_names_and_labels(self, data, split):\n video_names = []\n video_labels = []\n annotations = []\n # num_frames = []\n\n for key, val in data['database'].items():\n if val['subset'] == split:\n label = val['annotations']['label']\n cl = 'Violence' if label=='fi' else 'NonViolence'\n\n label = 0 if label=='no' else 1\n # v_name = re.findall(r'\\d+', key)[0]\n v_name = key\n folder = os.path.join(self.root, cl, v_name)\n assert os.path.isdir(folder), \"Folder:{} does not exist!!!\".format(folder)\n video_names.append(folder)\n video_labels.append(label)\n n = os.listdir(folder)\n # n = [img for img in n if '.jpg' in img]\n # num_frames.append(len(n))\n if self.path_annotations:\n ann_file = os.path.join(self.path_annotations, cl, v_name+'.json')\n assert os.path.isfile(ann_file), \"Annotation file:{} does not exist!!!\".format(ann_file)\n annotations.append(ann_file)\n\n return video_names, video_labels, annotations\n \n def __call__(self):\n data = self.load_annotation_data()\n split = self.split()\n paths, labels, annotations = self.get_video_names_and_labels(data, split)\n return paths, labels, annotations\n\n\n \nCATEGORY_ALL = 2\nCATEGORY_POS = 1\nCATEGORY_NEG = 0\n\nclass MakeRWF2000():\n def __init__(self, \n root,\n train,\n category=CATEGORY_ALL,\n path_annotations=None, \n path_feat_annotations=None,\n path_person_detections=None,\n shuffle=False):\n self.root = root\n self.train = train\n self.path_annotations = path_annotations\n self.path_feat_annotations = path_feat_annotations\n # self.F_TAG = \"Fight\"\n # self.NF_TAG = \"NonFight\"\n self.classes = [\"NonFight\", \"Fight\"]\n self.category = category\n self.shuffle = shuffle\n \n def classes(self):\n return self.classes\n \n def split(self):\n split = \"train\" if self.train else \"val\"\n return split\n \n def all_categories(self, split):\n paths = []\n labels = []\n annotations = []\n feat_annotations = []\n for idx, cl in enumerate(self.classes):\n for video_sample in os.scandir(os.path.join(self.root, split, cl)):\n if video_sample.is_dir():\n paths.append(os.path.join(self.root, split, cl, video_sample))\n labels.append(idx)\n if self.path_annotations:\n assert os.path.exists(os.path.join(self.path_annotations, split, cl, video_sample.name +'.json')), \"Annotation does not exist!!!\"\n annotations.append(os.path.join(self.path_annotations, split, cl, video_sample.name +'.json'))\n if self.path_feat_annotations:\n assert os.path.exists(os.path.join(self.path_feat_annotations, split, cl, video_sample.name +'.txt')), \"Feature annotation does not exist!!!\"\n feat_annotations.append(os.path.join(self.path_feat_annotations, split, cl, video_sample.name +'.txt'))\n \n return paths, labels, annotations\n \n def positive_category(self, split):\n paths = []\n labels = []\n annotations = []\n feat_annotations = []\n label = 1\n label_name = self.classes[label]\n for video_sample in os.scandir(os.path.join(self.root, split, label_name)):\n if video_sample.is_dir():\n paths.append(os.path.join(self.root, split, label_name, video_sample))\n labels.append(label)\n if self.path_annotations:\n assert os.path.exists(os.path.join(self.path_annotations, split, label_name, video_sample.name +'.json')), \"Annotation does not exist!!!\"\n annotations.append(os.path.join(self.path_annotations, split, label_name, video_sample.name 
+'.json'))\n if self.path_feat_annotations:\n assert os.path.exists(os.path.join(self.path_feat_annotations, split, label_name, video_sample.name +'.txt')), \"Feature annotation does not exist!!!\"\n feat_annotations.append(os.path.join(self.path_feat_annotations, split, label_name, video_sample.name +'.txt'))\n \n return paths, labels, annotations\n \n def negative_category(self, split):\n paths = []\n labels = []\n annotations = []\n feat_annotations = []\n label = 0\n label_name = self.classes[label]\n for video_sample in os.scandir(os.path.join(self.root, split, label_name)):\n if video_sample.is_dir():\n paths.append(os.path.join(self.root, split, label_name, video_sample))\n labels.append(label)\n if self.path_annotations:\n assert os.path.exists(os.path.join(self.path_annotations, split, label_name, video_sample.name +'.json')), \"Annotation does not exist!!!\"\n annotations.append(os.path.join(self.path_annotations, split, label_name, video_sample.name +'.json'))\n if self.path_feat_annotations:\n assert os.path.exists(os.path.join(self.path_feat_annotations, split, label_name, video_sample.name +'.txt')), \"Feature annotation does not exist!!!\"\n feat_annotations.append(os.path.join(self.path_feat_annotations, split, label_name, video_sample.name +'.txt'))\n \n return paths, labels, annotations\n \n def __call__(self):\n split = self.split()\n if self.category == CATEGORY_ALL:\n paths, labels, annotations = self.all_categories(split)\n elif self.category == CATEGORY_POS:\n paths, labels, annotations = self.positive_category(split)\n elif self.category == CATEGORY_NEG:\n paths, labels, annotations = self.negative_category(split)\n \n if self.shuffle:\n c = list(zip(paths, labels, annotations))\n random.shuffle(c)\n paths, labels, annotations = zip(*c)\n \n return paths, labels, annotations\n \n\nfrom collections import Counter\nimport random\ndef JSON_2_tube(json_file):\n \"\"\"\n \"\"\"\n with open(json_file, \"r\") as read_file:\n decodedArray = json.load(read_file)\n # print(\"decoded Array:\", type(decodedArray), len(decodedArray))\n \n for f in decodedArray:\n for i, box in enumerate(f['boxes']):\n f['boxes'][i] = np.asarray(f['boxes'][i])\n # print(decodedArray[0])\n decodedArray = sorted(decodedArray, key = lambda i: i['id'])\n return decodedArray\n\ndef _avg_num_tubes(annotations):\n video_num_tubes=[]\n num_tubes=[]\n tube_lengths = []\n for ann in annotations:\n tubes = JSON_2_tube(ann)\n video_num_tubes.append((ann, len(tubes)))\n num_tubes.append(len(tubes))\n for tube in tubes:\n # print('tube[len]:', tube['len'], len(tube['boxes']), len(tube['foundAt']))\n l = 16 if tube['len']>16 else tube['len']\n tube_lengths.append(tube['len'])\n \n def Average(lst):\n return sum(lst) / len(lst)\n \n print('Avg num_tubes: ', Average(num_tubes))\n print('Avg len_tubes: ', Average(tube_lengths))\n\ndef _get_num_tubes(annotations, make_func):\n video_num_tubes=[]\n num_tubes=[]\n for ann in annotations:\n tubes = JSON_2_tube(ann)\n video_num_tubes.append((ann, len(tubes)))\n num_tubes.append(len(tubes))\n with open('hockey_num_tubes_{}.txt'.format('train' if make_func.train else 'val'), 'w') as filehandle:\n filehandle.writelines(\"{},{}\\n\".format(t[0], t[1]) for t in video_num_tubes)\n \n \n \nif __name__==\"__main__\":\n make_func = MakeRWF2000(root='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/RWF-2000/frames', \n train=True,\n path_annotations='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/ActionTubes/final/rwf',\n category=2)\n paths, labels, annotations = 
make_func()\n print(\"paths: \", len(paths))\n print(\"labels: \",len(labels))\n print(\"annotations: \",len(annotations))\n\n _avg_num_tubes(annotations)\n\n # print(\"no tubes in: \")\n # without_tube=[]\n # for ann in annotations:\n # tubes = JSON_2_tube(ann)\n # if len(tubes)==0:\n # # print(len(tubes))\n # without_tube.append(ann)\n \n # with open('3without_tube_{}.txt'.format('train' if make_func.train else 'val'), 'w') as filehandle:\n # filehandle.writelines(\"%s\\n\" % t for t in without_tube)\n\n # tubes = JSON_2_tube('/media/david/datos/Violence DATA/ActionTubes/RWF-2000/train/Fight/C8wt47cphU8_0.json')\n # print(\"tubes: \",len(tubes))\n\n \n ###################################################################################################################################\n # make_func = MakeHockeyDataset(root='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/HockeyFightsDATASET/frames', \n # train=False,\n # cv_split_annotation_path='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/VioNetDB-splits/hockey_jpg1.json',\n # path_annotations='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/ActionTubes/final/hockey')\n # paths, labels, annotations = make_func()\n # print(\"paths: \", len(paths))\n # print(\"labels: \", len(labels))\n # print(\"annotations: \", len(annotations))\n\n # _avg_num_tubes(annotations)\n # _get_num_tubes(annotations, make_func)\n ###################################################################################################################################\n\n # m = MakeUCFCrime2Local(root='/Volumes/TOSHIBA EXT/DATASET/AnomalyCRIMEALL/UCFCrime2Local/frames',\n # annotation_path='/Volumes/TOSHIBA EXT/DATASET/AnomalyCRIMEALL/UCFCrime2Local/readme',\n # bbox_path='/Volumes/TOSHIBA EXT/DATASET/AnomalyCRIMEALL/UCFCrime2Local/readme/Txt annotations',\n # train=False)\n # paths, labels, annotations, intervals = m()\n # idx=22\n # print(paths[idx])\n # print(labels[idx])\n # print(annotations[idx][0:10])\n # print(intervals[idx])\n\n # m = MakeUCFCrime2LocalClips(root_anomaly='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/UCFCrime2Local/UCFCrime2LocalClips/anomaly',\n # root_normal='/Volumes/TOSHIBA EXT/DATASET/AnomalyCRIMEALL/UCFCrime2Local/frames',\n # path_annotations='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/CrimeViolence2LocalDATASET/Txt annotations-longVideos')\n # paths, labels, annotations = m()\n # # idx= random.randint(0, len(paths)-1)\n # idx=65\n # print(idx)\n # print(Counter(labels))\n # print(paths[idx])\n # print(labels[idx])\n # print(annotations[idx])\n\n # anns = m.ground_truth_boxes(paths[idx],annotations[idx])\n # m.plot(paths[idx], anns)\n\n ###################################################################################################################################\n # make_func = MakeRLVDDataset(root='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/RealLifeViolenceDataset/frames', \n # train=False,\n # cv_split_annotation_path='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/VioNetDB-splits/RealLifeViolenceDataset1.json',\n # path_annotations='/Users/davidchoqueluqueroman/Documents/DATASETS_Local/ActionTubes/RealLifeViolenceDataset')\n # paths, labels, annotations, num_frames = make_func()\n # print(\"paths: \", len(paths))\n # print(\"labels: \", len(labels))\n # print(\"annotations: \", len(annotations))\n # print(\"num_frames: \", len(num_frames))\n # _avg_num_tubes(annotations)\n\n # print(paths[33:40])\n # print(labels[33:40])\n # print(annotations[33:40])\n # 
print(num_frames[33:40])\n","repo_name":"davidGCR/WSVDL2021","sub_path":"datasets/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":17312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"2859358717","text":"import matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\n\ndef showImages(images, imgs_row, imgs_col, col_titles=None, cmap=None):\n fig, axes = plt.subplots(imgs_row, imgs_col, figsize=(35, 35),\n subplot_kw={'xticks': [], 'yticks': []})\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n\n if imgs_row == 1 and imgs_col == 1:\n axes.imshow(images[0], cmap=cmap)\n else:\n i = 0\n for ax, image in zip(axes.flat, images):\n if i < imgs_col and col_titles is not None:\n ax.set_title(col_titles[i], fontsize=50)\n ax.imshow(image, cmap=cmap)\n i += 1\n\n plt.show()\n plt.close()\n\ndef color_convert(image, cspace):\n if cspace != 'RGB':\n if cspace == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n feature_image[:,:,0] = np.array(feature_image)[:,:,0] / 360.\n elif cspace == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif cspace == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n feature_image[:,:,0] = np.array(feature_image)[:,:,0] / 360.\n elif cspace == 'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n else: feature_image = np.copy(image)\n return feature_image\n\ndef intersection(a,b):\n x = max(a[0][0], b[0][0])\n y = max(a[0][1], b[0][1])\n w = min(a[1][0], b[1][0]) - x\n h = min(a[1][1], b[1][1]) - y\n if w<=0 or h<=0: return None\n return ((x, y), (x+w, y+h))\n\ndef area(bbox):\n w = (bbox[1][0] - bbox[0][0])\n h = (bbox[1][1] - bbox[0][1])\n area_bbox = w * h\n return area_bbox\n","repo_name":"J-Rojas/vehichle-detection-opencv","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22177375748","text":"# import flask dependencies\r\nfrom flask import Flask, request, make_response, jsonify\r\n\r\n# initialize the flask app\r\napp = Flask(__name__)\r\n\r\n# default route\r\n@app.route('/')\r\ndef index():\r\n return 'Hello World!'\r\n\r\n# function for responses\r\ndef results():\r\n # build a request object\r\n req = request.get_json(force=True)\r\n\r\n # fetch action from json\r\n if (req.get('queryResult').get('action') == 'school'):\r\n return {'fulfillmentText': 'I did my high school from MLM mamallan matric higher secondary school'}\r\n elif (req.get('queryResult').get('action') == 'school-follow'):\r\n return {'fulfillmentText': 'I secured 94 Percent in my high school exams.'}\r\n elif (req.get('queryResult').get('action') == 'college'):\r\n return {'fulfillmentText': 'The name of the college is SSN'}\r\n elif (req.get('queryResult').get('action') == 'five'):\r\n return {'fulfillmentText': 'I see myself in responsible position contributing significantly to growth of the industry and my person self'}\r\n \r\n\r\n# create a route for webhook\r\n@app.route('/webhook', methods=['GET', 'POST'])\r\ndef webhook():\r\n # return response\r\n return make_response(jsonify(results()))\r\n\r\n# run the app\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"karthikbala07/webhook_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
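A short editor's sketch motivated by the common.py helpers in the entry above: the
intersection() and area() functions there compose directly into an
intersection-over-union score for the ((x1, y1), (x2, y2)) box format they assume.
The iou() name and the sample boxes are illustrative and not from the original
repository.

def iou(box_a, box_b):
    # Overlapping region of the two boxes, or None when they are disjoint.
    inter = intersection(box_a, box_b)
    if inter is None:
        return 0.0
    inter_area = area(inter)
    # Union area = sum of both areas minus the doubly counted overlap.
    return inter_area / (area(box_a) + area(box_b) - inter_area)

# Two 10x10 boxes offset by 5 pixels overlap in a 5x5 patch:
# iou(((0, 0), (10, 10)), ((5, 5), (15, 15))) -> 25 / 175, roughly 0.143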
+{"seq_id":"42920283244","text":"\"\"\"\nThis module contains functionality for the cli of this repository.\n\"\"\"\n\nfrom argparse import ArgumentParser\n\n\ndef parse():\n parser = ArgumentParser(\n description=\"draw-some-gantts. \"\n \"ADraws some gantt charts from given json data.\",\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n type=str,\n help=\"Path to output (svg) file that will be created / show the gantt chart.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--file\",\n required=True,\n type=str,\n help=\"Path to source (json) file that will be used to create the gantt chart.\",\n )\n\n args, _ = parser.parse_known_args()\n\n return args\n","repo_name":"frank690/draw-some-gantts","sub_path":"draw_some_gantts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38286919572","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: kaushik\n\"\"\"\n\n\"\"\"\nsteps for LSA\n1. TFIDF vectorizer or any other techniques such as word2vec or countvectorizer\n2. LSA\n\nor\n\n1. Count Vectorier\n2. Doc-term matrix\n3. LSA \n\"\"\"\n\n#importing required packages\nimport pandas as pd\nimport numpy as np\nimport warnings\ndef ignore_warn(*args, **kwargs):\n pass\nwarnings.warn = ignore_warn \nimport glob\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom pylab import *\n\n\n#importing all files into a dataframe\npath = \"C:\\\\git\\\\customer-conversation----LDA\\\\files\"\nallfiles = glob.glob(path + \"/*.tsv\")\nframe = pd.DataFrame()\nlist_ = []\nfor file in allfiles:\n dframe = pd.read_csv(file, sep='\\t', header=None, index_col=None, names=['time','person1','person2','conversation'], error_bad_lines = False)\n list_.append(dframe)\nframe = pd.concat(list_)\n\ndata = pd.DataFrame(frame.iloc[:,3]).reset_index(drop=True)\n\n#we know every 4 lines is one conversatio, so lets combine wvery 4 lines\nx=0\ny=0\nnewdf = pd.DataFrame(np.nan, index = range(0,int((data.shape[0]/4))), columns = ['conversation'])\nfor i in range(0, int((data.shape[0]/4))):\n newdf.conversation[x] = data.conversation[y] + \" \" +data.conversation[y+1] + \" \" + data.conversation[y+2] + \" \" + data.conversation[y+3]\n x += 1\n y += 4\n \n\n#the data is in proper format, let's start preprocessing it\n#remove punctuations\ndef clean(text):\n text = text.lower()\n text = re.sub(\"\\;|\\=|\\%|\\^|\\_|\\*|\\'|\\\"|\\?|\\.|\\,|\\:|\\<|\\>|\\*|\\@|\\#|\\&|\\[|\\]\",\" \",text)\n text = re.sub(\"www\",\" \",text)\n text = re.sub(\"com\",\" \",text)\n text = re.sub(\"thanks\",\" \", text)\n text = re.sub(\" \", \" \", text)\n text = ' '.join(w for w in text.split() if len(w)>1)\n text = text.strip()\n return text\nnewdf['conversation'] = newdf['conversation'].apply(lambda x: clean(x))\n\n#remove stopwords and lemmatizing the text\nstopw = set(stopwords.words('english'))\nlemma = WordNetLemmatizer()\ndef preprocess(doc):\n doc = ' '.join([w for w in doc.split() if w not in stopw])\n doc = ' '.join(lemma.lemmatize(w) for w in doc.split())\n return doc\n\n# to get in dataframe format\nnewdf['conversation'] = newdf['conversation'].apply(lambda x:preprocess(x))\n#to get it in a list format\nnewdf_list = [preprocess(doc) for doc in newdf['conversation']]\n \n\n\n#calculating tf-idf vectoriser\nvectorizer = 
TfidfVectorizer(max_df=0.8, max_features=4000,\n min_df=2, stop_words='english',\n use_idf=True)\n\n# Build the tfidf vectorizer from the training data (\"fit\"), and apply it \nx_train_tfidf = vectorizer.fit_transform(newdf_list)\nprint(\" Actual number of tfidf features: %d\" % x_train_tfidf.get_shape()[1])\n\n# Get the words that correspond to each of the features.\nfeat_names = vectorizer.get_feature_names()\n\n\n\n\n\nlsa_model = TruncatedSVD(n_components=5)\nlsa_topic_matrix = lsa_model.fit_transform(x_train_tfidf)\n\n\nfor compNum in range(0, 10):\n\n comp = lsa_model.components_[compNum]\n \n # Sort the weights in the first component, and get the indeces\n indeces = np.argsort(comp).tolist()\n \n # Reverse the indeces, so we have the largest weights first.\n indeces.reverse()\n \n # Grab the top 10 terms which have the highest weight in this component. \n terms = [feat_names[weightIndex] for weightIndex in indeces[0:10]] \n weights = [comp[weightIndex] for weightIndex in indeces[0:10]] \n \n # Display these terms and their weights as a horizontal bar graph. \n # The horizontal bar graph displays the first item on the bottom; reverse\n # the order of the terms so the biggest one is on top.\n terms.reverse()\n weights.reverse()\n positions = arange(10) + .5 # the bar centers on the y axis\n \n figure(compNum)\n barh(positions, weights, align='center')\n yticks(positions, terms)\n xlabel('Weight')\n title('Strongest terms for component %d' % (compNum))\n grid(True)\n show()\n","repo_name":"vkaushik189/customer-conversation-LSA","sub_path":"customer_conversation_lsa.py","file_name":"customer_conversation_lsa.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71494492693","text":"import random\n\ndef comb(n,x):\n pay = 1\n payda = 1\n for i in range(1,n+1):\n pay*=i\n for i in range(1,n-x+1):\n payda*=i\n for i in range(1,x+1):\n payda*=i\n return pay/payda\n\ndef expvalue(sample):\n return sum(sample)/len(sample)\n\ndef variance(sample):\n return sum([(i-expvalue(sample))**2 for i in sample])/(len(sample))\n\ndef variancesamp(sample):\n return sum([(i-expvalue(sample))**2 for i in sample])/(len(sample)-1)\n\ndef makexbar(sample):\n return [expvalue(i) for i in sample]\n\ndef sampvariance(sample):\n for i in range(len(sample)):\n print(sample[i],end=\" \")\n sample[i] = round(variancesamp(sample[i]),4)\n print(sample[i])\n prb = 1/len(sample)\n result = 0\n for i in sample:\n result += i*prb\n return result\n\ndef sampwr(sample,n):\n samp = []\n while len(samp) < len(sample)**n:\n temp = random.choices(sample,k=n)\n if not temp in samp:\n samp.append(temp)\n else:\n temp.clear()\n return samp\n\ndef sampwor(sample,n):\n tsamp = list(sample)\n samp = []\n\n times = int(comb(len(sample),n))\n print(times,\"possible outcome :\")\n i = 0\n b = 0\n j = []\n kontrol = True\n\n while i < times:\n for x in range(n):\n temp = random.choice(tsamp)\n j.append(temp)\n del tsamp[tsamp.index(temp)]\n #print(tsamp)\n for x in samp:\n for c in x:\n if c in j:\n b+=1\n if b == n:\n b = 0\n kontrol = False\n break\n b = 0\n if not kontrol:\n kontrol = True\n j = []\n tsamp = list(sample)\n continue\n else:\n samp.append(j[::])\n tsamp = list(sample)\n j = []\n b = 0\n i+=1\n #print(*samp,sep=\"\\n\")\n return samp\n\n\nn = int(input(\"örneklem boyutunu giriniz : \"))\npopulation = [2,0,-1,4]\n\nsample = 
sampwr(population,n)\nprint(*sample,sep=\"\\n\")\n\nprint(\"******************\")\nprint(\"XBARS\")\nfor i in makexbar(sample[::]):\n print(round(i,4))\nprint(\"******************\")\nprint(\"SBARS\")\n\nprint(\"E(s**2) =\",round(sampvariance(sample[::]),4))\nprint(\"e(xbar) =\",round(expvalue(makexbar(sample[::])),4))\nprint(\"e(xbar**2) =\",round(expvalue([i**2 for i in makexbar(sample[::])]),4))\nprint(\"V(xbar) =\",round(variance(makexbar(sample[::])),4))\nprint(\"variance pop\",variance(population))\n\n","repo_name":"furkanergunes/statistical-tools","sub_path":"samplingdist.py","file_name":"samplingdist.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41259684376","text":"import pandas as pd\r\nimport numpy as np\r\nimport datetime as dt\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nimport streamlit as st\r\nimport seaborn as sns\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import silhouette_samples, silhouette_score\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\nst.set_page_config(layout='wide')\r\n\r\n#ccreate columns to center pic\r\ncol1, col2, col3 = st.columns([2,5,2])\r\ncol2.image('online_retail.jpg')\r\n\r\n# Create a sidebar\r\nst.sidebar.title(\"Customer Segmentation\")\r\n@st.cache()\r\ndef read_data(filename):\r\n df1 = pd.read_csv(filename)\r\n return df1\r\n\r\ndf = pd.read_csv(r\"C:\\Users\\marwa\\OneDrive\\Desktop\\Final Project Epsilon\\OnlineRetail.csv\",encoding= 'unicode_escape')\r\n\r\nst.header(\"Data ~\")\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nst.write(df)\r\n\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\n\r\n\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nst.markdown(\"
\",unsafe_allow_html=True)\r\nrad = st.sidebar.radio(' ',['Introduction', \"Data Exploration\", 'RFM Analysis', \"K-Means Clustering\"\r\n , \"Cluster Calculator\"])\r\n\r\ndf.drop_duplicates(keep = 'first', inplace=True)\r\ndf['Description'] = df.Description.str.lower()\r\n\r\ndf = df.dropna()\r\n\r\ndf = df[df['Quantity'] > 0]\r\ndf = df[df['UnitPrice'] > 0.05]\r\n\r\ndf['TotalAmount'] = df['Quantity']*df['UnitPrice']\r\ndf['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'], errors='coerce')\r\nretail = df.copy()\r\n\r\n\r\n#introduction section\r\nif rad == 'Introduction':\r\n st.markdown(\"
Customer Segmentation using RFM and K-Means Clustering
\", unsafe_allow_html=True)\r\n st.markdown(\"
by Marwan Hafez
\", unsafe_allow_html=True)\r\n st.write(\"Customer segmentation is a key aspect of Marketing, allowing the businesses to better understand the behavior of their customers and targeting them more efficiently. Traditional methods include certain segmentation bases such as Geographical, Demographic, or Behavioral. One of the most famous methods is by using RFM which tracks customers' buying behavior including the recency, frequency and monetary value of their purchases. However, RFM scores are usually pre-determined and can take a long time to calculate and apply. Here is where Machine Learning comes in and makes things much easier. By using unsupervised ML models, we can automatically detect different clusters in our customers based on their transactions.\")\r\n \r\n\r\n\r\n\r\n\r\n#data exploration\r\nif rad == 'Data Exploration':\r\n st.markdown(\"
Data Exploration
\", unsafe_allow_html=True)\r\n \r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('Top_Selling_products.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('') \r\n\r\n st.image('Country_count.png')\r\n\r\n retail_month = retail[retail.InvoiceDate.dt.year==2010]\r\n monthly_gross = retail_month.groupby(retail_month.InvoiceDate.dt.month).TotalAmount.sum()\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('') \r\n\r\n st.image('Total_income_2010png.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n retail_month = retail[retail.InvoiceDate.dt.year==2011]\r\n monthly_gross = retail_month.groupby(retail_month.InvoiceDate.dt.month).TotalAmount.sum()\r\n\r\n st.image('income_2011.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('transactions.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n\r\n fig5 = plt.figure(figsize = (20,5))\r\n fig5.suptitle(\"Visualisation of outliers\",size=20)\r\n\r\n axes = fig5.add_subplot(1, 3, 1)\r\n sns.boxplot(data=df,y=\"UnitPrice\")\r\n\r\n axes = fig5.add_subplot(1, 3, 2)\r\n sns.boxplot(data=df,y=\"Quantity\")\r\n\r\n axes = fig5.add_subplot(1, 3, 3)\r\n sns.boxplot(data=df,y=\"TotalAmount\")\r\n\r\n st.pyplot(fig5)\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n#RFM\r\nif rad == 'RFM Analysis':\r\n st.markdown(\"
RFM Analysis
\", unsafe_allow_html=True)\r\n st.subheader(\"Now, we must calculate each element of the RFM. But before doing that, we must create a new dataframe that contains each unique customer id and then we can add the relevant values.\")\r\n\r\n # Recency\r\n st.write(\"To calculate Recency, we must first get the date of the most recent purchase.\")\r\n st.write(\"We will take our reference point as the max invoice date in our dataset which represents the most recent date, and our recency will be based on days.\")\r\n # Frequency\r\n st.write(\"For Frequency, we will count the distinct number of times that each customer has placed an order.\")\r\n\r\n ## Monetary\r\n st.write(\"And finally, for Monetary Value, we will sum up the Sales of each customer to find how much he has spent in total.\")\r\n\r\n today = dt.date(2011,12,10)\r\n #Create a new column called date which contains the date of invoice only\r\n df['date'] = df['InvoiceDate'].dt.date\r\n\r\n #group by customers and check last date of purshace\r\n recency_df = df.groupby(by='CustomerID', as_index=False)['date'].max()\r\n recency_df.columns = ['CustomerID','LastPurshaceDate']\r\n st.table(recency_df.head())\r\n\r\n #calculate recency\r\n recency_df['Recency'] = recency_df['LastPurshaceDate'].apply(lambda x: (today - x).days)\r\n st.table(recency_df.head())\r\n\r\n #drop LastPurchaseDate as we don't need it anymore\r\n recency_df.drop('LastPurshaceDate',axis=1,inplace=True)\r\n\r\n # drop duplicates\r\n df.copy = df\r\n df.copy.drop_duplicates(subset=['InvoiceNo', 'CustomerID'], keep=\"first\", inplace=True)\r\n #calculate frequency of purchases\r\n frequency_df = df.copy.groupby(by=['CustomerID'], as_index=False)['InvoiceNo'].count()\r\n frequency_df.columns = ['CustomerID','Frequency']\r\n st.table(frequency_df.head())\r\n\r\n monetary_df = df.groupby(by='CustomerID',as_index=False).agg({'TotalAmount': 'sum'})\r\n monetary_df.columns = ['CustomerID','Monetary']\r\n st.table(monetary_df.head())\r\n\r\n\r\n\r\n #merge recency dataframe with frequency dataframe\r\n temp_df = recency_df.merge(frequency_df,on='CustomerID')\r\n\r\n #merge with monetary dataframe to get a table with the 3 columns\r\n rfm_df = temp_df.merge(monetary_df,on='CustomerID')\r\n #use CustomerID as index\r\n rfm_df.set_index('CustomerID',inplace=True)\r\n #check the head\r\n st.table(rfm_df.head())\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('RFM_outliers.png')\r\n\r\n # Removing (statistical) outliers for Monetary\r\n Q1 = rfm_df.Monetary.quantile(0.05)\r\n Q3 = rfm_df.Monetary.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Monetary >= Q1 - 1.5*IQR) & (rfm_df.Monetary <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Recency\r\n Q1 = rfm_df.Recency.quantile(0.05)\r\n Q3 = rfm_df.Recency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Recency >= Q1 - 1.5*IQR) & (rfm_df.Recency <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Frequency\r\n Q1 = rfm_df.Frequency.quantile(0.05)\r\n Q3 = rfm_df.Frequency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Frequency >= Q1 - 1.5*IQR) & (rfm_df.Frequency <= Q3 + 1.5*IQR)]\r\n\r\n # Rescaling the attributes\r\n import sklearn\r\n from sklearn.preprocessing import StandardScaler\r\n\r\n rfm = rfm_df[['Recency','Frequency', 'Monetary']]\r\n\r\n # Instantiate\r\n scaler = StandardScaler()\r\n\r\n # fit_transform\r\n rfm_scaled = scaler.fit_transform(rfm)\r\n rfm_scaled.shape\r\n\r\n rfm_scaled = 
pd.DataFrame(rfm_scaled)\r\n rfm_scaled.columns = ['Recency', 'Frequency', 'Monetary']\r\n st.table(rfm_scaled.head())\r\n\r\n \r\n st.image('heatmap.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('PAIRPLOT.png')\r\n\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n st.write('')\r\n\r\n st.header('To get a better understanding of the dataset, we can construct a scatter matrix of each of the three features present in the RFM data')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('scatter1.png')\r\n\r\n st.write('')\r\n st.write('')\r\n \r\n st.header('We can notice that we have a skewed distribution of the 3 variables and there exist outliers. This indicates how normalization is required to make the data features normally distributed as clustering algorithms require them to be normally distributed.')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n #log transformation\r\n rfm_r_log = np.log(rfm['Recency']+0.1) #can't take log(0) and so add a small number\r\n rfm_f_log = np.log(rfm['Frequency'])\r\n rfm_m_log = np.log(rfm['Monetary']+0.1)\r\n\r\n log_data = pd.DataFrame({'Monetary': rfm_m_log,'Recency': rfm_r_log,'Frequency': rfm_f_log})\r\n\r\n st.table(log_data.head())\r\n\r\n st.image('scatter2.png')\r\n\r\n st.header('The distributions of Frequency and Monetary are better, more normalized, but it\"s not the case with Recency Distribution, which is improved but not as much.')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('heatmap2.png')\r\n\r\n customers_rank = rfm_df\r\n # Create a new column that is the rank of the value of coverage in ascending order\r\n customers_rank['Rank'] = customers_rank['Monetary'].rank(ascending=0)\r\n #customers_rank.drop('RevenueRank',axis=1,inplace=True)\r\n st.table(customers_rank.head())\r\n\r\n st.write('')\r\n st.write('')\r\n st.header('Top Customers')\r\n st.table(customers_rank.sort_values('Rank',ascending=True).head())\r\n\r\n #get top 20% of the customers\r\n top_20 = 3863 *20 /100\r\n \r\n\r\n #sum the monetary values over the customer with rank <=773\r\n RevenueByTop20 = customers_rank[customers_rank['Rank'] <= 772]['Monetary'].sum()\r\n \r\n st.subheader('#### In our case, the 80% of total revenue is not achieved by the 20% of TOP customers but approximately, it does, because they are less than our 20% TOP customers who achieve it. It would be interesting to study this group of customers because they are those who make our most revenue.')\r\n\r\n st.subheader('Applying RFM score formula. The simplest way to create customers segments from RFM Model is to use Quartiles. We assign a score from 1 to 4 to Recency, Frequency and Monetary. Four is the best/highest value, and one is the lowest/worst value. 
A final RFM score is calculated simply by combining individual RFM score numbers.')\r\n\r\n quantiles = rfm_df.quantile(q=[0.25,0.5,0.75])\r\n\r\n quantiles.to_dict()\r\n\r\n # Arguments (x = value, p = recency, monetary_value, frequency, d = quartiles dict)\r\n def RScore(x,p,d):\r\n if x <= d[p][0.25]:\r\n return 4\r\n elif x <= d[p][0.50]:\r\n return 3\r\n elif x <= d[p][0.75]: \r\n return 2\r\n else:\r\n return 1\r\n # Arguments (x = value, p = recency, monetary_value, frequency, k = quartiles dict)\r\n def FMScore(x,p,d):\r\n if x <= d[p][0.25]:\r\n return 1\r\n elif x <= d[p][0.50]:\r\n return 2\r\n elif x <= d[p][0.75]: \r\n return 3\r\n else:\r\n return 4\r\n\r\n\r\n #create rfm segmentation table\r\n rfm_segmentation = rfm_df\r\n rfm_segmentation['R_Quartile'] = rfm_segmentation['Recency'].apply(RScore, args=('Recency',quantiles,))\r\n rfm_segmentation['F_Quartile'] = rfm_segmentation['Frequency'].apply(FMScore, args=('Frequency',quantiles,))\r\n rfm_segmentation['M_Quartile'] = rfm_segmentation['Monetary'].apply(FMScore, args=('Monetary',quantiles,)) \r\n\r\n st.table(rfm_segmentation.head())\r\n\r\n rfm_segmentation['RFMScore'] = rfm_segmentation.R_Quartile.map(str) \\\r\n + rfm_segmentation.F_Quartile.map(str) \\\r\n + rfm_segmentation.M_Quartile.map(str)\r\n st.table(rfm_segmentation.head())\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.subheader('Best Recency score = 4: Most recently purchase. Best Frequency score = 4: Most quantity purchase. Best Monetary score = 4: Spent the most.')\r\n \r\n st.write('')\r\n\r\n st.subheader(\"Let's take a look on our best customers \")\r\n st.table(rfm_segmentation[rfm_segmentation['RFMScore']=='444'].sort_values('Monetary', ascending=False).head(10))\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.subheader(\"And then, Let's take a look on how many customer do we have in each segment\")\r\n\r\n st.write(\"Best Customers: \",len(rfm_segmentation[rfm_segmentation['RFMScore']=='444']))\r\n st.write('Loyal Customers: ',len(rfm_segmentation[rfm_segmentation['F_Quartile']==4]))\r\n st.write(\"Big Spenders: \",len(rfm_segmentation[rfm_segmentation['M_Quartile']==4]))\r\n st.write('Almost Lost: ', len(rfm_segmentation[rfm_segmentation['RFMScore']=='244']))\r\n st.write('Lost Customers: ',len(rfm_segmentation[rfm_segmentation['RFMScore']=='144']))\r\n st.write('Lost Cheap Customers: ',len(rfm_segmentation[rfm_segmentation['RFMScore']=='111']))\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.subheader('Now that we knew our customers segments we can choose how to target or deal with each segment.')\r\n\r\n st.subheader('Apparently there are 15% of customers considered as Champions. These customers are responsible for a big share of your revenue so we can put a lot of effort into keeping imroving their experience. What we can do: Give them something extra that the regulars do not get, for example, limited series of products or special discounts to make them feel valued. Use communication similar to the Loyal segment. For example making them ambassadors, giving them a margin of your profits for bringing you, new customers. Ask them for feedbacks as they might know the products and services very well.')\r\n\r\n\r\n#K-Means\r\nif rad == 'K-Means Clustering':\r\n st.markdown(\"
K-Means Clustering for Segmentation
\", unsafe_allow_html=True)\r\n st.subheader('For this task, we will be using an unsupervised Machine Learning algorithm which is K-Means which identifies k number of centroids, and then allocates every data point to the nearest cluster (based on similarities), while keeping the centroids as small as possible.')\r\n # # # Applying K-Means Clustering\r\n st.write(\"The algorithm works as follows, First we initialize k points called means, randomly. We categorize each item to its closest mean and we update the means coordinates, which are the averages of the items categorized in that mean so far.We repeat the process for a given number of iterations and at the end, we have our clusters.\")\r\n \r\n st.write('')\r\n st.write('')\r\n\r\n today = dt.date(2011,12,10)\r\n #Create a new column called date which contains the date of invoice only\r\n df['date'] = df['InvoiceDate'].dt.date\r\n\r\n #group by customers and check last date of purshace\r\n recency_df = df.groupby(by='CustomerID', as_index=False)['date'].max()\r\n recency_df.columns = ['CustomerID','LastPurshaceDate']\r\n\r\n #calculate recency\r\n recency_df['Recency'] = recency_df['LastPurshaceDate'].apply(lambda x: (today - x).days)\r\n\r\n #drop LastPurchaseDate as we don't need it anymore\r\n recency_df.drop('LastPurshaceDate',axis=1,inplace=True)\r\n\r\n # drop duplicates\r\n df.copy = df\r\n df.copy.drop_duplicates(subset=['InvoiceNo', 'CustomerID'], keep=\"first\", inplace=True)\r\n #calculate frequency of purchases\r\n frequency_df = df.copy.groupby(by=['CustomerID'], as_index=False)['InvoiceNo'].count()\r\n frequency_df.columns = ['CustomerID','Frequency']\r\n\r\n monetary_df = df.groupby(by='CustomerID',as_index=False).agg({'TotalAmount': 'sum'})\r\n monetary_df.columns = ['CustomerID','Monetary']\r\n\r\n #merge recency dataframe with frequency dataframe\r\n temp_df = recency_df.merge(frequency_df,on='CustomerID')\r\n\r\n #merge with monetary dataframe to get a table with the 3 columns\r\n rfm_df = temp_df.merge(monetary_df,on='CustomerID')\r\n #use CustomerID as index\r\n rfm_df.set_index('CustomerID',inplace=True)\r\n \r\n\r\n # Removing (statistical) outliers for Monetary\r\n Q1 = rfm_df.Monetary.quantile(0.05)\r\n Q3 = rfm_df.Monetary.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Monetary >= Q1 - 1.5*IQR) & (rfm_df.Monetary <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Recency\r\n Q1 = rfm_df.Recency.quantile(0.05)\r\n Q3 = rfm_df.Recency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Recency >= Q1 - 1.5*IQR) & (rfm_df.Recency <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Frequency\r\n Q1 = rfm_df.Frequency.quantile(0.05)\r\n Q3 = rfm_df.Frequency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Frequency >= Q1 - 1.5*IQR) & (rfm_df.Frequency <= Q3 + 1.5*IQR)]\r\n\r\n # Rescaling the attributes\r\n import sklearn\r\n from sklearn.preprocessing import StandardScaler\r\n\r\n rfm = rfm_df[['Recency','Frequency', 'Monetary']]\r\n\r\n # Instantiate\r\n scaler = StandardScaler()\r\n\r\n # fit_transform\r\n rfm_scaled = scaler.fit_transform(rfm)\r\n\r\n rfm_scaled = pd.DataFrame(rfm_scaled)\r\n rfm_scaled.columns = ['Recency', 'Frequency', 'Monetary']\r\n \r\n\r\n kmeans = KMeans(n_clusters=4, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n\r\n st.header('Finding the Optimal Number of Clusters')\r\n st.subheader('The Elbow Method is one of the most popular methods to determine this optimal value of k.')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n # 
Elbow-curve\r\n\r\n ssd = []\r\n range_n_clusters = [2, 3, 4, 5, 6, 7, 8]\r\n for num_clusters in range_n_clusters:\r\n kmeans = KMeans(n_clusters=num_clusters, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n \r\n ssd.append(kmeans.inertia_)\r\n \r\n st.image('elbow.png')\r\n\r\n \r\n\r\n\r\n \r\n # Final model with k=3\r\n kmeans = KMeans(n_clusters=3, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n\r\n # assign the label\r\n rfm['Cluster_Id'] = kmeans.labels_\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('monetary.png')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('frequency.png')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('recency.png')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('3D.png')\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n st.image('maxtrans.png')\r\n\r\n st.write('')\r\n st.write('')\r\n \r\n st.image('recenttrans.png')\r\n\r\n\r\nif rad == \"Cluster Calculator\":\r\n st.markdown(\"
Cluster Calculator
\", unsafe_allow_html=True)\r\n st.header('Now you can try inputting some values for RFM and see which cluster this imaginary customer can be apart of')\r\n st.subheader(\"To be able to do that, we will consider that any customer whose RFM values fall between the IQR of the Cluster average values, can be part of that cluster.\")\r\n today = dt.date(2011,12,10)\r\n #Create a new column called date which contains the date of invoice only\r\n df['date'] = df['InvoiceDate'].dt.date\r\n\r\n #group by customers and check last date of purshace\r\n recency_df = df.groupby(by='CustomerID', as_index=False)['date'].max()\r\n recency_df.columns = ['CustomerID','LastPurshaceDate']\r\n\r\n #calculate recency\r\n recency_df['Recency'] = recency_df['LastPurshaceDate'].apply(lambda x: (today - x).days)\r\n\r\n #drop LastPurchaseDate as we don't need it anymore\r\n recency_df.drop('LastPurshaceDate',axis=1,inplace=True)\r\n\r\n # drop duplicates\r\n df.copy = df\r\n df.copy.drop_duplicates(subset=['InvoiceNo', 'CustomerID'], keep=\"first\", inplace=True)\r\n #calculate frequency of purchases\r\n frequency_df = df.copy.groupby(by=['CustomerID'], as_index=False)['InvoiceNo'].count()\r\n frequency_df.columns = ['CustomerID','Frequency']\r\n\r\n monetary_df = df.groupby(by='CustomerID',as_index=False).agg({'TotalAmount': 'sum'})\r\n monetary_df.columns = ['CustomerID','Monetary']\r\n\r\n\r\n\r\n #merge recency dataframe with frequency dataframe\r\n temp_df = recency_df.merge(frequency_df,on='CustomerID')\r\n\r\n #merge with monetary dataframe to get a table with the 3 columns\r\n rfm_df = temp_df.merge(monetary_df,on='CustomerID')\r\n #use CustomerID as index\r\n rfm_df.set_index('CustomerID',inplace=True)\r\n \r\n # Removing (statistical) outliers for Monetary\r\n Q1 = rfm_df.Monetary.quantile(0.05)\r\n Q3 = rfm_df.Monetary.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Monetary >= Q1 - 1.5*IQR) & (rfm_df.Monetary <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Recency\r\n Q1 = rfm_df.Recency.quantile(0.05)\r\n Q3 = rfm_df.Recency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Recency >= Q1 - 1.5*IQR) & (rfm_df.Recency <= Q3 + 1.5*IQR)]\r\n\r\n # Removing (statistical) outliers for Frequency\r\n Q1 = rfm_df.Frequency.quantile(0.05)\r\n Q3 = rfm_df.Frequency.quantile(0.95)\r\n IQR = Q3 - Q1\r\n rfm_df = rfm_df[(rfm_df.Frequency >= Q1 - 1.5*IQR) & (rfm_df.Frequency <= Q3 + 1.5*IQR)]\r\n\r\n # Rescaling the attributes\r\n import sklearn\r\n from sklearn.preprocessing import StandardScaler\r\n\r\n rfm = rfm_df[['Recency','Frequency', 'Monetary']]\r\n\r\n # Instantiate\r\n scaler = StandardScaler()\r\n\r\n # fit_transform\r\n rfm_scaled = scaler.fit_transform(rfm)\r\n\r\n rfm_scaled = pd.DataFrame(rfm_scaled)\r\n rfm_scaled.columns = ['Recency', 'Frequency', 'Monetary']\r\n \r\n\r\n kmeans = KMeans(n_clusters=4, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n\r\n st.write('')\r\n st.write('')\r\n\r\n # Elbow-curve\r\n\r\n ssd = []\r\n range_n_clusters = [2, 3, 4, 5, 6, 7, 8]\r\n for num_clusters in range_n_clusters:\r\n kmeans = KMeans(n_clusters=num_clusters, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n \r\n ssd.append(kmeans.inertia_)\r\n \r\n \r\n\r\n \r\n # Final model with k=3\r\n kmeans = KMeans(n_clusters=3, max_iter=50)\r\n kmeans.fit(rfm_scaled)\r\n\r\n # assign the label\r\n rfm['Cluster_Id'] = kmeans.labels_\r\n\r\n unique_clusters = rfm['Cluster_Id'].unique()\r\n col1, col2, col3 = st.columns((1,1,1))\r\n r= col1.number_input('Add Recency')\r\n f= 
col2.number_input('Add Frequency')\r\n    m= col3.number_input('Add Monetary Value')\r\n    c= ' '\r\n\r\n    #Recency\r\n    rec_q1 = rfm.groupby('Cluster_Id')['Recency'].quantile(0.25)\r\n    rec_q3 = rfm.groupby('Cluster_Id')['Recency'].quantile(0.75)\r\n\r\n    #Frequency\r\n    freq_q1 = rfm.groupby('Cluster_Id')['Frequency'].quantile(0.25)\r\n    freq_q3 = rfm.groupby('Cluster_Id')['Frequency'].quantile(0.75)\r\n\r\n    #monetary\r\n    monetary_q1 = rfm.groupby('Cluster_Id')['Monetary'].quantile(0.25)\r\n    monetary_q3 = rfm.groupby('Cluster_Id')['Monetary'].quantile(0.75)\r\n\r\n    #iterate for each cluster to see if it fits; stop at the first match,\r\n    #and show the error only when no cluster matched\r\n    if st.button(\"Click here to calculate!\"):\r\n        for n in range(len(unique_clusters)):\r\n            if (rec_q1[n] <= r <= rec_q3[n]) and (freq_q1[n] <= f <= freq_q3[n]) and (monetary_q1[n] <= m <= monetary_q3[n]):\r\n                c = n\r\n                st.balloons()\r\n                st.success('Congratulations! This customer can be added to cluster number %d' % c)\r\n                break\r\n        else:\r\n            st.error('Try again! This customer does not fit in any cluster')","repo_name":"MarwanHafezz/STREAMLIT","sub_path":"STREAM.py","file_name":"STREAM.py","file_ext":"py","file_size_in_byte":23582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12332137150","text":"\"\"\"\nPackage responsible for fetching the translations.\n\"\"\"\n\nimport asyncio\nimport logging\nimport os\nfrom concurrent.futures._base import CancelledError\n\nimport telepot\nfrom telepot.namedtuple import InlineQueryResultArticle, InputTextMessageContent\n\nimport helpers\n\n\nBASE_URL = os.environ['BOT_BASE_URL']\n\n\nclass Translator(object):\n \"\"\" Responsible for fetching the page results of inline queries.\n \"\"\"\n\n def __init__(self, rank, queue, translator, loop=None):\n self.rank = rank\n self.queue = queue\n self.translator = translator\n self._loop = loop if loop is not None else asyncio.get_event_loop()\n self._working_tasks = {}\n\n def fetch_translation(self, query, lang_id):\n \"\"\" Queries the Microsoft translator API and fetches\n the translations for the given user query.\n \"\"\"\n translated_query = self.translator.translate(query, lang_id)\n return InlineQueryResultArticle(\n id=lang_id,\n title=translated_query,\n description=helpers.get_lang_name(lang_id),\n input_message_content=InputTextMessageContent(message_text=translated_query),\n thumb_url=BASE_URL + '/img/thumb.png',\n thumb_width=64,\n thumb_height=64\n )\n\n def cache(self, inline_query):\n \"\"\" Creates the task and gets it running in the event loop.\n \"\"\"\n _, from_id, query = telepot.glance(inline_query, flavor='inline_query')\n\n async def compute_and_cache():\n \"\"\"Wraps the function to be executed in order to properly\n handle exceptions.\n \"\"\"\n try:\n for lang_id in self.rank:\n future = self._loop.run_in_executor(\n None, self.fetch_translation, query, lang_id)\n translation = await future\n await self.queue.put(translation)\n logging.info('Caching result for %s', lang_id)\n except CancelledError:\n # Cancelled. Record has been occupied by new task. Don't touch.\n raise\n except:\n # Die accidentally. Remove myself from record.\n del self._working_tasks[from_id]\n raise\n else:\n # Die naturally. 
Remove myself from record.\n del self._working_tasks[from_id]\n\n if from_id in self._working_tasks:\n self._working_tasks[from_id].cancel()\n\n caching_task = self._loop.create_task(compute_and_cache())\n self._working_tasks[from_id] = caching_task\n","repo_name":"gabrielaraujof/Babelegram","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20703395767","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"test src/tuling.py\n\"\"\"\nimport Queue\nfrom src.tuling import Tuling\nimport responses\n\n\n@responses.activate\ndef test_Tuling():\n \"\"\"test Tuling server\n \"\"\"\n queue_in = Queue.Queue()\n queue_out = Queue.Queue()\n with responses.RequestsMock() as resp:\n resp.add(responses.POST,\n 'http://www.tuling123.com/openapi/api',\n json={'text':'hello client', 'code':100000})\n ai_server = Tuling(queue_in, queue_out, key='test')\n assert ai_server._payload_['key'] == 'test'\n try:\n ai_server.start()\n queue_in.put('hello server')\n message = queue_out.get(timeout=5)\n finally:\n ai_server.stop()\n ai_server.join()\n assert message == 'hello client'\n","repo_name":"wankaiss/autoWechatReply","sub_path":"test/test_tuling.py","file_name":"test_tuling.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41511887939","text":"from flask import Flask, render_template, request, send_file\nfrom pytube import YouTube\nimport os\n\napp = Flask(__name__)\napp.config['OUTPUT_FOLDER'] = 'outputs'\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n if request.method == 'POST':\n url = request.form['url']\n try:\n video = YouTube(url)\n title = video.title\n thumbnail = video.thumbnail_url\n return render_template('search.html', url=url, title=title, thumbnail=thumbnail)\n except:\n return render_template('search.html', error='Invalid URL')\n else:\n return render_template('search.html')\n\n@app.route('/download', methods=['POST'])\ndef download():\n url = request.form['url']\n video = YouTube(url)\n title = video.title\n streams = video.streams.filter(only_audio=True)\n filename = f\"{title}.mp3\"\n path = os.path.join(app.config['OUTPUT_FOLDER'], filename)\n streams.first().download(output_path=app.config['OUTPUT_FOLDER'], filename=filename)\n return send_file(path, as_attachment=True)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"yassinelkhantach/containerize-flask-youtube-converter-app-with-docker","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17975270399","text":"from reportlab.pdfgen import canvas\n\ndataSiswa = {\n\t\"Nama\" : \"Neville\",\n\t\"Kelas\" : \" 8.2\",\n\t\"Laporan\" : \"Raport Kelas 8 Semester 2\"\n}\nclass Data:\n\n\tdef __init__(self, filename, documentTitle, heading):\n\t\tself.filename = filename\n\t\tself.documentTitle = documentTitle\n\t\tself.heading = heading\n\nmyData = Data(str(dataSiswa[\"Nama\"]+dataSiswa[\"Kelas\"]+\".pdf\"), \"Hasil Ujian\", dataSiswa[\"Laporan\"])\nmyPDF = canvas.Canvas(myData.filename)\nmyPDF.setTitle(myData.documentTitle)\n\n#Print on Papper\nmyPDF.drawString(227,780,myData.heading) 
#x,y,heading\n\nmyPDF.save()\n#print(\"OK\")","repo_name":"Neville07/mySchoolProject","sub_path":"createPdf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32583401415","text":"import random\n\nnucleotides = ['A', 'T', 'G', 'C']\nerror_types = ['_substitute', '_insert', '_delete']\nerror_types_prob = [0.5, 0.25, 0.25]\n\n\ndef decision(probability):\n return random.random() < probability\n\ndef decision_error_type():\n return random.choices(error_types, weights=error_types_prob)[0]\n\ndef errors_add(data, percent_error, k=30):\n datas = []\n while k > 0:\n d = \"\"\n for nuc in data:\n assert nuc in nucleotides, nuc + ' not in ' + str(nucleotides) + ' - ' + data\n if decision(percent_error):\n d += globals()[decision_error_type()](nuc)\n else:\n d += nuc\n datas.append(d)\n k -= 1\n return datas\n\ndef _substitute(c):\n nucs = nucleotides.copy()\n nucs.remove(c)\n return random.choice(nucs)\n\ndef _insert(c):\n if random.random() < 0.5:\n return c + random.choice(nucleotides)\n return random.choice(nucleotides) + c\n\ndef _delete(c):\n return ''","repo_name":"SownBanana/DNA-Decoder-Simulator","sub_path":"utils/mutant.py","file_name":"mutant.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30542783474","text":"'''\n Trae la información de posición de la api de transporte de los gps\n y luego la almacena en el storage de transporte. \n'''\nimport time\n\nfrom velocidad_trp.objects import VelocidadTransporte\nfrom velocidad_trp.config import *\n\nRETURN_EXITOSO = True\n\ndef main():\n velocidad_transporte = VelocidadTransporte(url_transporte, client_id, client_secret)\n while True:\n velocidad_response = velocidad_transporte.get_json\n velocidad = velocidad_response.json()\n print(velocidad)\n time.sleep(TIEMPO_ESPERA)\n return RETURN_EXITOSO\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"v44p/velocidad-trasnporte-client","sub_path":"velocidad_trp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11605883022","text":"import tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.utils import plot_model\r\n\r\n\r\ndef Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding='same'):\r\n x = layers.Conv2D(nb_filter, kernel_size, strides=strides, padding=padding)(x)\r\n x = layers.BatchNormalization(axis=3)(x)\r\n x = layers.LeakyReLU(alpha=0.1)(x)\r\n return x\r\n\r\n\r\ndef Conv2dT_BN(x, filters, kernel_size, strides=(2, 2), padding='same'):\r\n x = layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)(x)\r\n x = layers.BatchNormalization(axis=3)(x)\r\n x = layers.LeakyReLU(alpha=0.1)(x)\r\n return x\r\n\r\nheight = 80\r\nwidth = 240\r\n\r\ninpt = layers.Input(shape=(height, width, 3))\r\nconv1 = Conv2d_BN(inpt, 8, (3, 3))\r\nconv1 = Conv2d_BN(conv1, 8, (3, 3))\r\npool1 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv1)\r\n\r\nconv2 = Conv2d_BN(pool1, 16, (3, 3))\r\nconv2 = Conv2d_BN(conv2, 16, (3, 3))\r\npool2 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv2)\r\n\r\nconv3 = Conv2d_BN(pool2, 32, (3, 3))\r\nconv3 = Conv2d_BN(conv3, 32, (3, 3))\r\npool3 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 
2), padding='same')(conv3)\r\n\r\nconv4 = Conv2d_BN(pool3, 64, (3, 3))\r\nconv4 = Conv2d_BN(conv4, 64, (3, 3))\r\npool4 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv4)\r\n\r\nconv5 = Conv2d_BN(pool4, 128, (3, 3))\r\nconv5 = layers.Dropout(0.5)(conv5)\r\nconv5 = Conv2d_BN(conv5, 128, (3, 3))\r\nconv5 = layers.Dropout(0.5)(conv5)\r\n\r\nconvt1 = Conv2dT_BN(conv5, 64, (3, 3))\r\nconcat1 = layers.concatenate([conv4, convt1], axis=3)\r\nconcat1 = layers.Dropout(0.5)(concat1)\r\nconv6 = Conv2d_BN(concat1, 64, (3, 3))\r\nconv6 = Conv2d_BN(conv6, 64, (3, 3))\r\n\r\nconvt2 = Conv2dT_BN(conv6, 32, (3, 3))\r\nconcat2 = layers.concatenate([conv3, convt2], axis=3)\r\nconcat2 = layers.Dropout(0.5)(concat2)\r\nconv7 = Conv2d_BN(concat2, 32, (3, 3))\r\nconv7 = Conv2d_BN(conv7, 32, (3, 3))\r\n\r\nconvt3 = Conv2dT_BN(conv7, 16, (3, 3))\r\nconcat3 = layers.concatenate([conv2, convt3], axis=3)\r\nconcat3 = layers.Dropout(0.5)(concat3)\r\nconv8 = Conv2d_BN(concat3, 16, (3, 3))\r\nconv8 = Conv2d_BN(conv8, 16, (3, 3))\r\n\r\nconvt4 = Conv2dT_BN(conv8, 8, (3, 3))\r\nconcat4 = layers.concatenate([conv1, convt4], axis=3)\r\nconcat4 = layers.Dropout(0.5)(concat4)\r\nconv9 = Conv2d_BN(concat4, 8, (3, 3))\r\nconv9 = Conv2d_BN(conv9, 8, (3, 3))\r\nconv9 = layers.Dropout(0.5)(conv9)\r\noutpt = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(conv9)\r\n\r\n# 创建模型\r\nmodel = tf.keras.Model(inputs=inpt, outputs=outpt)\r\n\r\n# 保存模型结构图\r\ntf.keras.utils.plot_model(model, to_file='model1.png', show_shapes=True, show_layer_names=True, dpi=96)","repo_name":"listen001-star/End-to-End-plate-recognition","sub_path":"plot_model.py","file_name":"plot_model.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40589137311","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('qabel_provider', '0002_prefix'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='prefix',\n name='downloads',\n field=models.PositiveIntegerField(verbose_name='Download traffic of this user', default=0),\n ),\n migrations.AddField(\n model_name='prefix',\n name='size',\n field=models.PositiveIntegerField(verbose_name='Combined size of all files in the prefix', default=0),\n ),\n migrations.AddField(\n model_name='profile',\n name='used_storage',\n field=models.PositiveIntegerField(verbose_name='Used storage', default=0),\n ),\n ]\n","repo_name":"Qabel/qabel-accounting","sub_path":"qabel_provider/migrations/0003_auto_20160203_1052.py","file_name":"0003_auto_20160203_1052.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"72043853334","text":"import torch\nimport torch.nn as nn\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt import Input\n\nfrom .harness import DispatchTestCase\n\n\nclass TestNegConverter(DispatchTestCase):\n @parameterized.expand(\n [\n (\"2d_dim_dtype_float\", (2, 2), torch.float),\n (\"3d_dim_dtype_float\", (2, 2, 2), torch.float),\n (\"2d_dim_dtype_half\", (2, 2), torch.half),\n (\"3d_dim_dtype_half\", (2, 2, 2), torch.half),\n ]\n )\n def test_neg_float(self, _, x, type):\n class neg(nn.Module):\n def forward(self, input):\n return 
torch.ops.aten.neg.default(input)\n\n inputs = [torch.randn(x, dtype=type)]\n self.run_test(\n neg(),\n inputs,\n precision=type,\n )\n\n @parameterized.expand(\n [\n (\"2d_dim_dtype_int32\", (2, 2), torch.int32, 0, 5),\n (\"3d_dim_dtype_int32\", (2, 2, 2), torch.int32, 0, 5),\n ]\n )\n def test_neg_int(self, _, x, type, min, max):\n class neg(nn.Module):\n def forward(self, input):\n return torch.ops.aten.neg.default(input)\n\n inputs = [torch.randint(min, max, x, dtype=type)]\n self.run_test(\n neg(),\n inputs,\n check_dtype=False,\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n","repo_name":"pytorch/TensorRT","sub_path":"tests/py/dynamo/conversion/test_neg_aten.py","file_name":"test_neg_aten.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":2158,"dataset":"github-code","pt":"67"} +{"seq_id":"23568937036","text":"import logging\nimport os.path as path\nimport argparse\nfrom tqdm import tqdm\n\nimport path_to_kapture\nimport kapture\nimport kapture.utils.logging\nfrom kapture.io.structure import delete_existing_kapture_files\nfrom kapture.io.csv import kapture_to_dir\nimport kapture.io.features\n\nlogger = logging.getLogger('LTVL2020')\n\n\ndef export_ltvl(kapture_dirpath: str,\n ltvl_filepath: str,\n prepend_camera_name: bool = False) -> None:\n \"\"\"\n Export kapture data to a Long-term Visual Localization challenge format file.\n\n :param kapture_dirpath: kapture data top directory\n :param ltvl_filepath: LTVL file path to write\n :param prepend_camera_name: if True, it will prepend the camera name to the image file names.\n \"\"\"\n # only load (1) image records + (2) trajectories (that all that matters).\n # 1: load records\n records_camera_filepath = kapture.io.csv.get_csv_fullpath(kapture.RecordsCamera, kapture_dirpath)\n logger.debug(f'loading {records_camera_filepath}')\n records_cameras = kapture.io.csv.records_camera_from_file(records_camera_filepath)\n # 2: load trajectories\n trajectories_filepath = kapture.io.csv.get_csv_fullpath(kapture.Trajectories, kapture_dirpath)\n logger.debug(f'loading {trajectories_filepath}')\n trajectories = kapture.io.csv.trajectories_from_file(trajectories_filepath)\n # 3: find (timestamp, camera_id) that are both in records and trajectories.\n valid_keys = set(records_cameras.key_pairs()).intersection(set(trajectories.key_pairs()))\n # collect data for those timestamps.\n image_poses = ((k[1], path.basename(records_cameras[k]), trajectories[k]) for k in valid_keys)\n # prepend the camera name or drop it.\n if prepend_camera_name:\n image_poses = ((path.join(camera_id, image_filename), pose) for camera_id, image_filename, pose in image_poses)\n else:\n image_poses = ((image_filename, pose) for camera_id, image_filename, pose in image_poses)\n\n # write the files\n image_poses = {image_filename: pose\n for image_filename, pose in image_poses}\n with open(ltvl_filepath, 'wt') as f:\n for image_filename, pose in tqdm(image_poses.items(), disable=logger.getEffectiveLevel() > logging.INFO):\n line = [image_filename] + pose.r_raw + pose.t_raw\n line = ' '.join(str(v) for v in line) + '\\n'\n f.write(line)\n\n\ndef export_ltvl2020_command_line() -> None:\n \"\"\"\n Do the LTVL 2020 export using the parameters given on the command line.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='convert file to Long-term Visual Localization challenge format '\n '(https://www.visuallocalization.net/submission/).')\n parser_verbosity = parser.add_mutually_exclusive_group()\n 
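    # Added usage sketch (the paths are hypothetical placeholders; the flags are
    # the ones this parser defines):
    #   python kapture_export_LTVL2020.py -i /path/to/kapture -o poses_ltvl.txt -p
    # export_ltvl() above writes one line per localized image:
    #   <image_name> <rotation quaternion (pose.r_raw)> <translation (pose.t_raw)>
    # and -p/--prepend_cam prefixes each image name with its camera id, which
    # some LTVL datasets expect.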
parser_verbosity.add_argument(\n '-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,\n action=kapture.utils.logging.VerbosityParser,\n help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')\n parser_verbosity.add_argument(\n '-q', '--silent', '--quiet', action='store_const', dest='verbose', const=logging.CRITICAL)\n parser.add_argument('-f', '-y', '--force', action='store_true', default=False,\n help='Force delete output if already exists.')\n # export ###########################################################################################################\n parser.add_argument('-i', '--input', required=True, help='input path to kapture directory')\n parser.add_argument('-o', '--output', required=True, help='output file.')\n parser.add_argument('-p', '--prepend_cam', action='store_true', default=False,\n help='prepend camera names to filename (required for some dataset).')\n ####################################################################################################################\n args = parser.parse_args()\n\n logger.setLevel(args.verbose)\n if args.verbose <= logging.DEBUG:\n # also let kapture express its logs\n kapture.utils.logging.getLogger().setLevel(args.verbose)\n\n export_ltvl(args.input, args.output, args.prepend_cam)\n\n\nif __name__ == '__main__':\n export_ltvl2020_command_line()\n","repo_name":"zebrajack/kapture","sub_path":"tools/kapture_export_LTVL2020.py","file_name":"kapture_export_LTVL2020.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"38326228614","text":"import tkinter\r\nimport random\r\nfrom tkinter import messagebox\r\n# import tkMessageBox\r\ntop = tkinter.Tk()\r\ntop.configure(bg=\"white\")\r\n\r\ntop.title(\"RESOLUTIONS-2019\")\r\ntop.geometry(\"325x275\")\r\ntasks = []\r\n#tasks = [\"get Inked!\",\"buy a camera\",\"Visit Goa\"]\r\n\r\n\r\n\r\ndef update_listbox():\r\n clear_listbox()\r\n for task in tasks:\r\n lb_tasks.insert(\"end\",task)\r\n\r\ndef clear_listbox():\r\n lb_tasks.delete(0,\"end\")\r\n\r\n\r\ndef add_task():\r\n #get the task\r\n task = txt_input.get()\r\n if task !=\"\":\r\n #add it to list\r\n tasks.append(task)\r\n #update\r\n update_listbox()\r\n else:\r\n messagebox.showwarning(\"Warning\",\"Please add the task to display\")\r\n #lbl_display[\"text\"]= \"Please enter the task.\"\r\n txt_input.delete(0,\"end\")\r\n\r\ndef delete_task():\r\n #get text of current selected item\r\n task = lb_tasks.get(\"active\")\r\n #confirm if its list\r\n if task in tasks:\r\n tasks.remove(task)\r\n #update\r\n update_listbox()\r\n\r\ndef delete_all():\r\n if messagebox.askokcancel(\"Please Confirm\", \"Do you really wanna delete all tasks?? 
\"):\r\n global tasks\r\n #clear the list box and update\r\n tasks = []\r\n update_listbox()\r\n\r\ndef sort_asc():\r\n tasks.sort()\r\n update_listbox()\r\n\r\ndef sort_dsc():\r\n tasks.sort()\r\n tasks.reverse()\r\n update_listbox()\r\n\r\n\r\ndef Choose_random():\r\n#choose a random task\r\n task = random.choice(tasks)\r\n #uddate the label\r\n lbl_display[\"text\"]=task\r\n\r\ndef number_of_tasks():\r\n #to get no of tasks\r\n number_of_tasks= len(tasks)\r\n #create the msg\r\n msg=\"number of tasks: %s\" %number_of_tasks\r\n #display the msg\r\n lbl_display[\"text\"]=msg\r\n\r\n\r\ndef exit():\r\n pass\r\n\r\n\r\nlbl_title = tkinter.Label(top, text = \"New Year To_Do List \" , bg= \"white\")\r\nlbl_title.grid(row=0,column=0)\r\n\r\n\r\nlbl_display = tkinter.Label(top, text = \" \" , bg= \"white\")\r\nlbl_display.grid(row=0,column=1)\r\n\r\ntxt_input = tkinter.Entry(top,width=15)\r\ntxt_input.grid(row=1,column=1)\r\n\r\nbtn_add_task = tkinter.Button(top, text=\"ADD TASKS\", fg=\"green\" ,bg=\"white\" , command = add_task)\r\nbtn_add_task.grid(row=1,column=0)\r\n\r\n\r\nbtn_delete_task = tkinter.Button(top, text=\"DELETE TASKS\", fg=\"green\" ,bg=\"white\" , command = delete_task)\r\nbtn_delete_task.grid(row=2,column=0)\r\n\r\n\r\nbtn_del_all_task = tkinter.Button(top, text=\"DELETE ALL TASKS\", fg=\"green\" ,bg=\"white\" , command =delete_all )\r\nbtn_del_all_task.grid(row=3,column=0)\r\n\r\n\r\nbtn_sort_task_asc = tkinter.Button(top, text=\"SORT_ASC\", fg=\"green\" ,bg=\"white\" , command = sort_asc)\r\nbtn_sort_task_asc.grid(row=4,column=0)\r\n\r\n\r\nbtn_add_task_dsc = tkinter.Button(top, text=\" SORT_DSC\", fg=\"green\" ,bg=\"white\" , command = sort_dsc)\r\nbtn_add_task_dsc.grid(row=5,column=0)\r\n\r\n\r\nbtn_random_task = tkinter.Button(top, text=\"CHOOSE RANDOM\", fg=\"green\" ,bg=\"white\" , command = Choose_random)\r\nbtn_random_task.grid(row=6,column=0)\r\n\r\n\r\nbtn_total_task = tkinter.Button(top, text=\"TOTAL TASKS\", fg=\"green\" ,bg=\"white\" , command = number_of_tasks)\r\nbtn_total_task.grid(row=7,column=0)\r\n\r\n\r\nbtn_exit_task = tkinter.Button(top, text=\"EXIT TASKS\", fg=\"green\" ,bg=\"white\" , command =exit )\r\nbtn_exit_task.grid(row=8,column=0)\r\n\r\nlb_tasks= tkinter.Listbox(top)\r\nlb_tasks.grid(row=2,column=1,rowspan=7)\r\n\r\n\r\n\r\n\r\ntop.mainloop()\r\n","repo_name":"Shrinidhiharish/Python-tkinter","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71237232853","text":"import unittest\nimport collections\nimport dafpy as dfp\nfrom dafpy import coroutine, coroutine_from_func, func_from_coroutine, coroutine_from_class\nfrom dafpy import generator_from_func, func_from_generator\n\n\n###############################\n# DEFINITIONS\n###############################\n\ndef f(x):\n y = x + 10\n return y\n\n\ndef f2(x, y):\n return x + 10 * y\n\n\ndef g(y):\n z = y + 1000\n return z\n\n\nclass DataGenerator(object):\n def __init__(self):\n self.data = range(10)\n\n def func(self):\n if self.data:\n return self.data.pop(0)\n else:\n raise (StopIteration())\n\n @coroutine\n def co(self, env):\n yield\n for x in self.data:\n env.send(x)\n yield\n\n def gen(self):\n for x in self.data:\n yield x\n\n\nclass DataAppender(object):\n def __init__(self):\n self.out = []\n\n def func(self, z):\n self.out.append(z)\n\n def gen(self, z_gen):\n while True:\n z = z_gen.next()\n self.out.append(z)\n yield\n\n @coroutine\n def 
co(self):\n while True:\n z = yield\n self.out.append(z)\n\n\n###############################\n# TESTS\n###############################\n\ndef test_coroutine_from_callable_obj():\n mlag = dfp.Lag(0)\n f_lag = func_from_coroutine(coroutine_from_func(mlag))\n assert 0 == f_lag(10)\n assert 10 == f_lag(11)\n\n\ndef test_coroutine_from_class():\n f_lag = func_from_coroutine(coroutine_from_class(dfp.Lag, 0))\n assert 0 == f_lag(10)\n assert 10 == f_lag(11)\n assert callable(f_lag)\n\n\ndef test_coroutine_from_func():\n f_func = func_from_coroutine(coroutine_from_func(f))\n for x in range(10):\n d1, d2 = f_func(x), f(x)\n assert d1 == d2\n\n\nclass TestGenerator(unittest.TestCase):\n def setUp(self):\n self.terminal_node = DataAppender()\n self.mlag = dfp.Lag(0)\n\n def data_gen():\n for i in range(10):\n yield [i + 1], {}\n\n self.data_gen = data_gen\n\n def test_generator_from_func(self):\n \"\"\"\n Play with generators\n Remarks: we can play a lot whith it be cause when the generator is created, it has already the data i.\n\n At contrario coroutine can be piped without sending data.\n data is send at the end\n \"\"\"\n mlag = self.mlag\n data_gen = self.data_gen\n gen_lag = generator_from_func(mlag)\n for i, res in enumerate(gen_lag(data_gen())):\n assert i == res\n\n def test_generator_from_func2(self):\n mlag = self.mlag\n data_gen = self.data_gen\n gen_lag = generator_from_func(mlag)\n f_func = func_from_generator(gen_lag(data_gen()))\n for i in range(10):\n assert i == f_func()\n\n\ndef test_workflow_with_lag():\n \"\"\"\n We test a workkflow with a feedback implemented with a lag\n \"\"\"\n results = [0, 1000, 11001, 111012, 1111123, 11112234, 111123345, 1111234456, 11112345567, 111123456678,\n 1111234567789]\n attr_f = {'color': 'red'}\n data_in = DataGenerator()\n data_out = DataAppender()\n mlag = dfp.Lag(0)\n\n dtf = dfp.DataflowEnvironment()\n dtf.add_gentask('indata', data_in.gen, initial=True)\n dtf.add_task('f', f2,\n filters=dict(args=['indata', 'lag', ]),\n **attr_f)\n dtf.add_task('g', g, filters=dict(args=['f']))\n dtf.add_task('lag', mlag, filters=dict(args=['g']))\n dtf.add_task('terminal', data_out.func, filters=dict(args=['lag']))\n dtf.start()\n dtf.run()\n\n assert data_out.out == results\n\n\n####################################\n# Some tests on different possible Lag implementations\n# This test permits to test some more advanced featchures of dataflowEnv such as passing argmuments to generators or coroutines\n####################################\n\n\nclass LagEnvGen(dfp.DataflowEnvironment):\n def __init__(self, initial_state=None, **attr):\n dfp.DataflowEnvironment.__init__(self, **attr)\n\n self.add_gentask('lag', dfp.gen_lag,\n filters='call_args',\n gen_args=dict(args=[initial_state]),\n reset=True)\n self.add_edge_call_rets('lag')\n self.start()\n\n def reset(self):\n return self()\n # self.task['lag']['co_started'].send(None)\n # return self.task['receive']['gen_started'].next()\n\n\ndef test_lag_env_gen():\n \"\"\"\n LagEnvGen is iso functioal to dfp.Lag\n \"\"\"\n lag = LagEnvGen(10)\n\n assert lag(11) == 10\n assert lag('a') == 11\n assert lag.reset() == 'a'\n assert lag('b') is None\n assert lag.reset() == 'b'\n assert lag('c') == None\n assert ['c'] + range(9) == list(lag.gen(iter(range(10))))\n\n\nclass LagEnvCo(dfp.DataflowEnvironment):\n def __init__(self, initial_state=None, **attr):\n dfp.DataflowEnvironment.__init__(self, **attr)\n\n self.add_cotask('lag', dfp.co_lag,\n filters='call_args',\n co_args=dict(args=[initial_state]),\n reset=True)\n 
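        # Added note: this wiring makes the environment itself callable like
        # dfp.Lag. The 'lag' coroutine receives the call arguments
        # (filters='call_args') and, once its returns are routed back to the
        # caller (add_edge_call_rets below), each call returns the *previous*
        # input, e.g. lag(11) -> initial_state, then lag('a') -> 11, as
        # test_lag_env_co asserts.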
self.add_edge_call_rets('lag')\n self.start()\n\n def reset(self):\n return self()\n # self.task['lag']['co_started'].send(None)\n # return self.task['receive']['gen_started'].next()\n\n\ndef test_lag_env_co():\n \"\"\"\n LagEnvCo is iso functioal to dfp.Lag\n \"\"\"\n lag = LagEnvCo(10)\n\n assert lag(11) == 10\n assert lag('a') == 11\n assert lag.reset() == 'a'\n assert lag('b') is None\n assert lag.reset() == 'b'\n assert lag('c') == None\n assert ['c'] + range(9) == list(lag.gen(iter(range(10))))\n","repo_name":"eserie/dafpy","sub_path":"tests/test_lag_workflows.py","file_name":"test_lag_workflows.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15012907832","text":"\"\"\"\nMinimum Number of Refueling Stops\n\nA car travels from a starting position to a destination which is target miles east of the starting position.\n\nThere are gas stations along the way. The gas stations are represented as an array stations where stations[i] = [positioni, fueli] indicates that the ith gas station is positioni miles east of the starting position and has fueli liters of gas.\n\nThe car starts with an infinite tank of gas, which initially has startFuel liters of fuel in it. It uses one liter of gas per one mile that it drives. When the car reaches a gas station, it may stop and refuel, transferring all the gas from the station into the car.\n\nReturn the minimum number of refueling stops the car must make in order to reach its destination. If it cannot reach the destination, return -1.\n\nNote that if the car reaches a gas station with 0 fuel left, the car can still refuel there. If the car reaches the destination with 0 fuel left, it is still considered to have arrived.\n\n \n\nExample 1:\n\nInput: target = 1, startFuel = 1, stations = []\nOutput: 0\nExplanation: We can reach the target without refueling.\nExample 2:\n\nInput: target = 100, startFuel = 1, stations = [[10,100]]\nOutput: -1\nExplanation: We can not reach the target (or even the first gas station).\nExample 3:\n\nInput: target = 100, startFuel = 10, stations = [[10,60],[20,30],[30,30],[60,40]]\nOutput: 2\nExplanation: We start with 10 liters of fuel.\nWe drive to position 10, expending 10 liters of fuel. We refuel from 0 liters to 60 liters of gas.\nThen, we drive from position 10 to position 60 (expending 50 liters of fuel),\nand refuel from 10 liters to 50 liters of gas. 
We then drive to and reach the target.\nWe made 2 refueling stops along the way, so we return 2.\n\n\n\"\"\"\n\n# dynamic programming\n\n# solution 1, wrong\nclass Solution:\n def minRefuelStops(self, target: int, startFuel: int, stations: List[List[int]]) -> int:\n if startFuel >= target:\n return 0 # no refueling needed\n elif not stations:\n return -1 # no station but can't reach target without refueling\n \n stations = [[0, 0]] + stations # number of stations = stations index\n dp = [[-1, -1] for _ in range(len(stations))]\n dp[0] = [0, startFuel]\n for i in range(1, len(stations)):\n for j in range(i):\n if dp[j][0] != -1 and dp[j][1] + stations[j][0] >= stations[i][0]:\n if (dp[i][0] != -1 and dp[i][0] > dp[j][0] + 1) or dp[i][0] == -1: # this condition does not consider reachability and thus wrong\n dp[i][0] = dp[j][0] + 1\n dp[i][1] = dp[j][1] + stations[i][1] + stations[j][0] - stations[i][0]\n if dp[i][1] + stations[i][0] >= target: # can reach the target\n return dp[i][0] \n if dp[i][0] == -1: # still can't reach station i\n return -1\n # if can't jump out of the for loop earlier --> cannot reach target\n return -1\n\n\n# solution 2, correctm space complexity O(n2)\nclass Solution(object):\n def minRefuelStops(self, target, startFuel, stations):\n if not stations:\n if startFuel >= target:\n return 0\n else:\n return -1\n #dp[i], the farthest location we can get to using i refueling stops. \n dp = [startFuel] + [0] * len(stations)\n for i, (location, capacity) in enumerate(stations):\n for t in range(i, -1, -1): # i, i-1, .... 0, why backwards?\n if dp[t] >= location: # can reach station i\n dp[t+1] = max(dp[t+1], dp[t] + capacity)\n\n for i, d in enumerate(dp):\n if d >= target: \n return i\n return -1\n \n# solution 3, heap\n# When driving past a gas station, let's remember the amount of fuel it contained. We don't need to decide yet whether to fuel up here or not - \n# for example, there could be a bigger gas station up ahead that we would rather refuel at. 
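# Added worked trace of the heap solution below on Example 3
# (target=100, startFuel=10, stations=[[10,60],[20,30],[30,30],[60,40]];
# the code appends (target, inf) as a sentinel stop):
#   at (10, 60):  tank 10-10 = 0,   push 60
#   at (20, 30):  tank 0-10  = -10, pop 60 -> tank 50, ans=1, push 30
#   at (30, 30):  tank 50-10 = 40,  push 30
#   at (60, 40):  tank 40-30 = 10,  push 40
#   at (100,inf): tank 10-40 = -30, pop 40 -> tank 10, ans=2 -> returns 2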
\n# When we run out of fuel before reaching the next station, we'll retroactively fuel up: greedily choosing the largest gas stations first.\nclass Solution(object):\n def minRefuelStops(self, target, tank, stations):\n pq = [] # A maxheap is simulated using negative values\n stations.append((target, float('inf')))\n\n ans = prev = 0\n for location, capacity in stations:\n tank -= location - prev # remaining fuel at this station\n while pq and tank < 0: # must refuel in past\n tank += -heapq.heappop(pq) # find the largest gas station to fuel\n ans += 1\n if tank < 0: \n return -1\n heapq.heappush(pq, -capacity)\n prev = location\n\n return ans\n\n \n ","repo_name":"CathyQian/Data-Structures-and-Algorithms","sub_path":"AllSolutions/Minimum Number of Refueling Stops.py","file_name":"Minimum Number of Refueling Stops.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"75355876053","text":"\r\n\r\ndef main():\r\n seq=range(11)\r\n from math import pi\r\n seq2=[(x,x*2) for x in seq]\r\n seq3=[round(pi,i) for i in seq]\r\n seq4={x:x**2 for x in seq}\r\n print(seq)\r\n print(seq2)\r\n print(seq3)\r\n print(seq4)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ketan-analytics/learnpython","sub_path":"IntermediatePython/Lynda_Bill_PYEssential/Set.py","file_name":"Set.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4403550520","text":"\"\"\"\nMark Fraser\nm_fraser3@u.pacific.edu\nCOMP 151: Project 1\n\"\"\"\n\nfrom chromosome import (\n Chromosome\n)\n\nfrom constants import (\n BUY,\n DAY_ONE,\n DAY_TWO,\n ELITIST,\n KPOINT,\n MATCH_NOT_FOUND,\n MILESTONE_GENERATION,\n PROFIT,\n SHORT,\n TOURNAMENT,\n UNIFORM\n)\n\nfrom math import (\n ceil\n)\n\nfrom operator import (\n itemgetter\n)\n\nfrom random import (\n choice,\n gauss,\n randint,\n random\n)\n\nfrom sys import (\n exit\n)\n\n\ndef runGeneticAlgorithm(finances, num_chromosomes, num_generations,\n selection_algorithm, selection_percent, init_mutation_rate,\n crossover_algorithm, decrease_rate):\n # initialize algorithm variables\n best_chromosome = None\n curr_generation = 1\n mutation_rate = init_mutation_rate\n chromosomes = generateChromosomePopulation(num_chromosomes)\n \n while curr_generation <= num_generations:\n fitness_dict = {} # stores pair of chromosome and fitness score\n\n # gather fitness data for current chromosomes\n for chromosome in chromosomes:\n fitness_dict[chromosome] = calculateFitnessScore(chromosome,\n finances)\n\n rankings = sorted(fitness_dict.items(), key=lambda i: i[1],\n reverse=True)\n\n # intermediary command output every MILESTONE_GENERATION generations\n if curr_generation % MILESTONE_GENERATION == 0:\n fitness_scores = fitness_dict.values()\n print('Generation ' + str(curr_generation) + ':\\n' +\n ' max fitness score: ' + str(max(fitness_scores)) + '\\n'\n + ' min fitness score: ' + str(min(fitness_scores)) +\n '\\n mean fitness score: ' + str(average(fitness_scores))\n + '\\n\\n')\n\n if curr_generation == num_generations:\n best_chromosome = rankings[0]\n else:\n chromosomes = generateNextGeneration(rankings,\n selection_algorithm,\n selection_percent,\n crossover_algorithm,\n num_chromosomes,\n mutation_rate)\n\n\n mutation_rate = mutation_rate * (1 - decrease_rate)\n\n curr_generation = curr_generation + 1\n\n return best_chromosome\n\n\ndef generateChromosomePopulation(num):\n 
chromosomes = []\n for i in range(num):\n chromosomes.append(generateRandomChromosome())\n\n return chromosomes\n\n\ndef generateRandomChromosome():\n mu = 0\n sigma = 1.15\n\n # generate random percentage ranges and a random recommendation\n lb1 = gauss(mu, sigma)\n ub1 = gauss(mu, sigma)\n lb2 = gauss(mu, sigma)\n ub2 = gauss(mu, sigma)\n rec = randint(0,1)\n\n return Chromosome(lb1, ub1, lb2, ub2, rec)\n\n\ndef calculateFitnessScore(chromosome, finances):\n fitness_score = 0\n isMatchFound = False\n\n for data in finances:\n if chromosome.isMatch(data[DAY_ONE], data[DAY_TWO]):\n isMatchFound = True\n if chromosome.getRecommendation() == BUY:\n fitness_score = fitness_score + data[PROFIT]\n else:\n fitness_score = fitness_score - data[PROFIT]\n\n # invalidate chromosome if it provides no matches for finances\n if not isMatchFound:\n fitness_score = MATCH_NOT_FOUND\n\n return fitness_score\n\n\ndef average(array):\n return sum(array) / len(array)\n\n\ndef generateNextGeneration(rankings, selection_algorithm, selection_percent,\n crossover_algorithm, num_chromosomes,mutation_rate):\n selected_chromosomes = runSelectionAlgorithm(rankings,\n selection_algorithm,\n selection_percent)\n\n next_gen_chromosomes = runCrossoverAlgorithm(selected_chromosomes,\n crossover_algorithm,\n num_chromosomes)\n\n next_gen_chromosomes = performMutations(next_gen_chromosomes, mutation_rate)\n\n return next_gen_chromosomes\n\n\ndef runSelectionAlgorithm(rankings, selection_algorithm,\n selection_percent):\n if selection_algorithm == ELITIST:\n chromosomes = runElitistAlgorithm(rankings, selection_percent)\n elif selection_algorithm == TOURNAMENT:\n chromosomes = runTournamentAlgorithm(rankings, selection_percent)\n else:\n exit('selection algorithm not supported: ' + selection_algorithm)\n\n return chromosomes\n\n\ndef runElitistAlgorithm(rankings, selection_percent):\n cutoff_index = ceil(len(rankings) * selection_percent)\n chromosomes = [c[0] for c in rankings]\n return chromosomes[:cutoff_index]\n\n\ndef runTournamentAlgorithm(rankings, selection_percent):\n i = 0\n chromosomes = []\n length = len(rankings)\n num_tournaments = ceil(length * selection_percent)\n\n # check if tournament can be held\n if length * selection_percent <= 1:\n exit('unable to hold tournament: not enough chromosomes')\n\n # hold num_tournaments tournaments\n while i <= num_tournaments:\n c1 = rankings[randint(0, length-1)]\n c2 = rankings[randint(0, length-1)]\n \n # ensure uniqueness of chromosomes\n if c1 != c2:\n if c1[1] > c2[1]:\n chromosomes.append(c1[0])\n else:\n chromosomes.append(c2[0])\n\n i = i + 1\n\n return chromosomes\n\n\ndef runCrossoverAlgorithm(chromosomes, crossover_algorithm,\n num_total_chromosomes):\n if crossover_algorithm == UNIFORM:\n chromosomes = runUniformAlgorithm(chromosomes, crossover_algorithm,\n num_total_chromosomes)\n elif crossover_algorithm == KPOINT:\n chromosomes = runKpointAlgorithm(chromosomes, crossover_algorithm,\n num_total_chromosomes)\n else:\n exit('crossover algorithm not supported: ' + crossover_algorithm)\n\n return chromosomes\n\n\ndef runUniformAlgorithm(sample_chromosomes, crossover_algorithm,\n num_total_chromosomes):\n num_sample_chromosomes = len(sample_chromosomes)\n if num_sample_chromosomes <= 1:\n exit('not enough sample chromosomes to perform uniform crossover' +\n ' (must have >= 2 chromosomes)')\n \n chromosomes = sample_chromosomes # set of chromosomes to return\n\n while len(chromosomes) != num_total_chromosomes:\n c1 = sample_chromosomes[randint(0, 
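To make the two selection strategies above concrete, here is a small self-contained sketch that runs them on hypothetical `(chromosome, fitness)` rankings; the chromosome objects are replaced by plain strings for illustration:

```python
from math import ceil
from random import randint

# Hypothetical rankings, already sorted by fitness descending,
# mirroring the (chromosome, fitness) tuples used above.
rankings = [("c1", 9.0), ("c2", 7.5), ("c3", 4.2), ("c4", 1.1)]

def elitist(rankings, selection_percent):
    # keep the top ceil(n * percent) chromosomes, as runElitistAlgorithm does
    cutoff = ceil(len(rankings) * selection_percent)
    return [c for c, _ in rankings[:cutoff]]

def tournament(rankings, selection_percent):
    winners = []
    n = len(rankings)
    for _ in range(ceil(n * selection_percent)):
        c1, c2 = rankings[randint(0, n - 1)], rankings[randint(0, n - 1)]
        if c1 != c2:  # like the original, skip tournaments that draw the same entrant twice
            winners.append(c1[0] if c1[1] > c2[1] else c2[0])
    return winners

print(elitist(rankings, 0.5))     # ['c1', 'c2']
print(tournament(rankings, 0.5))  # e.g. ['c2', 'c4'] - random, at most 2 winners
```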
num_sample_chromosomes-1)]\n c2 = sample_chromosomes[randint(0, num_sample_chromosomes-1)]\n\n # ensure uniqueness of chromosomes\n if c1 != c2:\n lb1 = choice([c1.getLowerBoundDayOne(), c2.getLowerBoundDayOne()])\n ub1 = choice([c1.getUpperBoundDayOne(), c2.getUpperBoundDayOne()])\n lb2 = choice([c1.getLowerBoundDayTwo(), c2.getLowerBoundDayTwo()])\n ub2 = choice([c1.getUpperBoundDayTwo(), c2.getUpperBoundDayTwo()])\n rec = choice([c1.getRecommendation(), c2.getRecommendation()])\n\n chromosomes.append(Chromosome(lb1, ub1, lb2, ub2, rec))\n\n return chromosomes\n\n\ndef runKpointAlgorithm(sample_chromosomes, crossover_algorithm,\n num_total_chromosomes):\n num_sample_chromosomes = len(sample_chromosomes)\n if num_sample_chromosomes <= 1:\n exit('not enough sample chromosomes to perform kpoint crossover' +\n ' (must have >= 2 chromosomes)')\n \n chromosomes = sample_chromosomes # set of chromosomes to return\n\n while len(chromosomes) != num_total_chromosomes:\n c1 = sample_chromosomes[randint(0, num_sample_chromosomes-1)]\n c2 = sample_chromosomes[randint(0, num_sample_chromosomes-1)]\n\n # ensure uniqueness of chromosomes\n if c1 != c2:\n lb1 = c1.getLowerBoundDayOne()\n ub1 = c1.getUpperBoundDayOne()\n lb2 = c2.getLowerBoundDayTwo()\n ub2 = c2.getUpperBoundDayTwo()\n rec = c2.getRecommendation()\n\n chromosomes.append(Chromosome(lb1, ub1, lb2, ub2, rec))\n\n return chromosomes\n\n\ndef performMutations(next_gen_chromosomes, mutation_rate):\n mu = 0\n sigma = 1.15\n chromosomes = []\n\n for chromosome in next_gen_chromosomes:\n # this breaks encapsulation, but is more concise\n data = chromosome.data\n gene_range = len(data) - 1 # confines range to percentage bound values\n for i in range(gene_range):\n # check whether to mutate chromosome bound\n if random() <= mutation_rate:\n data[i] = gauss(mu, sigma)\n\n # check for mutation of recommendation\n if random() <= mutation_rate:\n if data[4] == BUY:\n data[4] = SHORT\n else:\n data[4] = BUY\n\n # create chromosome with (potentially new) data\n chromosomes.append(Chromosome(data[0], data[1], data[2], data[3],\n data[4]))\n\n return chromosomes","repo_name":"mfraser4/comp151-project1","sub_path":"geneticalgorithm.py","file_name":"geneticalgorithm.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"42700698539","text":"import ssl\r\nimport json\r\n\r\nimport websocket\r\nimport bitstamp.client\r\n\r\nimport credenciais\r\n\r\n# amount per market order; the original called buy()/sell() with no\r\n# argument, so this constant is an assumed placeholder\r\nTRADE_AMOUNT = 0.001\r\n\r\n\r\ndef user():\r\n return bitstamp.client.Trading(username=credenciais.USERNAME,\r\n key=credenciais.KEY,\r\n secret=credenciais.SECRET)\r\n\r\n\r\ndef buy(quantidade):\r\n trading_client = user()\r\n trading_client.buy_market_order(quantidade)\r\n\r\n\r\ndef sell(quantidade):\r\n trading_client = user()\r\n trading_client.sell_market_order(quantidade)\r\n\r\n\r\ndef on_open(ws):\r\n print(\"connection opened\")\r\n\r\n json_subscribe = \"\"\"\r\n{\r\n \"event\": \"bts:subscribe\",\r\n \"data\": {\r\n \"channel\": \"live_trades_btcusd\"\r\n }\r\n}\r\n\"\"\"\r\n\r\n ws.send(json_subscribe)\r\n\r\n\r\ndef on_close(ws):\r\n print(\"connection closed\")\r\n\r\n\r\ndef on_error(ws, error):\r\n print(\"An error occurred\")\r\n print(error)\r\n\r\n\r\ndef on_message(ws, message):\r\n message = json.loads(message)\r\n price = message['data']['price']\r\n print(price)\r\n\r\n if price > 10000:\r\n sell(TRADE_AMOUNT)\r\n elif price < 8100:\r\n buy(TRADE_AMOUNT)\r\n else:\r\n print(\"Wait\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ws = 
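The mutation rate in `runGeneticAlgorithm` shrinks geometrically (`rate *= 1 - decrease_rate` once per generation), so after `t` generations it equals `init_rate * (1 - decrease_rate)**t`; a minimal check of that closed form:

```python
init_rate, decrease_rate = 0.10, 0.05  # illustrative values

rate = init_rate
for t in range(10):
    rate *= 1 - decrease_rate
print(round(rate, 6))                                    # 0.059874 after 10 generations
print(round(init_rate * (1 - decrease_rate) ** 10, 6))   # closed form, identical
```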
websocket.WebSocketApp(\"wss://ws.bitstamp.net\",\r\n on_open=on_open,\r\n on_close=on_close,\r\n on_message=on_message,\r\n on_error=on_error)\r\n ws.run_forever(sslopt={\"cert_reqs\": ssl.CERT_NONE})","repo_name":"GuilhermeBeltrao/bitcoin_bot","sub_path":"bitcoin.py","file_name":"bitcoin.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"10260022221","text":"from pandac.PandaModules import *\nfrom direct.showbase import DirectObject\nfrom direct.showbase.PythonUtil import Enum\nfrom direct.fsm.FSM import FSM\nfrom otp.otpbase import OTPGlobals\n\nclass CameraMode(DirectObject.DirectObject, FSM):\n Modes = Enum('NORMAL, BATTLE')\n\n def __init__(self):\n FSM.__init__(self, 'CameraMode')\n self.mouseControl = False\n self.mouseDelta = (0, 0)\n self.lastMousePos = (0, 0)\n self.origMousePos = (0, 0)\n self.request('Off')\n self.__inputEnabled = False\n\n def destroy(self):\n pass\n\n def getName(self):\n pass\n\n def start(self):\n if not self.isActive():\n self.request('Active')\n\n def stop(self):\n if self.isActive():\n self.request('Off')\n\n def isActive(self):\n return self.state == 'Active'\n\n def enterOff(self):\n pass\n\n def exitOff(self):\n pass\n\n def enterActive(self):\n self.cTravOnFloor = CollisionTraverser('CamMode.cTravOnFloor')\n self.camFloorRayNode = self.attachNewNode('camFloorRayNode')\n self.ccRay2 = CollisionRay(0.0, 0.0, 0.0, 0.0, 0.0, -1.0)\n self.ccRay2Node = CollisionNode('ccRay2Node')\n self.ccRay2Node.addSolid(self.ccRay2)\n self.ccRay2NodePath = self.camFloorRayNode.attachNewNode(self.ccRay2Node)\n self.ccRay2BitMask = OTPGlobals.FloorBitmask\n self.ccRay2Node.setFromCollideMask(self.ccRay2BitMask)\n self.ccRay2Node.setIntoCollideMask(BitMask32.allOff())\n self.ccRay2MoveNodePath = hidden.attachNewNode('ccRay2MoveNode')\n self.camFloorCollisionBroadcaster = CollisionHandlerFloor()\n self.camFloorCollisionBroadcaster.setInPattern('on-floor')\n self.camFloorCollisionBroadcaster.setOutPattern('off-floor')\n self.camFloorCollisionBroadcaster.addCollider(self.ccRay2NodePath, self.ccRay2MoveNodePath)\n self.cTravOnFloor.addCollider(self.ccRay2NodePath, self.camFloorCollisionBroadcaster)\n self.enableInput()\n\n def exitActive(self):\n self.disableInput()\n del self.cTravOnFloor\n del self.ccRay2\n del self.ccRay2Node\n self.ccRay2NodePath.removeNode()\n del self.ccRay2NodePath\n self.ccRay2MoveNodePath.removeNode()\n del self.ccRay2MoveNodePath\n self.camFloorRayNode.removeNode()\n del self.camFloorRayNode\n\n def enableInput(self):\n self.__inputEnabled = True\n self.accept('mouse3', self.enableMouseControl)\n self.accept('mouse3-up', self.disableMouseControl)\n if base.mouseWatcherNode.isButtonDown(MouseButton.three()):\n self.enableMouseControl()\n\n def disableInput(self):\n self.__inputEnabled = False\n self.disableMouseControl()\n self.ignore('mouse3')\n self.ignore('mouse3-up')\n\n def isInputEnabled(self):\n return self.__inputEnabled\n\n def enableMouseControl(self):\n if hasattr(base, 'oobeMode') and base.oobeMode:\n return\n\n self.mouseControl = True\n mouseData = base.win.getPointer(0)\n self.origMousePos = (mouseData.getX(), mouseData.getY())\n if 'localAvatar' in __builtins__:\n localAvatar.guiMgr._hideCursor()\n\n base.win.movePointer(0, base.win.getXSize() / 2, base.win.getYSize() / 2)\n self.lastMousePos = (base.win.getXSize() / 2, base.win.getYSize() / 2)\n if self.getCurrentOrNextState() == 'Active':\n self._startMouseControlTasks()\n\n def 
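The `on_message` handler above expects Bitstamp `live_trades` payloads with the price under `data.price`; the threshold logic can be tested offline with a function that mirrors it (the sample payload below is illustrative, not a verbatim Bitstamp message):

```python
import json

def decide(raw_message, upper=10000, lower=8100):
    """Mirror of the bot's threshold logic, returning an action string."""
    price = json.loads(raw_message)['data']['price']
    if price > upper:
        return 'sell'
    elif price < lower:
        return 'buy'
    return 'wait'

sample = '{"event": "trade", "data": {"price": 10500}}'  # illustrative payload
print(decide(sample))  # sell
```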
disableMouseControl(self):\n if hasattr(base, 'oobeMode') and base.oobeMode:\n return\n\n if self.mouseControl:\n self.mouseControl = False\n self._stopMouseControlTasks()\n if 'localAvatar' in __builtins__:\n localAvatar.guiMgr._showCursor()\n\n base.win.movePointer(0, int(self.origMousePos[0]), int(self.origMousePos[1]))\n\n def _startMouseControlTasks(self):\n if self.mouseControl:\n properties = WindowProperties()\n properties.setMouseMode(properties.MRelative)\n base.win.requestProperties(properties)\n self._startMouseReadTask()\n self._startMouseUpdateTask()\n\n def _stopMouseControlTasks(self):\n properties = WindowProperties()\n properties.setMouseMode(properties.MAbsolute)\n base.win.requestProperties(properties)\n self._stopMouseReadTask()\n self._stopMouseUpdateTask()\n\n def _startMouseReadTask(self):\n self._stopMouseReadTask()\n taskMgr.add(self._mouseReadTask, '%s-MouseRead' % self._getTopNodeName(), priority=-29)\n\n def _mouseReadTask(self, task):\n if hasattr(base, 'oobeMode') and base.oobeMode:\n self.mouseDelta = (0, 0)\n return task.cont\n elif not base.mouseWatcherNode.hasMouse():\n self.mouseDelta = (0, 0)\n else:\n winSize = (\n base.win.getXSize(), base.win.getYSize())\n mouseData = base.win.getPointer(0)\n if mouseData.getX() > winSize[0] or mouseData.getY() > winSize[1]:\n self.mouseDelta = (0, 0)\n else:\n self.mouseDelta = (mouseData.getX() - self.lastMousePos[0], mouseData.getY() - self.lastMousePos[1])\n base.win.movePointer(0, winSize[0] / 2, winSize[1] / 2)\n mouseData = base.win.getPointer(0)\n self.lastMousePos = (mouseData.getX(), mouseData.getY())\n return task.cont\n\n def _stopMouseReadTask(self):\n taskMgr.remove('%s-MouseRead' % self._getTopNodeName())\n\n def _startMouseUpdateTask(self):\n self._stopMouseUpdateTask()\n taskMgr.add(self._avatarFacingTask, '%s-AvatarFacing' % self._getTopNodeName(), priority=23)\n taskMgr.add(self._mouseUpdateTask, '%s-MouseUpdate' % self._getTopNodeName(), priority=40)\n\n def _avatarFacingTask(self, task):\n return task.cont\n\n def _mouseUpdateTask(self, task):\n return task.cont\n\n def _stopMouseUpdateTask(self):\n taskMgr.remove('%s-MouseUpdate' % self._getTopNodeName())\n taskMgr.remove('%s-AvatarFacing' % self._getTopNodeName())\n\n def avFaceCamera(self):\n pass\n","repo_name":"PiratesOnlineRewritten/Pirates-Online-Rewritten","sub_path":"pirates/pirate/CameraMode.py","file_name":"CameraMode.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"67"} +{"seq_id":"69865874135","text":"def dfs(cur, dist, visited, graph):\n global max_l\n for nxt in graph[cur]:\n if visited[nxt] == 0:\n visited[nxt] = 1\n dfs(nxt, dist+1, visited, graph)\n visited[nxt] = 0\n max_l = max(dist, max_l)\n\ndef solve(graph, N):\n global max_l\n max_l = 0\n for i in range(1, N+1):\n visited = [0] * (N+1)\n visited[i] = 1\n dfs(i, 1, visited, graph)\n return max_l\n \nT = int(input())\nfor test_case in range(1, T + 1):\n N, M = map(int, input().split())\n graph = [[] for _ in range(N+1)]\n for _ in range(M):\n v1, v2 = map(int, input().split())\n graph[v1].append(v2)\n graph[v2].append(v1)\n answer = solve(graph, N)\n print(f\"#{test_case} {answer}\")\n","repo_name":"eun-byeol/algorithm","sub_path":"python/backtracking/2814_최장_경로.py","file_name":"2814_최장_경로.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17875484712","text":"# pylint: 
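The backtracking search in the snippet above counts vertices on the longest simple path (`dist` starts at 1 and `max_l` tracks the best depth). A standalone version without the stdin driver makes the behavior easy to verify; for a 4-node chain 1-2-3-4 it reports 4:

```python
def longest_simple_path(graph, n):
    """Longest simple path (vertex count) via DFS with backtracking, as above."""
    best = 0
    def dfs(cur, dist, visited):
        nonlocal best
        best = max(best, dist)
        for nxt in graph[cur]:
            if not visited[nxt]:
                visited[nxt] = True
                dfs(nxt, dist + 1, visited)
                visited[nxt] = False  # un-mark so other start points can reuse the node
    for start in range(1, n + 1):
        visited = [False] * (n + 1)
        visited[start] = True
        dfs(start, 1, visited)
    return best

# Chain 1-2-3-4: the longest simple path visits all 4 vertices.
g = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}
print(longest_simple_path(g, 4))  # 4
```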
disable=invalid-name\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"resources\", \"0007_podcasts\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"resourcefeed\",\n name=\"show_children\",\n field=models.BooleanField(default=False, verbose_name=\"show children\"),\n ),\n ]\n","repo_name":"thepointchurch/upperroom","sub_path":"upperroom/resources/migrations/0008_resourcefeed_show_children.py","file_name":"0008_resourcefeed_show_children.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33200804897","text":"import sys\nreader = (s.rstrip() for s in sys.stdin)\ninput = reader.__next__\n\ndef gift():\n for _ in range(t):\n x,y = list(map(int,input().split()))\n if abs(x-y)==0:\n yield x*2\n else:\n yield abs(x-y)*2-1+min(x,y)*2\nif __name__ == '__main__':\n t= int(input())\n ans = gift()\n print(*ans,sep='\\n')\n \n\n\n#\"{} {} {}\".format(maxele,minele,minele)\n","repo_name":"marcus-aurelianus/codeforce","sub_path":"edu98/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"40493306128","text":"from copy import deepcopy\nfrom collections import defaultdict\nfrom sentence import Sentence\nfrom predicate import Predicate\nfrom constants import Consts\nfrom itertools import combinations\n\n\nclass Logic:\n @staticmethod\n def neg(pred_name):\n if pred_name[0] == Consts.NOT:\n return pred_name[1:]\n return Consts.NOT + pred_name\n\n @staticmethod\n def ask(KB, query):\n ret = Logic.resolution(KB, query, False)\n if not ret:\n # ret = Logic.sos_resolution(KB, query, unit_only=True)\n # if not ret:\n ret = Logic.sos_resolution(KB, query, unit_only=False)\n return ret\n\n @staticmethod\n def merge_sentence(sentence):\n drop_counter = defaultdict(list)\n for name, pred_list in sentence.predicate_name_map.items():\n if len(pred_list) > 1:\n for pred1, pred2 in combinations(pred_list, 2):\n if Predicate.are_equal(pred1, pred2):\n if pred1 not in drop_counter[name]:\n drop_counter[name].append(pred1)\n elif pred2 not in drop_counter[name]:\n drop_counter[name].append(pred2)\n for key, drop_list in drop_counter.items():\n lst = sentence.predicate_name_map[key]\n\n for item in drop_list:\n if len(lst) == 1:\n break\n lst.remove(item)\n new_preds = []\n for k, lst in sentence.predicate_name_map.items():\n new_preds.extend(lst)\n if new_preds:\n sentence.ord_preds = new_preds\n\n @staticmethod\n def factor_sentence(sentence):\n can_factor = False\n preds_to_factor = None\n\n for name, pred_list in sentence.predicate_name_map.items():\n if len(pred_list) > 1:\n can_factor = True\n preds_to_factor = pred_list\n break\n\n if not can_factor:\n return\n\n for pred1, pred2 in combinations(preds_to_factor, 2):\n if pred1.negated ^ pred2.negated:\n continue\n subs = Logic.unify(pred1, pred2)\n\n if subs is not None:\n for pred in sentence.ord_preds:\n pred.subst(subs)\n Logic.merge_sentence(sentence)\n Logic.sort_sentence(sentence)\n\n @staticmethod\n def sort_sentence(sentence):\n sentence.ord_preds.sort(key=lambda x: x.name)\n\n @staticmethod\n def is_tautology(sentence):\n preds_to_check = []\n\n for name, pred_list in sentence.predicate_name_map.items():\n if len(pred_list) > 1:\n preds_to_check.append(name)\n\n if len(preds_to_check) == 0:\n return False\n\n for pred_name in preds_to_check:\n for pred1, 
pred2 in combinations(\n sentence.predicate_name_map[pred_name], 2):\n if Predicate.are_tautology(pred1, pred2):\n return True\n return False\n\n @staticmethod\n def is_variable(var):\n return isinstance(var, str) and var[0].islower()\n\n @staticmethod\n def cyclic_vars(var1, var2, mapping):\n if var1 == var2:\n return True\n elif Logic.is_variable(var2) and var1 in mapping:\n return Logic.cyclic_vars(var1, mapping[var2], mapping)\n else:\n return False\n\n @staticmethod\n def unify_variable(var1, var2, mapping):\n if var1 in mapping:\n return Logic.unify(mapping[var1], var2, mapping)\n elif var2 in mapping:\n return Logic.unify(var1, mapping[var2], mapping)\n elif Logic.cyclic_vars(var1, var2, mapping):\n return None\n else:\n temp_mapping = mapping.copy()\n temp_mapping[var1] = var2\n return temp_mapping\n\n @staticmethod\n def unify(args1, args2, mapping=dict()):\n if mapping is None:\n return None\n elif args1 == args2:\n return mapping\n elif isinstance(args1, Predicate) and isinstance(args2, Predicate):\n return Logic.unify(args1.ordered_args, args2.ordered_args)\n elif Logic.is_variable(args1):\n return Logic.unify_variable(args1, args2, mapping)\n elif Logic.is_variable(args2):\n return Logic.unify_variable(args2, args1, mapping)\n elif isinstance(args1, list) and \\\n isinstance(args2, list) and len(args1) == len(args2):\n if not args1:\n return mapping\n return Logic.unify(args1[1:], args2[1:],\n Logic.unify(args1[0], args2[0], mapping))\n return None\n\n @staticmethod\n def resolve(sentence1, sentence2):\n new_sentences = []\n\n s1_preds = sentence1.ord_preds\n s2_preds = sentence2.ord_preds\n\n for pred1 in s1_preds:\n for pred2 in s2_preds:\n if pred1.name == pred2.name and \\\n (pred1.negated ^ pred2.negated):\n\n substs = Logic.unify(pred1, pred2)\n\n if substs is None:\n continue\n\n new_sent = []\n\n for pred in s1_preds:\n if pred == pred1:\n continue\n new_pred = deepcopy(pred)\n new_pred.subst(substs)\n new_sent.append(new_pred)\n\n for pred in s2_preds:\n if pred == pred2:\n continue\n new_pred = deepcopy(pred)\n new_pred.subst(substs)\n new_sent.append(new_pred)\n\n if not new_sent:\n new_sentences.append(False)\n else:\n new_sentences.append(Sentence(new_sent))\n\n return new_sentences\n\n @staticmethod\n def resolution(KB, alpha, factoring=True):\n sentences = deepcopy(KB.sentences) + [Sentence(Consts.NOT + alpha)]\n\n sentence_set = set(sentences)\n prev_set = set(sentences)\n\n while True:\n pairs = []\n new = set()\n\n for sent1 in sentence_set:\n for sent2 in prev_set:\n if sent1 != sent2:\n pairs.append((sent1, sent2))\n\n for s1, s2 in pairs:\n if len(s1.ord_preds) > 1 and \\\n len(s2.ord_preds) > 1:\n continue\n resolvents = Logic.resolve(s1, s2)\n\n if False in resolvents:\n return True\n\n usable_resolvents = []\n for resolvent in resolvents:\n if factoring:\n Logic.factor_sentence(resolvent)\n\n if (Logic.is_tautology(resolvent)):\n continue\n rlen = len(resolvent.ord_preds)\n if rlen <= len(s1.ord_preds) or rlen <= len(s2.ord_preds):\n usable_resolvents.append(resolvent)\n\n new = new.union(set(usable_resolvents))\n\n if new.issubset(sentence_set):\n return False\n\n prev_set = new\n sentence_set.update(new)\n\n @staticmethod\n def sos_resolve(s1, s2, temp_set, sos_map, unit=False):\n if unit:\n if len(s1) > 1 and len(s2) > 1:\n return\n\n resolvents = Logic.resolve(s1, s2)\n\n if False in resolvents:\n return True\n\n for resolvent in resolvents:\n if not unit and len(resolvent) <= 3:\n Logic.factor_sentence(resolvent)\n else:\n 
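Assuming the module above is importable (say as `logic`), `Logic.unify` walks the two argument lists left to right, binding lowercase names as variables while uppercase constants must match exactly:

```python
# Hypothetical import name; the class above lives in logic.py.
from logic import Logic

# 'x' and 'y' are variables (lowercase); 'A' and 'B' are constants.
print(Logic.unify(['x', 'A'], ['B', 'y']))  # {'x': 'B', 'y': 'A'}
print(Logic.unify(['x'], ['x']))            # {} - already identical, nothing to bind
print(Logic.unify(['A'], ['B']))            # None - distinct constants cannot unify
```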
Logic.merge_sentence(resolvent)\n Logic.sort_sentence(resolvent)\n\n if (Logic.is_tautology(resolvent)):\n continue\n\n usable_resolvents = []\n\n rlen = len(resolvent.ord_preds)\n if rlen <= len(s1.ord_preds) or \\\n rlen <= len(s2.ord_preds):\n usable_resolvents.append(resolvent)\n\n temp_set.update(set(usable_resolvents))\n\n for sentence in usable_resolvents:\n for pred in sentence.ord_preds:\n sos_map[pred.name][pred.negated].add(sentence)\n\n @staticmethod\n def gen_pairs(map1, map2):\n pairs = []\n for s1 in map1:\n s2s = set()\n for pred in s1.ord_preds:\n s2s |= map2[pred.name][not pred.negated]\n for s2 in s2s:\n pairs.append((s1, s2))\n return pairs\n\n @staticmethod\n def sos_resolution(KB, query, unit_only=False):\n query = query[1:] if query[0] == Consts.NOT else Consts.NOT + query\n query_sent = Sentence(query)\n prev_sos = [set([query_sent])]\n sos = prev_sos[-1]\n aux = set(KB.sentences)\n vis = set()\n vis.update(sos)\n vis.update(aux)\n\n sos_map = defaultdict(lambda: defaultdict(set))\n aux_map = defaultdict(lambda: defaultdict(set))\n\n loop_ctr = 0\n\n for sentence in KB.sentences:\n for pred in sentence.ord_preds:\n aux_map[pred.name][pred.negated].add(sentence)\n\n for pred in query_sent.ord_preds:\n sos_map[pred.name][pred.negated].add(query_sent)\n\n while True:\n temp_set = set()\n sos = prev_sos[-1]\n for s1, s2 in Logic.gen_pairs(sos, sos_map):\n ret = Logic.sos_resolve(s1, s2, temp_set, sos_map, unit=True)\n\n if ret is True:\n return True\n\n if loop_ctr == 0:\n for s1, s2 in Logic.gen_pairs(sos, aux_map):\n ret = Logic.sos_resolve(s1, s2, temp_set, sos_map, unit=True)\n\n if ret is True:\n return True\n\n if not temp_set.issubset(sos):\n sos.update(temp_set)\n continue\n\n if not unit_only:\n for s1, s2 in Logic.gen_pairs(sos, sos_map):\n ret = Logic.sos_resolve(s1, s2, temp_set, sos_map)\n\n if ret is True:\n return True\n \n if loop_ctr == 0:\n for s1, s2 in Logic.gen_pairs(sos, aux_map):\n ret = Logic.sos_resolve(s1, s2, temp_set, sos_map)\n\n if ret is True:\n return True\n\n if temp_set.issubset(vis):\n return False\n\n vis.update(temp_set)\n prev_sos.append(temp_set)\n sos_map = defaultdict(lambda: defaultdict(set))\n for sentence in temp_set:\n for pred in sentence.ord_preds:\n sos_map[pred.name][pred.negated].add(sentence)\n\n loop_ctr += 1\n # for x in sos:\n # print(x)\n # for x in aux:\n # print(x)\n # for x in temp_set:\n # print(x)\n # for x in sos:\n # print(x)\n # print(len(temp_set))\n if len(temp_set) > 1000:\n return False\n","repo_name":"harshsjani/FOL-Resolution-System","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":11232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17643499248","text":"import psycopg2\nimport datetime\nimport pandas as pd\nimport numpy as np\n\ndef get_conn():\n conn = None\n try:\n conn = psycopg2.connect(\n host=\"localhost\",\n database=\"postgres\",\n user=\"postgres\",\n password=\"postgres\"\n )\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return conn\n\n\ndef test_connect():\n \"\"\"测试数据库连接\"\"\"\n conn = None\n try:\n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n conn = get_conn()\n # create a cursor\n cur = conn.cursor()\n\n\t # execute a statement\n print('PostgreSQL database version:')\n cur.execute('SELECT version()')\n\n # display the PostgreSQL database server version\n db_version = cur.fetchone()\n print(db_version)\n\n\t# close the 
communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n\n\ndef query(sql):\n \"\"\"Query the database and return all fetched rows\"\"\"\n conn = None\n try:\n conn = get_conn()\n # create a cursor\n cur = conn.cursor()\n # execute a statement\n cur.execute(sql)\n rows = cur.fetchall()\n # close the communication with the PostgreSQL\n cur.close()\n return rows\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n\n\ndef insert(sql, data_array):\n \"\"\"Insert data\n data_array: each element is a tuple\n \"\"\"\n conn = None\n try:\n conn = get_conn()\n # create a cursor\n cur = conn.cursor()\n # execute a statement\n cur.executemany(sql, data_array)\n # commit the changes to the database\n conn.commit()\n # close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n\n\ndef insert_company_info(company_infos):\n \"\"\"Insert listed-company information\"\"\"\n \"\"\" insert multiple stocks into the company_info table \"\"\"\n sql = \"\"\"INSERT INTO company_info(code, name, exchange, listed)\n VALUES(%s, %s, %s, %s)\"\"\"\n insert(sql, company_infos)\n\n\ndef get_company_infos():\n \"\"\"\n Return stock codes and listing dates\n \"\"\"\n sql = \"SELECT code, listed, exchange from company_info order by code asc\"\n return query(sql)\n\n\ndef get_existing_stock_infos():\n \"\"\"\n Return the stocks currently in the database and their latest trade-data dates\n \"\"\"\n sql = \"select code, MAX(date) from stocks_daily_hfq group by code\"\n return query(sql)\n\n\ndef insert_stock_data(stock_data):\n \"\"\"Insert stock data, one row per day\"\"\"\n \"\"\" insert multiple stocks data into the stocks table \"\"\"\n sql = \"\"\"INSERT INTO stocks_daily_hfq(code, date, open, high, low, close, volume)\n VALUES(%s, %s, %s, %s, %s, %s, %s)\"\"\"\n insert(sql, stock_data)\n\n\ndef get_stock_data(code):\n \"\"\"Fetch trade data for a stock\"\"\"\n sql = \"select * from stocks_daily_hfq where code='{}'\".format(code)\n\n return query(sql)\n\n\ndef get_stock_data_between(code, start_date, end_date=datetime.date.today().isoformat()):\n \"\"\"Fetch trade data for a stock\n code, date, open, high, low, close, volume\n \"\"\"\n sql = \"select * from stocks_daily_hfq where code='{}' and date>'{}' and date<'{}'\".format(code, start_date, end_date)\n\n return query(sql)\n\n\ndef generate_avg_5_hdf(hdf_file):\n \"\"\"Generate 5-day moving-average data\"\"\"\n companies = get_company_infos()\n\n for company in companies:\n code = company[0]\n exchange = company[2]\n data = get_stock_data(code)\n df = pd.DataFrame(data, columns=[\"code\", \"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n df.set_index(\"date\", inplace=True)\n df.sort_index(inplace=True)\n\n close_price = df[\"close\"]\n avg5 = np.zeros((len(close_price,)))\n avg5[:5] = np.nan\n\n for i in range(5, len(avg5)):\n avg5[i] = np.sum(close_price[i-5:i]) / 5.0\n\n df[\"avg5\"] = avg5\n\n data = df.loc[:, \"avg5\"]\n\n data.to_hdf(hdf_file, key=\"{}_{}\".format(exchange, code), mode='a', complevel=4, complib=\"zlib\")\n print(code, \" avg5 created\")\n\n\ndef generate_avg_10_hdf(hdf_file):\n \"\"\"Generate 10-day moving-average data\"\"\"\n companies = get_company_infos()\n\n for company in companies:\n code = company[0]\n exchange = company[2]\n data = get_stock_data(code)\n df = pd.DataFrame(data, columns=[\"code\", \"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n df.set_index(\"date\", 
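The four `generate_avg_*_hdf` functions above differ only in the window length, and the inner loop (`sum(close[i-w:i]) / w`) is the mean of the *previous* `w` closes, i.e. a shifted rolling mean. A possible consolidation using pandas — a sketch assuming the helper functions defined above are in scope, not part of the original module:

```python
import pandas as pd

def generate_avg_hdf(hdf_file, window):
    """One parameterized version of generate_avg_5/10/20/30_hdf."""
    for code, _, exchange in get_company_infos():  # rows are (code, listed, exchange)
        data = get_stock_data(code)
        df = pd.DataFrame(data, columns=["code", "date", "open", "high",
                                         "low", "close", "volume"])
        df = df.set_index("date").sort_index()
        # mean of the previous `window` closes, matching the manual loop above
        avg = df["close"].rolling(window).mean().shift(1)
        avg.rename("avg{}".format(window)).to_hdf(
            hdf_file, key="{}_{}".format(exchange, code),
            mode="a", complevel=4, complib="zlib")
        print(code, " avg{} created".format(window))
```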
inplace=True)\n df.sort_index(inplace=True)\n\n close_price = df[\"close\"]\n avg10 = np.zeros((len(close_price,)))\n avg10[:10] = np.nan\n\n for i in range(10, len(avg10)):\n avg10[i] = np.sum(close_price[i-10:i]) / 10.0\n\n df[\"avg10\"] = avg10\n\n data = df.loc[:, \"avg10\"]\n\n data.to_hdf(hdf_file, key=\"{}_{}\".format(exchange, code), mode='a', complevel=4, complib=\"zlib\")\n print(code, \" avg10 created\")\n\n\ndef generate_avg_20_hdf(hdf_file):\n \"\"\"Generate 20-day moving-average data\"\"\"\n companies = get_company_infos()\n\n for company in companies:\n code = company[0]\n exchange = company[2]\n data = get_stock_data(code)\n df = pd.DataFrame(data, columns=[\"code\", \"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n df.set_index(\"date\", inplace=True)\n df.sort_index(inplace=True)\n\n close_price = df[\"close\"]\n avg20 = np.zeros((len(close_price,)))\n avg20[:20] = np.nan\n\n for i in range(20, len(avg20)):\n avg20[i] = np.sum(close_price[i-20:i]) / 20.0\n\n df[\"avg20\"] = avg20\n\n data = df.loc[:, \"avg20\"]\n\n data.to_hdf(hdf_file, key=\"{}_{}\".format(exchange, code), mode='a', complevel=4, complib=\"zlib\")\n print(code, \" avg20 created\")\n\n\ndef generate_avg_30_hdf(hdf_file):\n \"\"\"Generate 30-day moving-average data\"\"\"\n companies = get_company_infos()\n\n for company in companies:\n code = company[0]\n exchange = company[2]\n data = get_stock_data(code)\n df = pd.DataFrame(data, columns=[\"code\", \"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n df.set_index(\"date\", inplace=True)\n df.sort_index(inplace=True)\n\n close_price = df[\"close\"]\n avg30 = np.zeros((len(close_price,)))\n avg30[:30] = np.nan\n\n for i in range(30, len(avg30)):\n avg30[i] = np.sum(close_price[i-30:i]) / 30.0\n\n df[\"avg30\"] = avg30\n\n data = df.loc[:, \"avg30\"]\n\n data.to_hdf(hdf_file, key=\"{}_{}\".format(exchange, code), mode='a', complevel=4, complib=\"zlib\")\n print(code, \" avg30 created\")\n\n\ndef generate_close_price_hdf(hdf_file):\n \"\"\"Generate daily close-price data\"\"\"\n companies = get_company_infos()\n\n for company in companies:\n code = company[0]\n exchange = company[2]\n data = get_stock_data(code)\n df = pd.DataFrame(data, columns=[\"code\", \"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n df.set_index(\"date\", inplace=True)\n\n data = df.loc[:, \"close\"]\n\n data.to_hdf(hdf_file, key=\"{}_{}\".format(exchange, code), mode='a', complevel=4, complib=\"zlib\")\n print(code, \" price created\")\n\n\ndef to_db_tuples(code, hist_df):\n result = [(code, listed, open, high, low, close, volume)\n for listed, open, high, low, close, volume\n in zip(hist_df['日期'], hist_df['开盘'], hist_df['最高'], hist_df['最低'], hist_df['收盘'], hist_df['成交额'])]\n return result\n\n\nif __name__ == '__main__':\n test_connect()","repo_name":"xiaoyaohu0325/quant","sub_path":"stock/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"30043151089","text":"from urllib.parse import urlparse\n\nimport flask\nfrom telebot import TeleBot, types\n\nfrom config import TOKEN, server\nfrom aggregator import User\n\nbot = TeleBot(TOKEN)\napp = flask.Flask(__name__)\nAPP_URL = f\"https://{server.public_host}:{server.port}\"\n\n# content messages\nHELLO_MESSAGE = \"\"\"del + url - delete url\nurl + some text - add/change url\nsome text - search by tags\n\"\"\"\n\n\n# base skeleton of bot app\n@app.route('/', methods=['GET', 'HEAD'])\ndef index():\n return ''\n\n\n@app.route(f\"/{TOKEN}\", 
methods=['POST'])\ndef web():\n if flask.request.headers.get('content-type') == 'application/json':\n json_string = flask.request.get_data().decode('utf-8')\n update = types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n\n\n@bot.message_handler(commands=['start', 'help'])\ndef hello(msg):\n # init user and create document of this user in database\n User(msg.chat.id)\n bot.send_message(\n msg.chat.id,\n HELLO_MESSAGE\n )\n\n\n@bot.message_handler(content_types=['text'])\ndef check_link(msg):\n user = User(msg.chat.id)\n splitted_text = msg.text.split()\n if urlparse(splitted_text[0]).netloc:\n bot.send_message(\n user.user_id,\n user.create_link(splitted_text[0], splitted_text[1:]))\n\n elif msg.text.startswith(\"del\"):\n if urlparse(splitted_text[1]).netloc:\n response = user.delete_link(splitted_text[1])\n bot.send_message(\n user.chat_id,\n response\n )\n\n else:\n links = user.get_links_by_tags(splitted_text)\n\n if links.count():\n response = \"result: \\n\"\n for link in links:\n response += f\"🍕 [{link['title']}]({link['url']})\" \\\n f\" tags: *{link['tags']}*\\n\"\n bot.send_message(\n user.user_id,\n response,\n disable_web_page_preview=True,\n parse_mode=\"MARKDOWN\"\n )\n\n else:\n bot.send_message(\n user.user_id,\n \"not found\")\n\n\nif __name__ == \"__main__\":\n bot.remove_webhook()\n\n bot.set_webhook(\n url=f\"{APP_URL}/{TOKEN}\",\n certificate=open(server.cert))\n app.run(host=server.host,\n port=server.port,\n ssl_context=(server.cert, server.pkey),\n debug=True)\n","repo_name":"orangeatom/link_aggregator","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9338266753","text":"# Program to print a pattern of hourglass using aestrik in python\n\n# Hourglass pattern\n\"\"\"\nDemo eg -->\n* * * * * *\n * * * * *\n * * * *\n * * * \n * * \n *\n *\n * *\n * * *\n * * * *\n * * * * *\n* * * * * *\n\"\"\"\n# Code -->\ndef regular():\n for i in range(n):\n for j in range(i):\n print(\" \",end=\"\")\n for k in range(n-i):\n print(\"* \",end = \"\")\n print()\ndef invert():\n for i in range(n):\n for j in range(n-(i+1)):\n print(\" \",end=\"\")\n for k in range(i+1):\n print(\"* \",end = \"\")\n print()\nn = int(input(\"Enter a number\"))\nregular()\ninvert()","repo_name":"paras-13/Python_pattern_programs","sub_path":"Hourglass_pattern.py","file_name":"Hourglass_pattern.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16723976410","text":"from playerFile import Player\n# from FlagpoleFile import Flagpole\nclass Level:\n def __init__(self, scenes):\n self.scenes = scenes #List of scenes\n self.scene_index = 0\n self.over = ''\n self.player = Player(self.scenes[0][0].x1 + self.scenes[0][0].w/2, self.scenes[0][0].y - 50)\n def drawLevel(self):\n background(150)\n if self.player.y >= height:\n self.over = 'lose'\n if self.player.x > width:\n self.scene_index += 1\n if self.scene_index >= len(self.scenes):\n self.scene_index -= 1\n self.over = 'win'\n self.player.x = 10\n if self.over == '':\n self.player.onGround = False\n for ground in self.scenes[self.scene_index]:\n ground.display()\n self.player.checkOnGround(ground)\n self.player.move()\n self.player.display()\n elif self.over == 'lose':\n textSize(18)\n text(\"You died! 
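The dispatcher above distinguishes URLs from search text by checking `urlparse(...).netloc`, which is only non-empty when the token carries a scheme and host:

```python
from urllib.parse import urlparse

for token in ("https://example.com/page", "example.com/page", "del", "python tips"):
    print(token, "->", bool(urlparse(token).netloc))
# https://example.com/page -> True
# example.com/page -> False   (no scheme, so it parses as a path and netloc is empty)
# del -> False
# python tips -> False
```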
Press 'SPACE' to retry\", width/2 - 80, height/2-30)\n elif self.over == 'win':\n textSize(18)\n text(\"You Win! Press 'SPACE' to continue\", width/2 - 80, height/2-30)\n ","repo_name":"itsalaidbacklife/platformer","sub_path":"levelFile.py","file_name":"levelFile.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"21393057829","text":"from event import Event\nimport time\n\nclass Checker(Event):\n def __init__(self, args, type, cameraId):\n super().__init__(args, type, cameraId)\n self.eventType = type\n self.cameraId = cameraId\n self.eventInCamera = True\n self.timeFromLastClosed = True\n self.boundaries = True\n\n def is_time_passed_from_last_event(self, camera):\n if len(camera.lastEventsInCamera) > 0:\n if self.eventType != \"PERSONS\":\n tempList = [event for event in camera.lastEventsInCamera if event.eventType == self.eventType and not event.open]\n for event in tempList:\n if time.time() - event.closedTime < camera.timeToOpenAfterClose:\n self.timeFromLastClosed = False\n else:\n tempList = [event for event in camera.lastEventsInCamera if\n event.eventType == \"NO_CROSS_ZONE\" and not event.open]\n for event in tempList:\n if time.time() - event.closedTime < camera.timeToOpenAfterClose:\n self.timeFromLastClosed = False\n\n def is_time_passed_from_last_helmet_event(self, camera):\n flag = True\n if len(camera.lastEventsInCamera) > 0:\n tempList = [event for event in camera.lastEventsInCamera if event.eventType == \"PPE_HELMET\" and not event.open]\n if len(tempList) > 0:\n for event in tempList:\n if time.time() - event.closedTime < camera.timeToOpenAfterClose:\n return False\n return True\n\n def is_event_in_camera(self, event, eventsInCamera):\n if event == \"PERSONS\":\n if \"PPE_HELMET\" in eventsInCamera and \"NO_CROSS_ZONE\" in eventsInCamera:\n return\n if \"PPE_HELMET\" not in eventsInCamera and \"NO_CROSS_ZONE\" in eventsInCamera:\n self.eventType = \"NO_CROSS_ZONE\"\n elif \"PPE_HELMET\" in eventsInCamera:\n self.eventType = \"PPE_HELMET\"\n else:\n self.eventInCamera = False\n else:\n if event in eventsInCamera:\n self.eventType = event\n else:\n self.eventInCamera = False\n\n # def check_boundaries(self, camera, detection):\n # detection_x_start = detection.x[0]\n # detection_x_end = detection.x[1]\n # detection_x_size = detection_x_end - detection_x_start\n # detection_y_start = detection.y[0]\n # detection_y_end = detection.y[1]\n # detection_y_size = detection_y_end - detection_y_start\n # detectionTotalArea = detection_x_size * detection_y_size\n # if detection_x_start < float(camera.x_start):\n # # print(\"off limits!\")\n # detection.x[0] = camera.x_start\n # if detection_x_end > float(camera.x_end):\n # # print(\"off limits!\")\n # detection.x[1] = camera.x_end\n # if abs(detection.x[1] - detection.x[0]) <= 0.01:\n # self.boundaries = False\n # if (detection_y_size < camera.minSize or detection_y_size > camera.maxSize) and detection.eventType == \"PERSONS\":\n # self.boundaries = False\n # if detection_y_start < float(camera.y_start):\n # # print(\"off limits!\")\n # detection.y[0] = camera.y_start\n # if detection_y_end > float(camera.y_end):\n # # print(\"off limits!\")\n # detection.y[1] = camera.y_end\n # if abs(detection.y[1] - detection.y[0]) <= 0.01:\n # # print(\"problem\")\n # self.boundaries = False\n # detection_x_start = detection.x[0]\n # detection_x_end = detection.x[1]\n # detection_x_size = detection_x_end - detection_x_start\n # 
detection_y_start = detection.y[0]\n # detection_y_end = detection.y[1]\n # detection_y_size = detection_y_end - detection_y_start\n # newDetectionTotalArea = detection_x_size * detection_y_size\n # if newDetectionTotalArea / detectionTotalArea < 0.8:\n # self.boundaries = False\n # self.x, self.y = detection.x, detection.y\n\n def check_boundaries(self, camera, detection):\n detection_x_size = detection.x_end - detection.x_start\n detection_y_size = detection.y_end - detection.y_start\n detectionTotalArea = detection_x_size * detection_y_size\n if detection.x_start < float(camera.x_start):\n # print(\"off limits!\")\n detection.x_start = camera.x_start\n if detection.x_end > float(camera.x_end):\n # print(\"off limits!\")\n detection.x_end = camera.x_end\n if abs(detection.x_end - detection.x_start) <= 0.01:\n self.boundaries = False\n if (detection_y_size < camera.minSize or detection_y_size > camera.maxSize) and detection.eventType == \"PERSONS\":\n self.boundaries = False\n if detection.y_start < float(camera.y_start):\n # print(\"off limits!\")\n detection.y_start = camera.y_start\n if detection.y_end > float(camera.y_end):\n # print(\"off limits!\")\n detection.y_end = camera.y_end\n if abs(detection.y_end - detection.y_start) <= 0.01:\n self.boundaries = False\n detection_x_size = detection.x_end - detection.x_start\n detection_y_size = detection.y_end - detection.y_start\n newDetectionTotalArea = detection_x_size * detection_y_size\n if detectionTotalArea:\n if newDetectionTotalArea / detectionTotalArea < self.args[\"Ir\"]:\n self.boundaries = False\n self.x, self.y = [detection.x_start, detection.x_end], [detection.y_start, detection.y_end]\n\n def check_dead_man(self):\n pass\n # get pos vector in size of VECTOR_SIZE (numpy vector or list of floats) from yoav\n # make histogram on the last 30 frames.\n # checks if 90% of changes are in the same 10% of bins.\n # sends Dead man event","repo_name":"avihugoldman/EventCenter","sub_path":"checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22614562620","text":"# LESSON\n\nx = 3\nwhile x < 10:\n print(x)\n x += 1\n\n# EXERCISES\n\nprint(\"\\n\")\n\n# make a program that multiplies itself by 2 until it is larger than 128\n\ny = 1\nwhile y <= 128:\n print(y)\n y = y * 2\n\n# Ask a number until it is correct\n\nimport random\ntarget = random.randrange(1,10)\nguess = int(input(\"guess a number between 1 and 10: \"))\nwhile guess != target:\n if guess >= 1 and guess <=10:\n if guess != target:\n guess = int(input(\"try again: \"))\n if guess == target:\n print(\"Good guess!\")\n else:\n guess = int(input(\"Invalid input, try again: \"))\n","repo_name":"botnaysard/askPython","sub_path":"whileLoops.py","file_name":"whileLoops.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3477682112","text":"from turtle import Turtle\n\nCOUNTER_POSITION = (-380, 260)\nclass LevelCounter(Turtle):\n def __init__(self):\n super().__init__()\n self.hideturtle()\n self.penup()\n self.goto(COUNTER_POSITION)\n self.level = 0\n self.update_level()\n\n def update_level(self):\n self.write(f\"Level: {self.level}\", font=(\"Courier\", 20, \"bold\"))\n\n def level_up(self):\n self.clear()\n self.level +=1\n self.update_level()\n\n def game_over(self):\n game_over_text = Turtle()\n game_over_text.hideturtle()\n 
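`check_boundaries` above clips a detection box to the camera frame and invalidates it when the clipped area falls below the `Ir` fraction of the original; the same arithmetic on plain numbers:

```python
def clipped_area_ratio(box, frame):
    """box/frame = (x_start, x_end, y_start, y_end); returns kept-area fraction."""
    bx0, bx1, by0, by1 = box
    fx0, fx1, fy0, fy1 = frame
    original = (bx1 - bx0) * (by1 - by0)
    cx0, cx1 = max(bx0, fx0), min(bx1, fx1)  # clip to the frame
    cy0, cy1 = max(by0, fy0), min(by1, fy1)
    clipped = max(cx1 - cx0, 0) * max(cy1 - cy0, 0)
    return clipped / original if original else 0.0

# A box hanging 50% outside the frame keeps half its area:
print(clipped_area_ratio((0.8, 1.2, 0.2, 0.4), (0.0, 1.0, 0.0, 1.0)))  # 0.5
# With args["Ir"] = 0.8 this detection would be rejected (0.5 < 0.8).
```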
game_over_text.write(\"Game over you loser!\", align=\"center\", font=(\"Courier\", 20, \"bold\"))\n","repo_name":"tuanmonn/PythonProjects","sub_path":"pythonCourse/100-days-of-code/day-23-turtle-crossing/level_counter.py","file_name":"level_counter.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13759728294","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nfrom pathlib import Path\n\nimport requests\nfrom openpyxl import load_workbook\n\n\ndef load_dumpsters(filename):\n '''\n '''\n dumpsters = dict()\n wb = load_workbook(filename=filename)\n for sheet_name in (\"4.1.1.1 Автозаводский\", \"4.1.1.2 Канавинский\",\n \"4.1.1.3 Ленинский\", \"4.1.1.4 Московский\",\n \"4.1.1.5 Нижегородский\", \"4.1.1.6 Приокский\",\n \"4.1.1.7 Советский\", \"4.1.1.8 Сормовский\"):\n ws = wb[sheet_name]\n area_name = sheet_name.split()[-1]\n for row in ws.iter_rows(min_col=1, max_col=14, min_row=8):\n dumpster_number = row[0].value\n dumpster = {\n \"Округ\": area_name,\n \"Номер контейнерной площадки\": row[1].value,\n \"Адрес\": row[3].value,\n \"Наименование\": row[4].value,\n \"Владелец\": row[5].value,\n \"Транспортировщик\": row[6].value,\n \"Материал\": row[7].value,\n \"Количество\": row[8].value,\n \"Вместимомть\": row[9].value,\n \"Покрытие\": row[10].value,\n \"Навес\": row[11].value,\n \"Координаты дома\": row[13].value\n }\n dumpsters[f\"{area_name}_{dumpster_number}\"] = dumpster\n return dumpsters\n\n\ndef link_to_road(dumpsters):\n '''\n '''\n for dumpster_uid, dumpster in dumpsters.items():\n coordinates = dumpster.get(\"Координаты дома\", \"\")\n if not coordinates:\n print(f\"Нет координат для: {dumpster_uid}\", flush=True)\n continue\n coordinates = \",\".join(coordinates.split(\", \")[::-1])\n url = f\"http://osrm.vehicle:5000/nearest/v1/car/{coordinates}\"\n\n response = requests.get(url)\n nearest_point = response.json()\n status = nearest_point[\"code\"]\n if status == \"Ok\":\n points = nearest_point.get(\"waypoints\")\n dumpster.update({\"Расположение\": points})\n else:\n print(nearest_point, flush=True)\n return dumpsters\n\n\ndef save_to_json(output_filename, dumpsters):\n '''\n '''\n assert isinstance(dumpsters, dict)\n data = json.dumps(dumpsters, ensure_ascii=False, indent=4)\n Path(output_filename).write_text(data, encoding=\"utf-8\")\n\n\ndef load_from_json(filename):\n ''' Загружает данные из JSON-файла\n '''\n data = json.loads(Path(filename).read_text())\n assert isinstance(data, dict)\n return data\n\n\ndef link_to_nearest_dumpster(coords, dumpsters):\n '''\n '''\n nearest_dumpsters = dict()\n total = 0\n for address, coord in coords.items():\n # Контейнерные площадки, доступные для дома\n available_dumpsters = list()\n for coord_threshold in (0.005, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05, 0.1):\n latitude = float(coord[\"lat\"])\n longitude = float(coord[\"lng\"])\n\n for dumpster_uid, dumpster in dumpsters.items():\n try:\n dumster_location = dumpster[\"Расположение\"][0][\"location\"]\n dumpster_longitude, dumpster_latitude = dumster_location\n if dumpster_latitude > latitude + coord_threshold:\n continue\n if dumpster_latitude < latitude - coord_threshold:\n continue\n if dumpster_longitude > longitude + coord_threshold:\n continue\n if dumpster_longitude < longitude - coord_threshold:\n continue\n except KeyError:\n # print(f\"Нет координат для площадки {dumpster_uid}\", flush=True)\n continue\n dumpster.update(uid=dumpster_uid)\n 
available_dumpsters.append(dumpster)\n if len(available_dumpsters) >= 3:\n break\n\n dumpster_coords = list()\n for dumpster in available_dumpsters:\n dumster_location = dumpster[\"Расположение\"][0][\"location\"]\n dumpster_longitude, dumpster_latitude = dumster_location\n dumpster_coords.append(f\"{dumpster_longitude},{dumpster_latitude}\")\n coordinates = \";\".join([f\"{longitude},{latitude}\"] + dumpster_coords)\n \n url = f\"http://osrm.human:5000/table/v1/foot/{coordinates}\"\n params = dict(\n sources=\"0\",\n destinations=\";\".join([str(n) for n, d in enumerate(available_dumpsters, 1)])\n )\n min_duration = None\n\n response = requests.get(url, params=params)\n data = response.json()\n status = data[\"code\"]\n if status == \"Ok\":\n durations = data.get(\"durations\")[0]\n for n, duration in enumerate(durations):\n if min_duration is None or duration < min_duration[1]:\n min_duration = (n, duration)\n else:\n print(data, flush=True)\n\n if min_duration:\n dumpster_index, duration_value = min_duration\n dumpster = available_dumpsters[dumpster_index]\n dumpster.update(duration=duration_value)\n nearest_dumpsters[address] = dumpster\n\n total += 1\n if total % 100 == 0:\n print(f\"Обработано домов: {total}\", flush=True)\n return nearest_dumpsters\n\n\ndef main():\n '''\n '''\n dumpsters = load_dumpsters(filename=\"/data/Реестр контейнерных площадок.xlsx\")\n # Привязка контейнеров к дорогам\n dumpsters = link_to_road(dumpsters)\n save_to_json(\"/data/Реестр контейнерных площадок.json\", dumpsters)\n print(len(dumpsters), flush=True)\n\n # Првязка домов к контейнерным площадкам\n coords = load_from_json(\"/data/Координаты жилых домов.json\")\n nearest_dumpsters = link_to_nearest_dumpster(coords, dumpsters)\n save_to_json(\"/data/Ближайшие контейнерные площадки.json\", nearest_dumpsters)\n","repo_name":"jubbon/GCH-2019","sub_path":"source/images/geoprocessing/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13614029753","text":"import numpy as np\nfrom skimage.io import imread, imsave, imshow\nfrom scipy.fftpack import dct, idct\n\ndef lum_color(matrix):\n if matrix == \"lum\":\n return np.array([[16, 11, 10, 16, 24, 40, 51, 61],\n [12, 12, 14, 19, 26, 58, 60, 55],\n [14, 13, 16, 24, 40, 57, 69, 56],\n [14, 17, 22, 29, 51, 87, 80, 62],\n [18, 22, 37, 56, 68, 109, 103, 77],\n [24, 35, 55, 64, 81, 104, 113, 92],\n [49, 64, 78, 87, 103, 121, 120, 101],\n [72, 92, 95, 98, 112, 100, 103, 99]])\n\n elif matrix == \"color\":\n return np.array([[17, 18, 24, 47, 99, 99, 99, 99],\n [18, 21, 26, 66, 99, 99, 99, 99],\n [24, 26, 56, 99, 99, 99, 99, 99],\n [47, 66, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99],\n [99, 99, 99, 99, 99, 99, 99, 99]])\n\n\ndef rgb_yuv(img):\n x_form = np.array([[.299, .587, .114], [-.1687, -.3313, .5], [.5, -.4187, -.0813]])\n new_img = img.dot(x_form.T)\n new_img[:, :, [1, 2]] += 128\n\n return new_img.astype(np.uint8)\n\n\ndef yuv_rgb(img):\n x_form = np.array([[1, 0, 1.402], [1, -0.34414, -0.71414], [1, 1.772, 0]])\n rgb = img\n rgb[:, :, [1, 2]] -= 128\n rgb = rgb.dot(x_form.T)\n\n return np.clip(rgb, 0, 255).astype(np.uint8)\n\n\ndef decimate(img, block_size):\n rows, cols = img.shape[0], img.shape[1]\n blocks_count = rows // block_size * cols // block_size if (rows // block_size) and (cols // block_size) else None\n top_left_cells = 
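The nearest-dumpster step above asks the OSRM `table` service for foot durations from one source to several candidate dumpsters and keeps the minimum; the response handling can be exercised offline with a canned payload shaped the way the code expects (values illustrative):

```python
def pick_nearest(table_response):
    """Mirror the min-duration scan used above; returns (index, seconds) or None."""
    if table_response.get("code") != "Ok":
        print(table_response)
        return None
    best = None
    for n, duration in enumerate(table_response["durations"][0]):
        if best is None or duration < best[1]:
            best = (n, duration)
    return best

sample = {"code": "Ok", "durations": [[412.3, 180.9, 255.0]]}  # one source, three dumpsters
print(pick_nearest(sample))  # (1, 180.9) - the second dumpster is closest on foot
```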
np.empty((blocks_count, 3), dtype=np.int32)\n oth_cells = np.empty((blocks_count, 63, 3), dtype=np.int32)\n return rows, cols, blocks_count, top_left_cells, oth_cells\n\n\ndef interpol(block_size, blocks_count):\n image_size = int(np.sqrt(blocks_count)) * block_size\n blocks_per_line = image_size // block_size\n img = np.empty((image_size, image_size, 3), dtype=np.uint8)\n return img, blocks_per_line\n\n\ndef zero_center(img, i, j, k, block_size):\n return img[i: i + block_size, j: j + block_size, k] - 128\n\n\ndef center_return(img, i, j, k, block_size, block):\n img[i: i + block_size, j: j + block_size, k] = block + 128\n return img\n\n\ndef do_dct(img):\n return dct(dct(img.T, norm='ortho').T, norm='ortho')\n\n\ndef undo_dct(img):\n return idct(idct(img.T, norm='ortho').T, norm='ortho')\n\n\ndef quantize(block, matrix):\n quant_matrix = lum_color(matrix)\n return block // quant_matrix\n\n\ndef dequantize(block, matrix):\n quant_matrix = lum_color(matrix)\n return block * quant_matrix\n\n\ndef zigzag_points(rows, cols):\n \n UP, DOWN, RIGHT, LEFT, UP_RIGHT, DOWN_LEFT = range(6)\n\n def move(direction, point):\n return {\n UP: lambda point: (point[0] - 1, point[1]),\n DOWN: lambda point: (point[0] + 1, point[1]),\n LEFT: lambda point: (point[0], point[1] - 1),\n RIGHT: lambda point: (point[0], point[1] + 1),\n UP_RIGHT: lambda point: move(UP, move(RIGHT, point)),\n DOWN_LEFT: lambda point: move(DOWN, move(LEFT, point))\n }[direction](point)\n\n def inbounds(point):\n return 0 <= point[0] < rows and 0 <= point[1] < cols\n\n point = (0, 0)\n\n move_up = True\n\n for i in range(rows * cols):\n yield point\n if move_up:\n if inbounds(move(UP_RIGHT, point)):\n point = move(UP_RIGHT, point)\n else:\n move_up = False\n if inbounds(move(RIGHT, point)):\n point = move(RIGHT, point)\n else:\n point = move(DOWN, point)\n else:\n if inbounds(move(DOWN_LEFT, point)):\n point = move(DOWN_LEFT, point)\n else:\n move_up = True\n if inbounds(move(DOWN, point)):\n point = move(DOWN, point)\n else:\n point = move(RIGHT, point)\n\n\ndef block_to_zigzag(block):\n return np.array([block[point] for point in zigzag_points(*block.shape)])\n\n\ndef zigzag_to_block(zigzag):\n rows = cols = int(np.sqrt(len(zigzag)))\n\n block = np.empty((rows, cols), np.int32)\n\n for i, point in enumerate(zigzag_points(rows, cols)):\n block[point] = zigzag[i]\n\n return block\n\n\ndef huffman_rle(img):\n pass\n\n\ndef encode(img, block_size):\n\n yuv_img = rgb_yuv(img)\n rows, cols, blocks_count, top_left_cells, oth_cells = decimate(yuv_img, block_size)\n block_index = 0\n\n for i in range(0, rows, block_size):\n for j in range(0, cols, block_size):\n block_index += 1\n\n for k in range(3):\n block = zero_center(yuv_img, i, j, k, block_size)\n dct_matrix = do_dct(block)\n quant_matrix = quantize(dct_matrix, \"lum\" if k == 0 else \"color\")\n zigzag_block = block_to_zigzag(quant_matrix)\n\n top_left_cells[block_index if block_index < blocks_count else blocks_count - 1, k] = zigzag_block[0]\n oth_cells[block_index if block_index < blocks_count else blocks_count - 1, :, k] = zigzag_block[1:]\n\n return top_left_cells, oth_cells, blocks_count\n\n\ndef decode(img, block_size):\n\n top_left_cells, oth_cells, blocks_count = img\n new_img, blocks_per_line = interpol(block_size, blocks_count)\n\n for block_index in range(blocks_count):\n i = block_index // blocks_per_line * block_size\n j = block_index % blocks_per_line * block_size\n\n for k in range(3):\n zigzag = [top_left_cells[block_index, k]] + list(oth_cells[block_index, :, k])\n 
quant_matrix = zigzag_to_block(zigzag)\n dct_matrix = dequantize(quant_matrix, \"lum\" if k == 0 else \"color\")\n block = undo_dct(dct_matrix)\n\n new_img = center_return(new_img, i, j, k, block_size, block)\n\n rgb_img = yuv_rgb(new_img.astype(np.uint8))\n\n return rgb_img\n\n\ndef main():\n img = imread('image.jpg')\n block_size = 8\n enc_img = encode(img, block_size)\n dec_img = decode(enc_img, block_size)\n imsave('decoded_img.jpg', dec_img)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"marialarionova/cs102","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18243519818","text":"# CBSE Statistics Grade 9\r\n# Exercise 14.1.1\r\n\r\n# Name: Ankit Saha\r\n# Roll number: AI21BTECH11004\r\n\r\n\"\"\" Problem Statement\r\nGive five examples of data that you can collect from your day-to-day life\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Figure 1\r\ndf = pd.read_excel(r'data.xlsx', 'Sheet1')\r\ndata = np.array(df)\r\nfig, ax = plt.subplots()\r\nax.bar(data[0], data[1])\r\nplt.title('Figure 1')\r\nplt.xlabel('Department')\r\nplt.ylabel('Number of Students')\r\nplt.savefig('../figs/fig-1')\r\nplt.show()\r\n\r\n\r\n# Figure 2\r\ndf = pd.read_excel(r'data.xlsx', 'Sheet2')\r\ndata = np.array(df)\r\nfig, ax = plt.subplots()\r\nax.bar(data[0], data[1])\r\nplt.title('Figure 2')\r\nplt.xlabel('Date (April)')\r\nplt.ylabel('Temperature (in C)')\r\nplt.savefig('../figs/fig-2')\r\nplt.show()\r\n\r\n# Figure 3\r\ndf = pd.read_excel(r'data.xlsx', 'Sheet3')\r\ndata = np.array(df)\r\nfig, ax = plt.subplots()\r\nax.bar(data[0], data[1])\r\nplt.title('Figure 3')\r\nplt.xlabel('Month')\r\nplt.ylabel('Rainfall (in mm)')\r\nplt.savefig('../figs/fig-3')\r\nplt.show()\r\n\r\n# Figure 4\r\ndf = pd.read_excel(r'data.xlsx', 'Sheet4')\r\ndata = np.array(df)\r\nfig, ax = plt.subplots()\r\nax.bar(data[0], data[1])\r\nplt.title('Figure 4')\r\nplt.xlabel('Program')\r\nplt.ylabel('Number of Students')\r\nplt.savefig('../figs/fig-4')\r\nplt.show()\r\n\r\n# Figure 5\r\ndf = pd.read_excel(r'data.xlsx', 'Sheet5')\r\ndata = np.array(df)\r\nfig, ax = plt.subplots()\r\nax.bar(data[0], data[1])\r\nplt.title('Figure 5')\r\nplt.xlabel('Medal')\r\nplt.ylabel('Number of Medals')\r\nplt.savefig('../figs/fig-5')\r\nplt.show()\r\n","repo_name":"Ankit-Saha-2003/AI1110","sub_path":"Assignment-3/codes/cbse9-14-1-1.py","file_name":"cbse9-14-1-1.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71472999895","text":"import os\nimport datetime\nimport json\nfrom keras.callbacks import (\n ReduceLROnPlateau, TensorBoard,\n ModelCheckpoint\n)\nfrom tgs import unpickle, pickle\nfrom tgs.metrics import (\n iou, map_iou, get_map_loss,\n weight_loss_wrapper,\n)\n\n\nclass TrainConfig:\n epochs = 200\n which_loss = 'map_iou'\n loss_at = False\n loss_log = False\n metrics = [\"accuracy\", iou, map_iou]\n log_base_dir = ''\n log_folder = None\n model_template = 'weights.{epoch:02d}-{val_loss:.2f}.model'\n save_best_only = True\n weight_wrapper_params = dict(\n protocol=0, pos_weight=.2,\n neg_weight=.8, pen_no_mask=False\n )\n unet_wrapper_params = dict(\n which='static', apply_threshold=False,\n input_shape=(128, 128, 1), unet_params=dict()\n )\n dataset_params = dict(\n validation_split=0.2,\n 
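`block_to_zigzag`/`zigzag_to_block` in the codec above traverse each block in the standard JPEG zig-zag order, so low-frequency DCT coefficients come first in the serialized vector; a 3x3 round trip makes the ordering visible (this assumes the two helpers above are in scope):

```python
import numpy as np

block = np.arange(9).reshape(3, 3)  # [[0 1 2] [3 4 5] [6 7 8]]
zz = block_to_zigzag(block)
print(zz)                           # [0 1 3 6 4 2 5 7 8]
print(zigzag_to_block(zz))          # recovers the original 3x3 block
```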
database_dir='/media/zadiq/ZHD/datasets/salt',\n img_shape=(101, 101, 1),\n train_img_shape=(128, 128, 1),\n seed=8090, extra_gen_params=dict(),\n extra_flow_params=dict()\n )\n meta = {\n 'Description': 'A Salt Model',\n 'Comments': ''\n }\n\n @classmethod\n def from_pickle(cls, path):\n return unpickle(path)\n\n @property\n def get_loss(self):\n if callable(self.which_loss):\n return self.which_loss\n return get_map_loss(self.which_loss, self.loss_at, self.loss_log)\n\n @property\n def get_wrapped_loss(self):\n return weight_loss_wrapper(self.get_loss, **self.weight_wrapper_params)\n\n @property\n def get_log_folder(self):\n if self.log_folder:\n return self.log_folder\n name = datetime.datetime.now().strftime('salt-%m-%d-%H-%M-%S')\n self.log_folder = os.path.join(self.log_base_dir, name)\n os.makedirs(self.log_folder)\n return self.log_folder\n\n @property\n def get_model_dir(self):\n folder = os.path.join(self.get_log_folder, 'models')\n os.makedirs(folder, exist_ok=True)\n path = os.path.join(folder, self.model_template)\n print('Saving models to: {}'.format(folder))\n return path\n\n @property\n def get_tensorboard_dir(self):\n path = os.path.join(self.get_log_folder, 'logs')\n os.makedirs(path, exist_ok=True)\n print('Logging histories to: {}'.format(path))\n return path\n\n @property\n def get_callbacks(self):\n callbacks = [\n ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.00001, verbose=1),\n ModelCheckpoint(self.get_model_dir, verbose=1, save_best_only=self.save_best_only),\n TensorBoard(self.get_tensorboard_dir),\n ]\n return callbacks\n\n def to_pickle(self, path=None):\n path = path or os.path.join(self.get_log_folder, 'train_config.pkl')\n print(\"pickling config to: {}\".format(path))\n pickle(self, path)\n\n def save_meta(self):\n path = os.path.join(self.get_log_folder, 'meta.json')\n print(\"saving meta to: {}\".format(path))\n with open(path, 'w') as fp:\n json.dump(self.meta, fp)\n\n def exit(self):\n self.to_pickle()\n self.save_meta()\n","repo_name":"zadiq/competitions","sub_path":"salt/salt-master/tgs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"21853365390","text":"import string\r\nimport random \r\n\r\nclass token:\r\n async def create(channel,bot):\r\n letters = list(string.ascii_lowercase)\r\n token = []\r\n for i in range(0,21):\r\n if random.randint(0,1) == 0: # => Letter\r\n token.append(str(letters[random.randrange(len(letters))]))\r\n else:\r\n token.append(str(random.randint(0,9)))\r\n channel = bot.get_channel(channel)\r\n message = ''.join([str(elem) for elem in token])\r\n await channel.send(f\"**NEW MAP**\\n`Token` : {message}\")","repo_name":"W0lfan/Wolfan-s-Starblast-Projects","sub_path":"Miner_Bot/token_generator.py","file_name":"token_generator.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"26218522469","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import str\nfrom builtins import int\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom qtpandas.compat import Qt, QtCore, QtGui\n\n\nimport pytest\nfrom qtpandas.views.BigIntSpinbox import BigIntSpinbox\n\nclass TestClass(object):\n\n @pytest.fixture\n def spinbox(self, qtbot):\n 
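The token generator above builds the 21-character string one symbol at a time, flipping a coin between a lowercase letter and a digit (indexing must stay within 0-25 for the 26 letters). `random.choices` gives a one-liner over the same 36-symbol alphabet, though note the distribution differs: the loop picks letter vs. digit 50/50, while this draws uniformly over all 36 symbols:

```python
import random
import string

token = ''.join(random.choices(string.ascii_lowercase + string.digits, k=21))
print(f"**NEW MAP**\n`Token` : {token}")
```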
widget = BigIntSpinbox()\n qtbot.addWidget(widget)\n\n return widget\n\n def test_init(self, spinbox):\n assert spinbox\n\n def test_value(self, spinbox):\n assert spinbox.value() == 0\n spinbox._lineEdit.setText('') # runs into exception\n assert spinbox.value() == 0\n\n def test_minimumMaximum(self, spinbox):\n assert spinbox.minimum() == -18446744073709551616\n assert spinbox.maximum() == 18446744073709551615\n\n def test_setMinimumMaximum(self, spinbox):\n spinbox.setMinimum(0)\n spinbox.setMinimum(int(0))\n spinbox.setMinimum(1)\n spinbox.setMinimum(int(1))\n spinbox.setMinimum(-1)\n spinbox.setMinimum(int(-1))\n with pytest.raises(TypeError) as excinfo:\n spinbox.setMinimum('')\n assert \"int or long\" in str(excinfo.value)\n\n spinbox.setMaximum(0)\n spinbox.setMaximum(int(0))\n spinbox.setMaximum(1)\n spinbox.setMaximum(int(1))\n spinbox.setMaximum(-1)\n spinbox.setMaximum(int(-1))\n with pytest.raises(TypeError) as excinfo:\n spinbox.setMaximum('')\n assert \"int or long\" in str(excinfo.value)\n\n def test_setValue(self, spinbox):\n assert spinbox.setValue(10)\n assert spinbox.value() == 10\n\n assert spinbox.setValue(18446744073709551615 + 1)\n assert spinbox.value() == spinbox.maximum()\n\n assert spinbox.setValue(-18446744073709551616 - 1)\n assert spinbox.value() == spinbox.minimum()\n\n def test_singleStep(self, spinbox):\n assert spinbox.singleStep() == 1\n\n assert spinbox.setSingleStep(10) == 10\n assert spinbox.setSingleStep(-10) == 10\n with pytest.raises(TypeError) as excinfo:\n spinbox.setSingleStep('')\n spinbox.setSingleStep(0.1212)\n assert \"int\" in str(excinfo.value)\n\n assert spinbox.setSingleStep(0) == 0\n\n def test_stepEnabled(self, spinbox):\n assert spinbox.StepUpEnabled\n assert spinbox.StepDownEnabled\n assert spinbox.stepEnabled() == spinbox.StepUpEnabled | spinbox.StepDownEnabled\n\n spinbox.setMinimum(0)\n spinbox.setMaximum(10)\n spinbox._lineEdit.setText(str(-1))\n assert spinbox.stepEnabled() == spinbox.StepUpEnabled\n spinbox._lineEdit.setText(str(11))\n assert spinbox.stepEnabled() == spinbox.StepDownEnabled\n\n def test_stepBy(self, spinbox):\n spinbox.setMinimum(0)\n spinbox.setMaximum(10)\n spinbox.setValue(0)\n spinbox.stepBy(1)\n assert spinbox.value() == 1\n spinbox.stepBy(-1)\n assert spinbox.value() == 0\n\n spinbox.setMinimum(0)\n spinbox.setMaximum(10)\n spinbox.setValue(0)\n\n spinbox.stepBy(-1)\n assert spinbox.value() == 0 # should be minimum cause -1 is out of bounds\n\n spinbox.setValue(10)\n spinbox.stepBy(1)\n assert spinbox.value() == 10 # should be maximum cause 11 is out of bounds","repo_name":"draperjames/qtpandas","sub_path":"tests/test_BigIntSpinbox.py","file_name":"test_BigIntSpinbox.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"67"} +{"seq_id":"72792543894","text":"# code from https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\n\n# first neural network with keras tutorial\nfrom pathlib import Path\n\nfrom keras.engine.saving import model_from_json\nfrom numpy import loadtxt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# load the dataset as a matrix of numbers\ndataset = loadtxt('dataset/pima-indians-diabetes.csv', delimiter=',')\n# split into input (X: col 1-8) and output (y: col 9) variables\nX = dataset[:, 0:8]\ny = dataset[:, 8]\n\nout_path = 'model'\nmy_file = Path(out_path+\"/model.json\")\nif my_file.is_file():\n json_file = open(out_path+'/model.json', 'r')\n 
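# Keras persists a model in two parts: model.json stores only the network\n # architecture, while the trained weights live in the separate HDF5 file\n # read by load_weights() just below.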
loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n # load weights into new model\n model.load_weights(out_path+'/model.h5')\n print(\"Loaded model from disk\")\nelse:\n # define the keras model\n model = Sequential()\n # the activation function is\n # responsible for transforming the summed weighted input from the node\n # into the activation of the node or output for that input.\n # activation\n # - Rectified Linear Unit (ReLU): output the input directly if it is positive, otherwise, it will output zero\n # to help models to learn faster and perform better.\n # - sigmoid: The input to the function is transformed into a value between 0.0 and 1.0\n # - hyperbolic tangent (tanh): The input to the function is transformed into a values between -1.0 and 1.0\n # 1st (input) layer with 12 nodes\n # receive array of 8 and return array of 12 members\n model.add(Dense(12, input_dim=8, activation='relu'))\n # 2nd (hidden) layer with 8 nodes\n # receive array of previous layer and return array of 8 members\n model.add(Dense(8, activation='relu'))\n # 3rd (output) layer with 1 node\n # receive array of previous layer and return array of 1 member\n model.add(Dense(1, activation='sigmoid'))\n\n# compile the keras model using backend tool.\n# The backend automatically chooses the best way to represent the network for training\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# train model - fit the keras model on the dataset\n# epochs is number times that the learning algorithm will work through the entire training dataset\n# batch_size is number of samples to work through before updating the internal model parameters\nmodel.fit(X, y, epochs=150, batch_size=10)\n\n# evaluate the keras model\n_, accuracy = model.evaluate(X, y)\nprint('Accuracy: %.2f' % (accuracy*100))\n\n# serialize model to JSON\nmodel_json = model.to_json()\nwith open(\"model/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model/model.h5\")\nprint(\"Saved model to disk\")","repo_name":"apichaya-s/ml","sub_path":"ml1-text-predict/build-keras-model.py","file_name":"build-keras-model.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21906049037","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 22 22:12:07 2017\n\n@author: drlego\n\"\"\"\n\n### Step 1: Import modules & set logging\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport numpy as np\n\nimport keras.backend as K\n\nfrom keras.datasets import imdb\nfrom keras.models import Model, Input\nfrom keras.layers.core import Dense\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\n\n## Fix random seed for reproducibility\nnp.random.seed(20170704)\n\n\n## Check proper working directory\n#os.chdir('path/to/day_2/')\nif os.getcwd().split('/')[-1] == 'day_2':\n pass\nelse:\n raise OSError('Check current working directory.\\n'\n 'If not specified as instructed, '\n 'more errors will occur throught the code.\\n'\n '- Current working directory: %s' % os.getcwd())\n\n\n## Set logging\ndef set_logging(testlog=False):\n # 1. Make 'logger' instance\n logger = logging.getLogger()\n # 2. 
Make 'formatter'\n formatter = logging.Formatter(\n '[%(levelname)s:%(lineno)s] %(asctime)s > %(message)s'\n )\n # 3. Make 'streamHandler'\n streamHandler = logging.StreamHandler()\n # 4. Set 'formatter' to 'streamHandler'\n streamHandler.setFormatter(formatter)\n # 5. Add streamHandler to 'logger' instance\n logger.addHandler(streamHandler)\n # 6. Set level of log; DEBUG -> INFO -> WARNING -> ERROR -> CRITICAL\n logger.setLevel(logging.DEBUG)\n # 7. Print test INFO message\n if testlog: # default is 'False'\n logging.info(\"Stream logging available!\")\n \n return logger\n\n_ = set_logging()\n\n\n####################################################################################\n\n\n### Step 2: Load, view & preprocess data\n\n## 2-1. Load\n# Load dataset, but only keep the top n words\nlogging.info('Loading imdb dataset...')\ntop_words = 5000\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n\nlogging.debug('Shape of train data: {}'.format(X_train.shape))\nlogging.debug('Shape of test data: {}'.format(X_test.shape))\n\n\n## 2-2. View\n# word2idx, idx2word <- python dictionaries\nword2idx = imdb.get_word_index()\nidx2word = dict((v, k) for k, v in word2idx.items())\nlogging.info('Vocabulary size: {}'.format(len(idx2word)))\n\n# View the original review in text\ndef to_text(X, idx2word):\n text = [' '.join([idx2word[index] for index in review])for review in X]\n return text\n\ntext_train = to_text(X_train, idx2word)\ntext_test = to_text(X_test, idx2word)\nlogging.info('\\n{}\\n- {}'.format(text_train[0], ('pos' if y_train[0] == 1 else 'neg')))\nlogging.info('\\n{}\\n- {}'.format(text_test[245], ('pos' if y_test[245] == 1 else 'neg')))\n\n\n## 2-3. Preprocess\n# Truncate and pad input sequences\nmax_review_len = 500\nif X_train.shape == (25000, ) and X_test.shape == (25000, ):\n X_train = sequence.pad_sequences(X_train, maxlen=max_review_len,\n padding='pre', truncating='pre',\n value=0.)\n X_test = sequence.pad_sequences(X_test, maxlen=max_review_len,\n padding='pre', truncating='pre',\n value=0.)\n\nlogging.info('Pad sequences shorter than %d with \"0\"' % max_review_len)\nlogging.info('Truncate sequences longer than {0} to {0}'.format(max_review_len))\nlogging.debug('Shape of train data (preprocessed): {}'.format(X_train.shape))\nlogging.debug('Shape of test data (preprocessed) : {}'.format(X_test.shape))\n\n\n####################################################################################\n\n\n### Step 3: Build model\n\n## 3-1. Hyperparameters\nepochs = 5\nbatch_size = 128\nhidden_size = 100\nembedding_vector_len = 32\n\n## 3-2. 
Define RNN model with LSTM cells for IMDB data\n\n# Define input (SHAPE IS IMPORTANT!!!)\ninput_sequence = Input(shape=(max_review_len, ), # max_review_len = 500\n dtype='int32', \n name='input_sequence')\n\n\n# Define Embedding layer\nx = Embedding(input_dim=top_words, # top_words = 5000 \n output_dim=embedding_vector_len, # embedding_vetor_len = 32\n input_length=max_review_len, # max_review_len = 500\n mask_zero=True,\n name='embedding')(input_sequence)\n\n\n# Define LSTM layer\nx = LSTM(units=hidden_size,\n dropout=0.,\n recurrent_dropout=0.,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n return_sequences=False,\n name='lstm')(x)\n\n\n# Define Dense layer\nx = Dense(units=100, activation='relu', name='fc')(x)\n\n\n# Define prediction layer; use sigmoid for binary classification\nprediction = Dense(units=1, activation='sigmoid', name='prediction')(x)\n\n\n# Instantiate model\nmodel = Model(inputs=input_sequence,\n outputs=prediction,\n name='LSTM_imdb')\n\n\n####################################################################################\n\n\n### Step 4: Define callbacks\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.callbacks import TensorBoard\n\n# List of callbacks\ncallbacks = []\n\n# Model checkpoints\nckpt_path = './demo/lstm_imdb_ckpts/weights.{epoch:02d}-{val_acc:.2f}.hdf5'\nif not os.path.exists(os.path.dirname(ckpt_path)):\n os.makedirs(os.path.dirname(ckpt_path))\n\ncheckpoint = ModelCheckpoint(filepath=ckpt_path,\n monitor='val_acc',\n save_best_only=False,\n verbose=1)\ncallbacks.append(checkpoint)\n\n# Stop training early\nearlystopping = EarlyStopping(monitor='val_loss',\n patience=5,\n verbose=1)\ncallbacks.append(earlystopping)\n\n# Reduce learning rate when learning does not improve\nreducelr = ReduceLROnPlateau(monitor='val_loss',\n factor=0.1, \n patience=10,\n verbose=1)\ncallbacks.append(reducelr)\n\n# Tensorboard for visualization\nif K.backend() == 'tensorflow':\n tb_logdir = './demo/lstm_imdb_logs/'\n if not os.path.exists(tb_logdir):\n os.makedirs(tb_logdir)\n tensorboard = TensorBoard(log_dir=tb_logdir,\n histogram_freq=1,\n write_graph=True)\n callbacks.append(tensorboard)\n\n####################################################################################\n\n\n### Step 5: Compile & train model\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n\nprint(model.summary())\n\n\nhistory = model.fit(X_train, y_train,\n epochs=epochs,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=callbacks,\n verbose=1)\n\n\n####################################################################################\n\n\n### Step 6: Save & load weights\n\n# Save model weights\nmodel.save_weights('weights/lstm_imdb_weights.h5')\n\n# Load model weights\nmodel.load_weights('weights/lstm_imdb_weights_master.h5')\n\n\n####################################################################################\n\n\n### Step 7: Test final model performance\n\ntest_scores = model.evaluate(X_test, y_test, verbose=1)\nlogging.info('Test accuracy: %.2f%%' %(test_scores[1] * 100))\n#print(\"Test accuracy: %.2f%%\" % (test_scores[1] * 100))\n\n#train_scores = model.evaluate(X_train, y_train, verbose=1)\n#print(\"Train accuracy: %.2f%%\" % (train_scores[1] * 100))\n","repo_name":"liahcha/DataScience","sub_path":"Deep 
Learning/DLdata/2_lstm_imdb.py","file_name":"2_lstm_imdb.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"43967497548","text":"# weights used for the mod-11 CNPJ check-digit calculation\nlista_regressiva = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\n# ctrl + alt + L to fix the spacing\n\ndef valida(cnpj):\n novo_cnpj = tiracaracter(cnpj[:-2])\n cnpj = tiracaracter(cnpj)\n\n verfsequencia(novo_cnpj)\n\n novo_cnpj = transforma_lista(novo_cnpj)\n\n # first check digit\n soma = calculardigito(novo_cnpj)\n formula_1 = 11 - (soma % 11)\n if formula_1 > 9:\n formula_1 = 0\n novo_cnpj.append(formula_1)\n\n # second check digit, computed over the 13 digits including the first one\n soma_2 = calculardigito(novo_cnpj)\n formula_2 = 11 - (soma_2 % 11)\n if formula_2 > 9:\n formula_2 = 0\n novo_cnpj.append(formula_2)\n novo_cnpj = transformar_string(novo_cnpj)\n print(f'Final: {novo_cnpj}')\n\n if novo_cnpj == cnpj:\n return print(f'Valid CNPJ! {novo_cnpj} and {cnpj}')\n else:\n return print(f'Invalid CNPJ! {novo_cnpj} and {cnpj}')\n\n\ndef transformar_string(cnpj):\n novo_cnpj = []\n for elemento in cnpj:\n novo_cnpj.append(str(elemento))\n novo = ''.join(novo_cnpj)\n return novo\n\ndef calculardigito(cnpj):\n soma = 0\n contador = 0\n if len(cnpj) == 12:\n contador = 1\n elif len(cnpj) == 13:\n contador = 0\n for elemento in cnpj:\n soma += elemento * lista_regressiva[contador]\n contador += 1\n return soma\n\ndef tiracaracter(cnpj):\n cnpj = cnpj.replace('/', '')\n cnpj = cnpj.replace('.', '')\n cnpj = cnpj.replace('-', '')\n return cnpj\n\ndef transforma_lista(cnpj):\n lista = []\n for elemento in cnpj:\n lista.append(int(elemento))\n return lista\n\ndef verfsequencia(cnpj):\n sequencia = cnpj[0] * len(cnpj)\n if sequencia == cnpj:\n print('The digits form a repeated sequence, so the CNPJ is not valid')\n\n\n","repo_name":"Bindeli/cursopython","sub_path":"desafiocnpj/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"33328908081","text":"from typing import Optional, Tuple\n\nfrom PyQt5.QtGui import QPixmap, QColor, QImage\nfrom PyQt5.QtWidgets import QWidget, QColorDialog, QLabel\n\nfrom gui.dialogs.edit.locstring import LocalizedStringDialog\nfrom pykotor.common.geometry import Vector2\nfrom pykotor.common.misc import Color, ResRef\nfrom pykotor.resource.formats.gff import write_gff\nfrom pykotor.resource.generics.are import ARE, dismantle_are, ARENorthAxis, AREWindPower, read_are\nfrom pykotor.resource.type import ResourceType\n\nfrom data.installation import HTInstallation\nfrom gui.editor import Editor\nfrom gui.widgets.long_spinbox import LongSpinBox\n\n\nclass AREEditor(Editor):\n def __init__(self, parent: Optional[QWidget], installation: Optional[HTInstallation] = None):\n supported = [ResourceType.ARE]\n super().__init__(parent, \"ARE Editor\", \"none\", supported, supported, installation)\n self.resize(400, 250)\n\n self._are: ARE = ARE()\n\n from toolset.uic.editors.are import Ui_MainWindow\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self._setupMenus()\n self._setupSignals()\n self._setupInstallation(installation)\n\n self.ui.dirtColor1Edit.allowAlpha = True\n self.ui.dirtColor2Edit.allowAlpha = True\n self.ui.dirtColor3Edit.allowAlpha = True\n\n self.new()\n\n def _setupSignals(self) -> None:\n self.ui.tagGenerateButton.clicked.connect(self.generateTag)\n\n def _setupInstallation(self, installation: HTInstallation) -> None:\n self._installation = installation\n\n 
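# The rest of the setup is data-driven: the camera-style combo box is filled\n # from the installation's cameras 2DA table, and the TSL-only widgets (dirt\n # colors, grass emissive, weather toggles) are shown only for TSL modules.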
self.ui.nameEdit.setInstallation(installation)\n\n cameras = installation.htGetCache2DA(HTInstallation.TwoDA_CAMERAS)\n\n self.ui.cameraStyleSelect.clear()\n [self.ui.cameraStyleSelect.addItem(label.title()) for label in cameras.get_column(\"name\")]\n\n self.ui.dirtGroup.setVisible(installation.tsl)\n self.ui.grassEmissiveEdit.setVisible(installation.tsl)\n self.ui.grassEmissiveLabel.setVisible(installation.tsl)\n self.ui.snowCheck.setVisible(installation.tsl)\n self.ui.rainCheck.setVisible(installation.tsl)\n self.ui.lightningCheck.setVisible(installation.tsl)\n\n def load(self, filepath: str, resref: str, restype: ResourceType, data: bytes) -> None:\n super().load(filepath, resref, restype, data)\n\n are = read_are(data)\n self._loadARE(are)\n\n def _loadARE(self, are: ARE) -> None:\n self._are = are\n\n # Basic\n self.ui.nameEdit.setLocstring(are.name)\n self.ui.tagEdit.setText(are.tag)\n self.ui.cameraStyleSelect.setCurrentIndex(are.camera_style)\n self.ui.envmapEdit.setText(are.default_envmap.get())\n self.ui.disableTransitCheck.setChecked(are.disable_transit)\n self.ui.unescapableCheck.setChecked(are.unescapable)\n self.ui.alphaTestSpin.setValue(are.alpha_test)\n self.ui.stealthCheck.setChecked(are.stealth_xp)\n self.ui.stealthMaxSpin.setValue(are.stealth_xp_max)\n self.ui.stealthLossSpin.setValue(are.stealth_xp_loss)\n\n # Map\n self.ui.mapAxisSelect.setCurrentIndex(are.north_axis)\n self.ui.mapZoomSpin.setValue(are.map_zoom)\n self.ui.mapResXSpin.setValue(are.map_res_x)\n self.ui.mapImageX1Spin.setValue(are.map_point_1.x)\n self.ui.mapImageX2Spin.setValue(are.map_point_2.x)\n self.ui.mapImageY1Spin.setValue(are.map_point_1.y)\n self.ui.mapImageY2Spin.setValue(are.map_point_2.y)\n self.ui.mapWorldX1Spin.setValue(are.world_point_1.x)\n self.ui.mapWorldX2Spin.setValue(are.world_point_2.x)\n self.ui.mapWorldY1Spin.setValue(are.world_point_1.y)\n self.ui.mapWorldY2Spin.setValue(are.world_point_2.y)\n\n # Weather\n self.ui.fogEnabledCheck.setChecked(are.fog_enabled)\n self.ui.fogColorEdit.setColor(are.fog_color)\n self.ui.fogNearSpin.setValue(are.fog_near)\n self.ui.fogFarSpin.setValue(are.fog_far)\n self.ui.ambientColorEdit.setColor(are.sun_ambient)\n self.ui.diffuseColorEdit.setColor(are.sun_diffuse)\n self.ui.dynamicColorEdit.setColor(are.dynamic_light)\n self.ui.windPowerSelect.setCurrentIndex(are.wind_power)\n self.ui.rainCheck.setChecked(are.chance_rain == 100)\n self.ui.snowCheck.setChecked(are.chance_snow == 100)\n self.ui.lightningCheck.setChecked(are.chance_lightning == 100)\n self.ui.shadowsCheck.setChecked(are.shadows)\n self.ui.shadowsSpin.setValue(are.shadow_opacity)\n\n # Terrain\n self.ui.grassTextureEdit.setText(are.grass_texture.get())\n self.ui.grassDiffuseEdit.setColor(are.grass_diffuse)\n self.ui.grassAmbientEdit.setColor(are.grass_ambient)\n self.ui.grassEmissiveEdit.setColor(are.grass_emissive)\n self.ui.grassDensitySpin.setValue(are.grass_density)\n self.ui.grassSizeSpin.setValue(are.grass_size)\n self.ui.grassProbLLSpin.setValue(are.grass_prob_ll)\n self.ui.grassProbLRSpin.setValue(are.grass_prob_lr)\n self.ui.grassProbULSpin.setValue(are.grass_prob_ul)\n self.ui.grassProbURSpin.setValue(are.grass_prob_ur)\n self.ui.dirtColor1Edit.setColor(are.dirty_argb_1)\n self.ui.dirtColor2Edit.setColor(are.dirty_argb_2)\n self.ui.dirtColor3Edit.setColor(are.dirty_argb_3)\n self.ui.dirtFormula1Spin.setValue(are.dirty_formula_1)\n self.ui.dirtFormula2Spin.setValue(are.dirty_formula_2)\n self.ui.dirtFormula3Spin.setValue(are.dirty_formula_3)\n 
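# dirt overlay channels 1-3: each channel loads a color, formula, function\n # and size value from the corresponding ARE fields below.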
self.ui.dirtFunction1Spin.setValue(are.dirty_func_1)\n self.ui.dirtFunction2Spin.setValue(are.dirty_func_2)\n self.ui.dirtFunction3Spin.setValue(are.dirty_func_3)\n self.ui.dirtSize1Spin.setValue(are.dirty_size_1)\n self.ui.dirtSize2Spin.setValue(are.dirty_size_2)\n self.ui.dirtSize3Spin.setValue(are.dirty_size_3)\n\n # Scripts\n self.ui.onEnterEdit.setText(are.on_enter.get())\n self.ui.onExitEdit.setText(are.on_exit.get())\n self.ui.onHeartbeatEdit.setText(are.on_heartbeat.get())\n self.ui.onUserDefinedEdit.setText(are.on_user_defined.get())\n\n # Comments\n self.ui.commentsEdit.setPlainText(are.comment)\n\n def build(self) -> Tuple[bytes, bytes]:\n are = self._are\n\n # Basic\n are.name = self.ui.nameEdit.locstring()\n are.tag = self.ui.tagEdit.text()\n are.camera_style = self.ui.cameraStyleSelect.currentIndex()\n are.default_envmap = ResRef(self.ui.envmapEdit.text())\n are.unescapable = self.ui.unescapableCheck.isChecked()\n are.disable_transit = self.ui.disableTransitCheck.isChecked()\n are.alpha_test = self.ui.alphaTestSpin.value()\n are.stealth_xp = self.ui.stealthCheck.isChecked()\n are.stealth_xp_max = self.ui.stealthMaxSpin.value()\n are.stealth_xp_loss = self.ui.stealthLossSpin.value()\n\n # Map\n are.north_axis = ARENorthAxis(self.ui.mapAxisSelect.currentIndex())\n are.map_zoom = self.ui.mapZoomSpin.value()\n are.map_res_x = self.ui.mapResXSpin.value()\n are.map_point_1 = Vector2(self.ui.mapImageX1Spin.value(), self.ui.mapImageY1Spin.value())\n are.map_point_2 = Vector2(self.ui.mapImageX2Spin.value(), self.ui.mapImageY2Spin.value())\n are.world_point_1 = Vector2(self.ui.mapWorldX1Spin.value(), self.ui.mapWorldY1Spin.value())\n are.world_point_2 = Vector2(self.ui.mapWorldX2Spin.value(), self.ui.mapWorldY2Spin.value())\n\n # Weather\n are.fog_enabled = self.ui.fogEnabledCheck.isChecked()\n are.fog_color = self.ui.fogColorEdit.color()\n are.fog_near = self.ui.fogNearSpin.value()\n are.fog_far = self.ui.fogFarSpin.value()\n are.sun_ambient = self.ui.ambientColorEdit.color()\n are.sun_diffuse = self.ui.diffuseColorEdit.color()\n are.dynamic_light = self.ui.dynamicColorEdit.color()\n are.wind_power = AREWindPower(self.ui.windPowerSelect.currentIndex())\n are.chance_rain = 100 if self.ui.rainCheck.isChecked() else 0\n are.chance_snow = 100 if self.ui.snowCheck.isChecked() else 0\n are.chance_lightning = 100 if self.ui.lightningCheck.isChecked() else 0\n are.shadows = self.ui.shadowsCheck.isChecked()\n are.shadow_opacity = self.ui.shadowsSpin.value()\n\n # Terrain\n are.grass_texture = ResRef(self.ui.grassTextureEdit.text())\n are.grass_diffuse = self.ui.grassDiffuseEdit.color()\n are.grass_ambient = self.ui.grassAmbientEdit.color()\n are.grass_emissive = self.ui.grassEmissiveEdit.color()\n are.grass_size = self.ui.grassSizeSpin.value()\n are.grass_density = self.ui.grassDensitySpin.value()\n are.grass_prob_ll = self.ui.grassProbLLSpin.value()\n are.grass_prob_lr = self.ui.grassProbLRSpin.value()\n are.grass_prob_ul = self.ui.grassProbULSpin.value()\n are.grass_prob_ur = self.ui.grassProbURSpin.value()\n are.dirty_argb_1 = self.ui.dirtColor1Edit.color()\n are.dirty_argb_2 = self.ui.dirtColor2Edit.color()\n are.dirty_argb_3 = self.ui.dirtColor3Edit.color()\n are.dirty_formula_1 = self.ui.dirtFormula1Spin.value()\n are.dirty_formula_2 = self.ui.dirtFormula2Spin.value()\n are.dirty_formula_3 = self.ui.dirtFormula3Spin.value()\n are.dirty_func_1 = self.ui.dirtFunction1Spin.value()\n are.dirty_func_2 = self.ui.dirtFunction2Spin.value()\n are.dirty_func_3 = self.ui.dirtFunction3Spin.value()\n 
are.dirty_size_1 = self.ui.dirtSize1Spin.value()\n are.dirty_size_2 = self.ui.dirtSize2Spin.value()\n are.dirty_size_3 = self.ui.dirtSize3Spin.value()\n\n # Scripts\n are.on_enter = ResRef(self.ui.onEnterEdit.text())\n are.on_exit = ResRef(self.ui.onExitEdit.text())\n are.on_heartbeat = ResRef(self.ui.onHeartbeatEdit.text())\n are.on_user_defined = ResRef(self.ui.onUserDefinedEdit.text())\n\n # Comments\n are.comment = self.ui.commentsEdit.toPlainText()\n\n data = bytearray()\n write_gff(dismantle_are(self._are), data)\n return data, b''\n\n def new(self) -> None:\n super().new()\n self._loadARE(ARE())\n\n def changeColor(self, colorSpin: LongSpinBox) -> None:\n qcolor = QColorDialog.getColor(QColor(colorSpin.value()))\n color = Color.from_bgr_integer(qcolor.rgb())\n colorSpin.setValue(color.bgr_integer())\n\n def redoColorImage(self, value: int, colorLabel: QLabel) -> None:\n color = Color.from_bgr_integer(value)\n r, g, b = int(color.r * 255), int(color.g * 255), int(color.b * 255)\n data = bytes([r, g, b] * 16 * 16)\n pixmap = QPixmap.fromImage(QImage(data, 16, 16, QImage.Format_RGB888))\n colorLabel.setPixmap(pixmap)\n\n def changeName(self) -> None:\n dialog = LocalizedStringDialog(self, self._installation, self.ui.nameEdit.locstring)\n if dialog.exec_():\n self._loadLocstring(self.ui.nameEdit, dialog.locstring)\n\n def generateTag(self) -> None:\n self.ui.tagEdit.setText(\"newarea\" if self._resref is None or self._resref == \"\" else self._resref)\n\n","repo_name":"NickHugi/HolocronToolset","sub_path":"toolset/gui/editors/are.py","file_name":"are.py","file_ext":"py","file_size_in_byte":11067,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"42689241896","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split, learning_curve\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import normalize\nfrom sklearn.linear_model import LinearRegression\n\n\ncauses_list = [\"508\", \"509\", \"510\", \"511\", \"512\", \"513\", \"514\", \"515\", \"516\", \"520\", \"-1\"]\n\n# For each cause load the dataset\nfor cause in causes_list:\n print(f\"Loading dataset for cause_id {cause}\")\n file_path = f\"training_data/cause_id_{cause}.csv\"\n if (cause == \"-1\"):\n file_path = \"training_data/all_data.csv\"\n training_df = pd.read_csv(file_path,\n dtype={\n \"parameter_85101\": \"float32\",\n \"parameter_88101\": \"float32\",\n \"parameter_44201\": \"float32\",\n \"parameter_42602\": \"float32\",\n \"parameter_42401\": \"float32\",\n \"parameter_42101\": \"float32\",\n \"mortality_rate\": \"float32\",\n \"cause_id\": \"float32\",\n })\n training_df = training_df.drop(columns=[\"fips\", \"year\"])\n\n if (cause != \"-1\"):\n training_df = training_df.drop(columns=[\"cause_id\"])\n \n training_df.columns = training_df.columns.astype(str) # Convert column names to strings\n\n # Split the data into training and validation sets\n train_df, val_df = train_test_split(training_df, test_size=0.2, random_state=42)\n\n\n train_df = normalize(train_df)\n val_df = normalize(val_df)\n train_df = pd.DataFrame(train_df, columns=training_df.columns)\n val_df = pd.DataFrame(val_df, columns=training_df.columns)\n\n X_train = train_df.drop(columns=[\"mortality_rate\"]).to_numpy()\n y_train = train_df[[\"mortality_rate\"]].to_numpy()\n X_val = 
val_df.drop(columns=[\"mortality_rate\"]).to_numpy()\n y_val = val_df[[\"mortality_rate\"]].to_numpy()\n \n # Train Linear Regression plotting the learning curve using learning_curve\n print(\"Training Linear Regression\")\n lr = LinearRegression()\n train_sizes, train_scores, test_scores = learning_curve(lr, X_train, y_train, scoring='neg_mean_squared_error')\n train_scores_mean = -train_scores.mean(axis = 1)\n plt.plot(train_sizes, train_scores_mean, label=\"Training error\")\n plt.xlabel(\"Training set size\")\n plt.ylabel(\"MSE\")\n plt.title(\"Learning curve for Linear Regression\")\n plt.legend()\n plt.savefig(f\"/home/enrico/Desktop/air_pollution_ai/sklearn/results/Linear_regressor/learning_curve_linear_regression_cause_{cause}.png\")\n plt.clf()","repo_name":"gp-1108/air_pollution_ai","sub_path":"sklearn/LinearRegressor_sklearn.py","file_name":"LinearRegressor_sklearn.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"38170130528","text":"import math\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import log_loss\nfrom time import time\nimport argparse\nimport LoadData as DATA\nfrom tensorflow.contrib.layers.python.layers import batch_norm as batch_norm\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n#################### Arguments ####################\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Run FM.\")\n parser.add_argument('--path', nargs='?', default='data/',\n help='Input data path.')\n parser.add_argument('--dataset', nargs='?', default='frappe',\n help='Choose a dataset.')\n parser.add_argument('--epoch', type=int, default=1000,\n help='Number of epochs.')\n parser.add_argument('--pretrain', type=int, default=-1,\n help='flag for pretrain. 1: initialize from pretrain; 0: randomly initialize; -1: save the model to pretrain file')\n parser.add_argument('--batch_size', type=int, default=2048,\n help='Batch size.')\n parser.add_argument('--hidden_factor', type=int, default=32,\n help='Number of hidden factors.')\n parser.add_argument('--relation_layers', nargs='?', default='[16, 4]',\n help=\"Size of each relation layer.\")\n parser.add_argument('--deep_layers', nargs='?', default='[64]',\n help=\"Size of each relation layer.\")\n parser.add_argument('--lamda', type=float, default=0,\n help='Regularizer for bilinear part.')\n parser.add_argument('--reg_scale', type=float, default=0.01,\n help='Regularizer for bilinear part.')\n parser.add_argument('--keep_prob', nargs='?', default='[0.5, 0.8, 0.8]', \n help='Keep probility (1-dropout_ratio) for the Bi-Interaction layer. 1: no dropout')\n parser.add_argument('--lr', type=float, default=0.05,\n help='Learning rate.')\n parser.add_argument('--loss_type', nargs='?', default='square_loss',\n help='Specify a loss type (square_loss or log_loss).')\n parser.add_argument('--optimizer', nargs='?', default='AdagradOptimizer',\n help='Specify an optimizer type (AdamOptimizer, AdagradOptimizer, GradientDescentOptimizer, MomentumOptimizer).')\n parser.add_argument('--verbose', type=int, default=1,\n help='Show the results per X epochs (0, 1 ... 
any positive integer)')\n parser.add_argument('--batch_norm', type=int, default=0,\n help='Whether to perform batch normaization (0 or 1)')\n parser.add_argument('--RFM', type=int, default=1,\n help='Whether to perform Relational network(0 or 1)')\n parser.add_argument('--DEEP_FM', type=int, default=0,\n help='Whether to perform deep neural FM (0 or 1)')\n\n\n return parser.parse_args()\n\nclass FM(BaseEstimator, TransformerMixin):\n def __init__(self, RFM, DEEP_FM, features_M, users_M, features_dim, pretrain_flag, save_file, hidden_factor, \n relation_layers, deep_layers, loss_type, epoch, batch_size, learning_rate, lamda_bilinear, reg_scale, keep,\n optimizer_type, batch_norm, verbose, random_seed=2016):\n # bind params to class\n self.RFM = RFM\n self.DEEP_FM = DEEP_FM\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.hidden_factor = hidden_factor\n self.relation_layers = relation_layers\n self.deep_layers = deep_layers\n self.save_file = save_file\n self.pretrain_flag = pretrain_flag\n self.loss_type = loss_type\n self.features_M = features_M\n self.users_M = users_M\n self.features_dim = features_dim\n self.lamda_bilinear = lamda_bilinear\n self.reg_scale = reg_scale\n self.keep = keep\n self.epoch = epoch\n self.random_seed = random_seed\n self.optimizer_type = optimizer_type\n self.batch_norm = batch_norm\n self.verbose = verbose\n # performance of each epoch\n self.train_rmse, self.valid_rmse, self.test_rmse = [], [], []\n self.num_interations = 90 \n \n # init all variables in a tensorflow graph\n self._init_graph()\n\n def _init_graph(self):\n '''\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n '''\n self.graph = tf.Graph()\n with self.graph.as_default(): # , tf.device('/cpu:0'):\n # Set graph level random seed\n tf.set_random_seed(self.random_seed)\n # Input data.\n self.train_features = tf.placeholder(tf.int32, shape=[None, None]) # None * features_M\n self.train_users = tf.placeholder(tf.int32, shape=[None]) # None \n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) # None * 1\n self.dropout_keep = tf.placeholder(tf.float32, shape=[len(self.keep)])\n self.train_phase = tf.placeholder(tf.bool)\n\n # Variables.\n self.weights = self._initialize_weights()\n nonzero_embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'], self.train_features)\n user_embeddings = tf.nn.embedding_lookup(self.weights['user_embeddings'], self.train_users)\n regularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_scale)\n # Model.\n\n self.FM = self.permutate(nonzero_embeddings, user_embeddings, regularizer)\n self.FM = tf.nn.dropout(self.FM, self.dropout_keep[1]) # dropout at the FM layer\n if not self.DEEP_FM:\n self.FM = tf.reduce_sum(self.FM, 1, keepdims=True) # None * 1\n #self.perm_result = self.FM \n \"\"\" \n # _________ sum_square part _____________\n # get the summed up embeddings of features.\n self.summed_features_emb = tf.reduce_sum(nonzero_embeddings, 1) # None * K\n # get the element-multiplication\n self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K\n\n # _________ square_sum part _____________\n self.squared_features_emb = tf.square(nonzero_embeddings)\n self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K\n\n # ________ FM __________\n self.FM = 0.5 * tf.subtract(self.summed_features_emb_square, self.squared_sum_features_emb) # None * K\n if self.batch_norm:\n self.FM = self.batch_norm_layer(self.FM, 
train_phase=self.train_phase, scope_bn='bn_fm')\n self.FM = tf.nn.dropout(self.FM, self.dropout_keep) # dropout at the FM layer\n self.FM = tf.reduce_sum(self.FM, 1, keepdims=True) # None * 1\n \n self.sqr_result = self.FM\n \"\"\"\n\n if self.DEEP_FM:\n self.FM = self.deep_FM(self.FM, regularizer)\n # _________out _________\n #self.Bilinear = tf.reduce_sum(self.FM, 1, keepdims=True) # None * 1\n self.Bilinear = self.FM\n self.Feature_bias = tf.reduce_sum(tf.nn.embedding_lookup(self.weights['feature_bias'], self.train_features) , 1) # None * 1\n Bias = self.weights['bias'] * tf.ones_like(self.train_labels) # None * 1\n self.out = tf.add_n([self.Bilinear, self.Feature_bias, Bias]) # None * 1\n\n\n\n # Compute the loss.\n try:\n self.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n self.reg_losses = tf.add_n(self.reg_losses)\n except:\n self.reg_losses = 0\n if self.loss_type == 'square_loss':\n if self.lamda_bilinear > 0:\n self.loss = tf.nn.l2_loss(\n tf.subtract(self.train_labels, self.out)) + \\\n tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(\n self.weights['feature_embeddings']) + self.reg_losses # regulizer\n else:\n self.loss = tf.nn.l2_loss(tf.subtract(self.train_labels, self.out)) \n \"\"\"\n elif self.loss_type == 'log_loss':\n self.out = tf.sigmoid(self.out)\n if self.lamda_bilinear > 0:\n self.loss = tf.contrib.losses.log_loss(\n self.out, self.train_labels, weight=1.0, epsilon=1e-07, scope=None) + \\\n tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(\n self.weights['feature_embeddings']) # regulizer\n else:\n self.loss = tf.contrib.losses.log_loss(self.out, self.train_labels, weight=1.0, epsilon=1e-07, scope=None)\n \"\"\"\n # Optimizer.\n if self.optimizer_type == 'AdamOptimizer':\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8).minimize(self.loss)\n elif self.optimizer_type == 'AdagradOptimizer':\n self.optimizer = tf.train.AdagradOptimizer(\n learning_rate=self.learning_rate, initial_accumulator_value=1e-8).minimize(self.loss)\n elif self.optimizer_type == 'GradientDescentOptimizer':\n self.optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=self.learning_rate).minimize(self.loss)\n elif self.optimizer_type == 'MomentumOptimizer':\n self.optimizer = tf.train.MomentumOptimizer(\n learning_rate=self.learning_rate, momentum=0.95).minimize(self.loss)\n\n # init\n self.saver = tf.train.Saver()\n init = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(init)\n\n # number of params\n total_parameters = 0\n for variable in self.weights.values():\n shape = variable.get_shape() # shape is an array of tf.Dimension\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim.value\n total_parameters += variable_parameters\n if self.verbose > 0:\n print(\"#params: %d\" %total_parameters) \n\n\n def permutate(self, embeddings, user_embeddings, regularizer):\n v_perm_list = []\n\n for i in range(self.features_dim):\n starter = i+1 if self.RFM else i+1 \n for j in range(starter, self.features_dim):\n if i == j:\n continue\n v_i = embeddings[:,i,:]\n v_j = embeddings[:,j,:]\n #dot product of two vectors\n if self.RFM:\n v_output = self.relation_network(v_i, v_j, user_embeddings, regularizer) \n #v_output = tf.add(v_i, v_j)\n else:\n v_output = tf.multiply(v_i, v_j)\n #v_output = tf.add(v_i,v_j)\n v_perm_list.append(tf.expand_dims(v_output, 1))\n v_concat = tf.concat(v_perm_list, axis=1) \n return tf.reduce_sum(v_concat, 1)\n \n def 
relation_network(self, v1, v2, user_emb, reg, reuse=tf.AUTO_REUSE):\n #handle with two feature embeddings\n layers = tf.multiply(v1, v2)\n #layers = tf.add(tf.matmul(xinput, self.weights['feature_relation_layer_0']), self.weights['feature_relation_bias_0']) # None * layer[i] * 1\n #layers = tf.nn.relu(layers)\n #layers = tf.add(tf.matmul(layers, self.weights['feature_relation_layer_1']), self.weights['feature_relation_bias_1']) # None * layer[i] * 1\n #layers = tf.nn.relu(layers)\n #layers = tf.layers.dropout(layers, rate=self.dropout_keep[0])\n\n # combines with user embedding\n layers = tf.concat([layers, user_emb], 1)\n layers = tf.add(tf.matmul(layers, self.weights['user_relation_layer_0']), self.weights['user_relation_bias_0']) # None * layer[i] * 1\n layers = tf.nn.relu(layers)\n layers = tf.layers.dropout(layers, rate=self.dropout_keep[0])\n\n return layers \n \n def deep_FM(self, xinput, reg):\n layers = xinput\n # ________ Deep Layers __________\n layers = tf.add(tf.matmul(layers, self.weights['deeplayer_0']), self.weights['deepbias_0']) # None * layer[i] * 1\n if self.batch_norm:\n layers = self.batch_norm_layer(layers, train_phase=self.train_phase, scope_bn='bn_0') # None * layer[i] * 1\n layers = tf.nn.relu(layers)\n layers = tf.nn.dropout(layers, self.dropout_keep[2]) # dropout at each Deep layer\n layers = tf.matmul(layers, self.weights['prediction']) # None * 1\n\n\n \"\"\"\n with tf.variable_scope('deep_FM', reuse=tf.AUTO_REUSE) as scope:\n layers = tf.layers.dense(xinput, 64, activation=tf.nn.relu, kernel_regularizer=reg)\n if self.batch_norm:\n layers = self.batch_norm_layer(layers, train_phase=self.train_phase, scope_bn='bn_1') # None * layer[i] * 1\n layers = tf.layers.dropout(layers, rate=0.5)\n \n layers = tf.layers.dense(layers, 1, activation=None, kernel_regularizer=reg)\n \"\"\"\n return layers\n \n def _initialize_weights(self):\n all_weights = dict()\n if self.pretrain_flag > 0:\n weight_saver = tf.train.import_meta_graph(self.save_file + '.meta')\n pretrain_graph = tf.get_default_graph()\n feature_embeddings = pretrain_graph.get_tensor_by_name('feature_embeddings:0')\n feature_bias = pretrain_graph.get_tensor_by_name('feature_bias:0')\n bias = pretrain_graph.get_tensor_by_name('bias:0')\n with tf.Session() as sess:\n weight_saver.restore(sess, self.save_file)\n fe, fb, b = sess.run([feature_embeddings, feature_bias, bias])\n all_weights['feature_embeddings'] = tf.Variable(fe, dtype=tf.float32)\n all_weights['feature_bias'] = tf.Variable(fb, dtype=tf.float32)\n all_weights['bias'] = tf.Variable(b, dtype=tf.float32)\n else:\n all_weights['feature_embeddings'] = tf.Variable(\n tf.random_normal([self.features_M, self.hidden_factor], 0.0, 0.01),\n name='feature_embeddings') # features_M * K\n all_weights['user_embeddings'] = tf.Variable(\n tf.random_normal([self.users_M, self.hidden_factor], 0.0, 0.01),\n name='user_embeddings') # features_M * K\n all_weights['feature_bias'] = tf.Variable(\n tf.random_uniform([self.features_M, 1], 0.0, 0.0), name='feature_bias') # features_M * 1\n all_weights['bias'] = tf.Variable(tf.constant(0.0), name='bias') # 1 * 1\n # relation layers\n glorot = np.sqrt(2.0 / (self.hidden_factor + self.relation_layers[0]))\n all_weights['feature_relation_layer_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.hidden_factor, self.relation_layers[0])), dtype=np.float32)\n all_weights['feature_relation_bias_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.relation_layers[0])), dtype=np.float32) # 1 * 
layers[0]\n\n glorot = np.sqrt(2.0 / (self.relation_layers[0] + self.relation_layers[1]))\n all_weights['feature_relation_layer_1'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.relation_layers[0], self.relation_layers[1])), dtype=np.float32)\n all_weights['feature_relation_bias_1'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.relation_layers[1])), dtype=np.float32) # 1 * layers[0]\n\n glorot = np.sqrt(2.0 / (self.hidden_factor + self.relation_layers[-1]))\n #all_weights['user_relation_layer_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.relation_layers[-1] + self.hidden_factor, self.relation_layers[1])), dtype=np.float32)\n all_weights['user_relation_layer_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.hidden_factor + self.hidden_factor, 1)), dtype=np.float32)\n\n all_weights['user_relation_bias_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, 1)), dtype=np.float32) # 1 * layers[0]\n\n glorot = np.sqrt(2.0 / (self.relation_layers[-1] + self.deep_layers[0]))\n if self.RFM:\n all_weights['deeplayer_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.relation_layers[-1], self.deep_layers[0])), dtype=np.float32)\n else:\n all_weights['deeplayer_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.hidden_factor, self.deep_layers[0])), dtype=np.float32)\n all_weights['deepbias_0'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])), dtype=np.float32) # 1 * layers[0]\n\n all_weights['prediction'] = tf.Variable(np.ones((self.deep_layers[-1], 1), dtype=np.float32)) # hidden_factor * 1\n\n return all_weights\n\n def batch_norm_layer(self, x, train_phase, scope_bn):\n # Note: the decay parameter is tunable\n bn_train = batch_norm(x, decay=0.9, center=True, scale=True, updates_collections=None,\n is_training=True, reuse=None, trainable=True, scope=scope_bn)\n bn_inference = batch_norm(x, decay=0.9, center=True, scale=True, updates_collections=None,\n is_training=False, reuse=True, trainable=True, scope=scope_bn)\n z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)\n return z\n\n def partial_fit(self, data): # fit a batch\n feed_dict = {self.train_features: data['X'], self.train_users: data['U'], self.train_labels: data['Y'], self.dropout_keep: self.keep, self.train_phase: True}\n loss, opt = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)\n #p_fm, s_fm = self.sess.run((self.perm_result, self.sqr_result), feed_dict=feed_dict)\n #print(p_fm[0], s_fm[0]) \n return loss\n\n def get_random_block_from_data(self, data, batch_size): # generate a random block of training data\n start_index = np.random.randint(0, len(data['Y']) - batch_size)\n X , Y, U = [], [], []\n # forward get sample\n i = start_index\n while len(X) < batch_size and i < len(data['X']):\n if len(data['X'][i]) == len(data['X'][start_index]):\n Y.append([data['Y'][i]])\n X.append(data['X'][i])\n U.append(data['U'][i])\n i = i + 1\n else:\n break\n # backward get sample\n i = start_index\n while len(X) < batch_size and i >= 0:\n if len(data['X'][i]) == len(data['X'][start_index]):\n Y.append([data['Y'][i]])\n X.append(data['X'][i])\n U.append(data['U'][i])\n i = i - 1\n else:\n break\n return {'X': X, 'Y': Y, 'U': U}\n\n def shuffle_in_unison_scary(self, a, b, c): # shuffle all lists simutaneously\n rng_state = np.random.get_state()\n np.random.shuffle(a)\n np.random.set_state(rng_state)\n np.random.shuffle(b)\n np.random.set_state(rng_state)\n 
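# restoring the captured RNG state before each shuffle applies the identical\n # permutation to a, b and c, keeping features, labels and users row-aligned.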
np.random.shuffle(c)\n\n\n def train(self, Train_data, Validation_data, Test_data): # fit a dataset\n # Check Init performance\n if self.verbose > 0:\n t2 = time()\n init_train = self.evaluate(Train_data)\n #init_train = 0\n init_valid = self.evaluate(Validation_data)\n init_test = self.evaluate(Test_data)\n print(\"Init: \\t train=%.4f, validation=%.4f, test=%.4f [%.1f s]\" %(init_train, init_valid, init_test, time()-t2))\n\n for epoch in range(self.epoch):\n t1 = time()\n self.shuffle_in_unison_scary(Train_data['X'], Train_data['Y'], Train_data['U'])\n total_batch = int(len(Train_data['Y']) / self.batch_size)\n for i in range(total_batch):\n # generate a batch\n batch_xs = self.get_random_block_from_data(Train_data, self.batch_size)\n # Fit training\n self.partial_fit(batch_xs)\n t2 = time()\n\n # output validation\n train_result = self.evaluate(Train_data)\n #train_result = 0\n valid_result = self.evaluate(Validation_data)\n test_result = self.evaluate(Test_data)\n\n self.train_rmse.append(train_result)\n self.valid_rmse.append(valid_result)\n self.test_rmse.append(test_result)\n if self.verbose > 0 and epoch%self.verbose == 0:\n print(\"Epoch %d [%.1f s]\\ttrain=%.4f, validation=%.4f, test=%.4f [%.1f s]\"\n %(epoch+1, t2-t1, train_result, valid_result, test_result, time()-t2))\n #if self.eva_termination(self.valid_rmse):\n # break\n\n #if self.pretrain_flag < 0:\n # print(\"Save model to file as pretrain.\")\n # self.saver.save(self.sess, self.save_file)\n\n def eva_termination(self, valid):\n if self.loss_type == 'square_loss':\n if len(valid) > 5:\n if valid[-1] > valid[-2] and valid[-2] > valid[-3] and valid[-3] > valid[-4] and valid[-4] > valid[-5]:\n return True\n else:\n if len(valid) > 5:\n if valid[-1] < valid[-2] and valid[-2] < valid[-3] and valid[-3] < valid[-4] and valid[-4] < valid[-5]:\n return True\n return False\n\n def evaluate(self, data): # evaluate the results for an input set\n num_example = len(data['Y'])\n feed_dict = {\n self.train_features: data['X'], \n self.train_users: data['U'],\n self.train_labels: [[y] for y in data['Y']], \n self.dropout_keep: [1.0 for i in range(len(self.keep))], \n self.train_phase: False}\n predictions = self.sess.run((self.out), feed_dict=feed_dict)\n y_pred = np.reshape(predictions, (num_example,))\n y_true = np.reshape(data['Y'], (num_example,))\n if self.loss_type == 'square_loss': \n predictions_bounded = np.maximum(y_pred, np.ones(num_example) * min(y_true)) # bound the lower values\n predictions_bounded = np.minimum(predictions_bounded, np.ones(num_example) * max(y_true)) # bound the higher values\n RMSE = math.sqrt(mean_squared_error(y_true, predictions_bounded))\n return RMSE\n elif self.loss_type == 'log_loss':\n logloss = log_loss(y_true, y_pred) # I haven't checked the log_loss\n return logloss\n''' # for testing the classification accuracy \n predictions_binary = [] \n for item in y_pred:\n if item > 0.5:\n predictions_binary.append(1.0)\n else:\n predictions_binary.append(0.0)\n Accuracy = accuracy_score(y_true, predictions_binary)\n return Accuracy '''\n\nif __name__ == '__main__':\n # Data loading\n args = parse_args()\n data = DATA.LoadData(args.path, args.dataset, args.loss_type, args.batch_size)\n if args.verbose > 0:\n print(\"FM: dataset=%s, factors=%d, loss_type=%s, #epoch=%d, batch=%d, lr=%.4f, lambda=%.1e, relation_layers=%s, keep=%s, optimizer=%s, batch_norm=%d, RFM=%s, DEEP_FM=%s\"\n %(args.dataset, args.hidden_factor, args.loss_type, args.epoch, args.batch_size,\n args.lr, args.lamda, args.relation_layers, 
args.keep_prob, args.optimizer, args.batch_norm, args.RFM, args.DEEP_FM))\n\n save_file = '../pretrain/%s_%d/%s_%d' %(args.dataset, args.hidden_factor, args.dataset, args.hidden_factor)\n # Training\n t1 = time()\n model = FM(\n args.RFM, \n args.DEEP_FM,\n data.features_M, \n data.users_M,\n data.feature_dim, \n args.pretrain, \n save_file, \n args.hidden_factor, \n eval(args.relation_layers),\n eval(args.deep_layers),\n args.loss_type, \n args.epoch, \n args.batch_size, \n args.lr, \n args.lamda, \n args.reg_scale, \n eval(args.keep_prob), \n args.optimizer, \n args.batch_norm, \n args.verbose)\n\n model.train(data.Train_data, data.Validation_data, data.Test_data)\n \n # Find the best validation result across iterations\n best_valid_score = 0\n if args.loss_type == 'square_loss':\n best_valid_score = min(model.valid_rmse)\n elif args.loss_type == 'log_loss':\n best_valid_score = max(model.valid_rmse)\n best_epoch = model.valid_rmse.index(best_valid_score)\n print (\"Best Iter(validation)= %d\\t train = %.4f, valid = %.4f, test = %.4f [%.1f s]\" \n %(best_epoch+1, model.train_rmse[best_epoch], model.valid_rmse[best_epoch], model.test_rmse[best_epoch], time()-t1))\n","repo_name":"ZSCDumin/Attribute_Relational_collaborative_filtering","sub_path":"RFM.py","file_name":"RFM.py","file_ext":"py","file_size_in_byte":25210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"15856720551","text":"'''\nThis script extracts data about space startups from spacebandits.io\n\nI'm thinking about applying for a job at a space company.\nIt would be great to have a list and compare them (or even perform some analysis).\nLet's automate the boring extraction.\n'''\nfrom bs4 import BeautifulSoup\nfrom openpyxl import Workbook\nimport requests\n\n\nspacebandits = 'https://www.spacebandits.io/startups'\ndef space_csv():\n\t# Export company's name, description and url to csv\n\tres = requests.get(spacebandits)\n\tres.raise_for_status()\n\tsoup = BeautifulSoup(res.text, 'lxml')\n\tcompany_url = soup.select('a.home-startups-link-block')\n\n\twith open('datasets/space_startups.csv', 'w') as file:\n\t\tfile.write('\"url\", \"name\", \"description\"\\n')\n\t\tfor company in company_url:\n\t\t\turl = 'https://www.spacebandits.io'+company['href']\n\t\t\tname = company.contents[0].contents[2].contents[0].string\n\t\t\tdesc = company.contents[0].contents[2].contents[1].string\n\t\t\tfile.write('\"{}\", \"{}\", \"{}\"\\n'.format(url, name, desc))\n\ndef extract_info(url):\n\t# Extract full details of each company from url\n\tres = requests.get(url)\n\tres.raise_for_status()\n\tsoup = BeautifulSoup(res.text, 'lxml')\n\n\tindustry_tag = soup.find('div', 'startup-industry-tag')\n\tcountry_flag = industry_tag.next_sibling\n\tinfo = soup.find_all('li', 'list-item-51')\n\n\turl = 'https://www.spacebandits.io'+soup.find('a', 'w--current')['href']\n\tname = soup.find('h1', 'startup-page-heading-name').string\n\tdescription = soup.find('h2', 'heading-15').string\n\twebsite = soup.find('a', 'button-startup-website-link')['href']\n\tmission = soup.find('p', 'startup-mission').string\n\tindustry = industry_tag.string\n\tyear_founded = info[0].string\n\tfunding_type = info[1].string\n\ttotal_funding = info[2].string\n\temployees = info[3].string\n\n\t# Country is invalid for mu-space because country_flag is empty\n\t# Also, I just noticed a country named flag. I looked into the site and discovered that it's Spain\n\ttry:\n\t\tcountry = country_flag['src'].split('-')[-2]\n\texcept Exception:\n\t\tcountry = 'unknown'\n\tprint('Extraction from {} successful'.format(url))\n\treturn (name, country, website, industry, description, mission, employees, year_founded, funding_type, total_funding, url)\n\ndef fetch_urls():\n\t# Fetch the url for each company\n\tprint('Fetching Company URLs')\n\tres = requests.get(spacebandits)\n\tres.raise_for_status()\n\tsoup = BeautifulSoup(res.text, 'lxml')\n\n\tprint('Soup made')\n\turl_tag = soup.find_all('a', 'home-startups-link-block')\n\turls = []\n\tprint('Got the tags')\n\n\tfor tag in url_tag:\n\t\turls.append('https://www.spacebandits.io'+tag['href'])\n\tprint('Got the url. Now exiting fetch')\n\treturn urls\n\ndef space_xl():\n\t# Save full company details into 'spacebandits.xlsx'\n\tprint('Create Workbook and sheet')\n\twb = Workbook(write_only=True)\n\tws = wb.create_sheet('Space companies', 0)\n\tws.append(('name', 'country', 'website', 'industry', 'description', 'mission', 'employees', 'year_founded', 'funding_type', 'total_funding', 'url'))\n\n\tprint('Go and fetch URLs')\n\turls = fetch_urls()\n\tnum = len(urls)\n\n\tfor i in range(num):\n\t\tprint('{}/{} Go and extract info from {}'.format(i+1, num, urls[i]))\n\t\tws.append(extract_info(urls[i]))\n\n\twb.save('datasets/spacebandits.xlsx')\n\tprint('spacebandits.xlsx successfully saved in datasets')\n\nspace_xl()\n","repo_name":"bizzyvinci/automate-the-boring-stuffs","sub_path":"space_companies.py","file_name":"space_companies.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"15254935131","text":"import torch.nn as nn\nimport torch\nimport layer_utils\nimport math\n\nclass globalNet(nn.Module):\n\n def __init__(self, input_sizes, output_shape, num_points):\n super(globalNet, self).__init__()\n\n self.layer1_1 = self._make_layer1(input_sizes[0])\n self.layer1_2 = self._make_layer2()\n self.layer1_3 = self._make_layer3(output_shape, num_points)\n\n self.layer2_1 = self._make_layer1(input_sizes[1])\n self.layer2_2 = self._make_layer2()\n self.layer2_3 = self._make_layer3(output_shape, num_points)\n\n self.layer3_1 = self._make_layer1(input_sizes[2])\n self.layer3_2 = self._make_layer2()\n self.layer3_3 = self._make_layer3(output_shape, num_points)\n\n self.layer4_1 = self._make_layer1(input_sizes[3])\n self.layer4_3 = self._make_layer3(output_shape, num_points)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer1(self, input_size):\n\n layers = []\n\n layers.append(nn.Conv2d(input_size, 256,\n kernel_size=1, stride=1, bias=False))\n layers.append(nn.BatchNorm2d(256))\n layers.append(nn.ReLU(inplace=True))\n\n return nn.Sequential(*layers)\n\n def _make_layer2(self):\n\n layers = []\n\n layers.append(torch.nn.Upsample(scale_factor=2, mode='bilinear'))\n layers.append(torch.nn.Conv2d(256, 256,\n kernel_size=1, stride=1, bias=True))\n\n return nn.Sequential(*layers)\n\n def _make_layer3(self, output_shape, num_points):\n\n layers = []\n\n layers.append(nn.Conv2d(256, 256,\n kernel_size=1, stride=1, bias=False))\n layers.append(nn.BatchNorm2d(256))\n layers.append(nn.ReLU(inplace=True))\n\n layers.append(nn.Conv2d(256, num_points,\n kernel_size=3, stride=1, padding=1, bias=False))\n layers.append(nn.BatchNorm2d(num_points))\n layers.append(nn.Upsample(size=output_shape, mode='bilinear'))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n\n x1_1 = self.layer1_1(x[0])\n x1_2 = self.layer1_2(x1_1)\n x1_3 = self.layer1_3(x1_1)\n\n x2_1 = self.layer2_1(x[1]) + x1_2\n x2_2 = self.layer2_2(x2_1)\n x2_3 = self.layer2_3(x2_1)\n\n x3_1 = self.layer3_1(x[2]) + x2_2\n x3_2 = self.layer3_2(x3_1)\n x3_3 = self.layer3_3(x3_1)\n\n x4_1 = self.layer4_1(x[3]) + x3_2\n x4_3 = self.layer4_3(x4_1)\n\n return [x4_1, x3_1, x2_1, x1_1], [x4_3, x3_3, x2_3, x1_3]\n\nclass refineNet(nn.Module):\n\n def __init__(self, input_size, out_shape, num_points):\n super(refineNet, self).__init__()\n self.layer1 = self._make_layer1(input_size, 0, out_shape)\n self.layer2 = self._make_layer1(input_size, 1, out_shape)\n self.layer3 = self._make_layer1(input_size, 2, out_shape)\n self.layer4 = self._make_layer1(input_size, 3, out_shape)\n\n self.final_branch = self._make_layer2(1024, num_points)\n\n def _make_layer1(self, input_size, num, output_shape):\n\n layers = []\n\n for i in range(num):\n layers.append(layer_utils.Bottleneck(input_size, 128))\n\n layers.append(nn.Upsample(size=output_shape, mode='bilinear'))\n\n return nn.Sequential(*layers)\n\n def _make_layer2(self, input_size, num_points):\n\n layers = []\n\n layers.append(layer_utils.Bottleneck(input_size, 128))\n layers.append(nn.Conv2d(256, num_points,\n kernel_size=3, stride=1, padding=1, bias=False))\n layers.append(nn.BatchNorm2d(num_points))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n\n x1 = self.layer1(x[0])\n x2 = self.layer2(x[1])\n x3 = self.layer3(x[2])\n x4 = self.layer4(x[3])\n\n out = torch.cat([x1, x2, x3, x4], dim=1)\n out = self.final_branch(out)\n\n return out\n","repo_name":"last-one/PyTorch-Cascaded-Pyramid-Network","sub_path":"lib/nets/global_refine_net.py","file_name":"global_refine_net.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"67"} +{"seq_id":"19847782094","text":"import logging\nfrom ..text import Text\nfrom ..roles import SEER\nfrom .base import BaseCommand, match_agent\n\nLOG = logging.getLogger(__name__)\n\n\nclass SorcerorCommand(BaseCommand):\n def welcome(self, srv=None, game=None, role=None):\n srv.broadcast(channel=role.channel,\n text=Text(\"At night, you may chose someone to OBSERVE in your private channel.\\n\"\n \"You will determine if that person is a seer.\"))\n\n def on_message(self, srv=None, game=None, role=None, channel=None, text=None):\n 
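"""Parse an 'observe <target>' command from the sorceror's private\n channel; returns True once the message has been handled."""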
LOG.debug(\"SorcerorCommand examines: game=%s, role=%s, channel=%s, text=%s\", game, role, channel, text)\n        if channel != role.channel:\n            return\n\n        scry = text.match(\"observe\", match_agent(game.players, any_agent=True))\n        if scry is not None:\n            target = scry[0]\n            if not self.is_relevant(game=game):\n                srv.broadcast(channel=channel, text=Text(\"You may not currently observe.\"))\n                return True\n            if target not in game.players:\n                srv.broadcast(channel=channel, text=Text(target, \" isn't a valid sorcery target.\"))\n                return True\n            srv.broadcast(channel=channel, text=Text(\"You chose to use your sorcerous sight on \", target))\n            game.scratchpad.phase_actions.sorceror[role] = game.roles[target]\n            return True\n\n    def ready(self, srv=None, game=None):\n        if game.current_phase.get('can_vote', False):\n            return\n        game.scratchpad.phase_actions.sorceror = {}\n\n    def resolve(self, srv=None, game=None):\n        if game.current_phase.get('can_vote', False):\n            return\n\n        for role, target in game.scratchpad.phase_actions.sorceror.items():\n            if target is not None:\n                if target.role.name == SEER:\n                    srv.broadcast(channel=role.channel,\n                                  text=Text(\"During the night you observe \", target.player, \".\\n\",\n                                            \"They are a seer!\"))\n                else:\n                    srv.broadcast(channel=role.channel,\n                                  text=Text(\"During the night you observe \", target.player, \".\\n\",\n                                            \"They are not a seer.\"))\n","repo_name":"jan-g/slackwolf","sub_path":"werewolf/commands/sorceror.py","file_name":"sorceror.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69916579415","text":"import os\nimport sys\nimport ssl\nimport json\nimport time\nimport urllib\nimport urllib3\nimport certifi\nimport logging\nimport pathlib\nimport requests\nimport textwrap\nimport subprocess\n\nfrom typing import Union\nfrom typing import List\nfrom typing import Dict\n\nfrom urllib3.exceptions import InsecureRequestWarning\nfrom ibw.clientportal import ClientPortal\n\nurllib3.disable_warnings(category=InsecureRequestWarning)\n# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\ntry:\n    _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n    # Legacy Python that doesn't verify HTTPS certificates by default\n    pass\nelse:\n    # Handle target environment that doesn't support HTTPS verification\n    ssl._create_default_https_context = _create_unverified_https_context\n\nlogging.basicConfig(\n    filename='app.log',\n    format='%(levelname)s - %(name)s - %(message)s',\n    level=logging.DEBUG\n)\n\n\nclass IBClient():\n\n    def __init__(self, username: str, account: str, client_gateway_path: str = None, is_server_running: bool = True) -> None:\n        \"\"\"Initializes a new instance of the IBClient Object.\n\n        Arguments:\n        ----\n        username {str} -- Your IB account username for either your paper or regular account.\n\n        account {str} -- Your IB account number for either your paper or regular account.\n\n        Keyword Arguments:\n        ----\n        client_gateway_path {str} -- The path to your Client Portal Gateway folder. (default: {None})\n\n        is_server_running {bool} -- True if the Client Portal Gateway is already running. (default: {True})\n\n        Usage:\n        ----\n            >>> ib_paper_session = IBClient(\n                username='IB_PAPER_USERNAME',\n                account='IB_PAPER_ACCOUNT',\n            )\n            >>> ib_paper_session\n            >>> ib_regular_session = IBClient(\n                username='IB_REGULAR_USERNAME',\n                account='IB_REGULAR_ACCOUNT',\n            )\n            >>> ib_regular_session\n        \"\"\"\n\n        self.account = account\n        self.username = username\n        self.client_portal_client = ClientPortal()\n\n        self.api_version = 'v1/'\n        self._operating_system = sys.platform\n        self.session_state_path: pathlib.Path = pathlib.Path(__file__).parent.joinpath('server_session.json').resolve()\n        self.authenticated = False\n        self._is_server_running = is_server_running\n\n        # Define URL Components\n        ib_gateway_host = r\"https://localhost\"\n        ib_gateway_port = r\"5000\"\n        self.ib_gateway_path = ib_gateway_host + \":\" + ib_gateway_port\n        self.backup_gateway_path = r\"https://cdcdyn.interactivebrokers.com/portal.proxy\"\n        self.login_gateway_path = self.ib_gateway_path + \"/sso/Login?forwardTo=22&RL=1&ip2loc=on\"\n\n\n        if client_gateway_path is None:\n\n            # Grab the Client Portal Path.\n            self.client_portal_folder: pathlib.Path = pathlib.Path(__file__).parents[1].joinpath(\n                'resources/clientportal.beta.gw'\n            ).resolve()\n\n            # See if it exists.\n            if not self.client_portal_folder.exists() and not self._is_server_running:\n                print(\"The Client Portal Gateway doesn't exist. You need to download it before using the Library.\")\n                print(\"Downloading the Client Portal file...\")\n                self.client_portal_client.download_and_extract()\n\n        else:\n\n            self.client_portal_folder = client_gateway_path\n\n        if not self._is_server_running:\n\n            # Load the Server State.\n            self.server_process = self._server_state(action='load')\n\n            # Log the initial Info.\n            logging.info(textwrap.dedent('''\n            =================\n            Initialize Client:\n            =================\n            Server Process: {serv_proc}\n            Operating System: {op_sys}\n            Session State Path: {state_path}\n            Client Portal Folder: {client_path}\n            ''').format(\n                    serv_proc=self.server_process,\n                    op_sys=self._operating_system,\n                    state_path=self.session_state_path,\n                    client_path=self.client_portal_folder\n                )\n            )\n        else:\n            self.server_process = None\n\n\n    def create_session(self, set_server=True) -> bool:\n        \"\"\"Creates a new session.\n\n        Creates a new session with Interactive Broker using the credentials\n        passed through when the Robot was initialized.\n\n        Usage:\n        ----\n            >>> ib_client = IBClient(\n                username='IB_PAPER_username',\n                account='IB_PAPER_account',\n            )\n            >>> server_response = ib_client.create_session()\n            >>> server_response\n            True\n\n        Returns:\n        ----\n        bool -- True if the session was created, False if it wasn't.\n        \"\"\"\n\n        # First, check whether the server is running; if it's not, we can start it up.\n        if self.server_process is None and not self._is_server_running:\n\n            # If it's None we need to connect first.\n            if set_server:\n                self.connect(start_server=True, check_user_input=True)\n            else:\n                self.connect(start_server=True, check_user_input=False)\n            return True\n\n        # then make sure the server is updated.\n        if self._set_server():\n            return True\n\n        # Try and authenticate.\n        auth_response = self.is_authenticated()\n\n        # Log the initial Info.\n        logging.info(textwrap.dedent('''\n        =================\n        Create Session:\n        =================\n        Auth Response: {auth_resp}\n        ''').format(\n                auth_resp=auth_response,\n            )\n        )\n\n        # Finally make sure we are authenticated.\n        if 'authenticated' in auth_response.keys() and auth_response['authenticated'] and self._set_server():\n            
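# The gateway reported an authenticated session and the target account was\n            # selected successfully, so the client can be marked ready.\n            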
self.authenticated = True\n            return True\n        else:\n            # In this case don't connect, but prompt the user to log in again.\n            self.connect(start_server=False)\n\n            if self._set_server():\n                self.authenticated = True\n                return True\n\n    def _set_server(self) -> bool:\n        \"\"\"Sets the server info for the session.\n\n        Sets the server for the session; if the server cannot be set, the\n        script will halt. Otherwise it returns True so the script can continue.\n\n        Returns:\n        ----\n        bool -- True if the server was set, False if it wasn't.\n        \"\"\"\n        success = '\\nNew session has been created and authenticated. Requests will not be limited.\\n'.upper()\n        failure = '\\nCould not create a new session that was authenticated, exiting script.\\n'.upper()\n\n        # Grab the Server accounts.\n        server_account_content = self.server_accounts()\n\n        # Try to do the quick way.\n        if (server_account_content and 'accounts' in server_account_content):\n            accounts = server_account_content['accounts']\n            if self.account in accounts:\n\n                # Log the response.\n                logging.debug(textwrap.dedent('''\n                =================\n                Set Server:\n                =================\n                Server Response: {serv_resp}\n                ''').format(\n                        serv_resp=server_account_content\n                    )\n                )\n\n                print(success)\n                return True\n        else:\n\n            # Update the Server.\n            server_update_content = self.update_server_account(\n                account_id=self.account,\n                check=False\n            )\n\n            # Grab the accounts.\n            server_account_content = self.server_accounts()\n\n            # Log the response.\n            logging.debug(textwrap.dedent('''\n            =================\n            Set Server:\n            =================\n            Server Response: {serv_resp}\n            Server Update Response: {auth_resp}\n            ''').format(\n                    auth_resp=server_update_content,\n                    serv_resp=server_account_content\n                )\n            )\n\n            # TO DO: Add check market hours here and then check for a mutual fund.\n            if (server_account_content and 'accounts' in server_account_content) or (server_update_content and 'message' in server_update_content):\n                print(success)\n                return True\n            else:\n                print(failure)\n                sys.exit()\n\n        # # TO DO: Add check market hours here and then check for a mutual fund.\n        # news = self.data_news(conid='265598')\n        # if news and 'news' in news:\n        #     print(success)\n        #     return True\n        # if server_account_content is not None and 'set' in server_update_content.keys() and server_update_content['set'] == True:\n        #     print(success)\n        #     return True\n        # elif ('message' in server_update_content.keys()) and (server_update_content['message'] == 'Account already set'):\n        #     print(success)\n        #     return True\n        # else:\n        #     print(failure)\n        #     sys.exit()\n\n    def _server_state(self, action: str = 'save') -> Union[None, int]:\n        \"\"\"Determines the server state.\n\n        Maintains the server state, so we can easily load a previous session,\n        save a new session, or delete a closed session.\n\n        Arguments:\n        ----\n        action {str} -- The action you wish to take to the `json` file. Can be one of the following options:\n\n        1. save - saves the current state and overwrites the old one.\n        2. load - loads the previous state from a session that has a server still running.\n        3. 
delete - deletes the state because the server has been closed.\n\n        Returns:\n        ----\n        Union[None, int] -- The Process ID of the Server.\n        \"\"\"\n\n        # Define file components.\n        file_exists = self.session_state_path.exists()\n\n        # Log the response.\n        logging.debug(textwrap.dedent('''\n        =================\n        Server State:\n        =================\n        Server State: {state}\n        State File: {exist}\n        ''').format(\n                state=action,\n                exist=file_exists\n            )\n        )\n\n        if action == 'save':\n\n            # Save the State.\n            with open(self.session_state_path, 'w') as server_file:\n                json.dump(\n                    obj={'server_process_id': self.server_process},\n                    fp=server_file\n                )\n\n        # If we are loading check the file exists first.\n        elif action == 'load' and file_exists:\n\n            try:\n                self.is_authenticated(check=True)\n                check_proc_id = False\n            except Exception:\n                check_proc_id = True\n\n            # Load it.\n            with open(self.session_state_path, 'r') as server_file:\n                server_state = json.load(fp=server_file)\n\n            # Grab the Process Id.\n            proc_id = server_state['server_process_id']\n\n            # If it's running return the process ID.\n            if check_proc_id:\n                is_running = self._check_if_server_running(process_id=proc_id)\n            else:\n                is_running = True\n\n            if is_running:\n                return proc_id\n\n        # Delete it.\n        elif action == 'delete' and file_exists:\n            self.session_state_path.unlink()\n\n    def _check_if_server_running(self, process_id: str) -> bool:\n        \"\"\"Used to see if the Clientportal Gateway is running.\n\n        Arguments:\n        ----\n        process_id (str): The process ID of the clientportal.\n\n        Returns:\n        ----\n        bool: `True` if running, `False` otherwise.\n        \"\"\"\n\n        if self._operating_system == 'win32':\n\n            # See if the Process is running.\n            with os.popen('tasklist') as task_list:\n\n                # Grab each task.\n                for process in task_list.read().splitlines()[4:]:\n\n                    if str(process_id) in process:\n\n                        # Log the response.\n                        logging.debug(textwrap.dedent('''\n                        =================\n                        Server Process:\n                        =================\n                        Process ID: {process}\n                        ''').format(\n                            process=process\n                        )\n                        )\n\n                        return True\n\n            # The process ID was not found in the task list.\n            return False\n\n        else:\n\n            try:\n                os.kill(process_id, 0)\n                return True\n            except OSError:\n                return False\n\n    def _check_authentication_user_input(self) -> bool:\n        \"\"\"Used to check the authentication of the Server.\n\n        Returns:\n        ----\n        bool: `True` if authenticated, `False` otherwise.\n        \"\"\"\n\n        max_retries = 0\n        # Retry up to four times, or until the session is authenticated.\n        while (max_retries < 4 and self.authenticated == False):\n\n            # Grab the User Request.\n            user_input = input(\n                'Would you like to make an authenticated request (Yes/No)? 
'\n            ).upper()\n\n            # If no, close the session.\n            if user_input == 'NO':\n                self.close_session()\n            # Else try and see if we are authenticated.\n            else:\n                auth_response = self.is_authenticated(check=True)\n\n                # Log the Auth Response.\n                logging.debug('Check User Auth Initial: {auth_resp}'.format(\n                    auth_resp=auth_response\n                )\n                )\n\n                if 'statusCode' in auth_response.keys() and auth_response['statusCode'] == 401:\n                    print(\"Session isn't connected, closing script.\")\n                    self.close_session()\n\n                elif 'authenticated' in auth_response.keys() and auth_response['authenticated'] == True:\n                    self.authenticated = True\n                    break\n\n                elif 'authenticated' in auth_response.keys() and auth_response['authenticated'] == False:\n                    valid_resp = self.validate()\n                    reauth_resp = self.reauthenticate()\n                    auth_response = self.is_authenticated()\n\n                    try:\n                        serv_resp = self.server_accounts()\n                        if 'accounts' in serv_resp:\n                            self.authenticated = True\n\n                            # Log the response.\n                            logging.debug('Had to do Server Account Request: {auth_resp}'.format(\n                                auth_resp=serv_resp\n                            )\n                            )\n                            break\n                    except Exception:\n                        pass\n\n                    logging.debug(\n                        '''\n                        Validate Response: {valid_resp}\n                        Reauth Response: {reauth_resp}\n                        '''.format(\n                            valid_resp=valid_resp,\n                            reauth_resp=reauth_resp\n                        )\n                    )\n\n            max_retries += 1\n\n        return self.authenticated\n\n    def _check_authentication_non_input(self) -> bool:\n        \"\"\"Runs the authentication protocol but without user input.\n\n        Returns:\n        ----\n        bool: `True` if authenticated, `False` otherwise.\n        \"\"\"\n\n        # Grab the auth response.\n        auth_response = self.is_authenticated(check=True)\n\n        # Log the Auth response.\n        logging.debug('Check Non-User Auth Initial: {auth_resp}'.format(\n            auth_resp=auth_response\n        )\n        )\n\n        # Fail early; a status code means we can't authenticate.\n        if 'statusCode' in auth_response:\n            print(\"Session isn't connected, closing script.\")\n            self.close_session()\n\n        # Grab the Auth Response Flag.\n        auth_response_value = auth_response.get('authenticated', None)\n\n        # If it's True, we are good.\n        if auth_response_value:\n            self.authenticated = True\n\n        # If not, try and reauthenticate.\n        elif not auth_response_value:\n\n            # Validate the session first.\n            self.validate()\n\n            # Then reauthenticate the session.\n            reauth_response = self.reauthenticate()\n\n            # See if it was triggered.\n            if 'message' in reauth_response:\n                self.authenticated = True\n            else:\n                self.authenticated = False\n\n        return self.authenticated\n\n    def _start_server(self) -> str:\n        \"\"\"Starts the Server.\n\n        Returns:\n        ----\n        str: The Server Process ID.\n        \"\"\"\n\n        # windows will use the command line application.\n        if self._operating_system == 'win32':\n            IB_WEB_API_PROC = [\"cmd\", \"/k\", r\"bin\\run.bat\", r\"root\\conf.yaml\"]\n            self.server_process = subprocess.Popen(\n                args=IB_WEB_API_PROC,\n                cwd=self.client_portal_folder,\n                creationflags=subprocess.CREATE_NEW_CONSOLE\n            ).pid\n\n        # mac will use the terminal.\n        elif self._operating_system == 'darwin':\n            IB_WEB_API_PROC = [\n                \"open\", \"-F\", \"-a\",\n                \"Terminal\", r\"bin/run.sh\", r\"root/conf.yaml\"\n            ]\n            self.server_process = subprocess.Popen(\n                args=IB_WEB_API_PROC,\n                cwd=self.client_portal_folder\n            ).pid\n\n        return self.server_process\n\n    def connect(self, start_server: bool = True, check_user_input: bool = True) -> bool:\n        \"\"\"Connects the session with the API.\n\n        Connects the session to the Interactive Broker API by starting up the Client Portal Gateway,\n        prompting the user to log in, and then returning the results back to the `create_session` method.\n\n        Arguments:\n        ----\n        start_server {bool} -- True if the 
server isn't running but needs to be started, False if it\n            is running and just needs to be authenticated.\n\n        Returns:\n        ----\n        bool -- `True` if it was connected.\n        \"\"\"\n\n        logging.debug('Running Client Folder at: {file_path}'.format(\n            file_path=self.client_portal_folder))\n\n        # If needed, start the server and save the State.\n        if start_server:\n            self._start_server()\n            self._server_state(action='save')\n\n        # Display prompt if needed.\n        if check_user_input:\n\n            print(textwrap.dedent(\"\"\"{lin_brk}\n            The Interactive Broker server is currently starting up, so we can authenticate your session.\n                STEP 1: GO TO THE FOLLOWING URL: {url}\n                STEP 2: LOGIN TO YOUR ACCOUNT WITH YOUR USERNAME AND PASSWORD.\n                STEP 3: WHEN YOU SEE `Client login succeeds` RETURN BACK TO THE TERMINAL AND TYPE `YES` TO CHECK IF THE SESSION IS AUTHENTICATED.\n                SERVER IS RUNNING ON PROCESS ID: {proc_id}\n            {lin_brk}\"\"\".format(\n                lin_brk='-'*80,\n                url=self.login_gateway_path,\n                proc_id=self.server_process\n            )\n            )\n            )\n\n            # Check the auth status\n            auth_status = self._check_authentication_user_input()\n\n        else:\n\n            auth_status = True\n\n        return auth_status\n\n    def close_session(self) -> None:\n        \"\"\"Closes the current session and kills the server using Taskkill.\"\"\"\n\n        print('\\nCLOSING SERVER AND EXITING SCRIPT.')\n\n        # Define the process command (note: TASKKILL is Windows-only).\n        process = \"TASKKILL /F /PID {proc_id} /T\".format(\n            proc_id=self.server_process\n        )\n\n        # Kill the process.\n        subprocess.call(process, creationflags=subprocess.DETACHED_PROCESS)\n\n        # Delete the state\n        self._server_state(action='delete')\n\n        # and exit.\n        sys.exit()\n\n    def _headers(self, mode: str = 'json') -> Dict:\n        \"\"\"Builds the headers.\n\n        Returns a dictionary of default HTTP headers for calls to the Interactive\n        Brokers API; in the headers we define the Authorization and access\n        token.\n\n        Arguments:\n        ----\n        mode {str} -- Defines the content-type for the headers dictionary.\n            default is 'json'. Possible values are ['json','form']\n\n        Returns:\n        ----\n        Dict\n        \"\"\"\n\n        if mode == 'json':\n            headers = {\n                'Content-Type': 'application/json'\n            }\n        elif mode == 'form':\n            headers = {\n                'Content-Type': 'application/x-www-form-urlencoded'\n            }\n        elif mode == 'none':\n            headers = None\n\n        return headers\n\n    def _build_url(self, endpoint: str) -> str:\n        \"\"\"Builds a url for a request.\n\n        Arguments:\n        ----\n        endpoint {str} -- The URL that needs conversion to a full endpoint URL.\n\n        Returns:\n        ----\n        {str} -- A full URL path.\n        \"\"\"\n\n        # otherwise build the URL\n        return urllib.parse.unquote(\n            urllib.parse.urljoin(\n                self.ib_gateway_path,\n                self.api_version\n            ) + r'portal/' + endpoint\n        )\n\n    def _make_request(self, endpoint: str, req_type: str, headers: str = 'json', params: dict = None, data: dict = None, json: dict = None) -> Dict:\n        \"\"\"Handles the request to the client.\n\n        Handles all the requests made by the client and correctly organizes\n        the information so it is sent correctly. Additionally it will also\n        build the URL.\n\n        Arguments:\n        ----\n        endpoint {str} -- The endpoint we wish to request.\n\n        req_type {str} -- Defines the type of request to be made. Can be one of four\n            possible values ['GET','POST','DELETE','PUT']\n\n        params {dict} -- Any arguments that are to be sent along in the request. 
That\n            could be parameters of a 'GET' request, or a data payload of a\n            'POST' request.\n\n        Returns:\n        ----\n        {Dict} -- A response dictionary.\n\n        \"\"\"\n        # First build the url.\n        url = self._build_url(endpoint=endpoint)\n\n        # Define the headers.\n        headers = self._headers(mode=headers)\n\n        # Make the request.\n        if req_type == 'POST':\n            response = requests.post(url=url, headers=headers, params=params, json=json, verify=False)\n        elif req_type == 'GET':\n            response = requests.get(url=url, headers=headers, params=params, json=json, verify=False)\n        elif req_type == 'DELETE':\n            response = requests.delete(url=url, headers=headers, params=params, json=json, verify=False)\n\n        # grab the status code\n        status_code = response.status_code\n\n        # grab the response headers.\n        response_headers = response.headers\n\n        # Check to see if it was successful\n        if response.ok:\n\n            # The gateway answers every portal endpoint with a JSON body, so decode it directly.\n            data = response.json()\n\n            # Log it.\n            logging.debug('''\n            Response Text: {resp_text}\n            Response URL: {resp_url}\n            Response Code: {resp_code}\n            Response JSON: {resp_json}\n            Response Headers: {resp_headers}\n            '''.format(\n                resp_text=response.text,\n                resp_url=response.url,\n                resp_code=status_code,\n                resp_json=data,\n                resp_headers=response_headers\n            )\n            )\n\n            return data\n\n        # if it was a bad request print it out.\n        elif not response.ok and url != 'https://localhost:5000/v1/portal/iserver/account':\n            print(url)\n            raise requests.HTTPError()\n\n    def _prepare_arguments_list(self, parameter_list: List[str]) -> str:\n        \"\"\"Prepares the arguments for the request.\n\n        Some endpoints can take multiple values for a parameter; this\n        method takes that list and creates a valid string that can be\n        used in an API request. The list can have either one index or\n        multiple indexes.\n\n        Arguments:\n        ----\n        parameter_list {List} -- A list of parameter values assigned to an argument.\n\n        Usage:\n        ----\n            >>> SessionObject._prepare_arguments_list(parameter_list=['MSFT','SQ'])\n\n        Returns:\n        ----\n        {str} -- The joined list.\n\n        \"\"\"\n\n        # validate it's a list.\n        if type(parameter_list) is list:\n\n            # specify the delimiter and join the list.\n            delimiter = ','\n            parameter_list = delimiter.join(parameter_list)\n\n        return parameter_list\n\n    \"\"\"\n    SESSION ENDPOINTS\n    \"\"\"\n\n    def validate(self) -> Dict:\n        \"\"\"Validates the current session for the SSO user.\"\"\"\n\n        # define request components\n        endpoint = r'sso/validate'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def tickle(self) -> Dict:\n        \"\"\"Keeps the session open.\n\n        If the gateway has not received any requests for several minutes an open session will\n        automatically timeout. The tickle endpoint pings the server to prevent the\n        session from ending.\n        \"\"\"\n\n        # define request components\n        endpoint = r'tickle'\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def logout(self) -> Dict:\n        \"\"\"Logs the session out.\n\n        Overview:\n        ----\n        Logs the user out of the gateway session. 
Any further \n        activity requires re-authentication.\n\n        Returns:\n        ----\n        (dict): A logout response.\n        \"\"\"\n\n        # Define request components.\n        endpoint = r'logout'\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def reauthenticate(self) -> Dict:\n        \"\"\"Reauthenticates an existing session.\n\n        Overview:\n        ----\n        Provides a way to reauthenticate to the Brokerage\n        system as long as there is a valid SSO session,\n        see /sso/validate.\n\n        Returns:\n        ----\n        (dict): A reauthentication response.\n        \"\"\"\n\n        # Define request components.\n        endpoint = r'iserver/reauthenticate'\n        req_type = 'POST'\n\n        # Make the request.\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def is_authenticated(self, check: bool = False) -> Dict:\n        \"\"\"Checks if the session is authenticated.\n\n        Overview:\n        ----\n        Returns the current authentication status to the Brokerage system. Market Data and\n        Trading are not possible if not authenticated, e.g. authenticated\n        shows `False`.\n\n        Returns:\n        ----\n        (dict): A dictionary with an authentication flag.\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/auth/status'\n\n        if not check:\n            req_type = 'POST'\n        else:\n            req_type = 'GET'\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            headers='none'\n        )\n\n        return content\n\n    def _fundamentals_summary(self, conid: str) -> Dict:\n        \"\"\"Grabs a financial summary of a company.\n\n        Returns a financial summary for a specific Contract ID. The financial summary\n        includes key ratios and descriptive components of the Contract ID.\n\n        Arguments:\n        ----\n        conid {str} -- The contract ID.\n\n        Returns:\n        ----\n        {Dict} -- The response dictionary.\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/fundamentals/{}/summary'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def _fundamentals_financials(self, conid: str, financial_statement: str, period: str = 'annual') -> Dict:\n        \"\"\"Grabs fundamental financial data.\n\n        Overview:\n        ----\n        Returns financial statement data for a specific Contract ID.\n\n        Arguments:\n        ----\n        conid (str): The contract ID.\n\n        financial_statement (str): The specific financial statement you wish to request\n            for the Contract ID. Possible values are ['balance','cash','income']\n\n        period (str, optional): The specific period you wish to see.\n            Possible values are ['annual','quarter']. 
Defaults to 'annual'.\n\n        Returns:\n        ----\n        Dict: Financial data for the specified contract ID.\n        \"\"\"\n\n        # define the period\n        if period == 'annual':\n            period = True\n        else:\n            period = False\n\n        # Build the arguments.\n        params = {\n            'type': financial_statement,\n            'annual': period\n        }\n\n        # define request components\n        endpoint = 'tws.proxy/fundamentals/financials/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _fundamentals_key_ratios(self, conid: str) -> Dict:\n        \"\"\"Returns key ratios for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'key_ratios'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _fundamentals_dividends(self, conid: str) -> Dict:\n        \"\"\"Returns dividend information for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'dividends'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _fundamentals_esg(self, conid: str) -> Dict:\n        \"\"\"Returns ESG (environmental, social, and governance) data for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'esg'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_news(self, conid: str) -> Dict:\n        \"\"\"\n        Returns recent news for a specific Contract ID, 
as surfaced by the portal's news widget.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'news',\n            'lang': 'en'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_ratings(self, conid: str) -> Dict:\n        \"\"\"Returns analyst ratings for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'ratings'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_events(self, conid: str) -> Dict:\n        \"\"\"Returns event data for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        # NOTE: 'ratings' appears to be carried over from `_data_ratings`; verify the\n        # correct widget key for events against the portal API before relying on this.\n        params = {\n            'widgets': 'ratings'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_ownership(self, conid: str) -> Dict:\n        \"\"\"Returns ownership information for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'ownership'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_competitors(self, conid: str) -> Dict:\n        \"\"\"Returns competitor information for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'competitors'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def _data_analyst_forecast(self, conid: str) -> Dict:\n        \"\"\"Returns analyst forecasts for a specific conid.\n\n        NAME: conid\n        DESC: The contract ID.\n        TYPE: String\n        \"\"\"\n\n        # Build the arguments.\n        params = {\n            'widgets': 'analyst_forecast'\n        }\n\n        # define request components\n        endpoint = 'fundamentals/landing/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def market_data(self, conids: List[str], since: str, fields: List[str]) -> Dict:\n        \"\"\"\n        Get Market Data for the given conid(s). The end-point will return by\n        default bid, ask, last, change, change pct, close, and listing exchange.\n        See the response fields for a list of available fields that can be requested\n        via the fields argument. The endpoint /iserver/accounts should be called\n        prior to /iserver/marketdata/snapshot. 
To receive all available fields\n        the /snapshot endpoint will need to be called several times.\n\n        NAME: conid\n        DESC: The list of contract IDs you wish to pull current quotes for.\n        TYPE: List\n\n        NAME: since\n        DESC: Time period since which updates are required.\n            Uses epoch time with milliseconds.\n        TYPE: String\n\n        NAME: fields\n        DESC: List of fields you wish to retrieve for each quote.\n        TYPE: List\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/marketdata/snapshot'\n        req_type = 'GET'\n\n        # join the two list arguments so they are both a single string.\n        conids_joined = self._prepare_arguments_list(parameter_list=conids)\n\n        if fields is not None:\n            fields_joined = \",\".join(str(n) for n in fields)\n        else:\n            fields_joined = \"\"\n\n        # define the parameters\n        if since is None:\n            params = {\n                'conids': conids_joined,\n                'fields': fields_joined\n            }\n        else:\n            params = {\n                'conids': conids_joined,\n                'since': since,\n                'fields': fields_joined\n            }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def market_data_history(self, conid: str, period: str, bar: str) -> Dict:\n        \"\"\"\n        Get historical market data for the given conid; the length of the data is controlled by period and\n        bar. e.g. a 1y period with bar=1w returns 52 data points.\n\n        NAME: conid\n        DESC: The contract ID for a given instrument. If you don't know the contract ID, use the\n            `symbol_search` endpoint to retrieve it.\n        TYPE: String\n\n        NAME: period\n        DESC: Specifies the period of look back. For example 1y means looking back 1 year from today.\n            Possible values are ['1d','1w','1m','1y']\n        TYPE: String\n\n        NAME: bar\n        DESC: Specifies the granularity of the data. For example, if bar = '1h' the data will be at an hourly level.\n            Possible values are ['5min','1h','1w']\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/marketdata/history'\n        req_type = 'GET'\n        params = {\n            'conid': conid,\n            'period': period,\n            'bar': bar\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def server_accounts(self):\n        \"\"\"\n        Returns a list of accounts the user has trading access to, their\n        respective aliases, and the currently selected account. Note this\n        endpoint must be called before modifying an order or querying\n        open orders.\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/accounts'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def update_server_account(self, account_id: str, check: bool = False) -> Dict:\n        \"\"\"\n        If a user has multiple accounts and wants to get orders, trades,\n        etc. for an account other than the currently selected one, they\n        can update the currently selected account using this API and then\n        fetch the required information for the newly selected account.\n\n        NAME: account_id\n        DESC: The account ID you wish to set for the API Session. 
This will be used to\n            grab historical data and make orders.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/account'\n        req_type = 'POST'\n        params = {\n            'acctId': account_id\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def server_account_pnl(self):\n        \"\"\"\n        Returns an object containing PnL for the selected account and its models\n        (if any).\n        \"\"\"\n\n        # define request components\n        endpoint = 'iserver/account/pnl/partitioned'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def symbol_search(self, symbol: str) -> Dict:\n        \"\"\"\n        Performs a symbol search for a given symbol and returns\n        information related to the symbol, including the contract id.\n        \"\"\"\n\n        # define the request components\n        endpoint = 'iserver/secdef/search'\n        req_type = 'POST'\n        payload = {\n            'symbol': symbol\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=payload\n        )\n\n        return content\n\n    def contract_details(self, conid: str) -> Dict:\n        \"\"\"\n        Get contract details; you can use this to prefill your order before you submit it.\n\n        NAME: conid\n        DESC: The contract ID you wish to get details for.\n        TYPE: String\n\n        RTYPE: Dictionary\n        \"\"\"\n\n        # define the request components\n        endpoint = '/iserver/contract/{conid}/info'.format(conid=conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def contracts_definitions(self, conids: List[str]) -> Dict:\n        \"\"\"\n        Returns a list of security definitions for the given conids.\n\n        NAME: conids\n        DESC: A list of contract IDs you wish to get details for.\n        TYPE: List\n\n        RTYPE: Dictionary\n        \"\"\"\n\n        # Define the request components.\n        endpoint = '/trsrv/secdef'\n        req_type = 'POST'\n        payload = {\n            'conids': conids\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=payload\n        )\n\n        return content\n\n    def futures_search(self, symbols: List[str]) -> Dict:\n        \"\"\"\n        Returns a list of non-expired future contracts for the given symbol(s).\n\n        NAME: Symbol\n        DESC: List of case-sensitive symbols separated by comma.\n        TYPE: List\n\n        RTYPE: Dictionary\n        \"\"\"\n\n        # define the request components\n        endpoint = '/trsrv/futures'\n        req_type = 'GET'\n        params = {\n            'symbols': '{}'.format(','.join(symbols))\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def symbols_search_list(self, symbols: List[str]) -> Dict:\n        \"\"\"\n        Returns contract information for the given stock symbol(s).\n\n        NAME: Symbol\n        DESC: List of case-sensitive symbols separated by comma.\n        TYPE: List\n\n        RTYPE: Dictionary\n        \"\"\"\n\n        # define the request components\n        endpoint = '/trsrv/stocks'\n        req_type = 'GET'\n        params = {'symbols': '{}'.format(','.join(symbols))}\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n\n    def portfolio_accounts(self):\n        \"\"\"\n        In non-tiered account structures, returns a list of accounts for which the\n        user can view position and account information. This endpoint must be called prior\n        to calling other /portfolio endpoints for those accounts. For querying a list of accounts\n        which the user can trade, see /iserver/accounts. For a list of subaccounts in tiered account\n        structures (e.g. 
financial advisor or ibroker accounts) see /portfolio/subaccounts.\n\n        \"\"\"\n\n        # define request components\n        endpoint = 'portfolio/accounts'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_sub_accounts(self):\n        \"\"\"\n        Used in tiered account structures (such as financial advisor and ibroker accounts) to return a\n        list of sub-accounts for which the user can view position and account-related information. This\n        endpoint must be called prior to calling other /portfolio endpoints for those subaccounts. To\n        query a list of accounts the user can trade, see /iserver/accounts.\n\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/subaccounts'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_account_info(self, account_id: str) -> Dict:\n        \"\"\"\n        Returns meta-data for the specified account. /portfolio/accounts or\n        /portfolio/subaccounts must be called prior to this endpoint.\n\n        NAME: account_id\n        DESC: The account ID you wish to return info for.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/{}/meta'.format(account_id)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_account_summary(self, account_id: str) -> Dict:\n        \"\"\"\n        Returns information about margin, cash balances and other information\n        related to the specified account. See also /portfolio/{accountId}/ledger.\n        /portfolio/accounts or /portfolio/subaccounts must be called\n        prior to this endpoint.\n\n        NAME: account_id\n        DESC: The account ID you wish to return info for.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/{}/summary'.format(account_id)\n        req_type = 'GET'\n        content = self._make_request(endpoint=endpoint, req_type=req_type)\n\n        return content\n\n    def portfolio_account_ledger(self, account_id: str) -> Dict:\n        \"\"\"\n        Information regarding settled cash, cash balances, etc. in the account's\n        base currency and any other cash balances held in other currencies. /portfolio/accounts\n        or /portfolio/subaccounts must be called prior to this endpoint. The list of supported\n        currencies is available at https://www.interactivebrokers.com/en/index.php?f=3185.\n\n        NAME: account_id\n        DESC: The account ID you wish to return info for.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/{}/ledger'.format(account_id)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_account_allocation(self, account_id: str) -> Dict:\n        \"\"\"\n        Information about the account's portfolio allocation by Asset Class, Industry and\n        Category. 
/portfolio/accounts or /portfolio/subaccounts must be called prior to\n        this endpoint.\n\n        NAME: account_id\n        DESC: The account ID you wish to return info for.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/{}/allocation'.format(account_id)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_accounts_allocation(self, account_ids: List[str]) -> Dict:\n        \"\"\"\n        Similar to /portfolio/{accountId}/allocation but returns a consolidated view of all the\n        accounts returned by /portfolio/accounts. /portfolio/accounts or /portfolio/subaccounts must\n        be called prior to this endpoint.\n\n        NAME: account_ids\n        DESC: A list of Account IDs you wish to return allocation info for.\n        TYPE: List\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/allocation'\n        req_type = 'POST'\n        payload = account_ids\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=payload\n        )\n\n        return content\n\n    def portfolio_account_positions(self, account_id: str, page_id: int = 0) -> Dict:\n        \"\"\"\n        Returns a list of positions for the given account. The endpoint supports paging;\n        a page's default size is 30 positions. /portfolio/accounts or /portfolio/subaccounts\n        must be called prior to this endpoint.\n\n        NAME: account_id\n        DESC: The account ID you wish to return positions for.\n        TYPE: String\n\n        NAME: page_id\n        DESC: The page you wish to return if there is more than one. The\n            default value is `0`.\n        TYPE: String\n\n        TODO: additional query arguments still need to be added.\n        \"\"\"\n\n        # define request components\n        endpoint = r'portfolio/{}/positions/{}'.format(account_id, page_id)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_account_position(self, account_id: str, conid: str) -> Dict:\n        \"\"\"\n        Returns a list of all positions matching the conid. For portfolio models the conid\n        could be in more than one model, returning an array with the name of the model it\n        belongs to. /portfolio/accounts or /portfolio/subaccounts must be called prior to\n        this endpoint.\n\n        NAME: account_id\n        DESC: The account ID you wish to return positions for.\n        TYPE: String\n\n        NAME: conid\n        DESC: The contract ID you wish to find matching positions for.\n        TYPE: String\n        \"\"\"\n\n        # Define request components.\n        endpoint = r'portfolio/{}/position/{}'.format(account_id, conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_positions_invalidate(self, account_id: str) -> Dict:\n        \"\"\"\n        Invalidates the backend cache of the portfolio.\n\n        NAME: account_id\n        DESC: The account ID you wish to return positions for.\n        TYPE: String\n        \"\"\"\n\n        # Define request components.\n        endpoint = r'portfolio/{}/positions/invalidate'.format(account_id)\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def portfolio_positions(self, conid: str) -> Dict:\n        \"\"\"\n        Returns an object of all positions matching the conid for all the selected accounts.\n        For portfolio models the conid could be in more than one model, returning an array\n        with the name of the model it belongs to. 
/portfolio/accounts or /portfolio/subaccounts\n        must be called prior to this endpoint.\n\n        NAME: conid\n        DESC: The contract ID you wish to find matching positions for.\n        TYPE: String\n        \"\"\"\n\n        # Define request components.\n        endpoint = r'portfolio/positions/{}'.format(conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def trades(self):\n        \"\"\"\n        Returns a list of trades for the currently selected account for the current day and\n        six previous days.\n        \"\"\"\n\n        # define request components\n        endpoint = r'iserver/account/trades'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def get_live_orders(self):\n        \"\"\"\n        The end-point is meant to be used in polling mode, e.g. requesting every\n        x seconds. The response will contain two objects: one is notifications, the\n        other is orders. Orders is the list of orders (cancelled, filled, submitted)\n        with activity in the current day. Notifications contains information about\n        executed orders as they happen; see the status field.\n        \"\"\"\n\n        # define request components\n        endpoint = r'iserver/account/orders'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def place_order(self, account_id: str, order: dict) -> Dict:\n        \"\"\"\n        Please note: sometimes this end-point alone can't make sure you submit the order\n        successfully. You could receive some questions in the response, and you have to answer\n        them in order to submit the order successfully. You can use the \"/iserver/reply/{replyid}\"\n        end-point to answer questions.\n\n        NAME: account_id\n        DESC: The account ID you wish to place an order for.\n        TYPE: String\n\n        NAME: order\n        DESC: Either an IBOrder object or a dictionary with the specified payload.\n        TYPE: IBOrder or Dict\n        \"\"\"\n\n        # Convert an IBOrder object to its dictionary payload.\n        if type(order) is not dict:\n            order = order.create_order()\n\n        # define request components\n        endpoint = r'iserver/account/{}/order'.format(account_id)\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=order\n        )\n\n        return content\n\n    def place_orders(self, account_id: str, orders: List[Dict]) -> Dict:\n        \"\"\"\n        An extension of the `place_order` endpoint but allows for a list of orders. 
Those orders may be\n        either a list of dictionary objects or a list of IBOrder objects.\n\n        NAME: account_id\n        DESC: The account ID you wish to place an order for.\n        TYPE: String\n\n        NAME: orders\n        DESC: Either a list of IBOrder objects or a list of dictionaries with the specified payload.\n        TYPE: List[IBOrder] or List[Dict]\n        \"\"\"\n\n        # Convert any IBOrder objects in the list to their dictionary payloads.\n        orders = [\n            order if type(order) is dict else order.create_order()\n            for order in orders\n        ]\n\n        # define request components\n        endpoint = r'iserver/account/{}/orders'.format(account_id)\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=orders\n        )\n\n        return content\n\n    def place_order_scenario(self, account_id: str, order: dict) -> Dict:\n        \"\"\"\n        This end-point allows you to preview an order without actually submitting it,\n        and you can get commission information in the response.\n\n        NAME: account_id\n        DESC: The account ID you wish to place an order for.\n        TYPE: String\n\n        NAME: order\n        DESC: Either an IBOrder object or a dictionary with the specified payload.\n        TYPE: IBOrder or Dict\n        \"\"\"\n\n        # Convert an IBOrder object to its dictionary payload.\n        if type(order) is not dict:\n            order = order.create_order()\n\n        # define request components\n        endpoint = r'iserver/account/{}/order/whatif'.format(account_id)\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=order\n        )\n\n        return content\n\n    def place_order_reply(self, reply_id: str = None, reply: str = None):\n        \"\"\"\n        Answers one of the confirmation questions raised by `place_order`.\n\n        NAME: reply_id\n        DESC: The ID of the question being answered, taken from the `place_order` response.\n        TYPE: String\n\n        NAME: reply\n        DESC: The answer to the question; pass `True` to confirm the order.\n        TYPE: Boolean\n        \"\"\"\n\n        # define request components\n        endpoint = r'iserver/reply/{}'.format(reply_id)\n        req_type = 'POST'\n        reply = {\n            'confirmed': reply\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=reply\n        )\n\n        return content\n\n    def modify_order(self, account_id: str, customer_order_id: str, order: dict) -> Dict:\n        \"\"\"\n        Modifies an open order. 
The /iserver/accounts endpoint must first\n        be called.\n\n        NAME: account_id\n        DESC: The account ID you wish to place an order for.\n        TYPE: String\n\n        NAME: customer_order_id\n        DESC: The customer order ID for the order you wish to MODIFY.\n        TYPE: String\n\n        NAME: order\n        DESC: Either an IBOrder object or a dictionary with the specified payload.\n        TYPE: IBOrder or Dict\n        \"\"\"\n\n        # Convert an IBOrder object to its dictionary payload.\n        if type(order) is not dict:\n            order = order.create_order()\n\n        # define request components\n        endpoint = r'iserver/account/{}/order/{}'.format(\n            account_id, customer_order_id)\n        req_type = 'POST'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=order\n        )\n\n        return content\n\n    def delete_order(self, account_id: str, customer_order_id: str) -> Dict:\n        \"\"\"Deletes the order specified by the customer order ID.\n\n        NAME: account_id\n        DESC: The account ID you wish to place an order for.\n        TYPE: String\n\n        NAME: customer_order_id\n        DESC: The customer order ID for the order you wish to DELETE.\n        TYPE: String\n        \"\"\"\n\n        # define request components\n        endpoint = r'iserver/account/{}/order/{}'.format(\n            account_id, customer_order_id)\n        req_type = 'DELETE'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def get_scanners(self):\n        \"\"\"Returns an object containing four lists that hold all the available scanner parameters.\n\n        RTYPE Dictionary\n        \"\"\"\n        # define request components\n        endpoint = r'iserver/scanner/params'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def run_scanner(self, instrument: str, scanner_type: str, location: str, size: str = '25', filters: List[dict] = None) -> Dict:\n        \"\"\"Run a scanner to get a list of contracts.\n\n        NAME: instrument\n        DESC: The type of financial instrument you want to scan for.\n        TYPE: String\n\n        NAME: scanner_type\n        DESC: The type of scanner you wish to run, defined by the scanner code.\n        TYPE: String\n\n        NAME: location\n        DESC: The geographic location you wish to run the scan in. For example (STK.US.MAJOR)\n        TYPE: String\n\n        NAME: size\n        DESC: The number of results to return. 
Defaults to 25.\n        TYPE: String\n\n        NAME: filters\n        DESC: A list of dictionaries where the key is the filter you wish to set and the value is the value you want set\n            for that filter.\n        TYPE: List[Dict]\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'iserver/scanner/run'\n        req_type = 'POST'\n        payload = {\n            \"instrument\": instrument,\n            \"type\": scanner_type,\n            \"filter\": filters,\n            \"location\": location,\n            \"size\": size\n        }\n\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=payload\n        )\n\n        return content\n\n    def customer_info(self) -> Dict:\n        \"\"\"Returns the Applicant ID with all owner-related entities.\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'ibcust/entity/info'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def get_unread_messages(self) -> Dict:\n        \"\"\"Returns the unread messages associated with the account.\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/unreadnumber'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def get_subscriptions(self) -> Dict:\n        \"\"\"Returns the current subscription choices; each option can be toggled.\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/settings'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def change_subscriptions_status(self, type_code: str, enable: bool = True) -> Dict:\n        \"\"\"Turns the subscription on or off.\n\n        NAME: type_code\n        DESC: The subscription code you wish to change the status for.\n        TYPE: String\n\n        NAME: enable\n        DESC: True if you want the subscription turned on, False if you want it turned off.\n        TYPE: Boolean\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/settings/{}'.format(type_code)\n        req_type = 'POST'\n        payload = {'enable': enable}\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            json=payload\n        )\n\n        return content\n\n    def subscriptions_disclaimer(self, type_code: str) -> Dict:\n        \"\"\"Returns the disclaimer for the specified subscription.\n\n        NAME: type_code\n        DESC: The subscription code you wish to return the disclaimer for.\n        TYPE: String\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/disclaimer/{}'.format(type_code)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def mark_subscriptions_disclaimer(self, type_code: str) -> Dict:\n        \"\"\"Sets the specified disclaimer to read.\n\n        NAME: type_code\n        DESC: The subscription code you wish to mark as read.\n        TYPE: String\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/disclaimer/{}'.format(type_code)\n        req_type = 'PUT'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def subscriptions_delivery_options(self):\n        \"\"\"Options for sending fyis to email and other devices.\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fyi/deliveryoptions'\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def mutual_funds_portfolios_and_fees(self, conid: str) -> Dict:\n        \"\"\"Grabs the fees and objectives for a specified mutual fund.\n\n        NAME: conid\n        DESC: The Contract ID for the mutual fund.\n        TYPE: String\n\n        RTYPE Dictionary\n        
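\n        USAGE (illustrative only; the conid below is a placeholder, not a real fund):\n        ----\n        >>> fees_profile = ib_client.mutual_funds_portfolios_and_fees(conid='123456')\n\n        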
\"\"\"\n\n        # define request components\n        endpoint = r'fundamentals/mf_profile_and_fees/{mutual_fund_id}'.format(\n            mutual_fund_id=conid)\n        req_type = 'GET'\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type\n        )\n\n        return content\n\n    def mutual_funds_performance(self, conid: str, risk_period: str, yield_period: str, statistic_period: str) -> Dict:\n        \"\"\"Grabs the Lipper rating for a specified mutual fund.\n\n        NAME: conid\n        DESC: The Contract ID for the mutual fund.\n        TYPE: String\n\n        NAME: yield_period\n        DESC: The period threshold for yield information.\n            possible values: ['6M', '1Y', '3Y', '5Y', '10Y']\n        TYPE: String\n\n        NAME: risk_period\n        DESC: The period threshold for risk information.\n            possible values: ['6M', '1Y', '3Y', '5Y', '10Y']\n        TYPE: String\n\n        NAME: statistic_period\n        DESC: The period threshold for statistic information.\n            possible values: ['6M', '1Y', '3Y', '5Y', '10Y']\n        TYPE: String\n\n        RTYPE Dictionary\n        \"\"\"\n\n        # define request components\n        endpoint = r'fundamentals/mf_performance/{mutual_fund_id}'.format(\n            mutual_fund_id=conid)\n        req_type = 'GET'\n        # Pass the caller's arguments through to the query string.\n        params = {\n            'risk_period': risk_period,\n            'yield_period': yield_period,\n            'statistic_period': statistic_period\n        }\n        content = self._make_request(\n            endpoint=endpoint,\n            req_type=req_type,\n            params=params\n        )\n\n        return content\n","repo_name":"areed1192/interactive-broker-python-api","sub_path":"ibw/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":65782,"program_lang":"python","lang":"en","doc_type":"code","stars":333,"dataset":"github-code","pt":"67"} +{"seq_id":"25682819731","text":"import numpy as np\nimport cv2\nfrom scipy import stats\n\n# Malisiewicz et al.\ndef non_max_suppression_fast(boxes, overlapThresh):\n\tif len(boxes) == 0:\n\t\treturn []\n\n\tif boxes.dtype.kind == \"i\":\n\t\tboxes = boxes.astype(\"float\")\n\n\tpick = []\n\n\tx1 = boxes[:,0]\n\ty1 = boxes[:,1]\n\tx2 = boxes[:,2]\n\ty2 = boxes[:,3]\n\n\tarea = (x2 - x1 + 1) * (y2 - y1 + 1)\n\tidxs = np.argsort(y2)\n\n\twhile len(idxs) > 0:\n\t\tlast = len(idxs) - 1\n\t\ti = idxs[last]\n\t\tpick.append(i)\n\n\t\txx1 = np.maximum(x1[i], x1[idxs[:last]])\n\t\tyy1 = np.maximum(y1[i], y1[idxs[:last]])\n\t\txx2 = np.minimum(x2[i], x2[idxs[:last]])\n\t\tyy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n\t\tw = np.maximum(0, xx2 - xx1 + 1)\n\t\th = np.maximum(0, yy2 - yy1 + 1)\n\n\t\toverlap = (w * h) / area[idxs[:last]]\n\n\t\tidxs = np.delete(idxs, np.concatenate(([last],\n\t\t\tnp.where(overlap > overlapThresh)[0])))\n\n\treturn boxes[pick].astype(\"int\")\n#\n\ndef Liang_Barsky_line_rect_collision(boxcoord, linecoord):\n\n\tline_x_start, line_y_start, line_x_end, line_y_end = linecoord\n\tx, y, x2, y2 = boxcoord\n\n\tp = [-(line_x_end - line_x_start), (line_x_end - line_x_start), -(line_y_end - line_y_start), (line_y_end - line_y_start)]\n\tq = [line_x_start - x, x2 - line_x_start, line_y_start - y, y2 - line_y_start ]\n\n\tu1 = -np.inf\n\tu2 = np.inf\n\n\tfor i in range(4):\n\t\t# A zero p[i] means the line is parallel to this clipping boundary;\n\t\t# if it also lies outside that boundary (q[i] < 0), there is no collision.\n\t\tif p[i] == 0:\n\t\t\tif q[i] < 0:\n\t\t\t\treturn False\n\t\t\tcontinue\n\t\tt = float(q[i])/p[i]\n\t\tif (p[i] < 0 and u1 < t): u1 = t\n\t\telif (p[i] > 0 and u2 > t): u2 = t\n\t#\n\n\tif (u1 > u2 or u1 > 1 or u1 < 0):\n\t\tcollision = False\n\telse:\n\t\tcollision = True\n\n\treturn collision\n#\n\ndef bb_intersection_over_union(boxA, boxB):\n\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n\n\tinterArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 
\ndef mark_Box_to_Img(outimg, boxlist, color):\n\n\tfor i in boxlist:\n\t\tif len(i) == 4:\n\t\t\tx, y, x2, y2 = i\n\t\telse:\n\t\t\tx, y, x2, y2, origx, origy = i\n\t\tcv2.rectangle(outimg, (x,y), (x2, y2), color, 2)\n\t#\n\treturn outimg\n#\n\ndef mark_Dot_to_Img(outimg, boxlist, color):\n\n\tfor i in boxlist:\n\t\tif len(i) == 4:\n\t\t\tx, y, x2, y2 = i\n\t\telse:\n\t\t\tx, y, x2, y2, origx, origy = i\n\t\n\t\t# integer division keeps the centre a valid integer pixel coordinate for cv2\n\t\tcenter_x = x + (x2 - x)//2\n\t\tcenter_y = y + (y2 - y)//2\n\n\t\tcv2.circle(outimg, (center_x, center_y), 2, color, -1)\n\t#\n\treturn outimg\n#\n\ndef calc_gradCenter(grey_patch, boxcoord):\n\n\tgradcenter = (-1, -1)\n\tx, y, x2, y2 = boxcoord\n\th = y2 - y\n\tw = x2 - x\n\n\timg_h, img_w = grey_patch.shape\n\tif not img_h%2 == 0: img_h += 1\n\tif not img_w%2 == 0: img_w += 1\n\tgrey_patch = cv2.resize(grey_patch, (img_w, img_h))\n\n\tcrop_up_half = np.hsplit(np.vsplit(grey_patch, 2)[0], 2)\n\tcrop_down_half = np.hsplit(np.vsplit(grey_patch, 2)[-1], 2)\n\n\tcrop_up_left = crop_up_half[0]\n\tcrop_up_right = crop_up_half[-1]\n\tcrop_down_left = crop_down_half[0]\n\tcrop_down_right = crop_down_half[-1]\n\n\tscore_list = [np.sum(crop_up_left),np.sum(crop_up_right),np.sum(crop_down_left),np.sum(crop_down_right)]\n\t#score_list = [stats.mode(crop_up_left,axis=None),stats.mode(crop_up_right,axis=None),stats.mode(crop_down_left,axis=None),stats.mode(crop_down_right,axis=None)]\n\tseq = sorted(score_list)\n\tscore_index = [seq.index(v) for v in score_list]\n\t\n\tmaxindex = score_list.index(max(score_list))\n\tminindex = score_list.index(min(score_list))\t\n\t#secondindex = score_list.index(score_list[score_index.index(1)])\n\tif maxindex == 0:\n\t\t#if minindex == 3: gradcenter = (x, y)\n\t\t#elif secondindex == 3: gradcenter = (x + w/2, y + h/2)\t\t\t\n\t\tgradcenter = (x, y)\n\n\telif maxindex == 1:\n\t\t#if minindex == 2: gradcenter = (x2, y)\n\t\t#elif secondindex == 2: gradcenter = (x + w/2, y + h/2)\t\t\t\n\t\tgradcenter = (x2, y)\t\n\n\telif maxindex == 2:\n\t\t#if minindex == 1: gradcenter = (x, y2)\n\t\t#elif secondindex == 1: gradcenter = (x + w/2, y + h/2)\t\t\t\n\t\tgradcenter = (x, y2)\n\n\telif maxindex == 3:\n\t\t#if minindex == 0: gradcenter = (x2, y2)\n\t\t#elif secondindex == 0: gradcenter = (x + w/2, y + h/2)\t\t\t\n\t\tgradcenter = (x2, y2)\n\t#\t\n\n\treturn gradcenter[0], gradcenter[1], maxindex\n#\n\ndef save_Boxlist_to_Img(img, samplename, chrname, boxlist, savepath, patch_class=0, padding=0, resize=None):\n\n\tfor idx,box in enumerate(boxlist):\n\t\tx, y, x2, y2 = box\n\t\timgpatch = img[y-padding:y2+padding, x-padding:x2+padding]\n\t\n\t\timgsavename = samplename + \"_\" + chrname + \"_\" + str(patch_class) + \"_\" + str(idx) + \".png\"\n\t\tfullimgsavename = savepath + \"/\" + imgsavename\n\t\tif resize is None:\n\t\t\tcv2.imwrite(fullimgsavename, imgpatch)\n\t\telse:\n\t\t\timgpatch = cv2.resize(imgpatch, (resize, resize))\n\t\t\tcv2.imwrite(fullimgsavename, imgpatch)\n\t\t#\n\t#\t\n\treturn\n#\n\n\n","repo_name":"kaistcbfg/CAPReSEv1","sub_path":"imgutils.py","file_name":"imgutils.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"7348820830","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n\nimport itertools\n\n\nclass Poly:\n\t\"\"\"A class representing polynomials.\"\"\"\n\n\t# following Sedgewick - create the polynomial c*x^n\n\tdef __init__(self, c=0, n=0):\n\t\tself.size = n + 1\t # array size\n\t\tself.a = self.size * [0]\n\t\tself.a[self.size-1] = c\n
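\n\t# Coefficients are stored little-endian: self.a[i] holds the coefficient\n\t# of x^i, so Poly(5, 3) represents 5*x^3 and prints as [0, 0, 0, 5].\n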
\n\tdef __str__(self):\n\t\treturn str(self.a)\n\n\tdef __add__(self, other): # poly1 + poly2\n\t\t# promote a scalar to a constant polynomial instead of mutating\n\t\t# self in place, as the original version did\n\t\tif isinstance(other, (int,long,float)):\n\t\t\tother = Poly(other,0)\n\t\tif self.is_zero() and other.is_zero():\n\t\t\treturn Poly(0,0)\n\t\telif self.is_zero():\n\t\t\treturn other\n\t\telif other.is_zero():\n\t\t\treturn self\n\t\tnewPoly = Poly()\n\t\tL = [x+y for (x,y) in itertools.izip_longest(self.a, other.a, fillvalue=0 )]\n\t\tnewPoly.a = L\n\t\tnewPoly.size = len(L)\n\t\treturn newPoly\n\n\t__radd__ = __add__\t\t\t # int+poly\n\n\tdef __rsub__(self, other): # int-poly\n\t\tif isinstance(other, (int,float,long)):\n\t\t\tP = Poly(other,0)\n\t\t\treturn P - self\n\t\telse:\n\t\t\treturn other - self\n\n\n\tdef __sub__(self, other): # poly1 - poly2\n\t\t# as in __add__, a scalar operand becomes a constant polynomial\n\t\tif isinstance(other, (int,float,long)):\n\t\t\tother = Poly(other,0)\n\t\tif self.is_zero() and other.is_zero():\n\t\t\treturn Poly(0,0)\n\t\telif self.is_zero():\n\t\t\t# 0 - other is the negation, not other itself (bug in the original)\n\t\t\treturn -other\n\t\telif other.is_zero():\n\t\t\treturn self\n\t\tnewPoly = Poly()\n\t\tL = [x-y for (x,y) in itertools.izip_longest(self.a, other.a, fillvalue=0 )]\n\t\tnewPoly.a = L\n\t\tnewPoly.size = len(L)\n\t\treturn newPoly\n\n\tdef __mul__(self, other): # poly1 * poly2\n\t\t# accept any scalar, not only int, by promoting it first\n\t\tif isinstance(other, (int,long,float)):\n\t\t\treturn Poly(other,0)*self\n\t\tL = [0]*(self.size+other.size-1)\n\t\tfor i in range(self.size):\n\t\t\tfor j in range(other.size):\n\t\t\t\tL[i+j] = L[i+j] + self.a[i] * other.a[j]\n\t\tnewPoly = Poly()\n\t\tnewPoly.a = L\n\t\tnewPoly.size = len(L)\n\t\treturn newPoly\n\n\t__rmul__ = __mul__\t\t\t # int*poly\n\n\tdef __pos__(self):\t\t # +poly1 = (+1)*poly1\n\t\treturn self\n\n\tdef __neg__(self):\t\t # -poly1 = (-1)*poly1\n\t\tnewPoly = Poly(0,self.size-1)\n\t\tfor i in range(self.size):\n\t\t\tnewPoly.a[i] = self.a[i] * -1 \n\t\treturn newPoly\n\n\tdef __eq__(self, other):\t# handles poly1 == poly2\n\t\tif self.size != other.size :\n\t\t\treturn False\n\t\telse:\n\t\t\tfor i in range(self.size):\n\t\t\t\tif self.a[i] != other.a[i]:\n\t\t\t\t\treturn False\n\t\treturn True\n\n\tdef __ne__(self, other):\t\t# handles poly1 != poly2\n\t\treturn not self == other\n\n\tdef eval(self, x):\t\t # Horner's scheme\n\t\tres = 0 \n\t\tfor item in reversed(self.a):\n\t\t\tres = res*x+item\n\t\treturn res\n\t\n\n\tdef combine(self, other):\t # composition poly1(poly2(x))\n\t\t# start from the constant term (which must not be multiplied by other,\n\t\t# as the original did) and accumulate other**i for higher coefficients\n\t\tR2 = Poly(self.a[0],0)\n\t\tR1 = Poly(1,0)\n\t\tfor i in range(1, self.size):\n\t\t\tR1 = R1 * other\n\t\t\tR2 = R2 + ( R1 * Poly(self.a[i],0) )\n\t\t\t\n\t\treturn R2\n\n\n\tdef __pow__(self, n):\t # poly(x)**n or pow(poly(x),n)\n\t\tif n <= 1 :\n\t\t\traise ValueError \n\t\tG = self\n\t\tfor i in range(n-1):\n\t\t\tG = G * self\n\t\treturn G\n\n\n\tdef diff(self):\t\t\t # differentiation\n\t\tif self.is_zero():\n\t\t\traise ValueError\n\t\tL = [0]*(self.size-1)\t\n\t\tfor i in range(self.size-1):\n\t\t\tL[i] = self.a[i+1]*(i+1)\n\t\tnewPoly = Poly()\n\t\tnewPoly.size = len(L)\n\t\tnewPoly.a = L\n\t\treturn newPoly\n\n\n\tdef integrate(self):\t\t# integration\n\t\tif self.is_zero():\n\t\t\traise ValueError\n\t\tL = [0.0]*(self.size+1)\n\t\tfor i in range(self.size):\n\t\t\tL[i+1] = float(self.a[i])/float((i+1))\n\t\tnewPoly = Poly()\n\t\tnewPoly.size = len(L)\n\t\tnewPoly.a = L\n\t\treturn newPoly\n
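\n\t# Worked example (matching the tests below): [8, 2, 6, 12, 25] encodes\n\t# 8 + 2x + 6x^2 + 12x^3 + 25x^4; integrate() maps it to\n\t# [0.0, 8.0, 1.0, 2.0, 3.0, 5.0] and diff() maps that back again.\n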
\n\tdef is_zero(self):\t\t # bool, True for [0], [0, 0],...\n\t\tfor i in range(self.size):\n\t\t\tif self.a[i] != 0 :\n\t\t\t\treturn False\n\t\treturn True\n\n\n\tdef __len__(self):\t\t # len(poly), the size of self.a\n\t\treturn len(self.a)\n\n\tdef __getitem__(self, i):\t\t\t # poly[i], the coefficient of x^i\n\t\treturn self.a[i]\n\n\tdef __setitem__(self, i, value):\t # poly[i] = value\n\t\tself.a[i] = value\n\n\tdef __call__(self, x):\t # poly(x)\n\t\tif isinstance(x, (int,long,float) ):\n\t\t\treturn self.eval(x)\n\t\telif isinstance(x, Poly):\n\t\t\treturn self.combine(x)\n\n\t# for isinstance(x, (int,long,float)) this corresponds to eval(),\n\t# for isinstance(x, Poly) it corresponds to combine()\n\n# Code that tests the module.\n\nimport unittest\n\nclass TestPoly(unittest.TestCase): \n\n\tdef setUp(self): \n\t\tself.one = Poly(5,5)\n\t\tself.one.a[4] = 3\n\t\tself.one.a[3] = 2\n\t\tself.one.a[2] = 1\n\t\tself.one.a[1] = 8\n\t\tself.two = Poly(4,4)\n\t\tself.two.a[2] = 2\n\t\tself.two.a[1] = 1\n\t\tself.two.a[0] = 8\n\n\n\tdef test_print(self): \n\t\tself.assertEquals( str(self.one), \"[0, 8, 1, 2, 3, 5]\")\n\n\tdef test_add(self): \n\t\tself.assertEquals( str(self.one + Poly(self.one.a[1],0)), \"[8, 8, 1, 2, 3, 5]\" )\n\t\tself.assertEquals( str(self.one + self.two) , \"[8, 9, 3, 2, 7, 5]\")\n\t\tself.assertEquals( str(self.two + self.one) , \"[8, 9, 3, 2, 7, 5]\")\n\t\tself.assertEquals( str(self.one + self.one) , \"[0, 16, 2, 4, 6, 10]\")\n\t\tself.assertEquals( str(self.one + 12 )\t , \"[12, 8, 1, 2, 3, 5]\") \n\n\tdef test_sub(self): \n\t\tself.assertEquals( str(self.one - self.two) , \"[-8, 7, -1, 2, -1, 5]\")\n\t\tself.assertEquals( str(self.two - self.one) , \"[8, -7, 1, -2, 1, -5]\")\n\t\tself.assertEquals( str(self.one - self.one) , \"[0, 0, 0, 0, 0, 0]\")\n\t\tself.assertEquals( str(12 - self.one)\t , \"[12, -8, -1, -2, -3, -5]\") \n\t\tself.assertEquals( str(self.one - 12)\t , \"[-12, 8, 1, 2, 3, 5]\") \n\n\tdef test_mul(self):\n\t\tone = Poly(2,3)\n\t\tone.a[2] = 4\n\t\tone.a[1] = -1\n\t\tone.a[0] = -1\n\t\ttwo = Poly(1,4)\n\t\ttwo.a[1] = 1\n\t\ttwo.a[0] = 1\n\t\tself.assertEquals( str(two * Poly(1,1)), \"[0, 1, 1, 0, 0, 1]\") \n\t\tself.assertEquals( str(self.one * Poly(self.one.a[1],0)), \"[0, 64, 8, 16, 24, 40]\" )\n\t\tself.assertEquals( str(one * two) , \"[-1, -2, 3, 6, 1, -1, 4, 2]\")\n\t\tself.assertEquals( str(one * 12), \"[-12, -12, 48, 24]\")\n\n\tdef test_pos(self):\n\t\tself.assertEquals( str(+self.one), \"[0, 8, 1, 2, 3, 5]\")\n\t\tself.assertEquals( str(-self.one), \"[0, -8, -1, -2, -3, -5]\" ) \n\n\tdef test_eq(self):\n\t\tself.assertFalse( self.one == self.two )\n\t\tself.assertTrue( self.two == self.two )\n\n\tdef test_eval(self):\n\t\tself.assertEquals( self.two.eval(4), 8+4+2*16+4*16*16 )\n\n\tdef test_comb(self):\n\t\tone = Poly(2,2)\n\t\tone.a[1] = 3\n\t\ttwo = Poly(3,3)\n\t\ttwo.a[0] = 6\n\t\tself.assertEquals( str(one.combine(two)) , \"[90, 0, 0, 81, 0, 0, 18]\")\n\n\tdef test_pow(self):\n\t\ttwo = Poly(3,3)\n\t\ttwo.a[0] = 6\n\t\tself.assertEquals( str(two**2), \"[36, 0, 0, 36, 0, 0, 9]\" )\n\t\tself.assertEquals( str(two**3), \"[216, 0, 0, 324, 0, 0, 162, 0, 0, 27]\" )\n\t\tself.assertEquals( str(two**9), str(two*two*two*two*two*two*two*two*two) )\n\n\tdef test_diff(self):\n\t\tself.assertEquals(str(self.one.diff()),\"[8, 2, 6, 12, 25]\")\n\n\tdef test_inf(self):\n\t\tone = Poly(25.0,4)\n\t\tone.a[3] = 12.0\n\t\tone.a[2] = 6.0\n\t\tone.a[1] = 2.0\n\t\tone.a[0] = 
8.0\n\t\t\n\t\tself.assertEquals(str(one.integrate()), \"[0.0, 8.0, 1.0, 2.0, 3.0, 5.0]\")\n\n\tdef test_zero(self):\n\t\tfor i in range(100):\n\t\t\tone = Poly(0,i)\n\t\t\tself.assertTrue(one.is_zero())\n\t\tself.assertFalse(self.one.is_zero())\n\n\tdef test_len(self):\n\t\tfor i in range(100):\n\t\t\tone = Poly(0,i)\n\t\t\tself.assertEquals(len(one) , i+1)\n\n\tdef test_getItem(self):\n\t\tfor i in range(len(self.one)):\n\t\t\tself.assertEquals(self.one[i] , self.one.a[i])\n\n\tdef test_setItem(self):\n\t\tself.one[3] = 12\n\t\tself.assertEquals( self.one[3] , 12)\n\n\tdef test_cal(self):\n\t\tone = Poly(2,3)\n\t\tone.a[2] = 4\n\t\tone.a[1] = -1\n\t\tone.a[0] = -1\n\t\t\n\t\tself.assertEquals( one(2), -1-2+16+16 )\n\t\tself.assertEquals( one(2.0), -1.0-2.0+16.0+16.0)\n\t\tone = Poly(2,2)\n\t\tone.a[1] = 3\n\t\ttwo = Poly(3,3)\n\t\ttwo.a[0] = 6\n\t\tself.assertEquals( str(one(two)), \"[90, 0, 0, 81, 0, 0, 18]\" )\n\n\tdef tearDown(self): pass\n\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\t # wszystkie testy\n\n","repo_name":"AkaiTobira/Python_Zadania","sub_path":"7/7_2.py","file_name":"7_2.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20519353897","text":"class Solution:\n # @param A : integer\n # @return a boolean value ( True / False )\n def isPalindrome(self, A):\n myStr=str(A)\n left=0\n right=len(myStr)-1\n #print left,right,myStr\n while left<=right:\n if myStr[left]!=myStr[right]:\n return False\n left+=1\n right-=1\n return True\n","repo_name":"san7988/InterviewBit","sub_path":"Math/palindromeInt.py","file_name":"palindromeInt.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43317095899","text":"import numpy\nimport numpy.matlib\nimport time\nimport math\nfrom scipy.stats import norm\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsClassifier \n\n\ndef RSFS(dataset, Parameters):\n Feature_train, Feature_test, label_train, label_test = dataset.X_train,dataset.X_test,dataset.y_train,dataset.y_test\n max_iters = 3000000\n n_dummyfeats = Parameters['Dummy feats']\n max_delta = 0.05\n k_neighbors = 3\n #label_test = label_test.astype('int')\n #label_train = label_train.astype('int')\n verbose = 1\n N_classes = len(numpy.unique(label_train))\n number_of_features = numpy.size(Feature_train, axis=1)\n relevance = numpy.zeros((number_of_features,))\n dummy_relevance = numpy.zeros((n_dummyfeats,))\n #stored =[]\n\n if (Parameters['fn'] == 'sqrt'):\n feats_to_take = round(math.sqrt(number_of_features))\n #feats_to_take = feats_to_take.astype('int')\n dummy_feats_to_take = round(math.sqrt(n_dummyfeats))\n #dummy_feats_to_take = dummy_feats_to_take.astype('int')\n if (Parameters['fn'] == '10log'):\n feats_to_take = round(10 * math.log10(number_of_features))\n #feats_to_take = feats_to_take.astype('int')\n dummy_feats_to_take = round(10 * math.log10(n_dummyfeats))\n #dummy_feats_to_take = dummy_feats_to_take.astype('int')\n\n feat_N = numpy.zeros(max_iters)\n\n totcorrect = numpy.zeros(N_classes)\n totwrong = numpy.zeros(N_classes)\n\n iteration = 1\n deltaval = math.inf\n cutoff = Parameters['cutoff'] \n Threshold = Parameters['Threshold']\n probs = numpy.zeros(numpy.shape(relevance))\n #if(Parameters['Classifier'] == 'KNN'):\n clf = KNeighborsClassifier(n_neighbors=k_neighbors)\n while (iteration <= max_iters and deltaval > max_delta):\n 
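# Each pass scores a random feature subset with a kNN classifier and\n # credits the sampled features with (subset accuracy - running average\n # accuracy); features that keep helping accumulate positive relevance,\n # which is later compared against the dummy-feature distribution.\n 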
feature_indices = numpy.floor(number_of_features * numpy.random.rand(1, feats_to_take))\n feature_indices = feature_indices.astype('int')\n # if ('stored' in locals()):\n # for i in list(range(0, len(stored))):\n # feature_indices = feature_indices(feature_indices != stored(i))\n\n \n \n class_hypos = clf.fit(Feature_train[:, numpy.resize(feature_indices,(numpy.size(feature_indices),))], label_train).predict(Feature_test[:,numpy.resize(feature_indices,(numpy.size(feature_indices),))])\n \n correct = numpy.zeros(N_classes)\n wrong = numpy.zeros(N_classes)\n\n for j in list(numpy.arange(0, numpy.size(label_test))):\n if (label_test[j] == class_hypos[j]):\n correct[label_test[j] - 1] = correct[label_test[j] - 1] + 1\n else:\n wrong[label_test[j] - 1] = wrong[label_test[j] - 1] + 1\n\n totcorrect = totcorrect + correct\n totwrong = totwrong + wrong\n\n performance_criterion = numpy.mean(numpy.array(correct) * 100 / (numpy.array(correct) + numpy.array(wrong)))\n expected_criterion_value = numpy.mean(numpy.array(totcorrect) * 100 / (numpy.array(totcorrect) + numpy.array(totwrong)))\n\n target = performance_criterion - expected_criterion_value\n pos = feature_indices\n relevance[pos] += target\n\n dummy_indices = numpy.floor(n_dummyfeats * numpy.random.rand(1,dummy_feats_to_take))\n dummy_indices = dummy_indices.astype('int')\n target = dummy_relevance[dummy_indices] + performance_criterion - expected_criterion_value\n pos = dummy_indices\n for x, y in zip(pos, target):\n dummy_relevance[x] = y\n if(iteration>5):\n probs = norm.cdf(relevance, loc=numpy.mean(dummy_relevance), scale=numpy.std(dummy_relevance))\n\n\n feat_N[iteration] = numpy.size(numpy.where(probs > cutoff))\n\n if (iteration % Threshold == 0):\n if (verbose == 1):\n deltaval = numpy.std(feat_N[iteration - (Threshold-1):iteration]) / numpy.mean(feat_N[iteration - (Threshold-1):iteration])\n print('RSFS: ', feat_N[iteration], 'features chosen so far (iteration: ', iteration, '/', max_iters,'). 
Delta: ', deltaval)\n\n iteration = iteration + 1\n\n # if (Parameters['RSFS']['stored'] == 1):\n # top = Parameters['RSFS']['top']\n # Threshold = Parameters['RSFS']['Threshold']\n # if (iteration > Threshold):\n # S = numpy.where(probs > cutoff)\n # W = relevance[S]\n # comm = [S, W]\n # comm = comm[comm[:, 1].argsort(),]\n # if (len(S) >= top):\n # stored.extend(comm[0:top-1, 1])\n # else:\n # stored.extend(comm[0:len(S)-1, 1])\n # stored = list(numpy.unique(stored))\n\n S = numpy.where(probs>cutoff)\n W = relevance[S]\n dataset.X_train = Feature_train[:,list(S)[0]]\n dataset.X_test = Feature_test[:,list(S)[0]]\n \n\n","repo_name":"gksriharsha/AutomatedLearning","sub_path":"process/Algorithms/rsfs_py.py","file_name":"rsfs_py.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9996945461","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.modules.registry import RegistryManager\nfrom openerp.tools import SUPERUSER_ID\nfrom .printing import _available_action_types\n\n\nclass ResUsers(models.Model):\n _inherit = 'res.users'\n\n def _user_available_action_types_inherit(self):\n return [(code, string) for code, string\n in _available_action_types(self)\n if code != 'user_default']\n\n printing_action = fields.Selection(\n _user_available_action_types_inherit)\n work_location_id = fields.Many2one(\n 'work_location', string='Work Location', required=False)\n reset_work_location = fields.Boolean(\n 'Reset work location upon login',\n help=('Reset work location upon login, and prevent printing of any '\n 'documents until the work location is reconfigured for the '\n 'current session'))\n\n @api.multi\n def write(self, values):\n if values.get('work_location_id'):\n self.search(\n [('work_location_id', '=', values.get('work_location_id'))]\n ).sudo().write({'work_location_id': False})\n return super(ResUsers, self).write(values)\n\n _sql_constraints = [\n ('work_location_id_uniq', 'unique(work_location_id)',\n 'Work Location must be unique!'),\n ]\n\n def authenticate(self, db, login, password, user_agent_env):\n uid = super(ResUsers, self).authenticate(\n db, login, password, user_agent_env)\n if uid:\n registry = RegistryManager.get(db)\n with registry.cursor() as cr:\n env = api.Environment(cr, SUPERUSER_ID, {})\n user = env['res.users'].browse(uid)\n if user.reset_work_location:\n user.write({'work_location_id': False})\n return uid\n","repo_name":"Niboo/legal1","sub_path":"dyn_report_to_printer_location/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"39051183535","text":"\"\"\"\nScript to compare CombinedVariantOutput tsv files against compressed VCFs to\nidentify variants unique to either.\n\nThis takes a directory of directories as input (one per run), containing all\nvcfs and tsvs together.\n\nWill output up to 3 files:\n - all_tsv_only.tsv => original entries of variants only found in the tsvs\n - all_vcf_only.tsv => original entries of variants only found in the vcfs\n - all_tsv_no_annotation.tsv => original entires of variants in the tsvs\n for which there is no annotation and are not compared\n\"\"\"\nimport gzip\nimport os\nfrom pathlib import Path\nimport sys\n\nimport pandas as pd\n\n\ndef read_tsv(tsv):\n \"\"\"\n Read in CombinedVariantOutput.tsv to a dataframe\n\n Parameters\n ----------\n 
tsv : str\n path to tsv file to read in\n \n Returns\n -------\n pd.DataFrame\n dataframe of SNVs read in from CombinedVariantOutput tsv\n \"\"\"\n variants = []\n\n with open(tsv) as fh:\n # get just the variants from the tsv after the line '[Small Variants]'\n snvs = False\n for line in fh.readlines():\n if snvs:\n variants.append(line)\n if line.startswith('[Small Variants]'):\n snvs = True\n\n variants = [x.split('\\t') for x in variants if set(x) != {'\\t', '\\n'}]\n variants = variants[1:]\n\n # if no variants found this will be returned as [['NA', '', '\\n']] =>\n # set it to an empty list\n if variants == [['NA', '', '\\n']]:\n variants = []\n\n columns = [\n 'Gene', 'CHROM', 'POS', 'REF', 'ALT', 'Allele Frequency', 'Depth',\n 'P-Dot Notation', 'C-Dot Notation', 'Consequence(s)', 'Affected Exon(s)'\n ]\n\n df = pd.DataFrame(variants, columns=columns)\n\n df['CHROM'] = df['CHROM'].apply(lambda x: x.replace('chr', ''))\n df['POS'] = pd.to_numeric(df['POS'])\n\n df['Affected Exon(s)'] = df['Affected Exon(s)'].str.replace('\\n', '')\n\n return df\n\n\ndef read_vcf(vcf):\n \"\"\"\n Read in compressed VCF to a dataframe\n\n Parameters\n ----------\n vcf : str\n path to vcf file to read in\n\n Returns\n -------\n pd.DataFrame\n dataframe of variants read from vcf\n \"\"\"\n with gzip.open(vcf) as fh:\n # get column names\n for line in fh.readlines():\n line = line.decode()\n if line.startswith('#CHROM'):\n column_names = line.strip('#').split('\\t')\n column_names[-1] = 'SAMPLE_FORMAT'\n break\n\n df = pd.read_csv(\n vcf, sep='\\t', comment='#', names=column_names, compression='infer'\n )\n\n df['CHROM'] = df['CHROM'].astype(str)\n\n return df\n\n\ndef match_tsvs_and_vcfs(files):\n \"\"\"\n Match tsvs and vcfs for samples from list of files in directory.\n\n Works based off prefix of filename of both tsv and vcf\n\n Parameters\n ----------\n files : list\n glob list of tsv and vcf files from a given dir\n\n Returns\n -------\n tsvs : list\n list of tsv files\n vcfs : list\n list of vcf files\n \"\"\"\n tsvs = sorted([x for x in files if x.endswith('.tsv')])\n vcfs = sorted([x for x in files if x.endswith('.vcf.gz')])\n\n # we expect tsvs to be named as SAMPLE1_CominedVariantOutput.tsv and\n # vcfs to be named SAMPLE1-more-fields.vcf.gz, therefore match on the\n # sample ID from both\n tsv_prefixes = [Path(x).name.split('_')[0] for x in tsvs]\n vcf_prefixes = [Path(x).name.split('-')[0] for x in vcfs]\n\n\n # get the samples that have both tsvs and vcfs to compare\n common = list(set(tsv_prefixes) & set(vcf_prefixes))\n tsvs = [x for x in tsvs if Path(x).name.split('_')[0] in common]\n vcfs = [x for x in vcfs if Path(x).name.split('-')[0] in common]\n\n return tsvs, vcfs\n\n\ndef get_mismatch_variants(tsv_df, vcf_df):\n \"\"\"\n Get variants that are mismatched (i.e. unique to tsv or vcf)\n\n Parameters\n ----------\n tsv_df : pd.DataFrame\n df of variants from tsv\n vcf_df : pd.DataFrame\n df of variants from vcf\n\n Returns\n -------\n tsv_only : pd.DataFrame\n df of variants present only in the tsv (i.e. missing from the vcf)\n vcf_only : pd.DataFrame\n df of variants present only in the vcf (i.e. 
missing from the tsv)\n \"\"\"\n # get variants present only in tsv and vcf\n merge_cols = ['CHROM', 'POS', 'REF', 'ALT']\n tsv_only = tsv_df.merge(\n vcf_df[merge_cols], on=merge_cols, how='outer', indicator=True\n ).loc[lambda x: x.pop('_merge').eq('left_only')]\n\n vcf_only = vcf_df.merge(\n tsv_df[merge_cols], on=merge_cols, how='outer', indicator=True\n ).loc[lambda x: x.pop('_merge').eq('left_only')]\n\n return tsv_only, vcf_only\n\n\ndef main():\n\n # should be a directory of individual run directories\n all_runs_dir = Path(sys.argv[1]).absolute()\n\n # counters for printing at the end\n samples = 0\n runs = 0\n all_tsv_issues = 0\n all_vcf_issues = 0\n all_no_annotation_issues = 0\n\n for run_dir in os.listdir(all_runs_dir):\n run_dir = os.path.join(all_runs_dir, run_dir)\n run_name = run_dir.split('/')[-1]\n if not os.path.isdir(run_dir):\n # in case of any bonus files that aren't directories\n continue\n\n print(f\"\\nChecking run {run_name} ({run_dir})\")\n\n files = [os.path.join(run_dir, x) for x in os.listdir(run_dir)]\n\n tsvs, vcfs = match_tsvs_and_vcfs(files)\n\n # empty dfs to add all mismatches to\n all_tsv_only = pd.DataFrame(\n columns=[\n 'Sample', 'Gene', 'CHROM', 'POS', 'REF', 'ALT',\n 'Allele Frequency', 'Depth', 'P-Dot Notation',\n 'C-Dot Notation', 'Consequence(s)', 'Affected Exon(s)'\n ]\n )\n\n all_vcf_only = pd.DataFrame(\n columns=[\n 'SAMPLE', 'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL',\n 'FILTER', 'INFO', 'FORMAT', 'SAMPLE_FORMAT'\n ]\n )\n\n # some variants in CombinedVariantOutput files have no real annotation\n # add these to their own df to write to a separate file\n all_tsv_no_annotation = pd.DataFrame(\n columns=[\n 'Sample', 'Gene', 'CHROM', 'POS', 'REF', 'ALT',\n 'Allele Frequency', 'Depth', 'P-Dot Notation',\n 'C-Dot Notation', 'Consequence(s)', 'Affected Exon(s)'\n ]\n )\n\n for tsv, vcf in zip(tsvs, vcfs):\n # sense check tsv and vcf for same sample\n if Path(tsv).name.split('_')[0] != Path(vcf).name.split('-')[0]:\n print(\n f\"Error: tsv and vcf file prefixes do not match:\\n{vcf}\\n{tsv}\"\n )\n continue\n\n tsv_df = read_tsv(tsv)\n vcf_df = read_vcf(vcf)\n\n tsv_only, vcf_only = get_mismatch_variants(tsv_df, vcf_df)\n\n # get any rows only in VCF AND not rescued in rescue app as\n # we know these will only be in the vcf since we are rescuing them\n # v1.0.0 reports workflow => tagged 'OPA'\n # v1.1.0 reports workflow => tagged 'rescued'\n vcf_only = vcf_only[~vcf_only['FILTER'].str.contains('OPA')]\n vcf_only = vcf_only[~vcf_only['FILTER'].str.contains('rescued')]\n\n # remove variants with no annotation => weird Illumina variants\n # add these to their own dataframe to dump out at the end\n no_annotation_only = tsv_only[tsv_only['Gene'] == '']\n tsv_only = tsv_only[tsv_only['Gene'] != '']\n\n if len(no_annotation_only.index) > 0:\n # some variants from tsv with no annotation, add to df with\n # sample ID as first column\n sample_col = [Path(tsv).name.split('_')[0]] * len(no_annotation_only.index)\n no_annotation_only.insert(0, 'Sample', sample_col)\n all_tsv_no_annotation = pd.concat(\n [all_tsv_no_annotation, no_annotation_only])\n\n\n if len(tsv_only.index) > 0:\n # some variants only in the tsv, add to df with\n # sample ID as first column\n sample_col = [Path(tsv).name.split('_')[0]] * len(tsv_only.index)\n tsv_only.insert(0, 'Sample', sample_col)\n all_tsv_only = pd.concat([all_tsv_only, tsv_only])\n\n if len(vcf_only.index) > 0:\n # some variants only in our vcf\n sample_col = [Path(tsv).name.split('_')[0]] * len(vcf_only.index)\n 
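# tag each vcf-only row with its sample ID so the rows stay\n # traceable after being pooled into the run-level dataframe\n 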
vcf_only.insert(0, 'SAMPLE', sample_col)\n all_vcf_only = pd.concat([all_vcf_only, vcf_only])\n\n samples += 1\n\n runs += 1\n\n print(f\"\\nTotal tsv mismatch for run {run_name}: {len(all_tsv_only.index)}\")\n print(f\"Total vcf mismatch for run {run_name}: {len(all_vcf_only.index)}\\n\\n\")\n\n if len(all_tsv_only.index) > 0:\n all_tsv_issues += len(all_tsv_only.index)\n\n all_tsv_only.sort_values(by=['Gene', 'POS'], inplace=True)\n\n all_tsv_only.to_csv(\n f'{run_name}_all_tsv_only.tsv', mode='w',\n sep='\\t', index=False, header=False\n )\n\n if len(all_vcf_only.index) > 0:\n all_vcf_issues += len(all_vcf_only.index)\n all_vcf_only.to_csv(\n f'{run_name}_all_vcf_only.tsv', mode='w',\n sep='\\t', index=False, header=False\n )\n\n if len(all_tsv_no_annotation.index) > 0:\n all_no_annotation_issues += len(all_tsv_no_annotation.index)\n all_tsv_no_annotation.to_csv(\n f'{run_name}_all_tsv_no_annotation.tsv', mode='w',\n sep='\\t', index=False, header=False\n )\n\n print(f\"Total samples checked: {samples}\")\n print(f\"Total runs checked: {runs}\")\n print(f\"Total tsv mismatch: {all_tsv_issues}\")\n print(f\"Total vcf mismatch: {all_vcf_issues}\")\n print(f\"Total tsv variants w/ no annotation: {all_no_annotation_issues}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"eastgenomics/tso500_combined_tsv_vs_vcf_check","sub_path":"compare_tso500_tsv_to_vcf.py","file_name":"compare_tso500_tsv_to_vcf.py","file_ext":"py","file_size_in_byte":10012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10372660593","text":"from turtle import Turtle\nfrom turtle import Screen\n\ndoodle = Turtle()\n\nfor i in range(0, 50):\n doodle.forward(5)\n doodle.penup()\n doodle.forward(5)\n doodle.pendown()\n\nscreen = Screen()\nscreen.exitonclick()\n\n# turtle.penup() / turtle.up(): no drawing happens when the pen is up. 
Used to lift the pen and move the cursor\n# turtle.pendown() / turtle.down(): no drawing happens until the pen is put back down.\n","repo_name":"yashar1908/100DaysOfPython","sub_path":"Day 18/03_dotted_line.py","file_name":"03_dotted_line.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"10855262728","text":"#Put all tables and graphs onto the same window \nfrom tables import table \nfrom plot_data import parse_bihour, parse_rates, plot_data\nimport matplotlib.pyplot as plt \ngraph_type = [\"Submissions\", \"Comments\", \"Submission Upvotes\", \"Comment Upvotes\"]\ndef subreddit_graphs(data, subreddit_name, which_graph):\n plots = []\n for i in range(1, 5):\n plots.append((2,2,i))\n if which_graph == 'line graph':\n plt.figure(num = None, figsize=(16,6), dpi=80, facecolor = 'w', edgecolor = 'k')\n for i in range(len(data)):\n plt.subplot(plots[i][0], plots[i][1], plots[i][2])\n plot_data(data[i][subreddit_name], subreddit_name, \" \", compiling = True)\n plt.title(graph_type[i]) \n plt.suptitle(subreddit_name, y = 1) \n elif which_graph == 'table': \n _, axes = plt.subplots(2, 2) \n #Sub_Rates, Comment_Rates, Sub_Bi, Comment_Bi\n pos = ((-1.2, 1.2), (0, 1.2), (-1.2, 0), (0, 0))\n for i in range(len(data)):\n average = table(data[i][subreddit_name], subreddit_name, graph_type[i], axes, i)\n plt.text(pos[i][0], pos[i][1], \"Subreddit \" + subreddit_name + \" has an average rate of \" \n + str(average)[:6] + \" \" + graph_type[i] + \" per 2 hours.\")\n plt.show() \nif __name__ == \"__main__\":\n submission_rates = parse_rates('submissions_rate.txt')\n comment_rates = parse_rates('comments_rate.txt')\n submission_bihour = parse_bihour('submission_bihour.txt')\n comment_bihour = parse_bihour('comment_bihour.txt')\n data = [submission_rates, comment_rates, submission_bihour, comment_bihour]\n for key in submission_rates:\n #subreddit_graphs(data, key, 'line graph')\n subreddit_graphs(data, key, 'table')\n \n","repo_name":"Lwuuuuu/RedditAnalysis","sub_path":"data/compile_graphs.py","file_name":"compile_graphs.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"2103888420","text":"'''\nThe Legislator class provides a standard template and format for legislator information to be used throughout the application\n'''\n\nimport requests, json\nfrom congress import Congress as national\nfrom pygeocoder import Geocoder\nimport os\n\nclass Legislator(object):\n\n def __init__(self, level, first_name, last_name, state, chamber, id, party, photo_url = None, district=None):\n self.level = level\n self.first_name = first_name\n self.last_name = last_name\n self.state = state\n self.chamber = chamber\n self.id = id\n self.party = party\n self.photo_url = photo_url\n self.district = district\n\n def print_info(self):\n print(\"Name: {first_name} {last_name} \".format(first_name = self.first_name, last_name = self.last_name))\n print(\"Chamber: {chamber}, State: {state}, Party: {party}\".format(chamber = self.chamber, state = self.state, party = self.party))\n if self.district is not None:\n print(\"District: {}\".format(self.district))\n\n print('\\n')\n\n def json(self):\n return {\n \"level\": self.level,\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"state\": self.state,\n \"chamber\": self.chamber,\n \"id\": self.id,\n \"party\": self.party,\n \"photo_url\": self.photo_url,\n 
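# district stays None for senators; it is set for House members and\n # state legislators (see the class methods below)\n 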
\"district\": self.district\n }\n\n @classmethod\n def get_national_legislators(cls, address, city, state, zipcode):\n google_base = \"https://www.googleapis.com/civicinfo/v2/\"\n \n national_base = \"https://api.propublica.org/congress/v1/\"\n\n legislators = []\n fullAddress = address + \", \" + city + \", \" + state + \" \" + zipcode\n google_params = {\"address\": fullAddress, \"includeOffices\": True, \"levels\": \"country\", \"roles\": [\"legislatorLowerBody\",\"legislatorUpperBody\"], \"key\": os.environ['GOOGLE_API_KEY']}\n google_response = requests.get(google_base+\"representatives/\", google_params)\n google_data = json.loads(google_response.text)\n\n offices = google_data['offices']\n districtId = offices[1]['divisionId'][36:]\n \n national_params = {\"X-API-Key\": os.environ['PROPUBLICA_API_KEY']}\n national_senate_response = requests.get(national_base+\"members/senate/{}/current.json\".format(state), headers=national_params)\n\n national_senate_data = json.loads(national_senate_response.text)\n\n senators = national_senate_data['results']\n for senator in senators:\n level = \"national\"\n first_name = senator['first_name']\n last_name = senator['last_name']\n chamber = \"Senate\"\n id = senator['id']\n party = senator['party']\n photo_url = \"https://theunitedstates.io/images/congress/original/{}.jpg\".format(id)\n legislators.append(cls(level, first_name, last_name, state, chamber, id, party, photo_url=photo_url))\n\n\n national_house_response = requests.get(national_base+\"members/house/{}/{}/current.json\".format(state, districtId), headers=national_params)\n\n national_house_data = json.loads(national_house_response.text)\n\n representatives = national_house_data['results']\n for rep in representatives:\n level = \"national\"\n first_name = rep['first_name']\n last_name = rep['last_name']\n chamber = \"House\"\n id = rep['id']\n party = rep['party']\n photo_url = \"https://theunitedstates.io/images/congress/original/{}.jpg\".format(id)\n legislators.append(cls(level, first_name, last_name, state, chamber, id, party, photo_url=photo_url, district=districtId))\n\n return legislators\n\n @classmethod\n def get_state_legislators(cls, address, city, state, zipcode, latitude=None, longitude=None):\n state_base = \"https://openstates.org/api/v1/\"\n\n legislators = []\n fullAddress = address + \", \" + city + \", \" + state + \" \" + zipcode\n if latitude is None:\n location = Geocoder.geocode(fullAddress)\n latitude = location.latitude\n longitude = location.longitude\n state_params = {\"lat\": latitude, \"long\": longitude, \"apikey\": os.environ['OPENSTATES_API_KEY']}\n\n state_legislative_response = requests.get(state_base+\"legislators/geo/\", state_params)\n state_legislative_data = json.loads(state_legislative_response.text)\n\n for rep in state_legislative_data:\n level = \"state\"\n first_name = rep['first_name']\n last_name = rep['last_name']\n chamber = rep['chamber']\n id = rep['id']\n party = rep['party']\n district = rep['district']\n photo_url = rep['photo_url']\n legislators.append(cls(level, first_name, last_name, state, chamber, id, party, photo_url =photo_url, district=district))\n\n return legislators\n","repo_name":"SChakravorti21/iCitizenFlask","sub_path":"iCitizenFlaskApp/models/legislator.py","file_name":"legislator.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"22583487879","text":"import numpy as np\n\ndef calculate(list):\n #check list length\n 
if(len(list) <9):\n raise ValueError('List must contain nine numbers.')\n \n # convert to 3x3 array\n arr = np.array(list[:9]).reshape(3, 3)\n # dict with required methods\n methods = {\n 'mean': np.mean,\n 'variance': np.var,\n 'standard deviation': np.std,\n 'max': np.max,\n 'min': np.min,\n 'sum': np.sum\n }\n # and a simple function to apply for all axis\n def allAxis(arr, method):\n return [method(arr, ax).tolist() for ax in [0, 1, None]]\n \n # then map methods to all axis and return result\n return {key: allAxis(arr, value) for key, value in methods.items()}","repo_name":"nickst74/FCC_Courses_Projects","sub_path":"Data_Analysis_with_Python/mean_variance_standard_deviation_calculator/mean_var_std.py","file_name":"mean_var_std.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11332457906","text":"\"\"\"init\n\nRevision ID: ec03a5430cfe\nRevises: \nCreate Date: 2023-06-24 15:09:34.663979\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom fastapi_utils.guid_type import GUID\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ec03a5430cfe'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('course',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uuid', GUID(), nullable=True),\n sa.Column('course_name', sa.String(length=100), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_course_course_name'), 'course', ['course_name'], unique=False)\n op.create_index(op.f('ix_course_id'), 'course', ['id'], unique=False)\n op.create_table('student',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uuid', GUID(), nullable=True),\n sa.Column('first_name', sa.String(length=50), nullable=False),\n sa.Column('last_name', sa.String(length=50), nullable=False),\n sa.Column('date_of_birth', sa.Date(), nullable=False),\n sa.Column('email', sa.String(length=50), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_student_email'), 'student', ['email'], unique=True)\n op.create_index(op.f('ix_student_first_name'), 'student', ['first_name'], unique=False)\n op.create_index(op.f('ix_student_id'), 'student', ['id'], unique=False)\n op.create_index(op.f('ix_student_last_name'), 'student', ['last_name'], unique=False)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uuid', GUID(), nullable=True),\n sa.Column('username', sa.String(length=50), nullable=False),\n sa.Column('password', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_id'), 'user', ['id'], unique=False)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('gradecard',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uuid', GUID(), nullable=True),\n sa.Column('course_uuid', GUID(), nullable=True),\n sa.Column('student_uuid', GUID(), nullable=True),\n sa.Column('grade', sa.Enum('A', 'B', 'C', 'D', 'F'), nullable=False),\n sa.ForeignKeyConstraint(['course_uuid'], ['course.uuid'], ),\n sa.ForeignKeyConstraint(['student_uuid'], ['student.uuid'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_gradecard_id'), 'gradecard', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_gradecard_id'), table_name='gradecard')\n op.drop_table('gradecard')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_index(op.f('ix_user_id'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_student_last_name'), table_name='student')\n op.drop_index(op.f('ix_student_id'), table_name='student')\n op.drop_index(op.f('ix_student_first_name'), table_name='student')\n op.drop_index(op.f('ix_student_email'), table_name='student')\n op.drop_table('student')\n op.drop_index(op.f('ix_course_id'), table_name='course')\n op.drop_index(op.f('ix_course_course_name'), table_name='course')\n op.drop_table('course')\n # ### end Alembic commands ###\n","repo_name":"davidristovski/student-system-back-end","sub_path":"alembic/versions/ec03a5430cfe_init.py","file_name":"ec03a5430cfe_init.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12951937502","text":"import gym, gym.spaces, gym.utils, gym.utils.seeding\nimport numpy as np\nimport pybullet\nfrom pybullet_utils import bullet_client\n\nfrom pkg_resources import parse_version\n\n\nclass BaseBulletEnv(gym.Env):\n \"\"\"\n Base class for Bullet physics simulation environments in a Scene.\n These environments create single-player scenes and behave like normal Gym environments, if\n you don't use multiplayer.\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 60\n }\n\n def __init__(self, frame_skip=4, time_step=0.005, render=False):\n self.scene = None\n self.isRender = render\n self._cam_dist = 3\n self._cam_yaw = 0\n self._cam_pitch = -30\n self._render_width = 320\n self._render_height = 240\n\n self.frame_skip = frame_skip\n self.time_step = time_step\n\n self._robot_set = False\n self._scene_set = False\n\n if self.isRender:\n self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)\n else:\n self._p = bullet_client.BulletClient()\n\n def set_robot(self, robot):\n self.robot = robot\n self.action_space = robot.action_space\n self.observation_space = robot.observation_space\n self._robot_set = True\n\n def set_scene(self, scene_type):\n self.scene = scene_type(self._p, 9.8, self.time_step, self.frame_skip)\n self._scene_set = True\n\n def configure(self, args):\n if(self._robot_set):\n self.robot.args = args\n else:\n raise Exception(\"BaseBulletEnv::configure: must call set_robot first\")\n\n def _seed(self, seed=None):\n if(self._robot_set):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env\n else:\n raise Exception(\"BaseBulletEnv::_seed: must call set_robot first\")\n return [seed]\n\n def reset(self):\n self._seed()\n if(not self._robot_set or not self._scene_set):\n raise Exception(\"BaseBulletEnv::_reset: must call set_robot and set_scene first\")\n\n self.scene.reset()\n self.robot.scene = self.scene\n\n self._p.configureDebugVisualizer(pybullet.COV_ENABLE_GUI,0)\n obs = self.robot.reset(self._p)\n\n self.frame = 0\n self.done = 0\n self.reward = 0\n self.potential = self.robot.calc_potential()\n\n return obs\n\n def render(self, mode, close=False):\n if mode == \"human\":\n self.isRender = True\n if mode != \"rgb_array\":\n return np.array([])\n\n base_pos = [0, 0, 0]\n if hasattr(self,'robot'):\n if hasattr(self.robot,'body_xyz'):\n base_pos = self.robot.body_xyz\n\n view_matrix = 
self._p.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=base_pos,\n distance=self._cam_dist,\n yaw=self._cam_yaw,\n pitch=self._cam_pitch,\n roll=0,\n upAxisIndex=2)\n proj_matrix = self._p.computeProjectionMatrixFOV(\n fov=60, aspect=float(self._render_width)/self._render_height,\n nearVal=0.1, farVal=100.0)\n\n (_, _, px, _, _) = self._p.getCameraImage(\n width = self._render_width, \n height=self._render_height, \n viewMatrix=view_matrix,\n projectionMatrix=proj_matrix,\n renderer=pybullet.ER_BULLET_HARDWARE_OPENGL\n )\n rgb_array = np.array(px)\n rgb_array = rgb_array[:, :, :3]\n\n return rgb_array\n\n def close(self):\n self._p.resetSimulation()\n self._p.disconnect()\n\n def step(self, action):\n self.robot.apply_action(action)\n self.scene.global_step()\n","repo_name":"PaddlePaddle/MetaGym","sub_path":"metagym/metalocomotion/envs/utils/env_bases.py","file_name":"env_bases.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"67"} +{"seq_id":"38847343485","text":"import praw\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nimport pandas as pd\nimport re\nimport numpy as np\nimport time\nimport datetime\ndef RedditSub(sr):\n l=[]\n d_tmod={'title':[],'text':[],'total_text':[]}\n if 'Canada' in sr:\n d_tmod['country']='Canada'\n elif 'India' in sr:\n d_tmod['country']='India'\n else:\n d_tmod['country']='United States'\n for i in reddit.subreddit(sr).hot(limit=None):\n if i.created>int(time.mktime(time.strptime(str(datetime.datetime.now()-datetime.timedelta(days=115)).split('.')[0],'%Y-%m-%d %H:%M:%S'))):\n f=nltk.word_tokenize(i.title+' '+i.selftext)\n l.extend([i.lower() for i in f])\n try:\n if len(i.selftext)>2:\n d_tmod['title'].append(i.title)\n d_tmod['text'].append(i.selftext)\n d_tmod['total_text'].append(i.title+' '+i.selftext)\n except:\n continue\n df_temp=pd.DataFrame(d_tmod)\n l=[st.lemmatize(i,pos='v') for i in l]\n l=['cases' if i=='case' else i for i in l]\n l=['testing' if i=='test' else i for i in l]\n stop_pos=['IN','CD','MD']\n cl=[i for i in list(set(l)) if (i!='covid-19' and i!='covid' and i not in country_list and i.isalpha()==True and i!='coronavirus' and i not in stops and 25>len(i)>3 and '|' not in i and '\\'' not in i and nltk.pos_tag([i])[0][1] not in stop_pos and 'http' not in i)]\n d={}\n for i in cl:\n d[i]=l.count(i)\n d=sorted(d.items(),key=lambda x:x[1],reverse=True)\n return d,df_temp\ndef BigramCreation(df):\n df['processedtext']=df['total_text'].map(lambda x:re.sub('[,\\.!?]','',x))\n df['processedtext']=df['processedtext'].map(lambda x:x.lower())\n stop_pos=['IN','CD','MD']\n df_new=pd.DataFrame()\n for c in ['Canada','India','United States']:\n l_bigrams=[]\n df2=df[df['country']==c]\n for i in df2['processedtext']:\n try:\n l=nltk.word_tokenize(i)\n l=[st.lemmatize(j,pos='v') for j in l]\n l=['cases' if j=='case' else j for j in l]\n l=['testing' if j=='test' else j for j in l]\n l=['deceased' if j=='decease' else j for j in l]\n l=['confirmed' if j=='confirm' else j for j in l]\n l=['hospitalized' if j=='hospitalize' else j for j in l]\n l=[j for j in l if (j.isalpha()==True and j not in stops and 25>len(j)>3 and '|' not in j and '\\'' not in j and nltk.pos_tag([j])[0][1] not in stop_pos and 'http' not in j)]\n l_bi=list(nltk.bigrams(l))\n l_bigrams.extend(l_bi)\n except:\n continue\n l_bigrams_uni=list(set(l_bigrams))\n d={}\n for i in l_bigrams_uni:\n d[i]=l_bigrams.count(i)\n l_del=[]\n for i in d:\n if 
(i[1],i[0]) in d and (i[0],i[1]) not in l_del:\n d[i]+=d[(i[1],i[0])]\n l_del.append((i[1],i[0]))\n for i in l_del:\n del d[i]\n d=sorted(d.items(),key=lambda x:x[1],reverse=True)\n l1=[]\n l2=[]\n for i in d:\n l1.append(i[0])\n l2.append(i[1])\n df_new=df_new.append(pd.DataFrame({'bis':l1,'frequency':l2,'country':c}),ignore_index=True)\n df_new['bis']=df_new['bis'].map(lambda x:str(x).replace('\\'','').replace('(','').replace(')','').replace(',','').upper())\n df_new=df_new.replace('MASK WEAR','WEAR MASK')\n return df_new\nif __name__=='__main__':\n reddit=praw.Reddit(client_id='AufQL3euwJJj4g',client_secret='LptAeUr3_VeykZewe6W2hTa7d7w',password='Babie123$%^',user_agent='Test Script by /u/abhinavnope',username='abhinavnope')\n st=WordNetLemmatizer()\n stops=list(set(stopwords.words('english')))\n stops.extend(['even','also'])\n country_list=['india','canada','usa','united states']\n df=pd.DataFrame()\n df_tmod=pd.DataFrame()\n srl=['CoronavirusUS','CanadaCoronavirus','CoronavirusIndia']\n for i in srl:\n d=RedditSub(i)\n df_tmod=df_tmod.append(d[1])\n if 'US' in i:\n c='United States'\n elif 'Canada' in i:\n c='Canada'\n else:\n c='India'\n temp_dict={'word':[],'frequency':[],'country':c}\n for i in d[0]:\n try:\n if i[1]>3:\n temp_dict['word'].append(i[0].upper())\n temp_dict['frequency'].append(i[1])\n except:\n continue\n df=df.append(pd.DataFrame(temp_dict),ignore_index=True)\n writer=pd.ExcelWriter('Words.xlsx',engine='xlsxwriter')\n df.to_excel(writer,sheet_name='Sheet1',index=False)\n writer.save()\n df_new=BigramCreation(df_tmod)\n writer=pd.ExcelWriter('TwoWords.xlsx',engine='xlsxwriter')\n df_new.to_excel(writer,sheet_name='Sheet1',index=False)\n writer.save()\n","repo_name":"50hands/DataAnalysis-and-NLP-Abhinav","sub_path":"Reddit Sentiment/Tokens.py","file_name":"Tokens.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"45759656248","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n c = ListNode(None)\n temp = c\n while l1 and l2:\n if l1.val <= l2.val:\n temp.next = l1\n l1 = l1.next\n else:\n temp.next = l2\n l2 = l2.next \n temp = temp.next\n if l1:\n temp.next = l1\n else:\n temp.next = l2\n return c.next\n","repo_name":"Lucas-Fan/LeetCode_fzyt","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"30075890","text":"def acmTeam(topic):\r\n # A naive pairwise scan is O(N**2 * M); zip keeps the inner loop tidy.\r\n # Rows of topic are '0'/'1' strings, so compare characters, not ints.\r\n\r\n n = len(topic)\r\n maxx = 0\r\n double = 0\r\n \r\n for i in range(n):\r\n # start at i + 1 so each unordered pair is counted exactly once\r\n for bit in range(i + 1, n):\r\n countt = 0\r\n for x, y in zip(topic[i], topic[bit]):\r\n if x == '1' or y == '1':\r\n countt += 1\r\n\r\n if countt > maxx:\r\n # new best team (the original assigned to the built-in max\r\n # and never updated maxx)\r\n maxx = countt\r\n double = 1\r\n elif countt == maxx:\r\n double += 1\r\n \r\n return [maxx, double]\r\n\r\n\r\n# a = \"10101\"\r\n# b = \"01010\"\r\n# a = bin(a, 2)\r\n# b = bin(b, 2)\r\n# print(bin(a|b))\r\n","repo_name":"GazPrash/Hackerrank_QS","sub_path":"acmTeam.py","file_name":"acmTeam.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1152809258","text":"from __future__ import annotations\n\nfrom typing import List\n\n\nclass 
GuiEntryData:\n \"\"\"\n Entry _entries schema\n\n \"\"\"\n\n key: str\n value: str\n type: str\n tags: List[str]\n\n def __init__(self, key, value, type, tags):\n if not key:\n raise Exception(\"Key is required\")\n\n if not value:\n raise Exception(\"Value is required\")\n\n self.key = key\n self.value = value\n self.type = type\n self.tags = tags\n","repo_name":"jeanCarloMachado/PythonSearch","sub_path":"python_search/entry_capture/entry_inserter_gui/entry_gui_data.py","file_name":"entry_gui_data.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"34054444116","text":"from enum import Enum\n\nclass CharType(Enum):\n CTUnknown = 0\n CTAF = 1\n CTE = 2\n CTLetter = 3\n CT01 = 4\n CT27 = 5\n CT89 = 6\n CTSpace = 7\n CTMinus = 8\n CTOper = 9\n CTCompar = 10\n CTEqual = 11\n CTDot = 12\n CTSepar = 13\n CTOpenCom = 14\n CTCloseCom = 15\n CTQuote = 16\n CTDollar = 17\n CTPercent = 18\n CTAmper = 19\n CTOctot = 20\n\n\nclass State(Enum):\n StStart = 1\n StFinal = 2\n StError = 3\n StSpace = 4\n StOpenCom = 5\n StCloseCom = 6\n StOpenDir = 7\n StCloseDir = 8\n StIdent = 9\n StDecimal = 10\n StBinary = 11\n StOctal = 12\n StHexadecimal = 13\n StRealWDot = 14\n StRealWDec = 15\n StRealWE = 16\n StRealWEMin = 17\n StRealFull = 18\n StOpenChar = 19\n StCloseChar = 20\n StOpenString = 21\n StCloseString = 22\n StOper = 23\n StSepar = 24\n StAmper = 25\n StASCII = 26\n\n\nclass Buffer:\n def __init__(self):\n self.inner = ''\n\n def clear(self):\n self.inner = ''\n\n def add(self,c):\n self.inner += c\n\n def get(self):\n return self.inner\n\n def isEmpty(self):\n return self.inner==''\n\nclass Lexem:\n def __init__(self,line,pos,state,bufferedString):\n self.line = line\n self.pos = pos\n self.original = bufferedString\n\n if state is State.StError:\n self.lexemType = 'Error'\n self.lexemValue = 'wrong sequence of symbols'\n\n elif state is State.StCloseDir:\n self.lexemType = 'Directory'\n self.lexemValue = bufferedString\n\n elif state is State.StIdent:\n if bufferedString in ['true','false']:\n self.lexemType = 'Boolean'\n elif bufferedString in Lexer.keywords:\n self.lexemType = 'Keyword'\n else:\n self.lexemType = 'Identif'\n self.lexemValue = bufferedString\n\n elif state is State.StDecimal:\n self.lexemType = 'Integer'\n self.lexemValue = int(bufferedString)\n if self.lexemValue > 2147483647 or self.lexemValue < -2147483648:\n self.lexemType = 'Error'\n self.lexemValue = 'unable to present as integer'\n\n elif state is State.StBinary:\n self.lexemType = 'Integer'\n self.lexemValue = int(bufferedString[1:],2)\n if self.lexemValue > 2147483647 or self.lexemValue < -2147483648:\n self.lexemType = 'Error'\n self.lexemValue = 'unable to present as integer'\n\n elif state is State.StOctal:\n self.lexemType = 'Integer'\n self.lexemValue = int(bufferedString[1:],8)\n if self.lexemValue > 2147483647 or self.lexemValue < -2147483648:\n self.lexemType = 'Error'\n self.lexemValue = 'unable to present as integer'\n\n elif state is State.StHexadecimal:\n self.lexemType = 'Integer'\n self.lexemValue = int(bufferedString[1:],16)\n if self.lexemValue > 2147483647 or self.lexemValue < -2147483648:\n self.lexemType = 'Error'\n self.lexemValue = 'unable to present as integer'\n \n\n elif state is State.StRealWDec or state is State.StRealFull:\n self.lexemType = 'Float'\n self.lexemValue = float(bufferedString)\n if self.lexemValue > 1.8e307+9 or self.lexemValue < -1.8e307-9:\n self.lexemType = 'Error'\n 
self.lexemValue = 'unable to present as float'\n\n elif state is State.StCloseChar:\n self.lexemType = 'Char'\n self.lexemValue = bufferedString\n\n elif state is State.StCloseString:\n self.lexemType = 'String'\n self.lexemValue = bufferedString\n\n elif state is State.StOper:\n self.lexemType = 'Operator'\n self.lexemValue = bufferedString\n\n elif state is State.StSepar:\n self.lexemType = 'Separator'\n self.lexemValue = bufferedString\n\n elif state is State.StASCII:\n tempervalue = int(bufferedString[1:])\n if False: #tempervalue>127 or tempervalue<0:\n self.lexemType = 'Error'\n self.lexemValue = 'unable to get ASCII symbol from utf-8 code'\n else:\n self.lexemType = 'Char'\n self.lexemValue = chr(tempervalue)\n\n elif state is State.StFinal:\n self.lexemType = 'Final'\n self.lexemValue = bufferedString\n\n def getString(self):\n if self.lexemType == 'Error':\n return f\"{self.line}\\t{self.pos}\\tError: {self.lexemValue}: {self.original}\"\n else:\n return f\"{self.line}\\t{self.pos}\\t{self.lexemType}\\t{self.lexemValue}\\t{self.original}\"\n\n\n def getType(self):\n return self.lexemType\n\n\n def getValue(self):\n return self.lexemValue\n\n\n def getPosition(self):\n return f\"line {self.line} position {self.pos}\"\n\n\n def get(self):\n return self\n \n \n\nclass Lexer:\n keywords = [ 'and', 'asm', 'array', 'begin', 'case', 'const', 'constructor',\n 'destructor', 'div', 'do', 'downto', 'else', 'end', 'exports',\n 'file', 'for', 'function', 'goto', 'if', 'implementation', 'in',\n 'inherited', 'inline', 'interface', 'label', 'library', 'mod',\n 'nil', 'not', 'object', 'of', 'or', 'packed', 'procedure',\n 'program', 'record', 'repeat', 'set', 'shl', 'shr', 'string',\n 'then', 'to', 'type', 'unit', 'until', 'uses', 'var', 'while',\n 'with', 'xor', 'as', 'class', 'dispose', 'except', 'exit',\n 'exports', 'finalization', 'finally', 'inherited', 'initialization',\n 'is', 'library', 'new', 'on', 'out', 'property', 'raise', 'self',\n 'threadvar', 'try' ]\n \n directives = [ 'absolute', 'abstract', 'alias', 'assembler', 'cdecl', 'cppdecl',\n 'default', 'export', 'external', 'forward', 'index', 'local',\n 'name', 'nostackframe', 'oldfpccall', 'override', 'pascal',\n 'private', 'protected', 'public', 'published', 'read', 'register',\n 'reintroduce', 'safecall', 'softfloat', 'stdcall', 'virtual',\n 'write' ]\n\n pairOpers = [ ':=', '<>', '<=', '>=', '><', '..']\n\n separs = [' ', '\\n', '\\t', '\\0', '\\r']\n\n transit = {State.StStart: {CharType.CTAF: State.StIdent,\n CharType.CTE: State.StIdent,\n CharType.CTLetter: State.StIdent,\n CharType.CT01: State.StDecimal,\n CharType.CT27: State.StDecimal,\n CharType.CT89: State.StDecimal,\n CharType.CTSpace: State.StSpace,\n CharType.CTMinus: State.StOper,\n CharType.CTOper: State.StOper,\n CharType.CTCompar: State.StOper,\n CharType.CTEqual: State.StOper,\n CharType.CTDot: State.StOper,\n CharType.CTSepar: State.StSepar,\n CharType.CTOpenCom: State.StOpenCom,\n CharType.CTCloseCom: State.StError,\n CharType.CTQuote: State.StOpenChar,\n CharType.CTDollar: State.StHexadecimal,\n CharType.CTPercent: State.StBinary,\n CharType.CTAmper: State.StAmper,\n CharType.CTOctot: State.StASCII,\n CharType.CTUnknown: State.StError},\n \n State.StFinal: {i: State.StError for i in CharType},\n\n State.StError: {i: State.StStart for i in CharType},\n\n State.StSpace: {i: State.StStart if i != CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StOpenCom: {i: State.StOpenCom if not i in [CharType.CTCloseCom, CharType.CTDollar, 
CharType.CTUnknown]\n else State.StCloseCom if not i in [CharType.CTDollar, CharType.CTUnknown]\n else State.StOpenDir if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StCloseCom: {i: State.StStart if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StOpenDir: {i: State.StOpenCom if not i in [CharType.CTCloseCom, CharType.CTUnknown]\n else State.StCloseDir if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StCloseDir: {i: State.StStart if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StIdent: {i: State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTAF, CharType.CTE, CharType.CTLetter,\n CharType.CTCloseCom, CharType.CTQuote, CharType.CTDollar,\n CharType.CTPercent, CharType.CTAmper, CharType.CTOctot,\n CharType.CTUnknown]\n else State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTAF, CharType.CTE, CharType.CTLetter]\n else State.StIdent\n for i in CharType},\n\n State.StDecimal: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom,\n CharType.CTDot]\n else State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTDot]\n else State.StDecimal if i != CharType.CTDot\n else State.StRealWDot\n for i in CharType},\n\n State.StBinary: {i: State.StError if not i in [CharType.CT01,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom]\n else State.StStart if i != CharType.CT01\n else State.StBinary\n for i in CharType},\n\n State.StOctal: {i: State.StError if not i in [CharType.CT01, CharType.CT27,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom]\n else State.StStart if not i in [CharType.CT01, CharType.CT27]\n else State.StOctal\n for i in CharType},\n\n State.StHexadecimal: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTAF, CharType.CTE,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom]\n else State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTAF, CharType.CTE]\n else State.StHexadecimal\n for i in CharType},\n\n State.StRealWDot: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StRealWDec\n for i in CharType},\n\n State.StRealWDec: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom,\n CharType.CTE]\n else State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTE, CharType.CTOpenCom]\n else State.StRealWDec if not i in [CharType.CTE, CharType.CTOpenCom]\n else State.StRealWE if i != CharType.CTOpenCom\n else State.StOpenCom\n for i in CharType},\n\n State.StRealWE: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTMinus]\n else State.StRealFull if i != CharType.CTMinus\n else State.StRealWEMin\n for i in CharType},\n\n State.StRealWEMin: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StRealFull\n for i in CharType},\n\n 
State.StRealFull: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual,\n CharType.CTSepar, CharType.CTOpenCom]\n else State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StRealFull\n for i in CharType},\n\n State.StOpenChar: {i: State.StOpenString if i!=CharType.CTQuote\n else State.StCloseChar\n for i in CharType},\n\n State.StCloseChar: {i: State.StStart if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StOpenString: {i: State.StOpenString if i!=CharType.CTQuote\n else State.StCloseString\n for i in CharType},\n\n State.StCloseString: {i: State.StStart if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StOper: {i: State.StStart if not i in [CharType.CTMinus, CharType.CTOper, CharType.CTCompar,\n CharType.CTEqual, CharType.CTDot, CharType.CTUnknown]\n else State.StOper if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StSepar: {i: State.StStart if i!=CharType.CTUnknown\n else State.StError\n for i in CharType},\n\n State.StAmper: {i: State.StError if not i in [CharType.CTAF, CharType.CTE, CharType.CTLetter,\n CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StIdent if not i in [CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StOctal\n for i in CharType},\n\n State.StASCII: {i: State.StError if not i in [CharType.CT01, CharType.CT27, CharType.CT89,\n CharType.CTSpace, CharType.CTMinus, CharType.CTOper,\n CharType.CTCompar, CharType.CTEqual, CharType.CTDot,\n CharType.CTSepar, CharType.CTOpenCom]\n else State.StStart if not i in [CharType.CT01, CharType.CT27, CharType.CT89]\n else State.StASCII\n for i in CharType}\n }\n\n\n def __init__(self,fin):\n self.buf = Buffer()\n self.state = State.StStart\n self.fin = fin\n self.isEndOfFile = False\n self.isErrorCaught = False\n self.currentSymbol = ''\n self.currentLine = 1\n self.currentPosition = 0;\n self.lexemLine = 1;\n self.lexemPosition = 1;\n\n self.charTypeTurner = {i: CharType.CTAF for i in ['A', 'B', 'C', 'D', 'F', 'a', 'b', 'c', 'd', 'f']}\n self.charTypeTurner.update([(i, CharType.CTE) for i in ['E','e']])\n self.charTypeTurner.update([(i, CharType.CTLetter) for i in ['G','H','I','J','K','L','M','N','O','P','Q','R','S',\n 'T','U','V','W','X','Y','Z','g','h','i','j','k','l',\n 'm','n','o','p','q','r','s','t','u','v','w','x','y',\n 'z','А','Б','В','Г','Д','Е','Ё','Ж','З','И','Й','К',\n 'Л','М','Н','О','П','Р','С','Т','У','Ф','Х','Ц','Ч',\n 'Ш','Щ','Ъ','Ы','Ь','Э','Ю','Я','а','б','в','г','д',\n 'е','ё','ж','з','и','й','к','л','м','н','о','п','р',\n 'с','т','у','ф','х','ц','ч','ш','щ','ъ','ы','ь','э',\n 'ю','я']])\n self.charTypeTurner.update([(i, CharType.CT01) for i in ['0','1']])\n self.charTypeTurner.update([(i, CharType.CT27) for i in ['2','3','4','5','6','7']])\n self.charTypeTurner.update([(i, CharType.CT89) for i in ['8','9']])\n self.charTypeTurner.update([(i, CharType.CTSpace) for i in [' ', '\\n', '\\t', '\\0', '\\r', '']])\n self.charTypeTurner.update([('-', CharType.CTMinus)])\n self.charTypeTurner.update([(i, CharType.CTOper) for i in ['+', '*', '/', ':']])\n self.charTypeTurner.update([(i, CharType.CTCompar) for i in ['<', '>']])\n self.charTypeTurner.update([('=', CharType.CTEqual)])\n self.charTypeTurner.update([('.', CharType.CTDot)])\n self.charTypeTurner.update([(i, CharType.CTSepar) for i in ['(', ')', ';', '[', ']', ',']])\n self.charTypeTurner.update([('{', 
CharType.CTOpenCom)])\n self.charTypeTurner.update([('}', CharType.CTCloseCom)])\n self.charTypeTurner.update([('\\'', CharType.CTQuote)])\n self.charTypeTurner.update([('$', CharType.CTDollar)])\n self.charTypeTurner.update([('%', CharType.CTPercent)])\n self.charTypeTurner.update([('&', CharType.CTAmper)])\n self.charTypeTurner.update([('#', CharType.CTOctot)])\n\n\n def getNextSymbol(self):\n symbol = self.fin.read(1)\n if symbol == '\\n':\n self.currentLine += 1\n self.currentPosition = 0\n else:\n self.currentPosition += 1\n return symbol\n \n\n def getNextValue(self):\n if self.currentSymbol in self.charTypeTurner.keys():\n return self.charTypeTurner[self.currentSymbol]\n else:\n return CharType.CTUnknown\n\n\n def isError(self):\n return self.isErrorCaught\n\n\n def analyze(self):\n if self.isEndOfFile and not self.isErrorCaught:\n self.lexem = self.lexem = Lexem(self.lexemLine, self.lexemPosition, State.StFinal, '')\n else:\n self.lexemIsFound = False\n while not self.lexemIsFound:\n if not self.state is State.StStart or self.currentSymbol == '':\n self.currentSymbol = self.getNextSymbol()\n\n self.currentValue = self.getNextValue()\n\n self.prevState = self.state\n self.state = self.transit[self.state][self.currentValue]\n\n if self.state == State.StError:\n self.isErrorCaught = True\n\n if self.state is State.StOper and not self.buf.isEmpty():\n probOper = self.buf.get() + self.currentSymbol\n if not probOper in self.pairOpers:\n self.lexem = Lexem(self.lexemLine, self.lexemPosition, self.prevState, self.buf.get())\n self.lexemPosition = self.currentPosition\n self.lexemLine = self.currentLine\n self.lexemIsFound = True\n self.buf.clear()\n\n elif self.state is State.StStart:\n if self.prevState!=State.StSpace and self.prevState!=State.StCloseCom:\n self.lexem = Lexem(self.lexemLine, self.lexemPosition, self.prevState, self.buf.get())\n self.lexemIsFound = True\n self.lexemPosition = self.currentPosition\n self.lexemLine = self.currentLine\n self.buf.clear()\n \n\n if self.currentSymbol == '':\n self.isEndOfFile = True\n elif not self.state is State.StStart:\n self.buf.add(self.currentSymbol)\n\n return self.lexem\n \n \n","repo_name":"KrisJJ/PascalCompiler","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":23558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27599331822","text":"import json\r\nimport pandas as pd\r\nfrom flask import Flask, jsonify, request\r\nfrom flask import render_template\r\nimport requests\r\nfrom pathlib import Path\r\nfrom googleapiclient.discovery import build\r\nimport scrapetube\r\nimport pytube\r\nimport time\r\nimport mysql.connector as connection\r\nimport pymongo\r\nfrom flask_cors import CORS, cross_origin\r\n#mport pdfkit as pdf\r\n\r\napp = Flask(__name__) #Create a flask application\r\n\r\n'''\r\nFirst page of project as an index, this function will be called automatically when clicked on url\r\nTo use this application You need to have a Youtube API Key - To get API Key Link has been Provided on first page\r\n'''\r\n\r\n@app.route(\"/\", methods=['POST','GET'])\r\n@cross_origin()\r\ndef index():\r\n return render_template('index.html')\r\n\r\n'''\r\nUsing get_channel_id function you are able to fetch channel id of any youtube channel name\r\n'''\r\ndef get_channel_id(Youtube, channel, API_KEY):\r\n try:\r\n channel_id = requests.get(\r\n f'https://www.googleapis.com/youtube/v3/search?part=id&q={channel}&type=channel&key={API_KEY}').json()[\r\n 
'items'][0]['id']['channelId']\r\n\r\n channels_response = Youtube.channels().list(id=channel_id,\r\n part='id, snippet, statistics, contentDetails, topicDetails').execute()\r\n yc_id = channels_response['items'][0]['id']\r\n return yc_id\r\n except:\r\n return \"Unable to find Channel, Youtube is not responding to the request. Please try after sometime\"\r\n\r\n# @app.route('/viewchannel', methods=['POST', 'GET'])\r\n# @cross_origin()\r\n# def Browse_channel_videos():\r\n# channel_id = request.form.get(\"id\")\r\n# # path = \"C:/Users/bramb/Downloads/chromedriver\"\r\n# path = \"C:/Users/bramb/PycharmProjects/YoutubeScrapping/chromedriver.exe\"\r\n# url = f\"https://www.youtube.com/channel/{channel_id}/videos\"\r\n# driver = webdriver.Chrome(path)\r\n# driver.get(url)\r\n# time.sleep(300)\r\n# return \"Time Over Please, you can browse for 5 minutes only\"\r\n\r\n'''\r\n Using get_all_video_ids able to find all the letest uploaded video's id's using channel id.\r\n also you have to pass max_result parameter to get numbre of id's only\r\n'''\r\ndef get_all_video_ids(channel_id, max_result):\r\n try:\r\n videos = scrapetube.get_channel(f\"{channel_id}\")\r\n video_id = []\r\n for video in videos:\r\n if len(video_id) {v} points\")\n points += v\n return points\n","repo_name":"james-mattison/words","sub_path":"src/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"937722238","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# import sys\n# from tecs.rinex import label\nfrom tecs.rinex.label import OBS_TYPE_LABELS\n\nNAME = 'tecs.gtb.tools'\n\n\ndef parse_rec(rec):\n \"\"\"parse_rec(rec) -> rec\n\n parse on observation record to retrieve values.\n\n Parameters\n ----------\n rec : dict\n an observation record\n\n Returns\n -------\n datum : dict\n {obs type: (obs value, LLI, signal strength), ...}\n\n \"\"\"\n datum = {}\n obs_types = list(rec.keys())\n obs_types.sort()\n\n for ot_set in OBS_TYPE_LABELS:\n datum[ot_set] = (None,) * 3\n for ot in obs_types:\n if ot in ot_set:\n datum[ot_set] = rec[ot]\n obs_types.remove(ot)\n break\n\n return datum\n\n\n# def ask(yes=False, msg=None):\n# \"\"\"ask(yes=False, msg=None) -> True or raise SystemExit\n#\n# Raise SystemExit(0) if the answer differs from 'y' or 'Y'.\n#\n# Parameters\n# ----------\n# yes: bool\n# default answer\n# msg: str\n# message to ask\n#\n# Returns\n# -------\n# If user answer differs from 'y/Y' raise SystemExit otherwise return True.\n# \"\"\"\n# if yes:\n# return True\n#\n# question = 'continue processing? 
[Y/n]:'\n#\n# if msg:\n# msg = '{}\\n{}'.format(msg, question)\n# else:\n# msg = question\n#\n# print(msg, end=' ')\n# answer = sys.stdin.readline().rstrip()\n#\n# if not answer or answer in 'yY':\n# return True\n# else:\n# raise SystemExit(0)\n","repo_name":"gnss-lab/tec-suite","sub_path":"tecs/gtb/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"8907288470","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, Extension\n\ninclude_dirs = [\n 'src/DASpec',\n '/home/dupu/Softwares/cmpfit/cmpfit-1.3a', # add or remove path if necessary\n]\n\nlibrary_dirs = [\n '/home/dupu/Softwares/cmpfit/cmpfit-1.3a', # add or remove path if necessary\n]\n\next_swigDASpec = Extension(\n name = '_swigDASpec',\n swig_opts = ['-c++'],\n sources = [\n 'compcontainer.cpp',\n 'component.cpp',\n 'curvefit.cpp',\n 'function.cpp',\n 'swigDASpec.i',\n ],\n include_dirs = include_dirs,\n library_dirs = library_dirs,\n extra_compile_args = [\n '-fPIC',\n ],\n extra_link_args = [\n '-lmpfit',\n '-lgsl',\n '-lgslcblas',\n ]\n)\n\next_carray = Extension(\n name = '_carray',\n swig_opts = ['-c++'],\n sources = [\n 'carray.cpp', \n 'carray.i',\n ],\n include_dirs = include_dirs\n)\n\nsetup(\n name = 'DASpec',\n version = '0.8',\n author = 'Pu Du', \n description = \"\"\"DASpec\"\"\", \n ext_modules = [ext_swigDASpec, ext_carray], \n py_modules = [\"DASpec\", \"carray\"],\n)\n","repo_name":"PuDu-Astro/DASpec","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"9964875357","text":"from __future__ import annotations\n\nimport logging\nimport sys\n\ntry:\n from systemd.journal import JournalHandler # type: ignore # pylint: disable=import-error\nexcept ImportError:\n pass\n\nLogger = logging.Logger\n\n\ndef init() -> None:\n logger = logging.getLogger(\"snerge\")\n\n if sys.stdout.isatty():\n handler = logging.StreamHandler()\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s [%(name)s:%(levelname)s] %(message)s\", datefmt=\"%H:%M:%S\"\n )\n )\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n else:\n handler = JournalHandler(SYSLOG_IDENTIFIER=\"snerge-bot\")\n handler.setFormatter(logging.Formatter(\"[%(name)s:%(levelname)s] %(message)s\"))\n\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n\ndef get_logger(name: str = \"\") -> logging.Logger:\n if not name:\n return logging.getLogger(\"snerge\")\n\n return logging.getLogger(\"snerge.\" + name)\n","repo_name":"javajawa/snerge-bot","sub_path":"src/snerge/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40667131095","text":"import chainer\nfrom chainer import optimizers\nimport nutszebra_basic_print\n\n\nclass Optimizer(object):\n\n def __init__(self, model=None):\n self.model = model\n self.optimizer = None\n\n def __call__(self, i):\n pass\n\n def update(self):\n self.optimizer.update()\n\n\nclass OptimizerResnet(Optimizer):\n\n def __init__(self, model=None, schedule=(150, 200), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):\n super(OptimizerResnet, self).__init__(model)\n optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)\n weight_decay = chainer.optimizer.WeightDecay(weight_decay)\n 
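        # setup() binds the optimizer to the model's parameters, and the
        # WeightDecay hook registered with add_hook() is then applied on every
        # subsequent update() call (Chainer's optimizer-hook mechanism).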
optimizer.setup(self.model)\n optimizer.add_hook(weight_decay)\n self.optimizer = optimizer\n self.schedule = schedule\n self.lr = lr\n self.warmup_lr = warm_up_lr\n self.momentum = momentum\n self.weight_decay = weight_decay\n\n def __call__(self, i):\n if i == 1:\n lr = self.lr\n print('finishded warming up')\n print('lr is changed: {} -> {}'.format(self.optimizer.lr, lr))\n self.optimizer.lr = lr\n if i in self.schedule:\n lr = self.optimizer.lr / 10\n print('lr is changed: {} -> {}'.format(self.optimizer.lr, lr))\n self.optimizer.lr = lr\n","repo_name":"nutszebra/SENets","sub_path":"nutszebra_optimizer.py","file_name":"nutszebra_optimizer.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"8586406373","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImplementation project\nMilestone 4\nAccuracy calculation for all previous classifiers studied\n for wisconsin breast cancer database\n\n@author: KMShihab\n\"\"\"\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom LinearModel import runLinearModel\nfrom GaussianProcess import runGPModel\nfrom utilityFunctions import splitTrainingTestingData\n#Constant parameters\nNO_OF_FEATURES=30\nNO_OF_SAMPLES=569\nCROSS_VALIDATION_K=10\n\n#######################################\n#for i-th sample\n#X(i) is feature vector, Y(i) is label \n########################################\nX=np.ones((NO_OF_SAMPLES,NO_OF_FEATURES))\nY=np.zeros((NO_OF_SAMPLES,1))\n\n##########################################\n# Reading file and formatting data\n##########################################\nfileInput = open('wdbc.data', 'r')\nindexSample=0\nfor line in fileInput:\n data=line.split(',')\n #data[0] is patient ID\n features=list(map(float,data[2:]))\n # 1 for malignant tumor, -1 for benign tumor\n label=1 if data[1]=='M' else -1\n \n X[indexSample,:]=features\n Y[indexSample]=label\n indexSample+=1\n \nfileInput.close()\n\n##########################################\n# dimensionality reduction\n##########################################\n\n#pca=PCA()\n#pca.fit(X)\n#print(pca.explained_variance_ratio_[:5])\n#d=np.argmax(np.cumsum(pca.explained_variance_ratio_)>=0.99)+1\n#pca=PCA(n_components=d)\n\npca=PCA(n_components=0.99)\nX_reduced=pca.fit_transform(X)\n\n\n##########################################\n# calculate accuracy for all classifiers\n##########################################\n\n# resultAll contains accuracy for k fold cross vaidation\n#1st row: Linear without DR\n#2nd row: Linear with DR\n#3rd row: GP RBF without DR\n#4th row: GP RBF with DR\n#5th row: GP RQ without DR\n#6th row: GP RQ with DR\nresultAll=np.zeros((6,CROSS_VALIDATION_K))\n\n\nfor n_K in range(CROSS_VALIDATION_K):\n \n #original dataset\n xTrain,yTrain,xTest,yTest=splitTrainingTestingData(X,Y,CROSS_VALIDATION_K,n_K)\n \n lamda=0.1\n resultAll[0][n_K]=runLinearModel(xTrain,yTrain,xTest,yTest,lamda) \n kernel_type='RBF'\n resultAll[2][n_K]=runGPModel(xTrain,yTrain,xTest,yTest,kernel_type)\n kernel_type='RationalQuadratic'\n resultAll[4][n_K]=runGPModel(xTrain,yTrain,xTest,yTest,kernel_type)\n \n #reduced dataset\n xTrain,yTrain,xTest,yTest=splitTrainingTestingData(X_reduced,Y,CROSS_VALIDATION_K,n_K)\n \n lamda=0.1\n resultAll[1][n_K]=runLinearModel(xTrain,yTrain,xTest,yTest,lamda) \n kernel_type='RBF'\n resultAll[3][n_K]=runGPModel(xTrain,yTrain,xTest,yTest,kernel_type)\n kernel_type='RationalQuadratic'\n resultAll[5][n_K]=runGPModel(xTrain,yTrain,xTest,yTest,kernel_type)\n \n\nwith 
open('resultRun.txt','w') as fileOutput:\n for i in range(6):\n fileOutput.write('\\t'.join(map(str,resultAll[i,:]))+'\\n')\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kmshihabuddin/Optimization-and-Machine-Learning","sub_path":"Machine Learning CSE 517/Application Projects/Milestone 4 Final Comparison/ClassifierAccuracyCalculation.py","file_name":"ClassifierAccuracyCalculation.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7166008361","text":"\"\"\"\nSettings for dash.\n\n- sunburst\n - last childs must have a value\n - second main_parent shouldn't have a value (at least one!)\n\"\"\"\nimport csv\n# import os\n\n# from owndash.models import Sunburst\n\n\ndef read_csv(filename):\n \"\"\"Read in a csv without header.\"\"\"\n holder = list()\n with open(filename, 'r', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\",\")\n for row in reader:\n # print(row)\n row_list = list(map(lambda x: x, row[:-1]))\n # right = list(map(lambda x: x, row[-1:]))\n holder.append(row_list)\n return holder[1:]\n\n\ndef get_cha_par_val(liste_list):\n \"\"\"Return each list.\"\"\"\n return (\n [ho[0] for ho in liste_list],\n [ho[1] for ho in liste_list],\n [0.0 if ho[2] == '' else float(ho[2]) for ho in liste_list]\n )\n\n\ndef get_first_main_parent(character, parent,):\n \"\"\"Get the title which is a parent but not a child - any.\"\"\"\n for par in parent:\n if par not in character:\n return par\n\n\ndef get_main_childs(parent, character):\n \"\"\"Get all childs which are not a parent.\"\"\"\n return [cha if cha not in parent else '' for cha in character]\n\n\ndef add_values_parent_current(parent_current, parent, pos, value):\n \"\"\"Add values for the current parents.\"\"\"\n if parent[pos] not in parent_current:\n parent_current[parent[pos]] = value[pos]\n else:\n parent_current[parent[pos]] += value[pos]\n return value\n\n\ndef refill_values_from_bottom(character, parent, value,\n length,\n parents_old,\n first=False\n ):\n \"\"\"Get the child, add the values and set the value for the parent.\"\"\"\n parent_current = {}\n # get the value of childs and add to parents\n for pos in range(length):\n if first:\n if get_main_childs(parent, character)[pos]:\n value = add_values_parent_current(parent_current,\n parent,\n pos,\n value)\n else:\n if parent[pos] not in parents_old:\n value = add_values_parent_current(parent_current,\n parent,\n pos,\n value)\n # set the value for chosen parents\n for pos in range(length):\n if character[pos] in parent_current:\n value[pos] = parent_current[character[pos]]\n\n return value, parent_current\n\n\ndef value_is_not_filled(values):\n \"\"\"Check if all values are filled.\"\"\"\n if 0 in values:\n return True\n else:\n return False\n\n\ndef calculate_sunburst_info(character, parent, value):\n \"\"\"\n Calculate the rank of parents and character.\n\n Set the value starting from bottom for parent by child.\n The red csv file is a listed list of [[cha,par,val],...]\n \"\"\"\n # child_last = get_main_childs(parent, character)\n\n length = len(character)\n\n is_first = True\n parent_old = {}\n while value_is_not_filled(value):\n value, parent_old = refill_values_from_bottom(character,\n parent,\n value,\n length,\n parents_old=parent_old,\n first=is_first\n )\n # fill old parents\n for key in parent_old.keys():\n parent_old[key] = parent_old[key]\n\n is_first = False\n\n sunburst_title = get_first_main_parent(character, parent)\n return {'value': 
value,\n 'parent': parent,\n 'character': character}, sunburst_title\n\n\n# cwd = os.getcwd()\n# data_dir = cwd + '/data/'\n# print(data_dir)\n\n\ndef get_cha_par_val_DB(sun, key):\n \"\"\"Return each list from DB.\"\"\"\n character = []\n parent = []\n value = []\n\n for each in sun.objects.filter(key=key).all():\n character.append(each.character)\n parent.append(each.parent)\n value.append(each.value)\n\n return character, parent, value\n\n\n\"\"\" DB\ncharacter, parent, value = get_cha_par_val_DB(Sunburst, 'Bachelor')\n\nhold_up_ba = calculate_sunburst_info(character, parent, value)\n\ncharacter, parent, value = get_cha_par_val_DB(Sunburst, 'Master')\nhold_up_ma = calculate_sunburst_info(character, parent, value)\n\"\"\"\n\n\"\"\"\nabsFilePath = os.path.abspath(__file__)\nbasFilePath = os.path.basename(__file__)\ndirFilePath = os.path.dirname(__file__)\nprint(dirFilePath)\nfix_path_to_data = '../data'\ncomFilePath = os.path.commonprefix([fix_path_to_data, absFilePath])\nprint(comFilePath)\nfile_name_bachelor = os.path.join(dirFilePath, 'data', 'studies_ba.csv')\nprint(file_name_bachelor)\nfile_name_master = os.path.join(dirFilePath, 'data', 'studies_ma.csv')\n# #should already be the BASE_DIR(!) # get_cha_par_val()\n# file_name_bachelor = os.path.join(os.getcwd(), 'xdata', 'studies_ba.csv')\n# file_name_master = os.path.join(os.getcwd(), 'data', 'studies_ma.csv')\nfile_bachelor = read_csv(file_name_bachelor)\nfile_master = read_csv(file_name_master)\n\n# file_bachelor = read_csv(data_dir+'studies_ba.csv')\n# file_master = read_csv(data_dir+'studies_ma.csv')\n\"\"\"\n\n\"\"\"The following is a fixed input for the profile example.\"\"\"\nfile_bachelor = [\n ['Electrical Engineering / Information Technology',\n 'Bachelor of Science',\n ''],\n ['Economic Sciences', 'Bachelor of Science', ''],\n ['Thesis', 'Bachelor of Science', ''],\n [\"Bachelor's\", 'Thesis', ''],\n ['Professionalisation - Integration Subjects', 'Bachelor of Science', ''],\n ['Fundamentals Eco.', 'Economic Sciences', ''],\n ['Economics - Specialisation', 'Economic Sciences', ''],\n ['Fundamentals El.', 'Electrical Engineering / Information Technology', ''],\n ['Electrotechnical - Specialization',\n 'Electrical Engineering / Information Technology',\n ''],\n ['Fundamentals of Mathematics and Natural Science',\n 'Bachelor of Science',\n ''],\n ['Mathematics I', 'Fundamentals of Mathematics and Natural Science', '12'],\n ['Mathematics II', 'Fundamentals of Mathematics and Natural Science', '12'],\n ['Mechanics and Thermics for Electrical Engineering',\n 'Fundamentals of Mathematics and Natural Science',\n '6'],\n ['Fundamentals of Economics', 'Fundamentals Eco.', '8'],\n ['Accounting', 'Fundamentals Eco.', '5'],\n ['Fundamentals of Business Studies', 'Fundamentals Eco.', '10'],\n ['Electrical Engineereing', 'Fundamentals El.', '9'],\n ['Controll Engineereing', 'Fundamentals El.', '5'],\n ['Electrics', 'Fundamentals El.', '5'],\n ['Digital Systems Design', 'Fundamentals El.', '7'],\n ['Electromagnetic Fields I', 'Fundamentals El.', '5'],\n ['Basic Linear Circuit Theory', 'Fundamentals El.', '13'],\n ['Energy Technology Basics', 'Fundamentals El.', '5'],\n ['Informations Technology', 'Fundamentals El.', '6'],\n ['Focus Economics', 'Economics - Specialisation', '5'],\n ['Focus Finance', 'Economics - Specialisation', '5'],\n ['Focus Production and Logistics', 'Economics - Specialisation', '5'],\n ['Focus Marketing', 'Economics - Specialisation', '5'],\n ['Digital Circuit Design', 'Electrotechnical - Specialization', '5'],\n 
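    # Each row is [child, parent, value]: leaf modules carry a credit value,
    # interior nodes start as '' and get filled bottom-up by
    # calculate_sunburst_info() above.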
['Industrial Internship', 'Bachelor of Science', ''],\n ['BioMedical Instruments', 'Industrial Internship', '8'],\n ['Civil Law', 'Professionalisation - Integration Subjects', '8'],\n ['Quantitative Methods in Economic Science',\n 'Professionalisation - Integration Subjects',\n '8'],\n ['Professionalisation', 'Professionalisation - Integration Subjects', '7'],\n ['Algorithms and Skills', 'Professionalisation - Integration Subjects', '5'],\n ['Camera-based detection and classification of traffic signs',\n \"Bachelor's\",\n '12']\n]\nfile_master = [\n ['Electrical Engineering / Information Technology', 'Master of Science', ''],\n ['Economic Sciences', 'Master of Science', ''],\n ['Lboratory Courses', 'Master of Science', '11'],\n ['Industrial Internship', 'Master of Science', '12'],\n ['Thesis', 'Master of Science', '30'],\n [\"Master's\", 'Thesis', '30'],\n ['Specialisation Eco.', 'Economic Sciences', '33'],\n ['Elective Area', 'Electrical Engineering / Information Technology', '11'],\n ['Specialisation: Mechatronics and Metrology',\n 'Electrical Engineering / Information Technology',\n '20'],\n ['Brain Computer Interface for head mounted display', \"Master's\", '30'],\n ['Marketing (Major)', 'Specialisation Eco.', '14'],\n ['Economics (Major)', 'Specialisation Eco.', '14'],\n ['Production and Logistics (Minor)', 'Specialisation Eco.', '5'],\n ['Facility Management', 'Production and Logistics (Minor)', '2.5'],\n ['Energy- and resource-efficient Production',\n 'Production and Logistics (Minor)',\n '2.5'],\n ['Insurance Economics', 'Economics (Major)', '5'],\n ['Theory and Politics of Taxation', 'Economics (Major)', '5'],\n ['Slows income inequality economic growth', 'Economics (Major)', '4'],\n ['Consumer Bahavior', 'Marketing (Major)', '5'],\n ['Marketing Research', 'Marketing (Major)', '2.5'],\n ['International Marketing', 'Marketing (Major)', '2.5'],\n ['Acceptance of QR-code-ased feedback solution', 'Marketing (Major)', '4'],\n ['Robotics I - Technical and Mathematical Basics',\n 'Specialisation: Mechatronics and Metrology',\n '5'],\n ['Robotics II - Programming Modelling Planning',\n 'Specialisation: Mechatronics and Metrology',\n '5'],\n ['Precision Measuring Techniques',\n 'Specialisation: Mechatronics and Metrology',\n '5'],\n ['Nanoelectronics', 'Specialisation: Mechatronics and Metrology', '5'],\n ['Basics of Medicine for Engineers', 'Elective Area', '5'],\n ['Bioanalysis', 'Elective Area', '6'],\n ['Electrical Machine', 'Lboratory Courses', '5'],\n ['Power Electronics', 'Lboratory Courses', '5'],\n ['Robotics', 'Lboratory Courses', '6'],\n ['add solution', 'Industrial Internship', '12']\n]\n\"\"\"\"\"\"\n\ncharacter, parent, value = get_cha_par_val(file_bachelor)\nhold_up_ba = calculate_sunburst_info(character, parent, value)\n\ncharacter, parent, value = get_cha_par_val(file_master)\n\nhold_up_ma = calculate_sunburst_info(character, parent, value)\n\n\n\"\"\"python\nfrom owndash.models import Sunburst\nfrom owndash.dash_apps.sunburst.settings_dash import *\ncha, par, val = get_cha_par_val(file_bachelor)\nfor (c,p),v in zip(zip(cha,par),val):\n sun = Sunburst(character=c, parent=p, value=v,key='Bachelor')\n sun.save()\n\"\"\"\n","repo_name":"mjwalz/own_dash","sub_path":"settings_dash_no_panda.py","file_name":"settings_dash_no_panda.py","file_ext":"py","file_size_in_byte":10850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36019908694","text":"from pathlib import Path\n\nimport pytest\n\nfrom op_mt_tools.cleanup 
import (\n cleanup_en,\n combine_chunks,\n find_failed_cleanup_chunks,\n split_document,\n)\n\n\n@pytest.mark.skip(reason=\"Need to Mock OpenAI API\")\ndef test_split_document():\n fn = Path(__file__).parent / \"manual\" / \"uncleaned_texts\" / \"01.txt\"\n text = fn.read_text()\n\n sents = split_document(text)\n\n assert sents == [\"Hello World.\", \"Hello World\"]\n\n\n@pytest.mark.skip(reason=\"Need to Mock OpenAI API\")\ndef test_run_cleanup():\n fn = Path(__file__).parent / \"manual\" / \"uncleaned_texts\" / \"01.txt\"\n\n cleaned_fn = cleanup_en(fn)\n\n assert cleaned_fn.is_file()\n\n\n@pytest.mark.skip(reason=\"Need to Mock OpenAI API\")\ndef test_find_failed_cleanup_chunks():\n text_path = Path(__file__).parent / \"manual\" / \"uncleaned_texts\"\n\n failed_chunks = find_failed_cleanup_chunks(text_path)\n\n assert failed_chunks == [1]\n\n\n@pytest.mark.skip(reason=\"need large data\")\ndef test_combine_chunks():\n text_dir = Path(__file__).parent / \"manual\" / \"uncleaned_texts\"\n chunks_dir = text_dir / \"chunks\"\n output_fn = text_dir / \"[AUTO_CLEANED]_01.txt\"\n\n combine_chunks(chunks_dir, output_fn)\n\n assert output_fn.is_file()\n","repo_name":"OpenPecha/mt-training-data-prep-tools","sub_path":"tests/test_cleanup.py","file_name":"test_cleanup.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16469677261","text":"#%% QTableView 활용(1) : comboBox에 이미지 추가하기\n# QComboBox, QStandardItemModel, QStandardItem, QPixmap\n'''\nenum ItemDataRole {\n DisplayRole = 0,\n DecorationRole = 1,\n EditRole = 2,\n ToolTipRole = 3,\n StatusTipRole = 4,\n WhatsThisRole = 5,\n // Metadata\n FontRole = 6,\n TextAlignmentRole = 7,\n BackgroundColorRole = 8,\n BackgroundRole = 8,\n TextColorRole = 9,\n ForegroundRole = 9,\n CheckStateRole = 10,\n // Accessibility\n AccessibleTextRole = 11,\n AccessibleDescriptionRole = 12,\n // More general purpose\n SizeHintRole = 13,\n InitialSortOrderRole = 14,\n // Internal UiLib roles. Start worrying when public roles go that high.\n DisplayPropertyRole = 27,\n DecorationPropertyRole = 28,\n ToolTipPropertyRole = 29,\n StatusTipPropertyRole = 30,\n WhatsThisPropertyRole = 31,\n // Reserved\n UserRole = 32\n \n Role Constant Desc\n \n Qt.DisplayRole 0 The key data to be rendered in the form of text. (QString)\n Qt.DecorationRole 1 The data to be rendered as a decoration in the form of an icon. (QColor, QIcon or QPixmap)\n Qt.EditRole 2 The data in a form suitable for editing in an editor. (QString)\n Qt.ToolTipRole 3 The data displayed in the item's tooltip. (QString)\n Qt.StatusTipRole 4 The data displayed in the status bar. (QString)\n Qt.WhatsThisRole 5 The data displayed for the item in \"What's This?\" mode. (QString)\n Qt.SizeHintRole 13 The size hint for the item that will be supplied to views. (QSize)\n \n \n Qt.FontRole 6 The font used for items rendered with the default delegate. (QFont)\n Qt.TextAlignmentRole 7 The alignment of the text for items rendered with the default delegate. (Qt.AlignmentFlag)\n Qt.BackgroundRole 8 The background brush used for items rendered with the default delegate. (QBrush)\n Qt.BackgroundColorRole 8 This role is obsolete. Use BackgroundRole instead.\n Qt.ForegroundRole 9 The foreground brush (text color, typically) used for items rendered with the default delegate. (QBrush)\n Qt.TextColorRole 9 This role is obsolete. 
Use ForegroundRole instead.\n Qt.CheckStateRole 10 This role is used to obtain the checked state of an item. (Qt.CheckState)\n \n \n Qt.AccessibleTextRole 11 The text to be used by accessibility extensions and plugins, such as screen readers. (QString)\n Qt.AccessibleDescriptionRole 12 A description of the item for accessibility purposes. (QString)\n\n Qt.InitialSortOrderRole 14 This role is used to obtain the initial sort order of a header view section. (Qt.SortOrder). This role was introduced in Qt 4.8. \n Qt.UserRole 32 The first role that can be used for application-specific purposes.\n}\n'''\nimport sys\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QBoxLayout\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QComboBox\nfrom PyQt5.QtWidgets import QTableView\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import Qt, QVariant\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem, QPixmap\n\nIMAGE_PATH = \"./images/\"\n\nclass UserModel(QStandardItemModel):\n def __init__(self, fruits = None, parent = None):\n #super().__init__()\n #super().__init__(parent)\n QStandardItemModel.__init__(self, parent)\n\n for no, dictData in enumerate(fruits):\n print(no, dictData)\n col_1 = QStandardItem(dictData[\"name\"])\n col_2 = QStandardItem(dictData[\"image\"])\n col_3 = QStandardItem(dictData[\"color\"])\n self.setItem(no, 0, col_1)\n self.setItem(no, 1, col_2)\n self.setItem(no, 2, col_3)\n \n self.setHorizontalHeaderLabels([\"Name\", \"Image\", \"Color\"])\n \n def data(self, QModelIndex, role = None): # 이 함수는 언제 호출되는지 ???\n data = self.itemData(QModelIndex)\n print(data, data[0])\n if role == Qt.DisplayRole:\n if QModelIndex.column() == 1: # 이미지 경로는 디스플레이 되지 않게 한다.\n return QVariant()\n return data[0]\n if role == Qt.DecorationRole:\n return QPixmap(data[0]).scaledToHeight(20)\n return QVariant()\n\nclass Form(QWidget):\n def __init__(self):\n QWidget.__init__(self, flags = Qt.Widget) # Default, 창의 형식과 로고 \n # QWidget.__init__(self, flags = Qt.Dialog) \n # QWidget.__init__(self, flags = Qt.Window)\n # super().__init__(flags = Qt.Widget)\n # super().__init__()\n self.init_widget()\n\n def init_widget(self):\n self.setWindowTitle(\"QComboBox Widget\")\n self.setMinimumWidth(350)\n layout = QBoxLayout(QBoxLayout.TopToBottom, parent = self)\n self.setLayout(layout)\n\n data = [\n {\"name\": \"Apple\", \"image\": IMAGE_PATH + \"apple.jpg\", \"color\": \"Red\"}, \n {\"name\": \"Banana\", \"image\": IMAGE_PATH + \"banana.jpg\", \"color\": \"Yellow\"}]\n\n model = UserModel(data)\n\n view = QTableView()\n view.setSelectionBehavior(view.SelectRows) # 한 줄 단위로 선택\n # self.resize(400,100)\n comboBox = QComboBox()\n comboBox.setView(view)\n comboBox.setModel(model)\n\n layout.addWidget(comboBox)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = Form()\n form.show()\n sys.exit(app.exec_())\n\n#%% QTableView 활용(2) : QAbstractTableModel /DataFrame /pandas /numpy\n# DataFrame /QAbstractTableModel\n# https://stackoverflow.com/questions/10636024/python-pandas-gui-for-viewing-a-dataframe-or-matrix?lq=1\n\nfrom PyQt5.QtCore import QAbstractTableModel, Qt\nfrom PyQt5.QtWidgets import QApplication, QTableView\nfrom PyQt5 import QtGui\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nclass PandasModel(QAbstractTableModel):\n \"\"\"\n Class to populate a table view with a pandas dataframe\n \"\"\"\n def __init__(self, data, parent=None):\n QAbstractTableModel.__init__(self, parent)\n self._data = data\n\n def 
rowCount(self, parent=None):\n return len(self._data.values)\n\n def columnCount(self, parent=None):\n return self._data.columns.size\n\n def data(self, index, role = Qt.DisplayRole):\n if index.isValid():\n if role == Qt.DisplayRole:\n if(index.column() != 0):\n return str('%.2f'%self._data.values[index.row()][index.column()])\n else:\n return str(self._data.values[index.row()][index.column()])\n return None\n\n def headerData(self, section, orientation, role):\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return self._data.columns[section]\n elif orientation == Qt.Vertical and role == Qt.DisplayRole:\n return str(self._data.index[section])\n return None\n\n def flags(self, index):\n flags = super(self.__class__,self).flags(index)\n flags |= Qt.ItemIsSelectable\n flags |= Qt.ItemIsEnabled\n return flags\n\nif __name__=='__main__':\n \n df = pd.DataFrame()\n df['Field1']=np.arange(0,10,.5)\n df['Field2']=np.arange(0,10,.5)\n \n app = QApplication([])\n table = QTableView()\n table.resize(300, 600)\n mymodel = PandasModel(df)\n table.setModel(mymodel)\n table.show()\n app.exec_()\n \n#%% QTableView 활용(3) : QStandardItemModel /QSortFilterProxyModel /setModel\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.resize(500, 500)\n\n self.Table1()\n self.Table2()\n self.Layout()\n\n def Table1(self):\n self.select_guorpbox = QGroupBox()\n self.select_guorpbox.setTitle(\"Article 1\")\n\n self.rowcount = 10\n self.columncount = 10\n\n self.mainTable1_model = QStandardItemModel(self.rowcount, self.columncount)\n # self.mainTable1_model.setHorizontalHeaderLabels(['a', 'b', 'c', 'd'])\n\n for i in range(self.rowcount):\n for j in range(self.columncount):\n table = QStandardItem(\"TEST[{},{}]\".format(i,j))\n self.mainTable1_model.setItem(i, j, table)\n table.setTextAlignment(Qt.AlignCenter)\n\n self.textFilter = QSortFilterProxyModel()\n self.textFilter.setSourceModel(self.mainTable1_model)\n self.textFilter.setFilterKeyColumn(2)\n\n self.SerchLineEdit = QLineEdit()\n self.SerchLineEdit.textChanged.connect(self.textFilter.setFilterRegExp)\n\n self.mainTable1 = QTableView()\n self.mainTable1.setModel(self.textFilter)\n self.mainTable1.setColumnWidth(1, 150)\n self.mainTable1.setColumnWidth(2, 300)\n self.mainTable1.setEditTriggers(QTableView.NoEditTriggers)\n self.mainTable1.setSelectionBehavior(QTableView.SelectRows)\n # self.mainTable1.setContextMenuPolicy(Qt.CustomContextMenu)\n self.mainTable1.doubleClicked.connect(self.Table1_DoubleClicked)\n # self.mainTable1.customContextMenuRequested.connect(self.table1_CustomContextMenu)\n\n # column auto sort\n # self.mainTable1.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)\n # self.mainTable1.resizeColumnsToContents()\n\n v = QVBoxLayout()\n v.addWidget(self.mainTable1)\n self.select_guorpbox.setLayout(v)\n\n def Table2(self):\n self.serch_groupbox = QGroupBox()\n self.serch_groupbox.setTitle(\"Article 2\")\n lable = QLabel(\"~\")\n lable.setFixedWidth(10)\n lable.setAlignment(Qt.AlignCenter)\n insertbutton = QPushButton(\"insert\")\n self.startdate = QDateEdit()\n self.startdate.setDate(QDate.currentDate())\n self.startdate.setFixedWidth(150)\n self.startdate.setCalendarPopup(True)\n self.enddate = QDateEdit()\n self.enddate.setDate(QDate.currentDate())\n self.enddate.setFixedWidth(150)\n self.enddate.setCalendarPopup(True)\n self.article_serch_button = QPushButton(\"ARTICL SERTCH\")\n 
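        # The two QDateEdit fields above default to today's date and use
        # setCalendarPopup(True) for a drop-down calendar; together with the
        # buttons they form the query row assembled in layout h1 below.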
self.article_serch_button.setFixedWidth(250)\n\n self.mainTable2_model = QStandardItemModel()\n\n self.mainTable2 = QTableView()\n self.mainTable2.setSelectionBehavior(QTableView.SelectRows)\n self.mainTable2.setContextMenuPolicy(Qt.CustomContextMenu)\n self.mainTable2.customContextMenuRequested.connect(self.Table2_CustomContextMenu)\n\n h1 = QHBoxLayout()\n h1.addWidget(insertbutton)\n h1.addWidget(self.startdate)\n h1.addWidget(lable)\n h1.addWidget(self.enddate)\n h1.addWidget(self.article_serch_button)\n h2 = QHBoxLayout()\n h2.addWidget(self.mainTable2)\n\n v = QVBoxLayout()\n v.addLayout(h1)\n v.addLayout(h2)\n\n self.modelListSave = []\n self.codeSave = []\n self.serch_groupbox.setLayout(v)\n\n def Table1_DoubleClicked(self):\n row = []\n select_row = self.mainTable1.selectedIndexes()\n for row_value in range(len(select_row)):\n row.append(self.mainTable1.model().data(select_row[row_value]))\n\n if not self.codeSave:\n self.modelListSave.append(row)\n for i in range(len(self.modelListSave)):\n for j in range(self.columncount):\n self.mainTable2_model.setItem(i, j, QStandardItem(self.modelListSave[i][j]))\n self.mainTable2.setModel(self.mainTable2_model)\n self.codeSave.append(row[0])\n spinBox = QSpinBox()\n mainTable2_ModelIndex = self.mainTable2.model().index(0, 4)\n self.mainTable2.setIndexWidget(mainTable2_ModelIndex, spinBox)\n\n elif row[0] in self.codeSave:\n QMessageBox.about(self, \" \", \"overlap.\")\n\n else:\n self.modelListSave.append(row)\n for i in range(len(self.modelListSave)):\n for j in range(self.columncount):\n self.mainTable2_model.setItem(i, j, QStandardItem(self.modelListSave[i][j]))\n self.mainTable2.setModel(self.mainTable2_model)\n self.codeSave.append(row[0])\n for k in range(5):\n spinBox = QSpinBox()\n mainTable2_ModelIndex = self.mainTable2.model().index(k, 4)\n self.mainTable2.setIndexWidget(mainTable2_ModelIndex, spinBox)\n\n def Table2_CustomContextMenu(self, position):\n menu = QMenu()\n delete = menu.addAction(\"delete\")\n action = menu.exec_(self.mainTable2.mapToGlobal(position))\n indexRow = [index.row() for index in self.mainTable2.selectionModel().selectedRows()]\n if delete == action:\n del self.modelListSave[indexRow[0]]\n self.mainTable2.model().removeRow(indexRow[0], self.mainTable2.rootIndex())\n for i in range(len(self.modelListSave)):\n for j in range(self.columncount):\n self.mainTable2_model.setItem(i, j, QStandardItem(self.modelListSave[i][j]))\n self.mainTable2.setModel(self.mainTable2_model)\n for k in range(5):\n spinBox = QSpinBox()\n mainTable2_ModelIndex = self.mainTable2.model().index(k, 4)\n self.mainTable2.setIndexWidget(mainTable2_ModelIndex, spinBox)\n\n def Layout(self):\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.SerchLineEdit)\n self.vbox.addWidget(self.select_guorpbox)\n self.vbox.addWidget(self.serch_groupbox)\n self.setLayout(self.vbox)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n fream = MainWindow()\n fream.show()\n app.exec_()\n \n#%% QTableView 활용(4) : QAbstractTableModel /setModel\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nclass TableModel(QAbstractTableModel):\n def __init__(self, data):\n super(TableModel, self).__init__()\n self._data = data\n \n def data(self, index, role):\n # print(f'index type={type(index)}, role={role},\\\n # rowCount={self.rowCount(index)}, colCount={self.columnCount(index)}')\n row = index.row()\n col = index.column()\n data = self._data[row][col]\n print(f'row={row}, col={col}, data={data}')\n \n if role == 
Qt.DisplayRole:\n # See below for the nested-list data structure.\n # .row() indexes into the outer list,\n # .column() indexes into the sub-list\n return self._data[row][col]\n\n def rowCount(self, index):\n # The length of the outer list.\n return len(self._data)\n\n def columnCount(self, index):\n # The following takes the first sub-list, and returns\n # the length (only works if all rows are an equal length)\n return len(self._data[0])\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.resize(400,300)\n self.table = QTableView()\n data = [[4, 9, 2],\n [1, 0, 0],\n [3, 5, 0],\n [3, 3, 2],\n [7, 8, 9]]\n self.model = TableModel(data)\n self.table.setModel(self.model)\n self.setCentralWidget(self.table)\n\napp = QApplication(sys.argv)\nwindow=MainWindow()\nwindow.show()\nsys.exit(app.exec())\n# app.exec_()\n\n#%% QTableView(5) : 틀 고정(Frozen Columns) /QAbstractTableModel\n# https://zbaekhk.blogspot.com/2021/02/pyqt5-qtableview-frozen-columns.html\n# This is FreezeTableWidget module\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n \nclass FreezeTableWidget(QTableView):\n def __init__(self, parent = None, fixed_col_count = 2, *args):\n QTableView.__init__(self, parent, *args)\n \n self._fixed_col_count = fixed_col_count\n self.frozenTableView = QTableView(self)\n self.frozenTableView.verticalHeader().hide()\n self.frozenTableView.setFocusPolicy(Qt.NoFocus)\n self.frozenTableView.setStyleSheet('''border: none; background-color: #CCC''')\n self.frozenTableView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.frozenTableView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.frozenTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)\n \n self.viewport().stackUnder(self.frozenTableView)\n self.setShowGrid(True)\n \n hh = self.horizontalHeader()\n hh.setDefaultAlignment(Qt.AlignCenter)\n hh.setStretchLastSection(True)\n \n self.resizeColumnsToContents()\n \n vh = self.verticalHeader()\n vh.setDefaultSectionSize(25)\n vh.setDefaultAlignment(Qt.AlignCenter)\n vh.setVisible(True)\n self.frozenTableView.verticalHeader().setDefaultSectionSize(vh.defaultSectionSize())\n \n self.frozenTableView.show()\n self.updateFrozenTableGeometry()\n \n self.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)\n self.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)\n self.frozenTableView.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)\n \n # connect the headers and scrollbars of both table view's together\n self.horizontalHeader().sectionResized.connect(self.updateSectionWidth)\n self.verticalHeader().sectionResized.connect(self.updateSectionHeight)\n self.frozenTableView.verticalScrollBar().valueChanged.connect(self.verticalScrollBar().setValue)\n self.verticalScrollBar().valueChanged.connect(self.frozenTableView.verticalScrollBar().setValue)\n \n @property\n def fixed_col_count(self):\n return self._fixed_col_count\n \n @fixed_col_count.setter\n def fixed_col_count(self, value):\n self._fixed_col_count = value\n \n def setModel(self, model: QAbstractTableModel):\n QTableView.setModel(self, model)\n self.frozenTableView.setModel(model)\n self.frozenTableView.verticalHeader().hide()\n self.frozenTableView.setFocusPolicy(Qt.NoFocus)\n \n # cols = model.columnCount()\n cols = model.columnCount(model.index)\n # print(model.data(model.index().row(), model.index().column()))\n for col in range(cols):\n if col not in range(self._fixed_col_count):\n self.frozenTableView.setColumnHidden(col, True)\n 
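                # Columns beyond _fixed_col_count are hidden in the overlay
                # view; the frozen ones (else branch below) instead get their
                # widths synced with the main view.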
else:\n self.frozenTableView.setColumnWidth(col, self.columnWidth(col))\n \n def updateSectionWidth(self, logicalIndex, oldSize, newSize):\n if logicalIndex in range(self._fixed_col_count):\n self.frozenTableView.setColumnWidth(logicalIndex, newSize)\n self.updateFrozenTableGeometry()\n \n def updateSectionHeight(self, logicalIndex, oldSize, newSize):\n self.frozenTableView.setRowHeight(logicalIndex, newSize)\n \n def resizeEvent(self, event):\n QTableView.resizeEvent(self, event)\n self.updateFrozenTableGeometry()\n \n def scrollTo(self, index, hint):\n if index.column() >= self._fixed_col_count:\n QTableView.scrollTo(self, index, hint)\n \n def updateFrozenTableGeometry(self):\n frozen_width = sum([self.frozenTableView.columnWidth(col) for col in range(self._fixed_col_count)])\n if self.verticalHeader().isVisible():\n self.frozenTableView.setGeometry(self.verticalHeader().width() + self.frameWidth(),\n self.frameWidth(), frozen_width,\n self.viewport().height() + self.horizontalHeader().height())\n else:\n self.frozenTableView.setGeometry(self.frameWidth(),\n self.frameWidth(), frozen_width,\n self.viewport().height() + self.horizontalHeader().height())\n \n def moveCursor(self, cursorAction, modifiers):\n current = QTableView.moveCursor(self, cursorAction, modifiers)\n x = self.visualRect(current).topLeft().x()\n frozen_width = sum([self.frozenTableView.columnWidth(col) for col in range(self._fixed_col_count)])\n\n if cursorAction == self.MoveLeft:\n if current.column() >= self._fixed_col_count and x < frozen_width:\n new_value = self.horizontalScrollBar().value() + x - frozen_width\n self.horizontalScrollBar().setValue(new_value)\n elif current.column() < self._fixed_col_count:\n current = self.model().index(current.row(), current.column() + 1)\n \n elif cursorAction == self.MoveHome:\n new_value = self.horizontalScrollBar().value() + x - frozen_width\n self.horizontalScrollBar().setValue(new_value)\n current = self.model().index(current.row(), self._fixed_col_count)\n \n return current\n\nclass TableModel(QAbstractTableModel):\n def __init__(self, data):\n super(TableModel, self).__init__()\n self._data = data\n \n def data(self, index, role):\n # print(f'index type={type(index)}, role={role},\\\n # rowCount={self.rowCount(index)}, colCount={self.columnCount(index)}')\n row = index.row()\n col = index.column()\n data = self._data[row][col]\n print(f'row={row}, col={col}, data={data}')\n \n if role == Qt.DisplayRole:\n # See below for the nested-list data structure.\n # .row() indexes into the outer list,\n # .column() indexes into the sub-list\n return self._data[row][col]\n\n def rowCount(self, index):\n # The length of the outer list.\n return len(self._data)\n\n def columnCount(self, index):\n # The following takes the first sub-list, and returns\n # the length (only works if all rows are an equal length)\n return len(self._data[0])\n \nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.resize(400,300)\n data = [[4, 9, 2, 7,7,7],\n [1, 0, 0, 7,7,7],\n [3, 5, 0, 7,7,7],\n [3, 3, 2, 7,7,7],\n [7, 8, 9, 7,7,7]]\n self.table = FreezeTableWidget()\n self.model = TableModel(data)\n self.table.setModel(self.model)\n self.setCentralWidget(self.table)\n\napp = QApplication(sys.argv)\nwindow=MainWindow()\nwindow.show()\nsys.exit(app.exec())\n# app.exec_()\n\n#%% QTableView 활용(6) : QSqlQueryModel /QSqlDatabase /\n\nimport sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtSql import QSqlQuery, QSqlDatabase, 
QSqlQueryModel\n\nclass RowHeightSlider(QSlider):\n def __init__(self, parent=None):\n #QSlider.__init__(self, parent)\n super(RowHeightSlider, self).__init__(parent)\n self.setOrientation(Qt.Horizontal)\n self.setMinimum(4)\n self.setMaximum(72)\n self.setSingleStep(1)\n self.setPageStep(2)\n self.setTickPosition(QSlider.TicksAbove)\n self.setTickInterval(1)\n\nclass Window(QWidget):\n def __init__(self, parent=None):\n #QWidget.__init__(self, parent)\n super(Window, self).__init__(parent)\n self.parentModel = QSqlQueryModel(self)\n self.refreshParent()\n self.parentProxyModel = QSortFilterProxyModel()\n self.parentProxyModel.setSourceModel(self.parentModel)\n self.parentView = QTableView()\n self.parentView.setModel(self.parentProxyModel)\n self.parentView.setSelectionMode(QTableView.SingleSelection)\n self.parentView.setSelectionBehavior(QTableView.SelectRows)\n self.parentView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.parentView.horizontalHeader().setStretchLastSection(True)\n self.parentView.verticalHeader().setVisible(False)\n self.parentView.setSortingEnabled(True)\n self.parentView.horizontalHeader().setSortIndicator(0, Qt.AscendingOrder)\n self.parentView.setAlternatingRowColors(True)\n self.parentView.setShowGrid(False)\n #self.parentView.verticalHeader().setDefaultSectionSize(24)\n self.parentView.setStyleSheet(\"QTableView::item:selected:!active { selection-background-color:#BABABA; }\")\n for i, header in enumerate(self.parentHeaders):\n self.parentModel.setHeaderData(i, Qt.Horizontal, self.parentHeaders[self.parentView.horizontalHeader().visualIndex(i)])\n self.parentView.resizeColumnsToContents()\n\n self.childModel = QSqlQueryModel(self)\n self.refreshChild()\n self.childProxyModel = QSortFilterProxyModel()\n self.childProxyModel.setSourceModel(self.childModel)\n self.childView = QTableView()\n self.childView.setModel(self.childProxyModel)\n self.childView.setSelectionMode(QTableView.SingleSelection)\n self.childView.setSelectionBehavior(QTableView.SelectRows)\n self.childView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.childView.horizontalHeader().setStretchLastSection(True)\n self.childView.verticalHeader().setVisible(False)\n self.childView.setSortingEnabled(True)\n self.childView.horizontalHeader().setSortIndicator(0, Qt.AscendingOrder)\n self.childView.setAlternatingRowColors(True)\n self.childView.setShowGrid(False)\n #self.childView.verticalHeader().setDefaultSectionSize(24)\n self.childView.setStyleSheet(\"QTableView::item:selected:!active { selection-background-color:#BABABA; }\")\n for i, header in enumerate(self.childHeaders):\n self.childModel.setHeaderData(i, Qt.Horizontal, self.childHeaders[self.childView.horizontalHeader().visualIndex(i)])\n self.childView.resizeColumnsToContents()\n\n self.parentSlider = RowHeightSlider()\n self.childSlider = RowHeightSlider()\n\n self.parentRowHeightLabel = QLabel('Row height: 32')\n self.childRowHeightLabel = QLabel('Row height: 32')\n\n parentLayout = QVBoxLayout()\n parentLayout.addWidget(self.parentSlider)\n parentLayout.addWidget(self.parentRowHeightLabel)\n parentLayout.addWidget(self.parentView)\n\n childLayout = QVBoxLayout()\n childLayout.addWidget(self.childSlider)\n childLayout.addWidget(self.childRowHeightLabel)\n childLayout.addWidget(self.childView)\n\n layout = QHBoxLayout()\n layout.addLayout(parentLayout)\n layout.addLayout(childLayout)\n self.setLayout(layout)\n\n self.parentView.selectionModel().currentRowChanged.connect(self.parentChanged)\n 
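        # master/detail wiring: selecting a row in parentView emits
        # currentRowChanged, and parentChanged() re-runs the child query
        # filtered on the selected parent_name.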
self.parentSlider.valueChanged.connect(self.changeParentRowHeight)\n self.childSlider.valueChanged.connect(self.changeChildRowHeight)\n\n self.parentView.setCurrentIndex(self.parentView.model().index(0, 0))\n self.parentView.setFocus()\n\n self.parentSlider.setValue(36)\n self.childSlider.setValue(36)\n\n def refreshParent(self):\n self.parentHeaders = ['Parent']\n queryString = \"SELECT parent.parent_name FROM parent\"\n query = QSqlQuery()\n query.exec(queryString)\n self.parentModel.setQuery(query)\n while self.parentModel.canFetchMore():\n self.parentModel.fetchMore()\n\n def refreshChild(self, parent_name=''):\n #parent_name='parent_name_001'\n self.childHeaders = ['Child']\n queryString = (\"SELECT child.child_name FROM child WHERE child.parent_name = '{parent_name}'\").format(parent_name = parent_name)\n query = QSqlQuery()\n query.exec(queryString)\n self.childModel.setQuery(query)\n while self.childModel.canFetchMore():\n self.childModel.fetchMore()\n\n def parentChanged(self, index):\n if index.isValid():\n index = self.parentProxyModel.mapToSource(index)\n record = self.parentModel.record(index.row())\n temp=record.value(\"parent_name\")\n self.refreshChild(temp)\n #self.childView.scrollToBottom() # if needed\n\n def changeParentRowHeight(self, rowHeight):\n parentVerticalHeader = self.parentView.verticalHeader()\n\n # (any)one of these two rows (or both) has to be uncommented\n parentVerticalHeader.setMinimumSectionSize(rowHeight)\n #parentVerticalHeader.setMaximumSectionSize(rowHeight)\n\n for section in range(parentVerticalHeader.count()):\n parentVerticalHeader.resizeSection(section, rowHeight)\n self.displayParentRowHeightLabel(rowHeight)\n\n def changeChildRowHeight(self, rowHeight):\n childVerticalHeader = self.childView.verticalHeader()\n\n # (any)one of these two rows (or both) has to be uncommented\n childVerticalHeader.setMinimumSectionSize(rowHeight)\n childVerticalHeader.setMaximumSectionSize(rowHeight)\n\n for section in range(childVerticalHeader.count()):\n childVerticalHeader.resizeSection(section, rowHeight)\n self.displayChildRowHeightLabel(rowHeight)\n\n def displayParentRowHeightLabel(self, rowHeight):\n visibleRows = self.parentView.rowAt(self.parentView.height()) - self.parentView.rowAt(0)\n if self.parentView.rowAt(self.parentView.height()) == -1:\n visibleRowsString = str(self.parentView.model().rowCount()) + '+'\n else:\n visibleRowsString = str(visibleRows)\n self.parentRowHeightLabel.setText('Row height: ' + str(rowHeight) + ', Visible rows: ' + visibleRowsString)\n\n def displayChildRowHeightLabel(self, rowHeight):\n visibleRows = self.childView.rowAt(self.childView.height()) - self.childView.rowAt(0)\n if self.childView.rowAt(self.childView.height()) == -1:\n visibleRowsString = str(self.childView.model().rowCount()) + '+'\n else:\n visibleRowsString = str(visibleRows)\n self.childRowHeightLabel.setText('Row height: ' + str(rowHeight) + ', Visible rows: ' + visibleRowsString)\n\n def resizeEvent(self, event):\n # make it resize-friendly\n self.displayParentRowHeightLabel(self.parentSlider.value())\n self.displayChildRowHeightLabel(self.childSlider.value())\n\ndef createFakeData():\n parent_names = []\n #import random\n query = QSqlQuery()\n query.exec(\"CREATE TABLE parent(parent_name TEXT)\")\n for i in range(1, 101):\n parent_num = str(i).zfill(3)\n parent_name = 'parent_name_' + parent_num\n parent_names.append((parent_name, parent_num))\n query.prepare(\"INSERT INTO parent (parent_name) VALUES(:parent_name)\")\n query.bindValue(\":parent_name\", 
parent_name)\n query.exec_()\n query.exec(\"CREATE TABLE child(parent_name TEXT, child_name TEXT)\")\n counter = 1\n for parent_name, parent_num in parent_names:\n for i in range(1, 11):\n child_name = 'child_name_' + parent_num + '_' + str(counter).zfill(5)\n counter += 1\n query.prepare(\"INSERT INTO child (parent_name, child_name) VALUES(:parent_name, :child_name)\")\n query.bindValue(\":parent_name\", parent_name)\n query.bindValue(\":child_name\", child_name)\n query.exec_()\n\ndef createConnection():\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\n #db.setDatabaseName(\"test04.db\")\n db.setDatabaseName(\":memory:\")\n db.open()\n createFakeData()\n\napp = QApplication(sys.argv)\ncreateConnection()\nwindow = Window()\nwindow.resize(800, 600)\nwindow.show()\n#window.showMaximized()\napp.exec()\n\n#%% QTableView 활용(7) : 테이블 출력 및 그래프 그리기 \n# QAbstractTableModel /QTableView /QVXYModelMapper /QChartView /QLineSeries\n\nimport sys\nfrom random import randrange\nfrom PySide6.QtCore import QAbstractTableModel, QModelIndex, QRect, Qt\nfrom PySide6.QtGui import QColor, QPainter\nfrom PySide6.QtWidgets import (QApplication, QGridLayout, QHeaderView, QTableView, QWidget)\nfrom PySide6.QtCharts import QChart, QChartView, QLineSeries, QVXYModelMapper\n\n# pySide6 설치 폴더 설정하기\npkgDir = 'C:\\\\Users\\\\Administrator\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python39\\\\Lib\\\\site-packages\\\\PySide6\\\\plugins'\n# pkgDir = \"C:\\\\ProgramData\\\\Miniconda3\\\\envs\\\\spyder-en\\v\\Lib\\\\site-packages\\\\PySide6\\\\plugins\"\n# pkgDir = 'C:\\ProgramData\\Anaconda3\\Lib\\site-packages\\PySide6\\plugins'\nQApplication.setLibraryPaths([pkgDir])\n\nclass CustomTableModel(QAbstractTableModel):\n def __init__(self):\n super().__init__()\n self.input_data = []\n self.mapping = {}\n self.column_count = 4\n self.row_count = 15\n\n for i in range(self.row_count):\n data_vec = [0] * self.column_count\n for k in range(len(data_vec)):\n if k % 2 == 0:\n data_vec[k] = i * 50 + randrange(30)\n else:\n data_vec[k] = randrange(100)\n self.input_data.append(data_vec)\n\n def rowCount(self, parent = QModelIndex()):\n return len(self.input_data)\n\n def columnCount(self, parent = QModelIndex()):\n return self.column_count\n\n def headerData(self, section, orientation, role):\n if role != Qt.DisplayRole:\n return None\n\n if orientation == Qt.Horizontal:\n if section % 2 == 0:\n return \"x\"\n else:\n return \"y\"\n else:\n return str(section + 1)\n\n def data(self, index, role=Qt.DisplayRole):\n if role == Qt.DisplayRole:\n return self.input_data[index.row()][index.column()]\n elif role == Qt.EditRole:\n return self.input_data[index.row()][index.column()]\n elif role == Qt.BackgroundRole:\n for color, rect in self.mapping.items():\n if rect.contains(index.column(), index.row()):\n return QColor(color)\n # cell not mapped return white color\n return QColor(Qt.white)\n return None\n\n def setData(self, index, value, role=Qt.EditRole):\n if index.isValid() and role == Qt.EditRole:\n self.input_data[index.row()][index.column()] = float(value)\n self.dataChanged.emit(index, index)\n return True\n return False\n\n def flags(self, index):\n return Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable\n\n def add_mapping(self, color, area):\n self.mapping[color] = area\n\n def clear_mapping(self):\n self.mapping = {}\n\nclass TableWidget(QWidget):\n def __init__(self):\n super().__init__()\n\n self.model = CustomTableModel()\n\n self.table_view = QTableView()\n self.table_view.setModel(self.model)\n 
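        # A single CustomTableModel instance backs both this table view and
        # the chart series mapped below, so edits made in the table reach the
        # chart through the model's dataChanged signal.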
self.table_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table_view.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n\n self.chart = QChart()\n self.chart.setAnimationOptions(QChart.AllAnimations)\n\n self.series = QLineSeries()\n self.series.setName(\"Line 1\")\n self.mapper = QVXYModelMapper(self)\n self.mapper.setXColumn(0)\n self.mapper.setYColumn(1)\n self.mapper.setSeries(self.series)\n self.mapper.setModel(self.model)\n self.chart.addSeries(self.series)\n\n # for storing color hex from the series\n seriesColorHex = \"#000000\"\n\n # get the color of the series and use it for showing the mapped area\n self.model.add_mapping(self.series.pen().color().name(), QRect(0, 0, 2, self.model.rowCount()))\n\n # series 2\n self.series = QLineSeries()\n self.series.setName(\"Line 2\")\n\n self.mapper = QVXYModelMapper(self)\n self.mapper.setXColumn(2)\n self.mapper.setYColumn(3)\n self.mapper.setSeries(self.series)\n self.mapper.setModel(self.model)\n self.chart.addSeries(self.series)\n\n # get the color of the series and use it for showing the mapped area\n self.model.add_mapping(self.series.pen().color().name(), QRect(2, 0, 2, self.model.rowCount()))\n\n self.chart.createDefaultAxes()\n self.chart_view = QChartView(self.chart)\n self.chart_view.setRenderHint(QPainter.Antialiasing)\n self.chart_view.setMinimumSize(640, 480)\n\n # create main layout\n self.main_layout = QGridLayout()\n self.main_layout.addWidget(self.table_view, 1, 0)\n self.main_layout.addWidget(self.chart_view, 1, 1)\n self.main_layout.setColumnStretch(1, 1)\n self.main_layout.setColumnStretch(0, 0)\n self.setLayout(self.main_layout)\n\nif __name__ == \"__main__\":\n \n if not QApplication.instance():\n app = QApplication([])\n else:\n app = QApplication.instance() \n\n w = TableWidget()\n w.show()\n sys.exit(app.exec())","repo_name":"gbkim000/pythonProjects","sub_path":"PyQtTest/PyQt_Table02.py","file_name":"PyQt_Table02.py","file_ext":"py","file_size_in_byte":36756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10382707676","text":"import unittest\nimport io\n\nfrom generators import iter_sentinel\n\n\nclass TestIterSentinel(unittest.TestCase):\n def test_read_from_file(self):\n sut = '''\nthere is a cow\nis\nthere \nis \na cow\n'''\n fp = io.StringIO(sut)\n self.assertEqual(iter_sentinel.read_all(fp, 'is\\n', r'^there'),\n ['there is a cow\\n'])\n\n def test_random_till_lucky(self):\n it = iter_sentinel.random_till_lucky(1, 30, 15)\n print(list(it))\n","repo_name":"powergun/pyFunctional","sub_path":"generators/tests/test_iter_sentinel.py","file_name":"test_iter_sentinel.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"972259217","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('monitor', '0030_project_authority_t'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='minion_t',\n name='server_type',\n field=models.CharField(default='nginx', max_length=10, choices=[('nginx', 'nginx'), ('apache', 'apache'), ('vpn', 'vpn'), ('flask', 'flask')]),\n ),\n 
]\n","repo_name":"sadwebing/phx_web_python37","sub_path":"monitor/migrations/0031_minion_t_server_type.py","file_name":"0031_minion_t_server_type.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42544581773","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.preprocessing import RobustScaler\r\nfrom sklearn.metrics import accuracy_score, classification_report\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n\r\n\r\ndata = pd.read_csv('C:\\\\Users\\\\BANTA\\\\Desktop\\\\ML projects\\\\dataSets\\\\winequality-red_decision_tree.csv')\r\n\r\n# Explore the data:\r\nprint(data.head().T)\r\nprint(data.shape) # Check the dimensions of the dataset\r\nprint(data.columns) # Check the column names\r\nprint(data.info()) # Get information about the data types\r\nprint(data.describe()) # Summary statistics of the dataset\r\n\r\nprint(data.isnull().sum()) # Check for missing values\r\ndata = data.fillna(data.mean())\r\n\r\n# Explore the distribution of the target variable\r\nprint(data['quality'].value_counts())\r\n# Assuming your DataFrame is called 'df' and the label column is 'quality'\r\ndata['quality'] = data['quality'].apply(lambda x: 1 if x > 6 else 0)\r\nprint(data)\r\n\r\n# Create histograms for each feature\r\ndata.hist(figsize=(12, 10))\r\nplt.show()\r\n\r\n# Create a correlation matrix\r\ncorr_matrix = data.corr()\r\nsns.heatmap(corr_matrix, annot=True, cmap='coolwarm')\r\nplt.show()\r\n\r\n# Perform exploratory data analysis (EDA)\r\n# Analyze the distribution of the target variable\r\n\r\n# Analyze the relationships between features and the target variable\r\nsns.boxplot(x='quality', y='alcohol', data=data)\r\nplt.show()\r\n\r\n# Splitting into training set & test set using scikit learn's function\r\nX = data.drop('quality', axis=1) # Features\r\ny = data['quality'] # Target variable\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n\r\n# Create a RobustScaler object\r\nscaler = RobustScaler()\r\n# Fit the scaler on the training data\r\nscaler.fit(X_train)\r\n#By fitting the RobustScaler on the training data, it calculates the median and interquartile range (IQR) for each feature. Then, it scales the features using the formula (X - median) / IQR, where X is the original value of the feature. 
This scaling method is more robust to outliers compared to standard scaling techniques like the MinMaxScaler or StandardScaler\r\n# Transform the training and testing data\r\nX_train_scaled = scaler.transform(X_train)\r\nX_test_scaled = scaler.transform(X_test)\r\n\r\n\r\n# Training model on training set\r\nrf_classifier = RandomForestClassifier(random_state=42)\r\nrf_classifier.fit(X_train_scaled, y_train)\r\n\r\n# finding the feature importance\r\nfeature_importances = rf_classifier.feature_importances_\r\n# Create a DataFrame to display feature importances\r\nimportance_df = pd.DataFrame({'Feature': X_train.columns, 'Importance': feature_importances})\r\nimportance_df = importance_df.sort_values('Importance', ascending=False)\r\n\r\n# Plot feature importances\r\nplt.figure(figsize=(10, 6))\r\nsns.barplot(x='Importance', y='Feature', data=importance_df)\r\nplt.title('Feature Importances')\r\nplt.xlabel('Importance')\r\nplt.ylabel('Feature')\r\nplt.show()\r\n\r\n# Print feature importances in descending order\r\nprint(importance_df)\r\n\r\n\r\ny_pred = rf_classifier.predict(X_test_scaled)\r\naccuracy = accuracy_score(y_test, y_pred)\r\n\r\n# Calculate precision\r\nprecision = precision_score(y_test, y_pred)\r\n# Calculate F1-score\r\nf1 = f1_score(y_test, y_pred)\r\n\r\n\r\n\r\n# Calculate recall\r\nrecall = recall_score(y_test, y_pred)\r\nprint(\"Accuracy:\", accuracy)\r\nprint(\"Precision:\", precision)\r\nprint(\"Recall:\", recall)\r\nprint(\"F1-score:\", f1)\r\n#print(classification_report(y_test, y_pred))\r\n\r\n\r\n#define a parameter grid with different values for the hyperparameters n_estimators, max_depth, min_samples_split, and min_samples_leaf\r\nparam_grid = {\r\n    'n_estimators': [100, 200, 300],\r\n    'max_depth': [None, 5, 10],\r\n    'min_samples_split': [2, 5, 10],\r\n    'min_samples_leaf': [1, 2, 4]\r\n}\r\n\r\n# Create the random forest classifier\r\nrf_classifier = RandomForestClassifier(random_state=42)\r\n\r\n#The GridSearchCV object performs a grid search over these parameter values using 5-fold cross-validation (cv=5) and evaluates the models based on accuracy (scoring='accuracy').\r\n# Create the GridSearchCV object\r\ngrid_search = GridSearchCV(estimator=rf_classifier, param_grid=param_grid, cv=5, scoring='accuracy')\r\n\r\n#After fitting the GridSearchCV object on the training data, the best parameters are obtained using best_params_ and the best score using best_score_\r\n# Fit the GridSearchCV object on the training data\r\ngrid_search.fit(X_train, y_train)\r\n\r\n# predict on the test features (not the labels), matching how the grid search was fit\r\ny_pred = grid_search.predict(X_test)\r\naccuracy = accuracy_score(y_test, y_pred)\r\n\r\n\r\n\r\n# Get the best parameters and best score\r\nbest_params = grid_search.best_params_\r\nbest_score = grid_search.best_score_\r\n\r\nprint(\"Best Parameters:\", best_params)\r\nprint(\"Best accuracy Score:\", best_score)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Bismark-afanyu/My-ML-projects-","sub_path":"project 3 Random forest.py","file_name":"project 3 Random forest.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24273332366","text":"from enum import Enum\n\nBOARD_WIDTH = 540\nCIRCLE_RADIUS = 100 # distance from either ball to center\n\nALIGN_TOL = 2\n\n\nclass Action(Enum):\n    \"\"\"\n    Enum for the different controller actions.\n    \"\"\"\n    IDLE = 1\n    SPIN_LEFT = 2\n    SPIN_RIGHT = 3\n    ALIGN_VERTICAL = 4\n    ALIGN_HORISONTAL = 5\n\n\nclass Controller(object):\n    \"\"\"\n    A simplistic controller for the duet.py 
game.\n    \"\"\"\n    def __init__(self):\n\n        self.action = Action.IDLE\n        self.curr_obstacle_set = None\n\n        self.prev_obs_type = None\n\n    def get_controll(self, obstacle_sets, red_pos, blue_pos):\n        \"\"\"\n        Returns the calculated control input.\n        \"\"\"\n\n        self.red_x, self.red_y = red_pos\n        self.blue_x, self.blue_y = blue_pos\n\n        if self.curr_obstacle_set is None:\n            self.curr_obstacle_set = obstacle_sets[0]\n\n        if self.curr_obstacle_avoided():\n            self.curr_obstacle_set = obstacle_sets[1]\n\n        self.print_obstacle_type()\n\n        self.determine_action()\n\n        return self.calculate_controlls()\n\n    def curr_obstacle_avoided(self):\n        \"\"\"\n        Determines if the current obstacle has been avoided.\n        \"\"\"\n        MARGIN = 9\n\n        for obstacle in self.curr_obstacle_set:\n            if max(self.red_y, self.blue_y) + MARGIN > obstacle.get_top():\n                return False\n\n        return True\n\n    def print_obstacle_type(self):\n        \"\"\"\n        Prints the type of the obstacle set currently being avoided whenever\n        switching to a new obstacle set.\n        \"\"\"\n\n        curr_obs_type = self.curr_obstacle_set[0].get_type()\n        if curr_obs_type != self.prev_obs_type:\n            print(\"Currently avoiding: \" + str(curr_obs_type))\n            self.prev_obs_type = curr_obs_type\n\n    def determine_action(self):\n        \"\"\"\n        Determines the next action (align vertical, align horizontal, spin left\n        or spin right) based on the set of obstacle sets on the board.\n        \"\"\"\n\n        MARGIN = 15\n\n        red_collision_course = False\n        blue_collision_course = False\n        for obstacle in self.curr_obstacle_set:\n\n            left, right = obstacle.x_span()\n\n            if self.red_x in range(left - MARGIN, right + 1 + MARGIN):\n                red_collision_course = True\n\n            if self.blue_x in range(left - MARGIN, right + 1 + MARGIN):\n                blue_collision_course = True\n\n        # Must be a double-obstacle\n        if len(self.curr_obstacle_set) == 2:\n\n            if abs(self.red_x - self.blue_x) <= ALIGN_TOL:\n                self.action = Action.IDLE\n                return\n\n            if not(red_collision_course or blue_collision_course):\n                self.action = Action.IDLE\n                return\n\n            self.action = Action.ALIGN_VERTICAL\n            return\n\n        if red_collision_course and blue_collision_course:\n\n            obstacle = self.curr_obstacle_set[0]\n            left, right = obstacle.x_span()\n\n            # Must be a right-obstacle\n            if right >= BOARD_WIDTH//2 + CIRCLE_RADIUS:\n                self.action = Action.SPIN_RIGHT\n                return\n\n            # Must be a left-obstacle\n            if left <= BOARD_WIDTH//2 - CIRCLE_RADIUS:\n                self.action = Action.SPIN_LEFT\n                return\n\n            # Must be a mid-obstacle\n            if abs(self.red_y - self.blue_y) <= ALIGN_TOL:\n                self.action = Action.IDLE\n                return\n\n            self.action = Action.ALIGN_HORISONTAL\n            return\n\n        if red_collision_course:\n\n            if self.action == Action.ALIGN_VERTICAL:\n                self.action = Action.SPIN_LEFT\n                return\n\n            if self.red_y - obstacle.get_bottom() > 2.5*CIRCLE_RADIUS:\n                self.action = Action.IDLE\n                return\n\n            if self.red_x < self.blue_x:\n                self.action = Action.SPIN_LEFT\n                return\n            self.action = Action.SPIN_RIGHT\n            return\n\n        if blue_collision_course:\n\n            if self.action == Action.ALIGN_VERTICAL:\n                self.action = Action.SPIN_LEFT\n                return\n\n            if self.blue_y - obstacle.get_bottom() > 2.5*CIRCLE_RADIUS:\n                self.action = Action.IDLE\n                return\n\n            if self.blue_x < self.red_x:\n                self.action = Action.SPIN_LEFT\n                return\n            self.action = Action.SPIN_RIGHT\n            return\n\n        self.action = Action.IDLE\n\n    def calculate_controlls(self):\n        \"\"\"\n        Calculates appropriate control output based on current desired action.\n        Spin left = -1\n        Stay idle = 0\n        Spin right = 1\n        \"\"\"\n        if self.action == Action.IDLE:\n            return 0\n        if self.action == Action.SPIN_LEFT:\n            return -1\n        if 
self.action == Action.SPIN_RIGHT:\n            return 1\n\n        if self.action == Action.ALIGN_HORISONTAL:\n\n            # Red is to the left and ...\n            if self.red_x < self.blue_x:\n                if self.red_y > self.blue_y:\n                    return 1 # below\n                return -1 # above\n\n            # Blue is to the left and ...\n            if self.blue_y > self.red_y:\n                return 1 # below\n            return -1 # above\n\n        if self.action == Action.ALIGN_VERTICAL:\n\n            # Red is above and ...\n            if self.red_y < self.blue_y:\n                if self.red_x < self.blue_x:\n                    return 1 # left\n                return -1 # right\n\n            # Blue is above and ...\n            if self.blue_x < self.red_x:\n                return 1 # left\n            return -1 # right\n","repo_name":"josefmalmstrom/gym-duet","sub_path":"duet/duet_backend/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38877482172","text":"'''\nQuick sort\n\nPick a pivot value and swap the positions of elements that are larger or smaller than it.\nOne of the most commonly used sorting algorithms in typical situations.\nTogether with merge sort, it is the foundation of most programming languages' sort libraries.\nThe most basic quick sort: use the first element as the pivot.\n\n'''\n\n\narray = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]\n\ndef quick_sort(array):\n    if len(array) <= 1:\n        return array\n    pivot = array[0]\n    tail = array[1:]\n    \n    left_side = [x for x in tail if x <= pivot]\n    right_side = [x for x in tail if x > pivot]\n    \n    return quick_sort(left_side) + [pivot] + quick_sort(right_side)\n\nprint(quick_sort(array))\n\n\n# Practice: one more time!\ndef quick_sort(array):\n    if len(array) <= 1:\n        return array\n    \n    pivot = array[0]\n    tail = array[1:]\n    \n    leftside = [x for x in tail if x <= pivot]\n    rightside = [x for x in tail if x > pivot]\n    \n    return quick_sort(leftside) + [pivot] + quick_sort(rightside)\n    ","repo_name":"woonys/coding__test","sub_path":"손코딩 연습/정렬/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71053018455","text":"import jax\nimport jax.numpy as jnp\nimport haiku as hk\nfrom einops import rearrange\nfrom ... 
import config\n\n\nclass VanillaUNet1D:\n    def __init__(self):\n        cfg = config.model.predictor\n        self.width = cfg.width\n        self.depth = cfg.depth\n\n    def __call__(self, x, t_emb):\n        skip_connections = []\n\n        W = self.width\n        channel_seq = [W * 2**i for i in range(self.depth)]\n        for channels in channel_seq:\n            x = ResBlock(x, t_emb, channels)\n            x = ResBlock(x, t_emb, channels)\n            skip_connections.append(x)\n            x = hk.max_pool(x, 2, 2, padding='SAME')\n\n        x = ResBlock(x, t_emb, 2 * channel_seq[-1])\n        x = ResBlock(x, t_emb, 2 * channel_seq[-1])\n\n        for channels, skip in zip(reversed(channel_seq), reversed(skip_connections)):\n            B, T, C = x.shape\n            B_, T_, C_ = skip.shape\n\n            upsampled = jax.image.resize(x, [B, T_, C], method='bilinear')\n            x = hk.Conv1D(C_, 2, with_bias=False)(upsampled)\n            x = LayerNorm()(x)\n            x = jax.nn.silu(x)\n            x = ResBlock(jnp.concatenate([x, skip], axis=-1), t_emb, channels)\n            x = ResBlock(x, t_emb, channels)\n\n        x = hk.Conv1D(2, 1, with_bias=False, w_init=jnp.zeros)(x)\n        return x\n\n\ndef LayerNorm():\n    return hk.LayerNorm(axis=-1, param_axis=-1,\n                        create_scale=True, create_offset=True)\n\n\ndef ResBlock(x, t_emb, channels):\n    if x.shape[-1] == channels:\n        skip = x\n    else:\n        skip = hk.Linear(channels)(x)\n\n    t_emb = rearrange(hk.Linear(channels, with_bias=False)(t_emb), 'B C -> B 1 C')\n\n    x = LayerNorm()(x)\n    x = jax.nn.silu(x)\n    x = hk.Conv1D(channels, 3, with_bias=False)(x)\n\n    x = x + t_emb\n    \n    x = LayerNorm()(x)\n    x = jax.nn.silu(x)\n    x = hk.Conv1D(channels, 3, with_bias=False)(x)\n\n    return x + skip\n","repo_name":"khdlr/DiffSnake","sub_path":"lib/models/predictors/vanilla_unet.py","file_name":"vanilla_unet.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"701314281","text":"n = int(input(\"Enter a binary number: \"))\n\nsum = 0\ni = 0\nwhile n!=0:\n    rem = n%10\n    sum = sum + rem * pow(2,i)\n    n = int(n/10)\n    i=i+1\n\n# print the decimal value once the digits are exhausted\nprint(sum)\n","repo_name":"PrinceSinghhub/Python-Programming","sub_path":"Python Programming/binaryto n .py","file_name":"binaryto n .py","file_ext":"py","file_size_in_byte":162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"38400249412","text":"from typing import List\n\n\ndef combine(n: int, k: int) -> List[List[int]]:\n    visited: List[bool] = [False for _ in range(n + 1)]\n    route: List[int] = []\n    result: List[List[int]] = []\n\n    def dfs(now: int):\n        if len(route) == k:\n            result.append(route[:])\n            return\n\n        for dest in range(now + 1, n + 1):\n            if visited[dest]:\n                continue\n\n            visited[dest] = True\n            route.append(dest)\n            dfs(dest)\n\n            visited[dest] = False\n            route.pop()\n\n    dfs(0)\n    return result\n\n\nif __name__ == \"__main__\":\n    print(combine(6, 3))\n","repo_name":"rxdcxdrnine/problem-solving","sub_path":"python/leetcode/LEETCODE_77.py","file_name":"LEETCODE_77.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"40254415233","text":"import numpy as np\nimport os\nimport feature\nimport label\n\n\ndef preprocess(sample_size=10, feature_path='data/feature_vectors', label_path='data/labels'):\n\n    print('\\n===== REMOVE OLD FILES =====\\n')\n\n    output_dirs = [\"./data/feature_vectors/\", \"./data/labels/\", 'model/']\n    for path in output_dirs:\n        filelist = [ path + f for f in os.listdir(path) if f.endswith(\".npy\") or f.endswith('.pkl') ]\n        for f in filelist:\n            os.remove(f)\n            print('Deleted: ' + f)\n\n    
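# with the stale .npy/.pkl artifacts removed, regenerate features and labels\n    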
print('\\n===== FEATURE EXTRACTION =====\\n')\n\n    feature.extract(sample_size)\n\n    print('\\n===== BINARIZE LABELS =====\\n')\n\n    label.binarize(sample_size)\n\n    print('\\n===== LOADING FEATURE VECTORS AND LABELS =====\\n')\n\n    X = []\n    y = []\n    i = 0\n    for filename in os.listdir(feature_path):\n        if i == int(sample_size):\n            break\n        if os.path.isfile(label_path + '/' + filename):\n            feature_vector = np.load(feature_path + '/' + filename)\n            label_vector = np.load(label_path + '/' + filename)\n            X.append(feature_vector.tolist())\n            y.append(label_vector.tolist())\n            i = i + 1\n    X = np.matrix(X)\n    y = np.matrix(y)\n\n    return X, y\n","repo_name":"rvanbekkum/tagger","sub_path":"tagger/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71390389335","text":"# imports\n\nprint(\"\"\"\n|******************|\n|   Desafio096     |\n|******************|\n\"\"\")\nprint(\"Rectangular Plot Area\")\nprint()\n\n# Functions\ndef area(l, c):\n    a = l*c\n    print(f'The area of a {l}m x {c}m plot is {a} m²')\n\n# Variables\nlargura = float(input(\"Plot width (m): \"))\ncomprimento = float(input(\"Plot length (m): \"))\narea(largura, comprimento)\n","repo_name":"iamtheluiz/curso_em_video_python","sub_path":"Mundo 3/aula20/desafio096.py","file_name":"desafio096.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9291764794","text":"\r\nfrom ppadb.client import Client as AdbClient\r\nfrom datetime import datetime\r\n\r\nclient = AdbClient(host='127.0.0.1', port=5037)\r\ndevices = client.devices()\r\ndevice1 = devices[0]\r\n\r\ndevice1.shell(\"dumpsys netstats | grep -v 'iface' > sdcard/network_list.txt\")\r\ndevice1.pull(\"sdcard/network_list.txt\", \"c:\\\\users\\\\jsj97\\\\desktop\\\\script_result\\\\network_list.txt\")\r\ndevice1.shell(\"rm sdcard/network_list.txt\")\r\n\r\np = open(\"c:\\\\users\\\\jsj97\\\\desktop\\\\script_result\\\\network_list.txt\", \"r\", encoding='utf-8')\r\nf = open(\"c:\\\\users\\\\jsj97\\\\desktop\\\\script_result\\\\network_info.txt\", \"a\", encoding='utf-8')\r\n\r\n# turn the grepped netstats output into a list of lines\r\nlines=p.readlines()\r\n\r\nselect = []\r\n\r\n# collect only the log lines we need into the select list\r\nfor i in range(len(lines)):\r\n    if \"ident\" in lines[i]:\r\n        if \"networkId\" in lines[i]:\r\n            select.append(lines[i].split(\",\"))\r\n    if \"st=\" in lines[i]:\r\n        select.append(lines[i])\r\n\r\n# collect only the network IDs\r\nonly_networkId=[]\r\nfor i in range(len(select)):\r\n    if \"networkId\" in select[i][2]:\r\n        only_networkId.append(select[i][2])\r\n# deduplicate the network IDs\r\nonly_networkId2=set(only_networkId)\r\nonly_networkId3=list(only_networkId2)\r\n\r\n# write out the network list\r\nf.write(\"Connected networks\\n\")\r\nfor i in range(len(only_networkId3)):\r\n    f.write(\"{}. 
{}\\n\".format(i+1, only_networkId3[i].replace(\"networkId=\", \"\")))\r\n\r\n#세부사항 작성\r\nf.write(\"\\n세부사항\\n\")\r\n\r\ndetail_info =[]\r\nfor i in range(len(select)):\r\n if \"networkId\" in select[i][2]:\r\n detail_info.append(select[i][2])\r\n if \"st=\" in select[i]:\r\n detail_info.append(select[i].split(\" \"))\r\n\r\n#리스트 내 숫자 문자열 정수형으로 변환\r\nmodify=[]\r\nfor i in range(len(detail_info)):\r\n if \"st=\" in detail_info[i][6]:\r\n modify_element=[]\r\n modify_element.append(detail_info[i][6].replace(\"st=\", \"\"))\r\n modify_element.append(detail_info[i][7].replace(\"rb=\", \"\"))\r\n modify_element.append(detail_info[i][8].replace(\"rp=\", \"\"))\r\n modify_element.append(detail_info[i][9].replace(\"tb=\", \"\"))\r\n modify_element.append(detail_info[i][10].replace(\"tp=\", \"\"))\r\n int_modify = list(map(int, modify_element))\r\n modify.append(int_modify)\r\n else:\r\n modify.append(detail_info[i])\r\n\r\nnum=1\r\nfor i in range(len(modify)):\r\n if \"networkId\" in modify[i]:\r\n f.write(\"{}. {}\\n\".format(num, modify[i].replace(\"networkId=\", \"\")))\r\n num+=1\r\n else:\r\n f.write(\"\\t통신시각: {} 다운로드: {}byte 다운로드 패킷: {}개 업로드: {}byte 업로드 패킷: {}개\\n\".format(datetime.fromtimestamp(modify[i][0]), modify[i][1], modify[i][2], modify[i][3], modify[i][4]))","repo_name":"YooDongseon/IWATCHU","sub_path":"scripts/get_netstats.py","file_name":"get_netstats.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73551811093","text":"import os\nimport time\ndef auto_delete():\n path = \"C:\\\\datasets\\\\temp_files\"\n current_time = time.time()\n for file_name in os.listdir(path):\n file_path = os.path.join(path, file_name)\n if os.path.isfile(file_path) and current_time - os.path.getmtime(file_path) > 3600:\n os.remove(file_path)\nauto_delete()","repo_name":"shaharkr/final_project.sise.micro-rna-m-rna-targets-interactions.backend","sub_path":"auto_delete_temp_files.py","file_name":"auto_delete_temp_files.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36767728756","text":"#! C:\\Users\\Xander\\PycharmProjects\\Autopython\\venv\\Scripts\\python.exe\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\nimport os, pprint, traceback\n\nword = 'stuff'\nformatted_string = 'Do you want to do that %(message)s' % {'message': 'fuck you'}\nlogging.debug(word)\nprint(formatted_string)\n\npath = os.path.abspath(os.path.join('exam'))\n# path = os.path.join('exam')\nresult = os.walk(path)\nfor dirpath, subdirs, files in result:\n print('Текущая папка - ' + dirpath)\n for subdir in subdirs:\n print('Подпапка папки: ' + dirpath + ': ' + subdir)\n for file in files:\n print('Файл в папке: ' + dirpath + ': ' + file)\n print('')\n\n","repo_name":"XanderMidov/Autopython","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18402845903","text":"import contextlib\nimport os\nimport tempfile\n\n\n@contextlib.contextmanager\ndef serialized_image(image, format, extension=None):\n \"\"\"Creates an image file from a :class:`PIL.Image.Image`.\n\n This function is a context manager that yields a temporary file name. 
The\n    file is removed when the block is exited.\n\n    :param PIL.Image.Image image: The in-memory image.\n\n    :param str format: The format of the image. This format must be handled by\n        *Pillow*.\n\n    :param extension: The file extension. This defaults to ``format``\n        lowercased.\n    :type extension: str or None\n    \"\"\"\n    fd, path = tempfile.mkstemp('.%s' % (extension or format.lower()))\n    try:\n        with os.fdopen(fd, 'wb') as f:\n            image.save(f, format=format)\n        yield path\n\n    finally:\n        try:\n            os.unlink(path)\n        except:\n            raise\n","repo_name":"moses-palmer/pystray","sub_path":"lib/pystray/_util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":420,"dataset":"github-code","pt":"67"} +{"seq_id":"5042834472","text":"# pip install -r requirements.txt\n# download the Chrome driver and set the path in an environment variable\n# you can refer to this video https://www.youtube.com/watch?v=mxVfa6q-03M&ab_channel=TechGeek \n#for running the script: py zohan.py $File_Name(CSV file)\n\nfrom csv import DictReader\nfrom selenium import webdriver\nimport time\nimport os\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nimport time\nimport sys\nfrom PIL import Image\nimport re \n\nVERSION = 1.0\n\ndef banner(r, g, b, text):\n    return \"\\033[38;2;{};{};{}m{} \\033[38;2;255;255;255m\".format(r, g, b, text)\nprint(banner(0,255,0,\"\"\"\n\n 8P d8P 888 d8 \n P d8P dP\"Y 888 ee e88 88e d88 dP\"Y \n d8P d C88b 888 88b d888 888b d88888 C88b \n d8P d8 Y88D 888 888 Y888 888P 888 Y88D \nd8P d88 d,dP 888 888 \"88 88\" 888 d,dP \n\n\n\n📡A simple malicious IP scanning tool.\n#########################################################\n# Project: https://github.com/zohan205/Zshots #\n# Creator: Zohan_404 #\n# Version: {} #\n#########################################################\n\n\"\"\").format(VERSION))\n\n\n\npotta = \"(^127\\.0\\.0\\.1)|(^192\\.168)|(^10\\.)|(^172\\.1[6-9])|(^172\\.2[0-9])|(^172\\.3[0-1])\"\n\n\ndef take_full_page_screenshot(ip):\n\n    #Install chrome driver\n    chrome_driver_path = ChromeDriverManager().install()\n    service = Service(chrome_driver_path)\n    service.start() \n\n    #setup chrome options\n    options = webdriver.ChromeOptions()\n    options.add_argument('--headless')\n    options.add_argument('--incognito')\n    options.add_argument('--start-maximized') \n    options.add_argument('--disable-gpu')\n    driver = webdriver.Chrome(chrome_driver_path, options=options)\n\n    #open url and wait for the page to load\n    driver.get(\"https://www.abuseipdb.com/check/\"+ip)\n    time.sleep(2)\n    \n    #find the element with longest height on page\n    element = driver.find_element(By.TAG_NAME, 'body')\n    total_height = element.size[\"height\"]+1000\n    #set the window dimensions\n    driver.set_window_size(1920, total_height) \n\n    #save screenshot\n    # driver.save_screenshot(\"screenshot.png\")\n    driver.get_screenshot_as_file(os.getcwd()+\"/temp/\"+ip+\"ab.png\")\n    im = Image.open(os.getcwd()+\"/temp/\"+ip+\"ab.png\")\n    im = im.crop( (400, 150, 950, 880) )\n    im.save(os.getcwd()+\"/final/\"+ip+\"ab.png\")\n\n\n\ndriver = webdriver.Chrome('C:/Users/GourabSarkar/ChromeDriver/chromedriver.exe')\n\n# sys.argv always contains the script name, so a missing file argument means len < 2\nif(len(sys.argv) < 2):\n    print(\"Please enter a file name!!!!!\")\n    driver.quit()\nelif(len(sys.argv) > 2):\n    print(\"Please enter only one file name!!!\")\n    driver.quit()\nelse:\n    try:\n        fileName = 
sys.argv[1]\n with open(fileName, 'r') as read_obj: #we have to give the downloaded csv file name\n csv_dict_reader = DictReader(read_obj)\n i = 0\n for row in csv_dict_reader:\n # id = row['id']\n ip = row['attacker']\n if (re.search(potta,str(ip))):\n print(\"Private ip found\")\n\n else:\n driver.execute_script(\"window.open()\")\n driver.switch_to.window(driver.window_handles[i+1])\n url = r\"https://www.abuseipdb.com/check/\"+ip\n driver.get(url)\n source = driver.page_source\n\n\n if \"not found\" in source:\n print('not found')\n time.sleep(4)\n i = i + 1\n else:\n take_full_page_screenshot(ip)\n driver.get(\"https://www.virustotal.com/gui/ip-address/\"+ip)\n driver.save_screenshot(os.getcwd()+\"/final/\"+ip+\"vt.png\")\n i = i + 1\n\n driver.quit()\n\n except:\n print(\"Please enter the correct file name!!!\")\n driver.quit()\n","repo_name":"zohan205/Zshots","sub_path":"zshots.py","file_name":"zshots.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7081362202","text":"def merge(intervals):\n n = len(intervals)\n intervals.sort()\n i = 1\n if n == 1:\n return intervals\n # if n == 2 and (intervals[0][0] > intervals[1][0] or intervals[1][0] > intervals[0][1]):\n # return intervals\n res = [intervals[0]]\n for i in range(1,n):\n check = res[-1]\n if check[0] <= intervals[i][0] <= check[1]:\n res.pop()\n res.append([check[0],max(intervals[i][1],check[1])])\n else:\n res.append(intervals[i])\n return res\n\n\n\n# intervals = [[1,3],[2,6],[8,10],[15,18]]\nintervals = [[0 ,2], [1 ,4], [3, 5]]\nprint(merge(intervals))\n\n\n# 0 2, 1 4, 3 5\n\n\n# i = 0, j = 1","repo_name":"ngtuetam/dsa-notes","sub_path":"arrays/merge_intervals.py","file_name":"merge_intervals.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41435449036","text":"class Solution:\n def wordPattern(self, pattern: str, s: str) -> bool:\n s_list = s.split()\n\n if len(s_list) != len(pattern): return False\n\n\n a , b = {}, {}\n for i in range(len(pattern)):\n if pattern[i] in a:\n if a[pattern[i]] != s_list[i]: return False\n else:\n a[pattern[i]] = s_list[i]\n\n if s_list[i] in b:\n if b[s_list[i]] != pattern[i]: return False\n else:\n b[s_list[i]] = pattern[i]\n\n return True\n\n\ndef main():\n sol = Solution()\n print(sol.wordPattern(\"abba\", s = \"dog cat cat\"))\n print(sol.wordPattern(\"abba\", s = \"dog cat cat dog\"))\n\nif __name__ == \"__main__\": main()","repo_name":"samek571/leetcode-600","sub_path":"290. Word Pattern.py","file_name":"290. 
Word Pattern.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26286810811","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport sys\n\n# Note: since this is specifically indented for file names,\n# injectivity and the use of standard characters (for example, avoiding\n# quotation marks) is prioritized oer conformance to standards.\n\nru_dict = {\n 'А': 'A',\n 'Б': 'B',\n 'В': 'V',\n 'Г': 'G',\n 'Д': 'D',\n 'Е': 'Je',\n 'Ё': 'Jo',\n 'Ж': 'Zh',\n 'З': 'Z',\n 'И': 'I',\n 'Й': 'Jj',\n 'К': 'K',\n 'Л': 'L',\n 'М': 'M',\n 'Н': 'N',\n 'О': 'O',\n 'П': 'P',\n 'Р': 'R',\n 'С': 'S',\n 'Т': 'T',\n 'У': 'U',\n 'Ф': 'F',\n 'Х': 'X',\n 'Ц': 'C',\n 'Ч': 'Ch',\n 'Ш': 'Sh',\n 'Щ': 'Shh',\n 'Ъ': 'Jy',\n 'Ы': 'Y',\n 'Ь': 'Ji',\n 'Э': 'E',\n 'Ю': 'Ju',\n 'Я': 'Ja',\n}\n\ndef cyr2lat(s):\n return ''.join(ru_dict[c] if 1039 < ord(c) <= 1071 else\n ru_dict[c.upper()].lower() if 1071 < ord(c) < 1104 else\n c\n for c in s)\n\ndef main():\n parser = argparse.ArgumentParser(description='Romanize (a) file name(s).')\n parser.add_argument('name', metavar='N', type=str)\n args = parser.parse_args()\n\n cwd = os.getcwd()\n name = args.name\n # Remove spaces.\n name = name.replace(' ', '_')\n\n name = cyr2lat(name)\n\n if os.path.exists(os.path.join(cwd, args.name)):\n os.rename(os.path.join(cwd, args.name),\n os.path.join(cwd, name))\n\nif __name__ == '__main__':\n main()\n","repo_name":"xivarri/romanize","sub_path":"romanize.py","file_name":"romanize.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"914233158","text":"# importing necessary libraries\nimport argparse\nimport pickle\nfrom pathlib import Path\n\n# import pandas as pd\n# from sklearn.model_selection import train_test_split\nfrom somemodel.models import SomeModel\n\n\n# def train_model(data: pd.DataFrame):\n \n# X = data.data\n# y = data.target\n \n# # dividing X, y into train and test data\n# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)\n# model = SomeModel()\n# model.train(X_train, y_train)\n# model.score(X_test, y_test)\n# print(\"\\n*******************\")\n# print(\"Model's results\")\n# print(model.get_accuracy())\n# print(\"*******************\\n\")\n# return model.get_model()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--training-dataframe', type=str, dest='training_dataframe_path', help='path to the input dataframe for training')\n parser.add_argument('--trained-model', type=str, dest='trained_model_path', help='path to the trained model')\n args = parser.parse_args()\n\n training_dataframe_path = args.training_dataframe_path\n trained_model_path = args.trained_model_path\n\n input_pickle_path = Path(training_dataframe_path) / 'prepared_data.pkl'\n output_pickle_path = Path(trained_model_path) / 'trained_model.pkl'\n\n with open(input_pickle_path, \"rb\") as f:\n (df_data, labels) = pickle.load(f) \n\n model = SomeModel()\n trained_model = model.train(df_data, labels)\n\n with open(output_pickle_path, \"wb\") as f:\n pickle.dump(model.get_model(), f)\n","repo_name":"PAOLT/aml_pipeline","sub_path":"pipeline/pipe_scripts/train_stage/train_entry_script.py","file_name":"train_entry_script.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"28004459273","text":"'''\n\nN개의 자연수와 자연수 M이 주어졌을 때, 아래 조건을 만족하는 길이가 M인 수열을 모두 구하는 프로그램을 작성하시오. N개의 자연수는 모두 다른 수이다.\n\nN개의 자연수 중에서 M개를 고른 수열\n같은 수를 여러 번 골라도 된다.\n고른 수열은 비내림차순이어야 한다.\n길이가 K인 수열 A가 A1 ≤ A2 ≤ ... ≤ AK-1 ≤ AK를 만족하면, 비내림차순이라고 한다.\n\n4 2\n9 8 7 1\n\n1 1\n1 7\n1 8\n1 9\n7 7\n7 8\n7 9\n8 8\n8 9\n9 9\n\n'''\n\nN, M = map(int, input().split())\nL = list(map(int, input().split()))\n\nL.sort()\nout = []\n\ndef solve(depth, idx, N, M):\n if depth == M:\n print(' '.join(map(str, out)))\n return\n for i in range(idx, N):\n out.append(L[i])\n solve(depth+1, i, N, M) # 여기 i + 1을 안해서 같은 것을 허용! idx를 계속 늘리니깐\n out.pop()\n\nsolve(0, 0, N, M)\n\n\n'''\nn, m = map(int, input().split())\nk = sorted(list(map(int, input().split()))) \nans = [] \ndef solve(depth, idx, n, m): \n if depth == m: \n print(' '.join(map(str, ans))) \n return \n for i in range(idx, n): \n ans.append(k[i]) \n solve(depth+1, i, n, m) \n ans.pop() \n\nsolve(0, 0, n, m)\n\n\n'''","repo_name":"joojeehwan/algo","sub_path":"pythonProject/백준/15657.py","file_name":"15657.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43259091360","text":"# python3\n\nclass Query:\n\n def __init__(self, query):\n self.type = query[0]\n if self.type == 'check':\n self.ind = int(query[1])\n else:\n self.s = query[1]\n\n\nclass QueryProcessor:\n _multiplier = 263\n _prime = 1000000007\n\n def __init__(self, bucket_count):\n self.bucket_count = bucket_count\n # store strings in a dictionary with hash values as keys\n self.elems = {}\n\n def _hash_func(self, s):\n ans = 0\n for c in reversed(s):\n ans = (ans * self._multiplier + ord(c)) % self._prime\n return ans % self.bucket_count\n\n def write_search_result(self, was_found):\n print('yes' if was_found else 'no')\n\n def write_chain(self, chain):\n print(' '.join(chain))\n\n def read_query(self):\n return Query(input().split())\n\n def process_query(self, query):\n if query.type == \"check\":\n # check if hash number exists in dictionary keys\n if query.ind in self.elems:\n for elem in reversed(self.elems[query.ind]): # print in reverse\n print(elem, end=\" \")\n print(\"\")\n else: # print blank line\n print(\"\")\n else:\n if query.type == 'find':\n hash_num = self._hash_func(query.s)\n # check if hash number exists in dictionary keys\n if hash_num in self.elems:\n # check if string is in chain\n if query.s in self.elems[hash_num]:\n print(\"yes\")\n else:\n print(\"no\")\n else: # print no\n print(\"no\")\n elif query.type == 'add':\n hash_num = self._hash_func(query.s)\n # check if hash number exists in dictionary keys\n if hash_num in self.elems:\n # append to chain of hash number key if element does not exist\n if query.s not in self.elems[hash_num]:\n self.elems[hash_num].append(query.s)\n else: # else, create new key and list as value\n self.elems[hash_num] = [query.s]\n else: # delete element\n hash_num = self._hash_func(query.s)\n if hash_num in self.elems:\n for i in range(len(self.elems[hash_num])):\n if self.elems[hash_num][i] == query.s:\n self.elems[hash_num].pop(i)\n break\n # delete key if chain is empty\n if len(self.elems[hash_num]) == 0:\n del self.elems[hash_num]\n\n def process_queries(self):\n n = int(input())\n for i in range(n):\n self.process_query(self.read_query())\n\nif __name__ == '__main__':\n bucket_count = int(input())\n proc = QueryProcessor(bucket_count)\n 
proc.process_queries()\n","repo_name":"KennethSee/UCSDx-ALGS201x","sub_path":"hash_chains.py","file_name":"hash_chains.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"35143643811","text":"#!/usr/bin/python\nimport requests, os, time, wikipedia\nfrom bs4 import BeautifulSoup\noldsong=''\ni=1\nos.system(\"clear\")\nwhile True:\n r = requests.get(\"http://freeuk2.listen2myradio.com:4491/\")\n soup = BeautifulSoup(r.text, 'html.parser')\n title=soup.findAll('td')\n songname=title[20:]\n song=str(songname)\n song=song[24:-6]\n if oldsong != song:\n print(str(i)+\". \"+song)\n os.system('echo \"'+song+'\" >title.txt')\n oldsong = song\n i+=1\n time.sleep(5)\n","repo_name":"chrislarry/chrislarry.old.github.io","sub_path":"radio/get_song_name.py","file_name":"get_song_name.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39754443686","text":"import cv2\n\nvideoCapture = cv2.VideoCapture(1)\n\nfps = 30\nsize = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\nprint(\"fps:\", fps)\nprint(\"size:\", size)\n\n# videoWriter = cv2.VideoWriter('MyOutputVid.avi', cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)\nvideoWriter = cv2.VideoWriter('MyOutputVid.mp4', cv2.VideoWriter_fourcc(*'avc1'), fps, size)\n\nnumFramesRemaining = 10 * fps - 1\nsuccess, frame = videoCapture.read()\nwhile success and numFramesRemaining > 0:\n videoWriter.write(frame)\n success, frame = videoCapture.read()\n numFramesRemaining -= 1\n","repo_name":"BoyHsu/My_Learning_OpenCV","sub_path":"chapter_2/VideoCapture/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"75160735573","text":"#!/usr/bin/env python\n\n# Revised from https://github.com/histfitter/histfitter/blob/master/scripts/harvestToContours.py\nimport argparse\nimport copy\nimport math\nimport sys\n\nimport numpy as np\nimport scipy.interpolate\nimport scipy.stats\nfrom matplotlib.figure import Figure\nfrom shapely.geometry import Polygon\n\n\ndef make_from_args(args):\n ## If I need to use scipy, please let me have scipy. 
I'll even help you out!\n if args.useUpperLimit:\n\n args.noSig = True\n args.level = 1.0\n\n if args.ignoreUncertainty:\n listOfContours = [\"upperLimit\", \"expectedUpperLimit\"]\n else:\n listOfContours = [\n \"upperLimit\",\n \"expectedUpperLimit\",\n \"expectedUpperLimitPlus1Sig\",\n \"expectedUpperLimitPlus2Sig\",\n \"expectedUpperLimitMinus1Sig\",\n \"expectedUpperLimitMinus2Sig\",\n ]\n listOfContours_OneSigma = [\n \"expectedUpperLimitPlus1Sig\",\n \"expectedUpperLimitMinus1Sig\",\n ]\n listOfContours_TwoSigma = [\n \"expectedUpperLimitPlus2Sig\",\n \"expectedUpperLimitMinus2Sig\",\n ]\n expectedContour = \"expectedUpperLimit\"\n observedContour = \"upperLimit\"\n else:\n if args.ignoreUncertainty:\n listOfContours = [\"CLs\", \"CLsexp\", \"upperLimit\", \"expectedUpperLimit\"]\n else:\n listOfContours = [\n \"CLs\",\n \"CLsexp\",\n \"clsu1s\",\n \"clsu2s\",\n \"clsd1s\",\n \"clsd2s\",\n \"upperLimit\",\n \"expectedUpperLimit\",\n ]\n listOfContours_OneSigma = [\"clsu1s\", \"clsd1s\"]\n listOfContours_TwoSigma = [\"clsu2s\", \"clsd2s\"]\n expectedContour = \"CLsexp\"\n observedContour = \"CLs\"\n return (\n listOfContours,\n expectedContour,\n observedContour,\n listOfContours_OneSigma,\n listOfContours_TwoSigma,\n )\n\n\ndef main(args, inputData):\n \"\"\"Main function for driving the whole thing...\"\"\"\n\n # Print out the settings\n for setting in dir(args):\n if not setting[0] == \"_\":\n pass\n\n return processInputFile(args=args, inputData=inputData, label=\"\")\n\n\ndef processInputFile(args, inputData, label=\"\"):\n \"\"\"Do actual processing of a given input file\"\"\"\n\n (\n listOfContours,\n expectedContour,\n observedContour,\n listOfContours_OneSigma,\n listOfContours_TwoSigma,\n ) = make_from_args(args)\n\n ############################################################\n # Step 1 - Read in harvest list in either text or json format and dump it into a dictionary\n\n resultsDict = harvestToDict(inputJSON=inputData, args=args)\n\n if len(resultsDict) < 3:\n print(\n \">>> WARNING: You have fewer than three valid model points in your input. I can't interpolate that in 2D! You've given me %d valid points!\"\n % (len(resultsDict))\n )\n return -1\n\n if label != \"_UL\":\n truncateSignificances(args=args, modelDict=resultsDict, sigmax=args.sigmax)\n\n ############################################################\n # Step 1.5 - If there's a function for a kinematically forbidden region, add zeros to dictionary\n\n if \"none\" not in args.forbiddenFunction.lower():\n resultsDict = addValuesToDict(\n args=args,\n inputDict=resultsDict,\n function=args.forbiddenFunction,\n numberOfPoints=100,\n value=\"mirror\",\n )\n\n ############################################################\n # Step 2 - Interpolate the fit results\n\n outputArrays = interpolateSurface(\n args=args,\n modelDict=resultsDict,\n interpolationFunction=args.interpolation,\n useROOT=args.useROOT,\n outputSurface=True if label == \"\" else False,\n )\n\n ############################################################\n # Step 4 - Make pretty curves (and bands) or try to...\n\n outputs = {}\n\n if not args.ignoreUncertainty and label == \"\":\n if (\n len(outputArrays[listOfContours_OneSigma[0]]) == 0\n and len(outputArrays[listOfContours_OneSigma[1]]) > 0\n ):\n print(\">>>\")\n print(\">>> WARNING: You don't have +1 sigma sensitivity,\")\n print(\">>> ... but you do have -1 sigma reach. Making a \")\n print(\">>> ... 
+/-1 sigma band from only the -1 side.\")\n print(\">>> \")\n\n for icurve, (curve1, curve2) in enumerate(\n zip(\n outputArrays[listOfContours_OneSigma[0]],\n outputArrays[listOfContours_OneSigma[1]],\n )\n ):\n band_1s = createBandFromContours(args, contour1=curve1, contour2=curve2)\n outputs[\"Band_1s_%d\" % icurve] = band_1s\n for icurve, (curve1, curve2) in enumerate(\n zip(\n outputArrays[listOfContours_TwoSigma[0]],\n outputArrays[listOfContours_TwoSigma[1]],\n )\n ):\n band_2s = createBandFromContours(args, contour1=curve1, contour2=curve2)\n outputs[\"Band_2s_%d\" % icurve] = band_2s\n\n for icurve, obsCurve in enumerate(outputArrays[observedContour]):\n outputs[f\"Obs_{icurve}{label}\"] = obsCurve\n for icurve, expCurve in enumerate(outputArrays[expectedContour]):\n outputs[f\"Exp_{icurve}{label}\"] = expCurve\n\n return outputs\n\n\ndef harvestToDict(args, inputJSON, tmpListOfContours=None):\n listOfContours, expectedContour, observedContour, _, _ = make_from_args(args)\n tmpListOfContours = tmpListOfContours or listOfContours\n\n \"\"\"This parses the input file into a dictionary object for simpler handling\"\"\"\n\n modelDict = {}\n\n # for sample in inputJSON:\n for sample in inputJSON.values():\n try:\n sampleParams = (\n float(sample[args.xVariable]),\n float(sample[args.yVariable]),\n )\n except Exception:\n print(\n \">>> ... Error: %s or %s doesn't exist as an entry in the input file\"\n % (args.xVariable, args.yVariable)\n )\n print(\n \">>> ... Use cmd line options -x and -y to point to variables that exist in the input\"\n )\n print(\">>> Available variables are listed below:\")\n print(\">>> \")\n print(\">>> \" + \"\\n>>> \".join(sample.keys()))\n sys.exit(1)\n\n sampleParamsList = list(sampleParams)\n if args.logX:\n sampleParamsList[0] = math.log10(sampleParamsList[0])\n if args.logY:\n sampleParamsList[1] = math.log10(sampleParamsList[1])\n sampleParams = tuple(sampleParamsList)\n\n if not math.isinf(float(sample[expectedContour])):\n tmpList = [\n float(sample[f\"{x}\"])\n if (args.noSig or x in [\"upperLimit\", \"expectedUpperLimit\"])\n else scipy.stats.norm.ppf(1 - float(sample[f\"{x}\"]))\n for x in tmpListOfContours\n ]\n\n modelDict[sampleParams] = dict(zip(tmpListOfContours, tmpList))\n modelDict[sampleParams][\"fID\"] = sample[\"fID\"] if \"fID\" in sample else \"\"\n elif sampleParams not in modelDict:\n modelDict[sampleParams] = dict(\n zip(tmpListOfContours, [args.sigmax for _ in tmpListOfContours])\n )\n\n modelDict[sampleParams][\"fID\"] = \"\"\n if args.debug:\n print(\n sampleParams,\n float(sample[observedContour]),\n float(sample[expectedContour])\n if args.noSig\n else scipy.stats.norm.ppf(1 - float(sample[observedContour])),\n )\n\n return modelDict\n\n\ndef addValuesToDict(args, inputDict, function, numberOfPoints=100, value=0):\n \"\"\"This takes in a TF1 and dots zero points along that function, and adds to the dict\"\"\"\n listOfContours, _, _, _, _ = make_from_args(args)\n\n tmpListOfXValues = [entry[0] for entry in inputDict.keys()]\n lowerLimit = min(tmpListOfXValues)\n upperLimit = max(tmpListOfXValues)\n\n def forbiddenFunction_Lambda(x):\n return eval(args.forbiddenFunction)\n if value == \"mirror\":\n from scipy.spatial.distance import cdist\n\n def closest_point(pt, others):\n distances = cdist(pt, others)\n return others[distances.argmin()]\n\n def rotate(origin, point, angle=math.pi):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) 
+ math.cos(angle) * (py - oy)\n return qx, qy\n\n inputDictCopy = copy.deepcopy(inputDict)\n\n forbiddenLineArray = []\n for xValue in [\n lowerLimit + x * (upperLimit - lowerLimit) / float(numberOfPoints * 100)\n for x in range(numberOfPoints * 100)\n ]:\n forbiddenLineArray.append((xValue, forbiddenFunction_Lambda(xValue)))\n\n # now to loop over entries in the inputDict. rotate them about this closest point on the forbidden line\n for signalPoint in inputDict:\n closestPointOnLine = list(\n closest_point(np.array([signalPoint]), np.array(forbiddenLineArray))\n )\n inputDictCopy[tuple(closestPointOnLine)] = dict(\n zip(listOfContours, [1 for x in listOfContours])\n )\n fakeMirroredSignalPoint = rotate(closestPointOnLine, signalPoint)\n tmpDict = copy.deepcopy(inputDictCopy[signalPoint])\n for key in tmpDict:\n if isinstance(tmpDict[key], (int, float)):\n tmpDict[key] *= -1 * np.sign(tmpDict[key])\n inputDictCopy[fakeMirroredSignalPoint] = tmpDict\n\n inputDict = copy.deepcopy(inputDictCopy)\n\n else:\n for xValue in [\n lowerLimit + x * (upperLimit - lowerLimit) / float(numberOfPoints)\n for x in range(numberOfPoints)\n ]:\n inputDict[(xValue, forbiddenFunction_Lambda(xValue))] = dict(\n zip(listOfContours, [value for x in listOfContours])\n )\n\n return inputDict\n\n\ndef interpolateSurface(\n args,\n modelDict={},\n interpolationFunction=\"linear\",\n useROOT=False,\n outputSurface=False,\n outputSurfaceTGraph=False,\n tmpListOfContours=None,\n):\n \"\"\"The actual interpolation\"\"\"\n\n listOfContours, expectedContour, observedContour, _, _ = make_from_args(args)\n tmpListOfContours = tmpListOfContours or listOfContours\n\n modelPoints = modelDict.keys()\n modelPointsValues = modelDict.values()\n x0 = list(list(zip(*modelPoints))[0])\n y0 = list(list(zip(*modelPoints))[1])\n\n zValues = {} # entry x points\n x = {} # entry x points\n y = {} # entry x points\n\n array_data = {}\n for whichContour in tmpListOfContours:\n zValues[whichContour] = [\n tmpEntry[whichContour] for tmpEntry in modelPointsValues\n ]\n x[whichContour] = list(x0)\n y[whichContour] = list(y0)\n\n # remove inf point in each entry\n for whichContour in tmpListOfContours:\n\n while any(\n math.isinf(tmp) or math.isnan(tmp) for tmp in zValues[whichContour]\n ): # np.isinf( zValues[whichContour] ).any():\n myindex = [\n math.isinf(tmp) or math.isnan(tmp) for tmp in zValues[whichContour]\n ].index(True)\n if args.debug:\n print(\n \">>> ... Remove Inf or NaN at i=%d x=%d y=%d\"\n % (myindex, x[whichContour][myindex], y[whichContour][myindex])\n )\n x[whichContour].pop(myindex)\n y[whichContour].pop(myindex)\n zValues[whichContour].pop(myindex)\n if any(math.isinf(tmp) or math.isnan(tmp) for tmp in zValues[whichContour]):\n print(\n f\">>> ... Still infs or nans in {whichContour}!! This is a problem... 
Exiting.\"\n )\n\n sys.exit(0)\n\n for whichContour in tmpListOfContours:\n\n # Convert everything to numpy arrays\n xArray = np.array(x[whichContour])\n yArray = np.array(y[whichContour])\n zArray = np.array(zValues[whichContour])\n\n # this scaling here equalizes the axes such that using a radial basis function makes sense!\n yScaling = np.max(xArray) / np.max(yArray) if np.max(yArray) else 1\n yArray = yArray * yScaling\n\n # Creating some linspaces for interpolation\n xlinspace = np.linspace(\n xArray.min() if args.xMin is None else args.xMin,\n xArray.max() if args.xMax is None else args.xMax,\n args.xResolution,\n )\n\n ylinspace = np.linspace(\n yArray.min() if args.yMin is None else args.yMin,\n yArray.max() if args.yMax is None else args.yMax,\n args.yResolution,\n )\n\n # Creating meshgrid for interpolation\n xymeshgrid = np.meshgrid(xlinspace, ylinspace)\n\n # Optional smoothing given by -s option\n smoothingFactor = 0\n if args.smoothing:\n smoothingFactor = float(args.smoothing)\n\n try:\n # Actual interpolation done by RBF\n if args.interpolationEpsilon:\n rbf = scipy.interpolate.Rbf(\n xArray,\n yArray,\n zArray,\n function=interpolationFunction,\n smooth=smoothingFactor,\n epsilon=args.interpolationEpsilon,\n )\n else:\n rbf = scipy.interpolate.Rbf(\n xArray,\n yArray,\n zArray,\n function=interpolationFunction,\n smooth=smoothingFactor,\n )\n except Exception:\n print(\n \">>> Interpolation failing!!! Check to make sure there are no NANs or double defined points in your input JSON!\"\n )\n print(\">>> Printing points we're trying to interpolate (x,y,z) triplets:\")\n\n print(sorted(zip(xArray, yArray, zArray), key=lambda x: x[0] * x[1]))\n sys.exit(1)\n\n ZI = rbf(xymeshgrid[0], xymeshgrid[1])\n\n # Undo the scaling from above to get back to original units\n xymeshgrid[1] = xymeshgrid[1] / yScaling\n\n # Turn this surface into contours!\n contourList = get_contour_points(xymeshgrid[0], xymeshgrid[1], ZI, args.level)\n\n array_data[whichContour] = []\n for contour in contourList:\n adata = np.array([contour[0].flatten(\"C\"), contour[1].flatten(\"C\")]).T\n if (len(adata)) > 2 and Polygon(adata).area > args.areaThreshold:\n array_data[whichContour].append(adata)\n\n # Let's sort output graphs by area so that the band construction later is more likely to get the right pairs\n array_data[whichContour] = sorted(\n array_data[whichContour], key=lambda g: Polygon(g).area, reverse=True\n )\n\n return array_data\n\n\ndef truncateSignificances(args, modelDict, sigmax=5):\n \"\"\"Truncates significance to sigmax option\"\"\"\n listOfContours, _, _, _, _ = make_from_args(args)\n for model in modelDict:\n for thing in listOfContours:\n if modelDict[model][thing] > sigmax:\n modelDict[model][thing] = sigmax\n\n return\n\n\ndef get_contour_points(xi, yi, zi, level):\n fig = Figure()\n ax = fig.subplots()\n\n c = ax.contour(xi, yi, zi, [level])\n contour = c.collections[0]\n\n # contour_list = []\n # for i in range(len(contour.get_paths())):\n # vertices = contour.get_paths()[i].vertices\n # contour_list.append(vertices.T)\n\n return [\n contour.get_paths()[path_idx].vertices.T\n for path_idx in range(len(contour.get_paths()))\n ]\n\n\ndef createBandFromContours(args, contour1, contour2=None):\n\n output_data = []\n\n if contour2 is None:\n raise RuntimeError\n else:\n output_data += contour2.tolist()\n\n if args.closedBands:\n output_data.append(contour2[0])\n\n output_data += list(reversed(contour1.tolist()))\n\n if args.closedBands:\n output_data.append(contour1[-1])\n\n 
output_data.append(contour2[0])\n\n return np.array(output_data)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--inputFile\", \"-i\", type=str, help=\"input harvest file\", default=\"test.json\"\n )\n parser.add_argument(\n \"--outputFile\",\n \"-o\",\n type=str,\n help=\"output ROOT file\",\n default=\"outputGraphs.root\",\n )\n parser.add_argument(\n \"--interpolation\",\n type=str,\n help=\"type of interpolation for scipy (RBF). e.g. linear, cubic, gaussian, multiquadric.\",\n default=\"multiquadric\",\n )\n parser.add_argument(\n \"--interpolationEpsilon\",\n type=float,\n help=\"scipy (RBF) epsilon parameter\",\n default=0,\n )\n parser.add_argument(\n \"--level\",\n type=float,\n help=\"contour level output. Default to 95%% CL\",\n default=1.64485362695,\n )\n parser.add_argument(\n \"--useROOT\",\n \"-r\",\n help=\"use the root interpolation engine instead of mpl\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"--debug\",\n \"-d\",\n help=\"print extra debugging info\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"--sigmax\", type=float, help=\"maximum significance in sigmas\", default=5.0\n )\n parser.add_argument(\"--xVariable\", \"-x\", type=str)\n parser.add_argument(\"--yVariable\", \"-y\", type=str)\n parser.add_argument(\"--xResolution\", type=int, default=100)\n parser.add_argument(\"--yResolution\", type=int, default=100)\n\n parser.add_argument(\"--xMin\", type=float, default=None)\n parser.add_argument(\"--yMin\", type=float, default=None)\n parser.add_argument(\"--xMax\", type=float, default=None)\n parser.add_argument(\"--yMax\", type=float, default=None)\n\n parser.add_argument(\n \"--logX\", help=\"use log10 of x variable\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--logY\", help=\"use log10 of y variable\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--forbiddenFunction\",\n \"-l\",\n type=str,\n help=\"\"\"a ROOT TF1 definition for a forbidden line e.g. kinematically forbidden regions. (defaults to diagonal, i.e. -l 'x'). Set to 'None' to turn off.\"\"\",\n default=\"x\",\n )\n parser.add_argument(\n \"--ignoreUncertainty\",\n \"-u\",\n help=\"\"\"Don't care about uncertainty bands!\"\"\",\n action=\"store_true\",\n default=False,\n )\n\n parser.add_argument(\n \"--areaThreshold\",\n \"-a\",\n type=float,\n help=\"Throw away contours with areas less than threshold\",\n default=0,\n )\n parser.add_argument(\n \"--smoothing\",\n \"-s\",\n type=str,\n help=\"smoothing option. For ROOT, use {k5a, k5b, k3a}. For scipy, uses smoothing from RBF.\",\n default=\"0.1\",\n )\n parser.add_argument(\n \"--noSig\",\n \"-n\",\n help=\"don't convert CLs to significance -- don't use this option unless you know what you're doing!\",\n action=\"store_true\",\n default=False,\n )\n\n parser.add_argument(\n \"--nominalLabel\",\n help=\"keyword in filename to look for nominal sig XS\",\n type=str,\n default=\"Nominal\",\n )\n\n parser.add_argument(\n \"--useUpperLimit\",\n help=\"use upper limit information instead of CLs. 
Automatically turns off significance transform.\",\n        action=\"store_true\",\n        default=False,\n    )\n\n    parser.add_argument(\n        \"--closedBands\",\n        \"-b\",\n        help=\"if contours are closed shapes in this space, this can help with stitching issues if you're seeing weird effects\",\n        action=\"store_true\",\n        default=False,\n    )\n\n    args = parser.parse_args()\n    main(args)\n","repo_name":"iris-hep/analysis-grand-challenge","sub_path":"workshops/agctools2022/statistical-inference/exclusion/src/exclusion/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":20209,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"67"} +{"seq_id":"7454150332","text":"# Task 1\nmy_list = [150, 100, 50, 60, 46, 967, 464, 1, 77, 345]\nfor index in my_list:\n    if index > 100:\n        print(index)\n\n#######################################################\n\n# Task 2\nmy_list = [150, 100, 50, 60, 46, 967, 464, 1, 77, 345]\nmy_result = []\nfor index in my_list:\n    if index > 100:\n        my_result.append(index)\n\nprint(my_result)\n\n#######################################################\n\n# Task 3\nmy_list = [50, 150, 100]\nif len(my_list) < 2:\n    my_list.append(0)\nelse:\n    my_list.append(my_list[len(my_list) - 1] + my_list[len(my_list) - 2])\n\n#######################################################\n\n# Task 4\nvalue = input('Enter a number with a decimal point: ')\n\ntry:\n    print(float(value) ** -1)\nexcept ValueError:\n    print('That was not a number with a decimal point')\nexcept ZeroDivisionError:\n    print('Zero will not work)')\n\n#######################################################\n\n# Task 5\nmy_string = '0123456789'\nmy_list = []\nfor symbol_1 in my_string:\n    for symbol_2 in my_string:\n        my_number = symbol_1 + symbol_2\n        my_list.append(int(my_number))\n\nprint(my_list)","repo_name":"xVilandx/my_hillel","sub_path":"HW4.py","file_name":"HW4.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8497910676","text":"import asyncio\nimport sys\nimport shlex\n\nfrom asyncio.subprocess import PIPE, STDOUT\n\n\nfrom plankton import settings\nimport logging\n\nlogger = logging.getLogger('plankton.wkhtmltopdf.utils.exec_wkhtmltopdf')\n\n\nclass WkhtmlToPdfFailure(Exception):\n    pass\n\n\ndef wkhtmltopdf_args_mapping(data):\n    \"\"\"\n    fix our names to wkhtmltopdf's args\n    \"\"\"\n    mapping = {\n        'cookies': 'cookie',\n        'custom-headers': 'custom-header',\n        'run-scripts': 'run-script'\n    }\n\n    return {mapping.get(k, k): v for k, v in data.items()}\n\n\nasync def exec_wkhtmltopdf(data):\n    data = wkhtmltopdf_args_mapping(data)\n    # security things\n    data['disable-local-file-access'] = True\n\n    # Prepare inline options parameters for command\n\n    command_args = [settings.WKHTMLTOPDF_CMD]\n    command_args.extend(_options_to_args(data.get('options', settings.WKHTMLTOPDF_DEFAULT_OPTIONS)))\n    # Output to STDOUT\n    command_args.extend([shlex.quote(data['page']), '-'])\n\n    proc = await asyncio.create_subprocess_shell(\n        ' '.join(command_args),\n        stdin=PIPE, stdout=PIPE, stderr=STDOUT\n    )\n\n    command_out, errs = await proc.communicate()\n\n    if b'%PDF-' in command_out:\n        debug_info, pdf_content = command_out.split(b'%PDF-', 1)\n    else:\n        error_msg = command_out.decode('utf-8').replace('\\n', ' ')\n        raise WkhtmlToPdfFailure(error_msg)\n\n    pdf_content = b'%PDF-' + pdf_content\n\n    logger.info(debug_info)\n\n    return pdf_content\n\n\ndef _options_to_args(options):\n    flags = []\n\n    for k, v in 
options.items():\n        if v in [False, None]:\n            continue\n\n        if isinstance(v, list):\n            # wkhtmltopdf can accept many parameters like --cookie\n            for item in v:\n                flags.extend(_options_to_args({k: item}))\n        else:\n            flags.append('--{}'.format(k))\n\n            if v is not True:\n                flags.append(shlex.quote(v))\n\n    return flags\n","repo_name":"django-stars/plankton","sub_path":"plankton/wkhtmltopdf/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
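# (A hypothetical call to _options_to_args above -- the option names are
# illustrative wkhtmltopdf flags, not values taken from this module:)
_options_to_args({
    "quiet": True,                          # bare switch: True emits just '--quiet'
    "margin-top": "10mm",                   # flag followed by its (shell-quoted) value
    "cookie": ["session=abc", "lang=en"],   # lists repeat the flag once per item
    "grayscale": None,                      # False/None entries are skipped entirely
})
# -> ['--quiet', '--margin-top', '10mm', '--cookie', 'session=abc', '--cookie', 'lang=en']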
+{"seq_id":"26376726822","text":"\n\"\"\"Reading and writing .ini configuration files with Python\"\"\"\nimport configparser\n\n\n\"\"\"Reading the file\"\"\"\nconf = configparser.ConfigParser()\nconf.read(\"./read&write.ini\")\n\n# Return the section titles\nselections = conf.sections()\nprint(selections)\n\n# Return the keys\noptions = conf.options(\"title2\")\nprint(options)\n\n# Return the value of the given key under the given title\nvalue = conf.get(\"title1\", \"key2\")\nprint(value)\n\n# Return all keys and values under a title\nprint(conf.items(\"title1\")) # returns a list of tuples\nprint(dict(conf.items(\"title1\"))) # returns a dict\n\n# Check whether the given title or key exists\nprint(conf.has_section(\"title1\"))\nprint(conf.has_option(\"title1\", \"key1\"))\n\n\"\"\"Writing the ini file\"\"\"\ncon = configparser.ConfigParser()\ncon.read(\"./read&write.ini\")\n\n# Add a title, then add new keys and values under it\ncon.add_section(\"title3\")\ncon.set(\"title3\", \"key1\", \"1111\")\ncon.set(\"title3\", \"key3\", \"3333\")\n\n# Remove a key and its value under the given title\ncon.remove_option(\"title3\", \"key3\")\n\n# Remove the given title\ncon.remove_section(\"title1\")\n\n# Return all keys and values under a title\nprint(con.items(\"title3\")) # returns a list of tuples\nprint(dict(con.items(\"title3\"))) # returns a dict\n\n# Save the modifications made above\nwith open(\"read&write.ini\", \"w+\") as f:\n    con.write(f)\n","repo_name":"xinjf/StudyNote","sub_path":"PythonStudy/APIAuto_study/read&write_ini.py","file_name":"read&write_ini.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24963381987","text":"import pika\nimport sys\n\n\nseverity = sys.argv[1] if len(sys.argv) > 1 else 'info'\nprint(f'{severity}:{sys.argv[1:]}:{len(sys.argv)}')\nmessage = ' '.join(sys.argv[2:]) or 'info: Hello World!'\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n# channel.exchange_declare(exchange='logs', exchange_type='fanout')\nchannel.exchange_declare(exchange='direct_logs', exchange_type='direct')\n# channel.basic_publish(exchange='logs', routing_key='', body=message)\nchannel.basic_publish(exchange='direct_logs', routing_key=severity, body=message)\n\nprint(f' [x] Sent {severity}:{message}')\n\nconnection.close()\n","repo_name":"mikelhsia/Python","sub_path":"rabbitMQ_practice/emit_logs.py","file_name":"emit_logs.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12324091430","text":"#- Debugging a script\n\n'''\nDebugging comes in handy when you have made a mistake somewhere.\n\nThe highlighting shows which line of code is currently being executed.\n\n- in the console menu, go to the Debug list and tick the debugger\n- select which information Debug Control should display:\n    Global - the global variables the script can see\n    Source - ties what is about to happen to the program's source code\n    Stack - the call stack\n    Locals - local variables\n\n - clicking the Go button runs the program as if the debugger were not enabled\n - clicking the Step button steps into the external function being used; Out steps back out of the function\n - clicking the Out button leaves the function we entered with the Step button\n - clicking the Over button in the Debug Control window executes the highlighted line\n - clicking the button \n\n- note: before pressing Over, think about what the script should do next\n\n- in this example you can see what happens to the cargo variable\n\n'''\n#the script will show us which parcels to pack into the box of 90 to use as much space as possible\ncargo = [40,20,4,5,30,8,2,7,3,19,32,40,20,35,15,32,9]\ncargo.sort()\ncargo.reverse()\nprint(\"The cargo list is:\",cargo)\n\n\nboxCapacity = 90\nbox = [] #- declaring an empty list\ni = 0\n\n#while sum(box) + cargo[i] < boxCapacity and i < len(cargo):\nwhile (i < len(cargo)) and ((boxCapacity - sum(box)) >= min(cargo)): # min(cargo) is the smallest element in the list\n    if (boxCapacity - sum(box)) >= cargo[i]: #- condition for taking the next largest value from the list\n        box.append(cargo[i])\n    i+=1 #- make sure the counter sits correctly inside the loop\n\nprint(\"The collected items sum is:\",sum(box)) #- the sum function adds up the elements passed as a parameter\nprint(\"The elements are:\",box)\n\n\n\n\n\n'''\n#- variable declaration \n#- display\n#- results\n'''\n #- \n #- \n #- \n #- \n #- \n #- \n #- \n #- \n #- \n #- \n","repo_name":"DamianSystem/Udemy---Python-dla-pocz-tkuj-cych---course-files","sub_path":"7_87_Debuggowanie skryptu.py","file_name":"7_87_Debuggowanie skryptu.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25085526683","text":"#!/usr/bin/env python3\n\nimport csv\nimport hashlib\nimport json\nimport operator\nimport subprocess\n\nimport pyfastaq\n\n\ndef get_bed_and_md5(url, outfile):\n    subprocess.check_output(f\"wget -O {outfile} {url}\", shell=True)\n    with open(outfile, \"rb\") as f:\n        return hashlib.sha256(f.read()).hexdigest()\n\n\ndef file_to_set(infile):\n    with open(infile) as f:\n        return set([x.rstrip() for x in f if len(x.strip()) > 0])\n\n\nref = {}\npyfastaq.tasks.file_to_dict(\"../MN908947.fasta\", ref)\nassert len(ref) == 1\nref = list(ref.values())[0]\namplicons = {}\n\noutprefix = \"covid-artic-v5.0-5.3.2_400\"\n\nscheme2bed_url = {\n    \"v5.0.0\": \"https://raw.githubusercontent.com/quick-lab/SARS-CoV-2/main/400/v5.0.0_400/SARS-CoV-2_v5.0.0_400.primer.bed\",\n    \"v5.1.0\": \"https://raw.githubusercontent.com/quick-lab/SARS-CoV-2/main/400/v5.1.0_400/SARS-CoV-2_v5.1.0_400.primer.bed\",\n    \"v5.2.0\": \"https://raw.githubusercontent.com/quick-lab/SARS-CoV-2/main/400/v5.2.0_400/SARS-CoV-2_v5.2.0_400.primer.bed\",\n    \"v5.3.2\": \"https://raw.githubusercontent.com/quick-lab/SARS-CoV-2/main/400/v5.3.2_400/SARs-CoV-2_v5.3.2_400.primer.bed\",\n}\n\nscheme2bed = {k: f\"covid-artic-{k}_400.bed\" for k in scheme2bed_url}\nscheme2bed_sha = {k: get_bed_and_md5(scheme2bed_url[k], scheme2bed[k]) for k in scheme2bed}\nscheme2bed_lines = {k: file_to_set(v) for k, v in scheme2bed.items()}\n\n\nlines2scheme = {}\nfor scheme, lines in scheme2bed_lines.items():\n    for line in lines:\n        if line not in lines2scheme:\n            lines2scheme[line] = []\n        lines2scheme[line].append(scheme)\n\n\njson_file = f\"{outprefix}.json\"\nlines_out = []\n\n\nfor line, schemes in lines2scheme.items():\n    fields = line.rstrip().split(\"\\t\")\n    ref_name, start, end, primer_name, pool, strand, seq = fields\n    start = int(start)\n    end = int(end)\n\n    # The coords of each primer are in the BED file, but we'll check they\n    # match the reference sequence at 
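# (Aside on the greedy box-filling exercise above -- a compact version of the
# same idea: sort descending, take any item that still fits. Illustrative only.)
def fill_box(cargo, capacity):
    box = []
    for item in sorted(cargo, reverse=True):
        if sum(box) + item <= capacity:
            box.append(item)
    return box

assert sum(fill_box([40, 20, 4, 5, 30, 8, 2, 7], 90)) <= 90

# (Note on the primer loop below: the guard `assert "alt" not in primer_name.upper()`
# can never fire, because an upper-cased string cannot contain the lowercase
# substring "alt"; comparing against "ALT" is presumably what was intended.)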
the expected position anyway\n if primer_name == \"SARS-CoV-2_400_84_RIGHT_2\":\n assert seq == \"TGTTCAACACCARTGTCTGTACTC\"\n matches = ref.search(seq.replace(\"R\", \"G\"))\n else:\n matches = ref.search(seq)\n assert len(matches) == 1\n match = matches[0]\n assert match[0] == start\n assert match[1] == strand\n\n\n assert \"alt\" not in primer_name.upper()\n assert primer_name.startswith(\"SARS-CoV-2_400_\")\n amplicon_name, l_or_r, primer_number = primer_name.rsplit(\"_\", maxsplit=2)\n\n assert l_or_r.split(\"_\")[0] in [\"LEFT\", \"RIGHT\"]\n if amplicon_name not in amplicons:\n amplicons[amplicon_name] = {\"left_primers\": [], \"right_primers\": []}\n d = {\n \"original_data\": fields,\n \"in_schemes\": schemes,\n \"start\": start,\n \"end\": end - 1,\n }\n amplicons[amplicon_name][l_or_r.lower() + \"_primers\"].append(d)\n lines_out.append((\n amplicon_name,\n primer_name,\n l_or_r.lower(),\n seq,\n start,\n ))\n\nlines_out.sort(key=lambda x: int(x[4]))\n\nwith open(f\"../{outprefix}.vwf.tsv\", \"w\") as f_out:\n print(\"Amplicon_name\",\n \"Primer_name\",\n \"Left_or_right\",\n \"Sequence\",\n \"Position\",\n sep=\"\\t\",\n file=f_out\n )\n for line in lines_out:\n print(*line, sep=\"\\t\", file=f_out)\n\n\nfor amplicon_name, d in amplicons.items():\n d[\"start\"] = min([x[\"start\"] for x in d[\"left_primers\"]])\n d[\"end\"] = max([x[\"end\"] for x in d[\"right_primers\"]])\n d[\"left_primer_end\"] = max([x[\"end\"] for x in d[\"left_primers\"]])\n d[\"right_primer_start\"] = min([x[\"start\"] for x in d[\"right_primers\"]])\n\njson_data = {\n \"name\": outprefix,\n \"source_files\": scheme2bed_url,\n \"source_files_sha256\": scheme2bed_sha,\n \"reference_accession\": \"MN908947.3\",\n \"amplicons\": amplicons,\n}\n\nwith open(json_file, \"w\") as f:\n json.dump(json_data, f, indent=2)\n\n","repo_name":"iqbal-lab-org/viridian_workflow","sub_path":"viridian/amplicon_scheme_data/Scheme_processing/covid-artic-v5.0-5.3.2_400.make_json.py","file_name":"covid-artic-v5.0-5.3.2_400.make_json.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"33582004663","text":"from django.urls import path, re_path\nfrom . 
import views\n\napp_name = 'editor'\n\nurlpatterns = [\n    path('uploadImage/', views.upload_images, name=\"upload_image\"),\n    path('create/content/', views.create_content, name=\"create_content\"),\n    re_path('content/detail/(?P<pk>[0-9]*)/$', views.display_content, name=\"display_content\"),\n]\n","repo_name":"blackhair3000/my_blog","sub_path":"editor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43185374740","text":"import os\nimport random\nimport itertools\nimport time\nimport numpy as np\nimport scipy.sparse\n\nfrom scipy.sparse import csr_matrix\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.svm import SVC\n\n\n# Returns training and validation labels and data\ndef load_data(fold_combination):\n    ''' fold_combination: a list of integers indicating the fold '''\n    fold_combination = sorted(fold_combination)\n\n    lsi_folder_path = os.path.join(\"tf-idf_data\", \"lsi\")\n    fold_str = ''.join([\"_\" + str(fold) for fold in fold_combination])\n\n    lsi_train_path = os.path.join(lsi_folder_path, \"lsi_fold\" + fold_str + \".npy\")\n    lsi_valid_path = os.path.join(lsi_folder_path, \"lsi_valid\" + fold_str + \".npy\")\n\n    lsi_train = np.load(lsi_train_path)\n    lsi_valid = np.load(lsi_valid_path)\n\n    train_labels = lsi_train[:, 0]\n    train_labels = np.reshape(train_labels, (-1, 1))\n\n    valid_labels = lsi_valid[:, 0]\n    valid_labels = np.reshape(valid_labels, (-1, 1))\n\n    lsi_train = lsi_train[:, 1:]\n    lsi_valid = lsi_valid[:, 1:]\n\n    return train_labels, lsi_train, valid_labels, lsi_valid\n\n\ndef get_precision(rates):\n    tp = rates[0]\n    fp = rates[1]\n    return tp / (tp + fp + 1)\n\n\ndef get_recall(rates):\n    tp = rates[0]\n    fn = rates[3]\n    return tp / (tp + fn + 1)\n\n\ndef get_specificity(rates):\n    tn = rates[2]\n    fp = rates[1]\n    return tn / (fp + tn + 1)\n\n\ndef get_fscore(precision, recall):\n    return 2 * (precision * recall/(precision+recall + 1))\n\n\ndef get_accuracy(rates):\n    tp = rates[0]\n    fp = rates[1]\n    tn = rates[2]\n    fn = rates[3]\n    return (tp + tn) / (tp + fp + tn + fn)\n\n\nif __name__ == \"__main__\":\n    all_folds = [1, 2, 3, 4, 5]\n    all_folds = set(all_folds)\n    lambd = 1.5e-4\n\n    sv_folder_path = os.path.join(\"tf-idf_data\", \"sv\")\n    if not os.path.exists(sv_folder_path):\n        os.makedirs(sv_folder_path)\n\n    # element is tuple (tp, fp, tn, fn)\n    linear_rates = []\n    rbf_rates = []\n\n    for fold in itertools.combinations(all_folds, 4):\n        fold = list(fold)\n        fold_str = \"_\".join(map(str, fold))\n        train_labels, lsi_train, valid_labels, lsi_valid = load_data(fold)\n\n        linear_SVC = SVC(kernel=\"linear\", max_iter=6000)\n        rbf_SVC = SVC(kernel=\"rbf\", max_iter=6000)\n\n        linear_SVC.fit(lsi_train, train_labels.ravel())\n        rbf_SVC.fit(lsi_train, train_labels.ravel())\n\n        linear_predictions = linear_SVC.predict(lsi_valid)\n        rbf_predictions = rbf_SVC.predict(lsi_valid)\n\n        tn, fp, fn, tp = confusion_matrix(valid_labels, linear_predictions).ravel()\n        linear_rates.append((tp, fp, tn, fn))\n        precision = tp / (tp + fp)\n        recall = tp / (tp + fn)\n        specificity = tn / (tn + fp)\n        fscore = 2 * ((precision * recall) / (precision + recall))\n        accuracy = (tp + tn) / (tp + tn + fp + fn)\n        print(\"precision:\", precision,\n              \"recall:\", recall,\n              \"specificity:\", specificity,\n              \"fscore:\", fscore,\n              \"accuracy:\", accuracy)\n        print(\"Linear TP/FP/TN/FN rate for fold\", fold_str, \":\", tp, fp, tn, fn)\n        print(\"--\" * 
50)\n tn, fp, fn, tp = confusion_matrix(valid_labels, rbf_predictions).ravel()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n specificity = tn / (tn + fp)\n fscore = 2 * ((precision * recall) / (precision + recall))\n accuracy = (tp + tn) / (tp + tn + fp + fn)\n print(\"precision:\", precision,\n \"recall:\", recall,\n \"specificity:\", specificity,\n \"fscore:\", fscore,\n \"accuracy:\", accuracy)\n rbf_rates.append((tp, fp, tn, fn))\n print(\"RBF TP/FP/TN/FN rate for fold\", fold_str, \":\", tp, fp, tn, fn)\n print(\"--\" * 50)\n\n agg_precision = []\n agg_recall = []\n agg_specificity = []\n agg_fscore = []\n agg_accuracy = []\n for rates in linear_rates:\n precision = get_precision(rates)\n recall = get_recall(rates)\n specificity = get_specificity(rates)\n fscore = get_fscore(precision, recall)\n accuracy = get_accuracy(rates)\n\n agg_precision.append(precision)\n agg_recall.append(recall)\n agg_specificity.append(specificity)\n agg_fscore.append(fscore)\n agg_accuracy.append(accuracy)\n\n avg_precision = sum(agg_precision) / len(agg_precision)\n avg_recall = sum(agg_recall) / len(agg_recall)\n avg_specificity = sum(agg_specificity) / len(agg_specificity)\n avg_fscore = sum(agg_fscore) / len(agg_fscore)\n avg_accuracy = sum(agg_accuracy) / len(agg_accuracy)\n\n print(avg_precision, avg_recall, avg_specificity, avg_fscore, avg_accuracy)\n print(\"--\" * 50)\n\n agg_precision = []\n agg_recall = []\n agg_specificity = []\n agg_fscore = []\n agg_accuracy = []\n for rates in rbf_rates:\n precision = get_precision(rates)\n recall = get_recall(rates)\n specificity = get_specificity(rates)\n fscore = get_fscore(precision, recall)\n accuracy = get_accuracy(rates)\n\n agg_precision.append(precision)\n agg_recall.append(recall)\n agg_specificity.append(specificity)\n agg_fscore.append(fscore)\n agg_accuracy.append(accuracy)\n\n avg_precision = sum(agg_precision) / len(agg_precision)\n avg_recall = sum(agg_recall) / len(agg_recall)\n avg_specificity = sum(agg_specificity) / len(agg_specificity)\n avg_fscore = sum(agg_fscore) / len(agg_fscore)\n avg_accuracy = sum(agg_accuracy) / len(agg_accuracy)\n\n print(avg_precision, avg_recall, avg_specificity, avg_fscore, avg_accuracy)\n print(\"--\" * 50)\n\n\n","repo_name":"autopear/CS235-Project","sub_path":"SVM/train_skl_svm.py","file_name":"train_skl_svm.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6407872214","text":"import torch\nfrom torch import nn\nfrom transformers import AutoModel\nbert_model = AutoModel.from_pretrained('KB/bert-base-swedish-cased')\n\n\nclass GRUclassifier(nn.Module):\n def __init__(self, vocab_size, input_size, hidden_size, output_size, dev, pretrained):\n super().__init__()\n self.dev = dev\n self.num_layers = 1\n self.hidden_size = hidden_size\n if pretrained is not None:\n print('Using pretrained FastText weights')\n weight = pretrained\n self.embed = nn.Embedding.from_pretrained(weight)\n if pretrained is None:\n print('Using PyTorch embeddings layer')\n self.embed = nn.Embedding(vocab_size, input_size)\n self.gru = nn.GRU(768, hidden_size, num_layers=1, batch_first=True)\n self.output_size = output_size\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n output, _ = bert_model(x)\n # output = self.embed(x)\n h = self.init_hidden(len(x))\n output, hidden = self.gru(output, h)\n output = output.contiguous().view(-1, self.hidden_size * len(x[0])) # -1 just infers the size\n linear_layer = 
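# (On the fold-metric aggregation in train_skl_svm.py above: the linear and RBF
# blocks are identical, so a small helper could compute all five averages in one
# pass. A refactoring sketch, not part of the original script; it reuses the
# script's own +1-smoothed metric helpers as-is.)
def average_metrics(rates_list):
    per_fold = []
    for rates in rates_list:
        precision = get_precision(rates)
        recall = get_recall(rates)
        per_fold.append((precision, recall, get_specificity(rates),
                         get_fscore(precision, recall), get_accuracy(rates)))
    # transpose and average each metric column across folds
    return [sum(column) / len(column) for column in zip(*per_fold)]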
nn.Linear(len(x[0]) * self.hidden_size, self.output_size)\n output = linear_layer(output)\n return self.sigmoid(output)\n\n def set_dev(self, dev):\n self.dev = dev\n\n def init_hidden(self, x_len):\n return torch.zeros(self.num_layers, x_len, self.hidden_size).to(self.dev)\n","repo_name":"1noll1/LinsProjekt","sub_path":"GRU_model.py","file_name":"GRU_model.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33200646187","text":"import math\nfrom functools import reduce\nx = int(input())\n \ndef compute_gcd(x, y):\n while(y):\n x, y = y, x % y\n return x\n# This function computes LCM\ndef compute_lcm(x, y):\n lcm = (x*y)//compute_gcd(x,y)\n return lcm\ndef factors(n): \n return set(reduce(list.__add__, \n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\n \nres=None\nfList=list(factors(x))\nfList.sort()\nn=len(fList)\n#print(n//2)\n\n#print(fList)\nans=[1,x]\ns,e=n//2-1,n//2\nIter=True\nwhile Iter:\n #print(s,e)\n if fList[s]==fList[e] or s==0 or e==n-1:\n Iter=False\n break\n else:\n for i in range(s,e):\n for j in range(i+1,e+1):\n #print(fList[i],fList[j])\n if compute_lcm(fList[i],fList[j])==x:\n s,e=i,j\n Iter=False\n break\n if Iter:\n s-=1\n e+=1\nprint(fList[s],fList[e])\n \n","repo_name":"marcus-aurelianus/codeforce","sub_path":"Round613/betterfadi.py","file_name":"betterfadi.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"37386638437","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 1. Create a vector with values ranging from 15 to 55 and print all values except the first and last.import numpy as np\n\n# In[3]:\n\n\n\nimport numpy as np\nv = np.arange(15,55)\nprint(\"Original vector:\")\nprint(v)\nprint(\"After:\")\nprint(v[1:-1])\n\n\n# # 2. Create a 3X4 array using np.full().\n# \n\n# In[6]:\n\n\na = np.arange(10,22).reshape((3, 4))\nprint(a)\n\n\n# # 3. Create a 3x3 matrix filled with values from 10 to 18. Use np.arange() and np.reshape().\n\n# In[7]:\n\n\nx = np.arange(2, 11).reshape(3,3)\nprint(x)\n\n\n# # 4. Create a 5x5 zero matrix with elements on the main diagonal equal to 1, 2, 3, 4, 5 using np.diag().\n\n# In[14]:\n\n\nnp.zeros(5)\nx = np.diag([1, 2, 3, 4, 5])\nprint(x)\n\n\n# # 5. Create a null vector of size 10 using np.zeros() and update sixth value to 11.\n\n# In[15]:\n\n\nx = np.zeros(10)\nprint(x)\nprint(\"After:\")\nx[6] = 11\nprint(x)\n\n\n# # 6. Convert an array to a float type using np.asfarray().\n\n# In[16]:\n\n\na = [1, 2, 3, 4]\nprint(a)\nx = np.asfarray(a)\nprint(\"After:\")\nprint(x)\n\n\n# # 7. Swap columns in a given array. Such as:\n\n# In[40]:\n\n\nx = np.arange(12).reshape(4, 3)\nprint(x)\n\nx[:, [2, 0]] = x[:, [0, 2]]\nprint(x)\n\n\n# # 8. Capitalize the first letter, lowercase, uppercase, swapcase, title-case of all the elements of a\n# given array. Use np.char.capitalize(), np.char.lower(), np.char.upper(),\n# np.char.swapcase(), np.char.title().\n\n# In[43]:\n\n\nx = np.array(['python', 'PHP', 'java', 'C++'], dtype=str)\nprint(x)\ncapitalized_case = np.char.capitalize(x)\nlowered_case = np.char.lower(x)\nuppered_case = np.char.upper(x)\nswapcased_case = np.char.swapcase(x)\ntitlecased_case = np.char.title(x)\nprint(\"\\n Capitalized: \", capitalized_case)\nprint(\"Lowered: \", lowered_case)\nprint(\"Uppered: \", uppered_case)\nprint(\"Swapcased: \", swapcased_case)\nprint(\"Titlecased: \", titlecased_case)\n\n\n# # 9. 
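# (Note on GRUclassifier.forward above: nn.Linear is constructed inside forward(),
# so the output projection gets fresh random weights on every call and is never
# trained. A sketch of the usual fix -- register the layer once in __init__;
# seq_len, a fixed token-sequence length, is an assumption introduced here for
# illustration:)
class GRUClassifierFixed(nn.Module):
    def __init__(self, hidden_size, output_size, seq_len):
        super().__init__()
        self.gru = nn.GRU(768, hidden_size, num_layers=1, batch_first=True)
        self.linear = nn.Linear(seq_len * hidden_size, output_size)  # registered once
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        output, _ = bert_model(x)        # same BERT encoding step as above
        output, _ = self.gru(output)     # initial hidden state defaults to zeros
        output = output.contiguous().view(len(x), -1)
        return self.sigmoid(self.linear(output))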
Get the dates of yesterday, today and tomorrow using np.datetime64() and np.timedelta64().\n\n# In[44]:\n\n\nyesterday = np.datetime64('today', 'D') - np.timedelta64(1, 'D')\nprint(\"Yestraday: \",yesterday)\ntoday = np.datetime64('today', 'D')\nprint(\"Today: \",today)\ntomorrow = np.datetime64('today', 'D') + np.timedelta64(1, 'D')\nprint(\"Tomorrow: \",tomorrow)\n\n\n# # 10.Append values to the end of an array using np.append().\n\n# In[45]:\n\n\nx = [10, 20, 30]\nprint(x)\nx = np.append(x, [[40, 50, 60], [70, 80, 90]])\nprint(\"After:\")\nprint(x)\n\n\n# # cars = pd.read_excel('cars.xlsx')\n\n# In[79]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# Variable dictionary is listed below:\n# Manufacturer: manufacturer name\n# Model: model name\n# Displ: engine displacement, in litres\n# Year: year of manufacture\n# Cyl: number of cylinders\n# Trans: type of transmission\n# Drv: the type of drive train, where f = front-wheel drive, r = rear wheel drive,\n# 4 = 4wd\n# Cty: city miles per gallon\n# Hwy: highway miles per gallon\n# Fl: fuel type\n# Class: \"type\" of car\n\n# # \n\n# In[69]:\n\n\ncars = pd.read_excel('cars.xlsx')\ncars\n\n\n# # Show relationship between highway and city miles per gallon.\n\n# In[78]:\n\n\nx = np.array(['cty'])\ny = np.array(['hwy'])\n\nplt.scatter(x, y)\nplt.show()\n\n\n# In[81]:\n\n\ndf = cars[['cty','hwy']]\n\nsns.pairplot(df, kind=\"scatter\")\n\n\n# # 2 Show distributions and scatters between all variables. While hue equals to type of drive train which variables have the lowest and the highest correlation? Find according to scatterplot.\n\n# In[88]:\n\n\nsns.pairplot(cars, kind=\"scatter\" , hue=\"drv\");\n\n\n# # 3. Which type of car is most frequent in dataset? Show by using countplot.\n\n# In[99]:\n\n\nsns.countplot(data=cars, x=\"class\");\n\n\n# # 4. Display number of cylinders for each drive train in bar chart. Which drive train is the most frequent?\n\n# In[100]:\n\n\nsns.countplot(data=cars, x=\"cyl\");\n\n\n# # 5. Visualize engine displacement by each class using boxplot. Do the same thing in violinplot.\n\n# In[118]:\n\n\ncars.plot.box(x='class',y='displ', grid='True');\n\n\n# In[128]:\n\n\nsns.violinplot(data=cars, x=\"displ\", y=\"class\")\n\n","repo_name":"Jaagas/DSA_Bootcamp","sub_path":"Week_2/Python/Day 2/Python_Week2_D2_HomeW.py","file_name":"Python_Week2_D2_HomeW.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4504901721","text":"from PyQt6.QtWidgets import QTabWidget, QMessageBox\nfrom PyQt6.QtCore import pyqtSignal as Signal\n\n\nclass TabWidget(QTabWidget):\n saveEditor = Signal()\n\n def __init__(self):\n super(TabWidget, self).__init__()\n self.setTabPosition(QTabWidget.TabPosition.South)\n self.setTabsClosable(True)\n self.setMovable(True)\n self.setDocumentMode(True)\n\n self.tabCloseRequested[int].connect(self.remove_tab)\n\n def remove_tab(self, index):\n editor = self.currentWidget().get_editor()\n if editor.modified:\n msgbox = QMessageBox(self)\n msgbox.setIcon(QMessageBox.Icon.Question)\n msgbox.setWindowTitle(self.tr(\"Archivo modificado\"))\n msgbox.setText(\n self.tr(\n \"El archivo {} tiene cambios sin \"\n \"guardar. 
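# (Note on the cars scatter cell above: np.array(['cty']) is a one-element array
# holding the literal string 'cty', so plt.scatter plots the strings rather than
# the data. Plotting the DataFrame columns directly gives the intended figure:)
plt.scatter(cars['cty'], cars['hwy'])
plt.xlabel('cty')
plt.ylabel('hwy')
plt.show()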
Quiere mantenerlos?\".format(editor.name)\n )\n )\n cancel_btn = msgbox.addButton(\n self.tr(\"Cancelar\"), QMessageBox.ButtonRole.RejectRole\n )\n msgbox.addButton(self.tr(\"No\"), QMessageBox.ButtonRole.NoRole)\n yes_btn = msgbox.addButton(self.tr(\"Si\"), QMessageBox.ButtonRole.YesRole)\n msgbox.exec_()\n r = msgbox.clickedButton()\n if r == cancel_btn:\n return\n if r == yes_btn:\n self.saveEditor.emit(editor)\n\n super(TabWidget, self).removeTab(index)\n\n def tab_modified(self, widget, modified):\n editor_widget = widget.get_editor()\n if modified:\n text = \"{} \\u2022\".format(editor_widget.name)\n else:\n text = editor_widget.name\n self.setTabText(self.currentIndex(), text)\n editor_widget.modified = modified\n\n def add_tab(self, widget, title):\n pass\n","repo_name":"centaurialpha/pireal","sub_path":"src/pireal/gui/query_container/tab_widget.py","file_name":"tab_widget.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"67"} +{"seq_id":"24594405308","text":"# -*- coding:utf-8 -*-\nimport logging\nimport datetime\nfrom configs.dir_path import logs_path\nimport os\n\n\ndef logging_handle(logFile=True, name=__name__):\n log_dir = os.path.join(logs_path, f\"{datetime.datetime.now().strftime('%Y%m%d')}.log\")\n logger = logging.getLogger(name) # 创建日志器对象\n logger.setLevel(logging.INFO) # 设置日志级别\n fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s [%(lineno)d]:%(message)s') # 设置日志格式\n if logFile:\n file = logging.FileHandler(filename=log_dir, encoding='utf-8') # 控制日志输出到文件\n file.setFormatter(fmt)\n logger.addHandler(file)\n else:\n console = logging.StreamHandler() # 控制日志输出到控制台\n console.setFormatter(fmt)\n logger.addHandler(console)\n return logger\n\n\nlog = logging_handle()\n\n","repo_name":"wangyunhe1210/autotesting_framework","sub_path":"utils/logging_handle.py","file_name":"logging_handle.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11789505038","text":"#! /usr/bin/env python3\n\n###############################################################################\n #\n # Copyright (C) 2022-2023 Maxim Integrated Products, Inc., All Rights Reserved.\n # (now owned by Analog Devices, Inc.)\n #\n # Permission is hereby granted, free of charge, to any person obtaining a\n # copy of this software and associated documentation files (the \"Software\"),\n # to deal in the Software without restriction, including without limitation\n # the rights to use, copy, modify, merge, publish, distribute, sublicense,\n # and/or sell copies of the Software, and to permit persons to whom the\n # Software is furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included\n # in all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n # IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES\n # OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n # OTHER DEALINGS IN THE SOFTWARE.\n #\n # Except as contained in this notice, the name of Maxim Integrated\n # Products, Inc. 
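# (Portability note on TabWidget.remove_tab above: PyQt6 removed the exec_()
# alias that PyQt5 kept for backwards compatibility, so msgbox.exec_() raises
# AttributeError under PyQt6; the call should be:)
r = msgbox.exec()
# (Also, saveEditor is declared as Signal() with no parameters but emitted with
# an argument; declaring it as Signal(object) would match how it is used.)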
shall not be used except as stated in the Maxim Integrated\n # Products, Inc. Branding Policy.\n #\n # The mere transfer of this software does not imply any licenses\n # of trade secrets, proprietary technology, copyrights, patents,\n # trademarks, maskwork rights, or any other form of intellectual\n # property whatsoever. Maxim Integrated Products, Inc. retains all\n # ownership rights.\n #\n ##############################################################################\n #\n # Copyright 2023 Analog Devices, Inc.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n ##############################################################################\n\n## dtm_sweep.py\n #\n # Sweep connection parameters.\n #\n # Ensure that both targets are built with BT_VER := 9\n #\n\nimport sys\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom time import sleep\nimport itertools\n\n\n## mc_rcdat_6000 can be found in msdk-test-and measurenent\n## mini_rcdat_usb.py can be used to sub this\nfrom mc_rcdat_6000 import McRcdat9000\n\n\n\nfrom BLE_hci import BLE_hci\nfrom BLE_hci import Namespace\nfrom termcolor import colored\nimport math\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.ticker as ticker\nfrom matplotlib.colors import LogNorm, Normalize\nfrom matplotlib.ticker import MaxNLocator\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors,cm\nfrom matplotlib.colors import ListedColormap\n\nverbose=True\n\nTRACE_INFO = 2\nTRACE_WARNING = 1\nTRACE_ERROR = 0\n\ntraceLevel = TRACE_INFO\n\ndef printTrace(label, msg,callerLevel, color='white'):\n if callerLevel <= traceLevel:\n print(colored(label + \": \", color), colored(msg, color))\n\ndef printWarning(msg):\n printTrace('Warning', msg, TRACE_WARNING, 'yellow')\n\ndef printInfo(msg):\n printTrace('Info', msg, TRACE_INFO, 'green')\n\ndef printError(msg):\n printTrace('Error', msg, TRACE_ERROR, 'red')\n\n\nLL_CRC_LEN = 3 # CRC length.\nLL_AA_LEN = 4 # Access address length.\nLL_PREAMBLE_LEN_1M = 1 # Preamble length (LE 1M PHY)\nLL_PREAMBLE_LEN_2M = 2 # Preamble length (LE 2M PHY)\nLL_PREAMBLE_LEN_CODED_BITS = 10 # Preamble length (LE Coded PHY)\nLL_CI_LEN_BITS = 2 # Coding indicator length (LE Coded PHY)\nLL_TERM1_LEN_BITS = 3 # TERM1 length (LE Coded PHY)\nLL_TERM2_LEN_BITS = 3 # TERM2 length (LE Coded PHY)\nLL_BLE_BIT_PER_US = 1 # BLE PHY rate\nLL_BLE_US_PER_BYTE_1M = 8 # BLE PHY speed (LE 1M PHY)\nLL_BLE_US_PER_BYTE_2M = 4 # BLE PHY speed (LE 2M PHY)\nLL_BLE_US_PER_BYTE_CODED_S8 = 64 # BLE PHY speed (LE Coded PHY, S=8)\nLL_BLE_US_PER_BIT_CODED_S8 = 8 # BLE PHY speed (LE Coded PHY, S=8)\nLL_BLE_US_PER_BYTE_CODED_S2 = 16 # BLE PHY speed (LE Coded PHY, S=2)\nLL_BLE_US_PER_BIT_CODED_S2 = 2 # BLE PHY speed (LE Coded PHY, S=2)\nLL_DTM_HDR_LEN = 2 # Direct Test Mode PDU header length\nNUM_CHANNELS = 40 # Number of testing channels\n\n# Calculate the duration of the test\ndef calcTestTime(packetLen, phy, numPackets):\n\n packetLen=int(packetLen)\n phy=int(phy)\n numPackets=int(numPackets)\n 
totalTime = 0\n\n # 1: 1M\n # 2: 2M\n # 3: S8\n # 4: S2\n\n # Calculate the length of each packet\n if (phy == 3 or phy == 4):\n totalTime = (LL_PREAMBLE_LEN_CODED_BITS + (LL_AA_LEN * 8) + LL_CI_LEN_BITS + LL_TERM1_LEN_BITS) * LL_BLE_US_PER_BIT_CODED_S8\n if (phy == 4):\n totalTime = totalTime + ((LL_DTM_HDR_LEN + packetLen + LL_CRC_LEN) * LL_BLE_US_PER_BYTE_CODED_S2) + (LL_TERM2_LEN_BITS * LL_BLE_US_PER_BIT_CODED_S2)\n else:\n totalTime = totalTime + ((LL_DTM_HDR_LEN + packetLen + LL_CRC_LEN) * LL_BLE_US_PER_BYTE_CODED_S8) + (LL_TERM2_LEN_BITS * LL_BLE_US_PER_BIT_CODED_S8)\n\n elif (phy == 2):\n totalTime = (LL_PREAMBLE_LEN_2M + LL_AA_LEN + LL_DTM_HDR_LEN + packetLen + LL_CRC_LEN) * LL_BLE_US_PER_BYTE_2M\n else:\n totalTime = (LL_PREAMBLE_LEN_1M + LL_AA_LEN + LL_DTM_HDR_LEN + packetLen + LL_CRC_LEN) * LL_BLE_US_PER_BYTE_1M\n\n # Add the inter frame spacing\n totalTime = math.ceil((totalTime + 249) / 625) * 625\n\n # Multiply by the number of packets we're sending\n totalTime = totalTime * numPackets\n\n # Add a constant 10 ms\n totalTime = totalTime + 10000\n\n return totalTime\n\ndef plottable_3d_info(df: pd.DataFrame):\n \"\"\"\n Transform Pandas data into a format that's compatible with\n Matplotlib's surface and wireframe plotting.\n \"\"\"\n index = df.index\n columns = df.columns\n\n x, y = np.meshgrid(np.arange(len(columns)), np.arange(len(index)))\n z = np.array([[df[c][i] for c in columns] for i in index])\n \n xticks = dict(ticks=np.arange(len(columns)), labels=columns)\n yticks = dict(ticks=np.arange(len(index)), labels=index)\n \n return x, y, z, xticks, yticks\n\nMAP_COLORS = [[181/255.0, 213/255.0, 227/255.0],\n [66/255.0, 131/255.0, 231/255.0],\n [128/255.0, 251/255.0, 96/255.0],\n [58/255.0, 125/255.0, 39/255.0],\n [254/255.0, 255/255.0, 84/255.0],\n [241/255.0, 167/255.0, 61/255.0],\n [233/255.0, 53/255.0, 40/255.0],\n [126/255.0, 30/255.0, 22/255.0],\n [0/255.0, 0/255.0, 0/0255.0]]\n\ndef create_heatmap(df, annot=False):\n\tcmap = ListedColormap(MAP_COLORS, name='adi-heatmap')\n\n\tbounds = [0.0, 0.0001, 0.1, 0.3, 1, 3, 10, 30, 99.9999, 105]\n\tnorm = colors.BoundaryNorm(bounds, cmap.N)\n\thm = sns.heatmap(df, cmap=cmap, norm=norm, annot=annot, xticklabels=True, linecolor=[237/255.0, 237/255.0, 237/255.0, 0.2], linewidth=0.35)\n\t\n\told_ticks = hm.collections[0].colorbar.get_ticks()\n\tnew_ticks = []\n\n\tfor i in range(len(old_ticks) - 1):\n\t\tnew_ticks.append(old_ticks[i] + (old_ticks[i+1] - old_ticks[i])/2)\n\n\tlabels = ['0%', '0%-0.1%', '0.1%-0.3%', '0.3%-1%', '1%-3%', '3%-10%', '10%-30%', '30%-100%', '100%']\n\thm.collections[0].colorbar.set_ticks(new_ticks, labels=labels)\n\thm.collections[0].colorbar.ax.set_ylabel('Packet Error Rate')\n\thm.set(xlabel=\"Channels\", ylabel=\"Rx Power [dBm]\", title=\"Rx Sensitivity -- No interference\")\n\n\treturn hm\n\n\n# Setup the command line description text\ndescText = \"\"\"\nDirect Test Mode Sweep\n\nThis tool uses a Mini Circuits RCDAT to control attenuation between two devices\nrunning DTM software. The Packet error rate (PER) of the slave will be collected by setting the master device in tx test mode and the slave in rx test mode.\nA vendor specific command will be sent to end the test. 
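# (Worked example for calcTestTime above, LE 1M PHY: a 250-byte payload is
# (1 preamble + 4 access address + 2 header + 250 payload + 3 CRC) * 8 us/byte
# = 2080 us on air; adding the 249 us inter-frame spacing and rounding up to a
# 625 us slot gives 2500 us per packet, so 100 packets plus the constant 10 ms
# come to 260 ms.)
assert calcTestTime(250, 1, 100) == 260_000  # microseconds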
\nThe total number of packets transmitted will be compared to the number of packtes received and the PER will be \ncalculated as numPacketsReceived/numPacketsTransmitted * 100\n\nIMPORTANT: The end test command is vendor specific,\nmeaning it will only work with MAX32 BLE devices using the latest stack.\n\n\"\"\"\n\n# Parse the command line arguments\nparser = argparse.ArgumentParser(description=descText, formatter_class=RawTextHelpFormatter)\nparser.add_argument('slaveSerial',help='Serial port for slave device')\nparser.add_argument('masterSerial',help='Serial port for master device')\nparser.add_argument('results',help='CSV files to store the results')\nparser.add_argument('-d', '--delay', default=0.5,help='Number of seconds to wait before ending the test')\nparser.add_argument('-n', '--numPackets', default=0,help='Number of packets to send per test')\nparser.add_argument('-l', '--limit', default=0,help='PER limit for return value')\nparser.add_argument('-p', '--phys', default=\"1\",help='PHYs to test with, comma separated list with 1-4.')\nparser.add_argument('-t', '--txpows', default=\"0\",help='TX powers to test with, comma separated list.')\nparser.add_argument('-a', '--attens', help='Attenuation settings to use, comma separated list.')\nparser.add_argument('-da', '--disable-atten', action='store_true',help='Disbale Attenuator For Testing Purposes')\nparser.add_argument('-cl', '--channel-loss', default=\"0\",help='TX powers to test with, comma separated list.')\nparser.add_argument('-as', '--atten-step', default=\"20\",help='Attenuation Step Size.')\n\n\nargs = parser.parse_args()\nprint(args)\n\npacketLengths = [250]\nphys = args.phys.strip().split(\",\")\ntxPowers = args.txpows.strip().split(\",\")\nnumPackets = args.numPackets\n\n\n\n\n\nif(args.attens == None):\n attens = list(range(20,90,int(args.atten_step)))\n\n # Add the max attenuation \n attens.append(90)\nelse:\n attens = args.attens.strip().split(\",\")\n\n\nif args.disable_atten:\n attens=[0]\n printInfo('Disabling Attenuator')\n disableAttenuator = True\nelse:\n printInfo('Attenuator active')\n disableAttenuator = False\n\nprint(\"slaveSerial :\",args.slaveSerial)\nprint(\"masterSerial :\",args.masterSerial)\nprint(\"results :\",args.results)\nprint(\"delay :\",args.delay)\nprint(\"numPackets :\",numPackets)\nprint(\"packetLengths :\",packetLengths)\nprint(\"phys :\",phys)\nprint(\"attens :\",attens)\nprint(\"txPowers :\",txPowers)\nprint(\"PER limit :\",args.limit)\n\n\n# Open the results file, write the parameters\nresults = args.results\nresults = open(args.results, \"w\")\nresults.write(\"# slaveSerial : \"+str(args.slaveSerial)+\"\\n\")\nresults.write(\"# masterSerial : \"+str(args.masterSerial)+\"\\n\")\nresults.write(\"# results : \"+str(args.results)+\"\\n\")\nresults.write(\"# delay : \"+str(args.delay)+\"\\n\")\nresults.write(\"# numPackets : \"+str(numPackets)+\"\\n\")\nresults.write(\"# packetLengths : \"+str(packetLengths)+\"\\n\")\nresults.write(\"# phys : \"+str(phys)+\"\\n\")\nresults.write(\"# attens : \"+str(attens)+\"\\n\")\nresults.write(\"# PER limit : \"+str(args.limit)+\"\\n\")\n# Write the header line\n\nresults.close()\n\nmini_RCDAT = McRcdat9000()\n\n\nresults_df = pd.DataFrame()\n\nassert(args.slaveSerial != args.masterSerial)\n\n# Create the BLE_hci objects\nhciSlave = BLE_hci(Namespace(serialPort=args.slaveSerial, monPort=\"\", baud=115200))\nhciMaster = BLE_hci(Namespace(serialPort=args.masterSerial, monPort=\"\", baud=115200))\n\nperMax = 0\n\n# Reset the 
devices\nhciSlave.resetFunc(None)\nhciMaster.resetFunc(None)\nsleep(0.1)\nfor ch in range(NUM_CHANNELS):\n temp_results = []\n print('Channel Num', ch)\n print(f'{ch / NUM_CHANNELS *100}% complete')\n printInfo('Setting TX Power')\n hciSlave.txPowerFunc(Namespace(power=0, handle=\"0\"))\n hciMaster.txPowerFunc(Namespace(power=0, handle=\"0\"))\n\n for packetLen,phy,txPower in itertools.product(packetLengths,phys,txPowers):\n\n\n \n # Set the TX Power\n \n\n for atten in attens:\n print(packetLen,\" \",phy,\" \",atten,\" \",txPower)\n\n # Set the attenuation\n if not disableAttenuator:\n mini_RCDAT.setAttenuation(atten)\n \n \n # sleep(0.1)\n \n #start the test\n printInfo(\"RX Starting\")\n hciSlave.rxTestFunc(Namespace(channel=ch, phy=phy))\n printInfo('TX Starting')\n hciMaster.txTestVSFunc(Namespace(channel=ch, phy=phy,payload=0,packetLength=packetLen,numPackets=numPackets))\n\n\n \n if(numPackets == 0):\n sleep(int(args.delay))\n else:\n # Sleep based on the amount of time it takes to complete the test\n # Convert us to seconds\n sleep(calcTestTime(packetLen, phy, numPackets) / 1000000)\n \n printInfo('Endding master')\n stats = hciMaster.endTestVSFunc(Namespace(noPrint=True))\n printInfo('Edning slave')\n packetsReceived = hciSlave.endTestFunc(Namespace(noPrint=True))\n\n\n packetsTransmitted = 0\n perMaster = 0\n\n if(numPackets == 0):\n if stats is not None:\n packetsTransmitted = stats['txData']\n\n if packetsTransmitted != 0:\n perSlave = round(100 * (1 - packetsReceived / packetsTransmitted), 2)\n \n else:\n printWarning('End Test stats returned invalid data. (Packets Transmitted = 0) PER rate being set to 100')\n perSlave = 100\n else:\n perSlave = round(100 * (1 - packetsReceived / int(numPackets)), 2)\n\n if(packetsReceived == 0):\n printWarning('Did not receive any packets')\n\n if perSlave >= 50.0:\n printWarning(f'Unusually high PER {perSlave}')\n\n \n if(perSlave > perMax):\n perMax = perSlave\n elif(perSlave < 0):\n perSlave = 0\n \n # Gather the results\n temp_results.append(perSlave)\n \n\n # Save the results to dataframe\n\n\n col_name = f'ch{ch}'\n results_df[col_name] = temp_results\n\n\n \n print(\"perMax: \",perMax)\n\n if(float(args.limit) != 0.0):\n if(perMax > float(args.limit)):\n print(\"PER too high!\")\n sys.exit(1)\n\n \n# Create the master dataframe\nresults_df.index = [str(-(x+int(args.channel_loss))) for x in attens]\n\nprint(results_df)\nresults_df.to_csv(args.results,mode='a')\n\nhm = create_heatmap(results_df)\nplt.savefig('heatmap.png')\n\n\n\nx,y,z,xticks,yticks = plottable_3d_info(results_df)\n\n\nfor i,row in enumerate(y):\n y[i] = [i * int(args.atten_step) + attens[0]] * len(row)\n\n# print(y)\ncmap = ListedColormap(MAP_COLORS, name='adi-heatmap')\nbounds = [0.0, 0.0001, 0.1, 0.3, 1, 3, 10, 30, 99.9999, 105]\nnorm = colors.BoundaryNorm(bounds, cmap.N)\nmappable = cm.ScalarMappable(norm=norm, cmap=cmap)\n\nfig = plt.figure(figsize=(10,5))\ncb = fig.colorbar(mappable)\n\n\nold_ticks = cb.get_ticks()\nnew_ticks = []\n\nfor i in range(len(old_ticks) - 1):\n new_ticks.append(old_ticks[i] + (old_ticks[i+1] - old_ticks[i])/2)\n\nlabels = ['0%', '0%-0.1%', '0.1%-0.3%', '0.3%-1%', '1%-3%', '3%-10%', '10%-30%', '30%-100%', '100%']\ncb.set_ticks(new_ticks, labels=labels)\ncb.ax.set_ylabel(\"Packet Error Rate\")\n\naxes = fig.add_subplot(projection='3d')\naxes.plot_surface(x, y, z,cmap='gnuplot')\naxes.set(title='PER Across Channels', xlabel='Channel', ylabel='RX Power',zlabel='PER 
%')\n\nplt.savefig('surfaceplt.png')\n\n\n\n\nsys.exit(0)\n","repo_name":"Analog-Devices-MSDK/msdk","sub_path":"Tools/Bluetooth/dtm_sweep_vs_allCh.py","file_name":"dtm_sweep_vs_allCh.py","file_ext":"py","file_size_in_byte":15983,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"67"} +{"seq_id":"11789505038","text":"from .pages.main_page import AutorizePage\nfrom .pages.mail_page import MailPage\nimport allure\n\n\n@allure.feature('simbirsoft_task')\n@allure.story('Signing in to the mailbox. Finding and sending an email')\ndef test_task_simbirsoft(browser):\n    link = \"https://mail.yandex.ru/\"\n    # initialize the Page Object, passing the driver instance and the url to the constructor\n    auth_page = AutorizePage(browser, link)\n    auth_page.open() # open the page\n    mail_page = auth_page.authorize() # sign in to the mailbox\n    mail_page.find_messages() # look for messages\n    mail_page.send_test_message_to_youself() # send an email\n","repo_name":"SINDIKAT1488/Simbirsoft","sub_path":"test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13579183565","text":"\"\"\"Geometry related classes.\"\"\"\nfrom ctypes import *\n\nfrom .fmodobject import FmodObject\nfrom .globalvars import DLL as _dll\nfrom .structures import VECTOR\nfrom .utils import ckresult\n\n\nclass PolygonAttributes:\n    \"\"\"Convenience wrapper class to handle polygons for simulated occlusion\n    which is based on its winding.\n    \"\"\"\n\n    def __init__(self, gptr, index):\n        self._gptr = gptr\n        self.index = index\n        self._directocclusion = c_float()\n        self._reverbocclusion = c_float()\n        self._doublesided = c_bool()\n        self._refresh_state()\n\n    def _refresh_state(self):\n        \"\"\"Retrieve the attributes for the polygon.\"\"\"\n        ckresult(\n            _dll.FMOD_Geometry_GetPolygonAttributes(\n                self._gptr,\n                self.index,\n                byref(self._directocclusion),\n                byref(self._reverbocclusion),\n                byref(self._doublesided),\n            )\n        )\n\n    @property\n    def direct_occlusion(self):\n        \"\"\"Occlusion factor for the direct path where 0 represents no occlusion\n        and 1 represents full occlusion.\n\n        :type: float\n        \"\"\"\n        self._refresh_state()\n        return self._directocclusion.value\n\n    @direct_occlusion.setter\n    def direct_occlusion(self, occ):\n        ckresult(\n            _dll.FMOD_Geometry_SetPolygonAttributes(\n                self._gptr,\n                self.index,\n                c_float(occ),\n                self._reverbocclusion,\n                self._doublesided,\n            )\n        )\n\n    @property\n    def reverb_occlusion(self):\n        \"\"\"Occlusion factor of the polygon for the reverb path where 0\n        represents no occlusion and 1 represents full occlusion.\n\n        :type: float\n        \"\"\"\n        self._refresh_state()\n        return self._reverbocclusion.value\n\n    @reverb_occlusion.setter\n    def reverb_occlusion(self, occ):\n        ckresult(\n            _dll.FMOD_Geometry_SetPolygonAttributes(\n                self._gptr,\n                self.index,\n                self._directocclusion,\n                c_float(occ),\n                self._doublesided,\n            )\n        )\n\n    @property\n    def double_sided(self):\n        \"\"\"Double sidedness of the polygon.\n\n        - True: Polygon is double sided\n        - False: Polygon is single sided, and the winding of the polygon (which\n          determines the polygon's normal) determines which side of the polygon\n          will cause occlusion.\n\n        :type: bool\n        \"\"\"\n        self._refresh_state()\n        return self._doublesided\n\n    @double_sided.setter\n    def double_sided(self, dval):\n        ckresult(\n            _dll.FMOD_Geometry_SetPolygonAttributes(\n                self._gptr,\n                self.index,\n                self._directocclusion,\n                self._reverbocclusion,\n                dval,\n            )\n        
)\n\n @property\n def num_vertices(self):\n \"\"\"The number of vertices in the polygon.\n\n :type: int\n \"\"\"\n num = c_int()\n ckresult(\n _dll.FMOD_Geometry_GetPolygonNumVertices(self._gptr, self.index, byref(num))\n )\n return num.value\n\n def get_vertex(self, index):\n \"\"\"Retrieve the position of a vertex.\n\n :param int index: Polygon vertex index.\n :returns: 3D Position of the vertex.\n :rtype: list of x, y, z coordinate floats\n \"\"\"\n vertex = VECTOR()\n ckresult(\n _dll.FMOD_Geometry_GetPolygonVertex(\n self._gptr, self.index, index, byref(vertex)\n )\n )\n return vertex.to_list()\n\n def set_vertex(self, index, vertex):\n \"\"\"Alter the position of a polygon's vertex inside a geometry object.\n\n :param int index: Polygon vertex index.\n :param vertex: 3D Position of the vertex.\n :type vertex: list of x, y, z coordinate floats\n \"\"\"\n vvec = VECTOR.from_list(vertex)\n ckresult(\n _dll.FMOD_Geometry_SetPolygonVertex(self._gptr, self.index, index, byref(vvec))\n )\n\n\nclass Geometry(FmodObject):\n \"\"\"Geometry methods.\"\"\"\n\n def add_polygon(self, directocclusion, reverbocclusion, doublesided, *vertices):\n \"\"\"Add a polygon.\n\n All vertices must lay in the same plane otherwise behavior may be\n unpredictable. The polygon is assumed to be convex. A non convex\n polygon will produce unpredictable behavior. Polygons with zero area\n will be ignored.\n\n Polygons cannot be added if already at the maximum number of polygons\n or if the addition of their verticies would result in exceeding the\n maximum number of vertices.\n\n Vertices of an object are in object space, not world space, and so are\n relative to the position, or center of the object. See\n :py:attr:`position`.\n\n :param float directocclusion: Occlusion factor of the polygon for the\n direct path where 0 represents no occlusion and 1 represents full\n occlusion.\n :param float reverbocclusion: Occlusion factor of the polygon for the\n reverb path where 0 represents no occlusion and 1 represents full\n occlusion.\n :param bool doublesided: Double sidedness of the polygon.\n\n - True: Polygon is double sided\n - False: Polygon is single sided, and the winding of the polygon\n (which determines the polygon's normal) determines which side of\n the polygon will cause occlusion.\n :param vertices: At least three vertices located in object space.\n :type vertices: list of list of coordinate floats\n :returns: Polygon index. 
Use this with other per polygon based\n functions as a handle.\n :rtype: int\n \"\"\"\n vectors = VECTOR * len(vertices)\n varray = vectors(*vertices)\n idx = c_int()\n self._call_fmod(\n \"FMOD_Geometry_AddPolygon\",\n c_float(directocclusion),\n c_float(reverbocclusion),\n c_bool(doublesided),\n len(vertices),\n varray,\n byref(idx),\n )\n return idx.value\n\n @property\n def active(self):\n \"\"\"Whether an object is processed by the geometry engine.\n\n :type: bool\n \"\"\"\n active = c_bool()\n self._call_fmod(\"FMOD_Geometry_GetActive\", byref(active))\n return active.value\n\n @active.setter\n def active(self, active):\n self._call_fmod(\"FMOD_Geometry_SetActive\", active)\n\n @property\n def _creation_limits(self):\n \"\"\"The maximum number of polygons and vertices allocatable for this\n object.\n\n :type: two-tuple with\n\n - Maximum possible number of polygons in this object\n - Maximum possible number of vertices in this object\n \"\"\"\n maxpols, maxverts = (c_int(), c_int())\n self._call_fmod(\"FMOD_Geometry_GetMaxPolygons\", byref(maxpols), byref(maxverts))\n return (maxpols.value, maxverts.value)\n\n @property\n def max_polygons(self):\n \"\"\"The maximum number of polygons allocatable for this object.\n\n :type: int\n \"\"\"\n return self._creation_limits[0]\n\n @property\n def max_vertices(self):\n \"\"\"The maximum number of vertices allocatable for this object.\n\n :type: int\n \"\"\"\n return self._creation_limits[1]\n\n @property\n def num_polygons(self):\n \"\"\"The number of polygons in this object.\n\n :type: int\n \"\"\"\n num = c_int()\n self._call_fmod(\"FMOD_Geometry_GetNumPolygons\", byref(num))\n return num.value\n\n def get_polygon(self, index):\n \"\"\"The polygon at the given index.\n\n :param int index: The polygon index.\n :rtype: PolygonAttributes\n \"\"\"\n return PolygonAttributes(self._ptr, index)\n\n @property\n def position(self):\n \"\"\"The 3D position of the object.\n\n Position is in world space.\n\n :type: list of coordinate floats.\n \"\"\"\n pos = VECTOR()\n self._call_fmod(\"FMOD_Geometry_GetPosition\", byref(pos))\n return pos.to_list()\n\n @position.setter\n def position(self, pos):\n posv = VECTOR.from_list(pos)\n self._call_fmod(\"FMOD_Geometry_SetPosition\", byref(posv))\n\n @property\n def _rotation(self):\n \"\"\"The 3D orientation of the object.\n\n :type: list of lists of unit length vector coordinates\n \"\"\"\n fwd_vec = VECTOR()\n up_vec = VECTOR()\n self._call_fmod(\"FMOD_Geometry_GetRotation\", byref(fwd_vec), byref(up_vec))\n return [fwd_vec.to_list(), up_vec.to_list()]\n\n @_rotation.setter\n def _rotation(self, rot):\n fwd_vec = VECTOR.from_list(rot[0])\n up_vec = VECTOR.from_list(rot[1])\n self._call_fmod(\"FMOD_Geometry_SetRotation\", byref(fwd_vec), byref(up_vec))\n\n @property\n def forward_rotation(self):\n \"\"\"Forwards orientation.\n\n This vector must be of unit length and perpendicular to the up vector.\n\n :type: list of unit length vector coordinates\n \"\"\"\n return self._rotation[0]\n\n @forward_rotation.setter\n def forward_rotation(self, rot):\n rotation = self._rotation\n rotation[0] = rot\n self._rotation = rotation\n\n @property\n def up_rotation(self):\n \"\"\"Upwards orientation.\n\n This vector must be of unit length and perpendicular to the forwards\n vector.\n\n :type: list of unit length vector coordinates\n \"\"\"\n return self._rotation[1]\n\n @up_rotation.setter\n def up_rotation(self, rot):\n rotation = self._rotation\n rotation[1] = rot\n self._rotation = rotation\n\n @property\n def 
scale(self):\n \"\"\"The 3D scale of the object.\n\n An object can be scaled/warped in all three dimensions separately using\n this function without having to modify polygon data.\n\n :type: list of three scale dimensions.\n \"\"\"\n scale = VECTOR()\n self._call_fmod(\"FMOD_Geometry_GetScale\", byref(scale))\n return scale.to_list()\n\n @scale.setter\n def scale(self, scale):\n scalev = VECTOR.from_list(scale)\n self._call_fmod(\"FMOD_Geometry_SetScale\", byref(scalev))\n\n def release(self):\n \"\"\"Free a geometry object and release its memory.\"\"\"\n self._call_fmod(\"FMOD_Geometry_Release\")\n\n def save(self):\n \"\"\"Save the geometry object as a serialized binary block to a user\n memory buffer.\n\n The data can be saved to a file if required and loaded later with\n :py:meth:`~pyfmodex.system.System.load_geometry`.\n\n :returns: raw memory data\n \"\"\"\n size = c_int()\n self._call_fmod(\"FMOD_Geometry_Save\", None, byref(size))\n ptr = create_string_buffer(size.value)\n self._call_fmod(\"FMOD_Geometry_Save\", ptr, byref(size))\n return ptr.raw\n","repo_name":"tyrylu/pyfmodex","sub_path":"pyfmodex/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"67"} +{"seq_id":"72092195735","text":"import pyfits\nimport numpy as np\nimport math as m\nimport sys\nimport matplotlib.pyplot as plt\nimport pdb\n\n#Get list of table files\nfluxlist = sys.argv[1]\n\n#Make the figure\nfig = plt.figure()\nplt.grid(True)\nplt.xlabel('Time')\nplt.ylabel('Relative Flux')\nplt.title('Lightcurve of Reference Stars')\n\nctr = 0\n\nfor tablefile in open(fluxlist):\n ctr += 1\n fig.clf()\n\n #Open file\n tablefile = tablefile.rstrip('\\n')\n data = np.loadtxt(tablefile)\n \n #Get the information\n fluxdata = data[:,1] #fluxdata holds all raw data\n fluxerrordata = data[:,2]\n time = data[:,0]\n\n #Normalize flux and ignore outliers\n flux = fluxdata[np.where(fluxdata!=0)] #flux used for average \n flux = flux[287:] #flux ignores beginning exposures (no outliers)\n fluxerror = fluxerrordata[287:] #fluxerror ignores beginning images \n average = np.mean(flux) #average ignores beginning exposures\n n = flux.shape[0]\n sigma = m.sqrt( np.sum((flux-average)**2) / (n-1))\n normfluxdata = fluxdata/average #normfluxdata normalizes ALL exposures\n normfluxerrordata = fluxerrordata/average #normfluxerrordata normalizes ALL exposures\n \n #Plot data\n plt.errorbar(time, normfluxdata, yerr = normfluxerrordata, fmt = 'r.')\n fig.savefig('exo_curves' + str(ctr) + '.pdf', bbox_inches='tight', dpi=fig.dpi)\n \n #Make files containing time, flux, and error\n #For the science target, we won't normalize to average\n if (ctr == 11):\n #Scrap non-zero outliers more that 2 sigma away for non-beginning exposures\n tempflux = fluxdata[287:] #no beginning exposures, HAS outliers\n tempflux[np.where(tempflux < (average - 4*(sigma)))] = 0.0\n tempflux[np.where(tempflux > (average + 4*(sigma)))] = 0.0\n fluxerror[np.where(tempflux < (average - 4*(sigma)))] = 0.0\n fluxerror[np.where(tempflux > (average + 4*(sigma)))] = 0.0\n \n #Write average fluxes to file\n filename = ('exo_fluxdata' + str(ctr) + '.txt')\n finalflux = np.concatenate((fluxdata[:287], tempflux), axis = 0) #flux with beginning\n finalfluxerror = np.concatenate((fluxerrordata[:287], fluxerror), axis=0) \n np.savetxt(filename, np.column_stack((time, finalflux, finalfluxerror)), fmt='%f') \n #For the others, we will normalize to average\n else:\n 
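# (Illustrative use of the pyfmodex Geometry wrapper above. How a Geometry
# instance `geo` is obtained is assumed here -- normally via the System object
# that save()'s docstring cross-references; the calls themselves are the ones
# defined in the class: a triangle with direct occlusion 1.0, reverb occlusion
# 0.5, double sided.)
idx = geo.add_polygon(1.0, 0.5, True, (0, 0, 0), (1, 0, 0), (1, 1, 0))
geo.position = [0.0, 0.0, 2.0]   # world-space position property
blob = geo.save()                # serialized block, reloadable via System.load_geometry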
#Scrap non-zero outliers more than 4 sigma away for non-beginning exposures\n tempflux = normfluxdata[287:] #ignores first exposures\n tempfluxerror = normfluxerrordata[287:] #ignores first exposures\n tempflux[np.where( tempflux < ((average - 4*sigma)/average) )] = 0.0\n tempflux[np.where( tempflux > ((average + 4*sigma)/average) )] = 0.0\n tempfluxerror[np.where( tempflux < ((average - 4*sigma)/average) )] = 0.0\n tempfluxerror[np.where( tempflux > ((average + 4*sigma)/average) )] = 0.0\n \n #Write average fluxes to file\n filename = ('exo_fluxdata' + str(ctr) + '.txt')\n normflux = np.concatenate((normfluxdata[:287], tempflux), axis = 0)\n normfluxerror = np.concatenate((normfluxerrordata[:287], tempfluxerror), axis = 0)\n np.savetxt(filename, np.column_stack((time, normflux, normfluxerror)), fmt='%f')\n","repo_name":"Yogesh3/Data-Reduction","sub_path":"Exoplanet/exo_plot.py","file_name":"exo_plot.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"31927600861","text":"# Eg\n# cancer_type: 'luad'\n# parent_out_dir (output directory): '/data07/shared/lehhou/lym_outputs/csv'\n# src_dir (source directory): '/data08/shared/lehhou/active_learning_osprey'\n\nimport os\nimport skimage.io\n\ndef get_patch_til_svs_file_singlefile_wrap(cancer_type, parent_out_dir, src_dir):\n out_dir = os.path.join(parent_out_dir, cancer_type)\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n src_img_dir = src_dir + '/rates-' + cancer_type + '-all-auto'\n bin_src_imgs = [f for f in os.listdir(src_img_dir) if f.endswith('automatic.png')]\n\n for i_img in range(len(bin_src_imgs)):\n slide_name = bin_src_imgs[i_img][5:-14]\n csv_path = os.path.join(out_dir, slide_name + '.csv')\n info_path = os.path.join(out_dir, slide_name + '.info')\n fileID = open(csv_path, 'w')\n infoID = open(info_path, 'w')\n\n bin_img_name = bin_src_imgs[i_img][0:-4] + '_thres.png'\n real_img_name = bin_src_imgs[i_img]\n real_img_path = os.path.join(src_img_dir, real_img_name)\n bin_img_path = os.path.join(src_img_dir, bin_img_name)\n\n real_img = skimage.io.imread(real_img_path)\n bin_img = skimage.io.imread(bin_img_path)\n\n width = real_img.shape[1]\n height = real_img.shape[0]\n\n n_tissue = 0\n n_til = 0\n\n for iH in range(height):\n for iW in range(width):\n # real value\n real_value = float(real_img[iH, iW, 0]) / 255.0\n\n # bin value\n bin_value = 0\n # if this is a pos tile\n if bin_img[iH, iW, 0] > bin_img[iH, iW, 2]:\n bin_value = 1\n n_til += 1\n n_tissue += 1\n\n # if this is a tissue tile\n if bin_img[iH, iW, 2] > 128:\n n_tissue += 1\n\n fileID.write('{},{},{},{:.4f}\\n'.format(iH, iW, bin_value, real_value))\n\n infoID.write('{},{}\\n'.format('tissue_number', n_tissue))\n infoID.write('{},{}\\n'.format('til_number', n_til))\n\n fileID.close()\n infoID.close()\n\n\nif __name__ == \"__main__\":\n get_patch_til_svs_file_singlefile_wrap('prad', 'res', 'data')\n","repo_name":"SBU-BMI/u24_lymphocyte","sub_path":"cluster_indices/get_patch_til_svs_file_wrap.py","file_name":"get_patch_til_svs_file_wrap.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"67"}
{"seq_id":"8454623272","text":"import taichi as ti\nimport argparse\nimport os\nimport imageio\nfrom mass_spring_model import Mass_spring\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--mode\", type=str, default=\"implicit\")\nargs = parser.parse_args()\n\nti.init(arch=ti.cpu)\n\nvertices = ti.Vector.field(3, dtype=ti.f32, shape=2) \nindices = ti.field(int, 
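The per-pixel double loop in the tile-counting script above can be collapsed into numpy array operations. A sketch assuming real_img and bin_img are H x W x 3 arrays as returned by skimage.io.imread; the helper name is mine, not the repository's:

import numpy as np

def count_tiles(real_img, bin_img):
    real_values = real_img[:, :, 0].astype(float) / 255.0  # per-tile lymphocyte rate
    pos = bin_img[:, :, 0] > bin_img[:, :, 2]              # red over blue: positive (TIL) tile
    tissue = bin_img[:, :, 2] > 128                        # strong blue: tissue tile
    n_til = int(pos.sum())
    n_tissue = n_til + int(tissue.sum())                   # mirrors the two increments above
    return real_values, n_til, n_tissue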
shape=2)\n\nmass_spring_simple = Mass_spring(2, 1, 0.005, 40, 10)\n\n@ti.kernel\ndef update_vertices():\n for i in vertices:\n vertices[i] = mass_spring_simple.pos[i]\n\nmass_spring_simple.e2v[0] = [0, 1]\nmass_spring_simple.l_i[0] = 1.0\nmass_spring_simple.pos[0] = [0, 0, 0]\nmass_spring_simple.pos[1] = [0, 2, 0]\nmass_spring_simple.vel[0] = [-1, 0, 0]\nmass_spring_simple.vel[1] = [1, 0, 0]\nindices[0] = 0\nindices[1] = 1\n\nsubsteps = 10\ndef main():\n window = ti.ui.Window(\"Taichi Simulation on GGUI\", (768, 768),\n vsync=True)\n canvas = window.get_canvas()\n canvas.set_background_color((1, 1, 1))\n scene = ti.ui.Scene()\n camera = ti.ui.Camera()\n\n update_vertices()\n\n tot_step = 0\n save_path = f\"../imgs/mass_spring_simple_{args.mode}\"\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n frames = []\n while window.running:\n if tot_step > 200:\n break\n tot_step += 1\n\n for i in range(substeps):\n mass_spring_simple.step(args.mode)\n\n update_vertices()\n\n camera.position(0, -2.5, 3)\n camera.lookat(0, 1.5, 0)\n scene.set_camera(camera)\n\n scene.point_light(pos=(0.5, 0.5, 3), color=(1, 1, 1))\n scene.ambient_light((0.5, 0.5, 0.5))\n\n scene.lines(vertices, 0.05, indices, color=(0, 0, 1))\n scene.particles(vertices, 0.1, color=(1, 0, 0))\n canvas.scene(scene)\n\n window.save_image(os.path.join(save_path, f\"{tot_step}.png\"))\n window.show()\n \n for i in range(1, tot_step+1):\n filename = os.path.join(save_path, f\"{i}.png\")\n frames.append(imageio.imread(filename))\n \n gif_name = os.path.join(save_path, \"GIF.gif\")\n imageio.mimsave(gif_name, frames, 'GIF', duration=0.2)\n \n\nmain()\n","repo_name":"wangyian-me/exercise_mass_spring","sub_path":"code/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"72075918294","text":"import warnings\nfrom SMEFT19 import likelihood_global\nfrom SMEFT19.scenarios import rotBII\nimport SMEFT19\n\nd_ell = SMEFT19.ellipse.load('../data/ellipses/rotBII.yaml')\nbf1 = d_ell['bf']\n\n\ndef lh(num: int) -> float:\n xmin = -0.02\n xmax = 0.02\n ymin = 0.0\n ymax = 2.0\n xmargin = 0.02*(xmax-xmin)\n ymargin = 0.02*(ymax-ymin)\n ix = num % 50\n iy = num // 50\n x = (xmin-xmargin) + ix/50 * ((xmax+xmargin) - (xmin-xmargin))\n y = (ymin-ymargin) + iy/50 * ((ymax+ymargin) - (ymin-ymargin))\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n lg = likelihood_global([bf1[0], bf1[1], x, bf1[3], y], rotBII)\n return max(lg, -100)\n\n\nstart = 0\n\nwith open('../data/likelihood/likelihood_rotBII_blbq.dat', 'at') as f:\n for i in range(start, 50*50):\n lg = lh(i)\n if i%50 == 49:\n sep = '\\n'\n else:\n sep = '\\t'\n f.write(f'{lg}{sep}')\n f.flush()\n print(i)\n","repo_name":"Jorge-Alda/SMEFT19-notebooks","sub_path":"PaperML/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"11558902173","text":"##\n# File: GromacsPTParserListener.py\n# Date: 02-Jun-2022\n#\n# Updates:\n\"\"\" ParserListener class for GROMACS PT files.\n @author: Masashi Yokochi\n\"\"\"\nimport sys\nimport collections\nimport copy\n\nfrom antlr4 import ParseTreeListener\nfrom rmsd.calculate_rmsd import NAMES_ELEMENT # noqa: F401 pylint: disable=no-name-in-module, import-error, unused-import\n\ntry:\n from wwpdb.utils.align.alignlib import PairwiseAlign # pylint: 
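run.py above linearizes a 50 x 50 parameter grid, so a single integer job index picks one (x, y) point. The index arithmetic in isolation; the helper below is illustrative:

def grid_coords(num, nx=50):
    # Flat index -> (column, row): the column varies fastest, so num = iy * nx + ix
    ix = num % nx
    iy = num // nx
    return ix, iy

assert grid_coords(0) == (0, 0)
assert grid_coords(49) == (49, 0)   # end of the first row
assert grid_coords(50) == (0, 1)    # start of the second row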
disable=no-name-in-module\n from wwpdb.utils.nmr.mr.GromacsPTParser import GromacsPTParser\n from wwpdb.utils.nmr.mr.ParserListenerUtil import (coordAssemblyChecker,\n translateToStdAtomName,\n translateToStdAtomNameOfDmpc,\n translateToStdResName,\n REPRESENTATIVE_MODEL_ID)\n from wwpdb.utils.nmr.ChemCompUtil import ChemCompUtil\n from wwpdb.utils.nmr.BMRBChemShiftStat import BMRBChemShiftStat\n from wwpdb.utils.nmr.NEFTranslator.NEFTranslator import NEFTranslator\n from wwpdb.utils.nmr.AlignUtil import (monDict3,\n protonBeginCode,\n letterToDigit, indexToLetter,\n alignPolymerSequence,\n assignPolymerSequence,\n trimSequenceAlignment,\n retrieveAtomIdentFromMRMap,\n alignPolymerSequenceWithConflicts,\n getRestraintFormatName,\n getOneLetterCodeSequence)\nexcept ImportError:\n from nmr.align.alignlib import PairwiseAlign # pylint: disable=no-name-in-module\n from nmr.mr.GromacsPTParser import GromacsPTParser\n from nmr.mr.ParserListenerUtil import (coordAssemblyChecker,\n translateToStdAtomName,\n translateToStdAtomNameOfDmpc,\n translateToStdResName,\n REPRESENTATIVE_MODEL_ID)\n from nmr.ChemCompUtil import ChemCompUtil\n from nmr.BMRBChemShiftStat import BMRBChemShiftStat\n from nmr.NEFTranslator.NEFTranslator import NEFTranslator\n from nmr.AlignUtil import (monDict3,\n protonBeginCode,\n letterToDigit, indexToLetter,\n alignPolymerSequence,\n assignPolymerSequence,\n trimSequenceAlignment,\n retrieveAtomIdentFromMRMap,\n alignPolymerSequenceWithConflicts,\n getRestraintFormatName,\n getOneLetterCodeSequence)\n\n\n# This class defines a complete listener for a parse tree produced by GromacsPTParser.\nclass GromacsPTParserListener(ParseTreeListener):\n\n __file_type = 'nm-aux-gro'\n\n # atom name mapping of public MR file between the archive coordinates and submitted ones\n __mrAtomNameMapping = None\n\n # CCD accessing utility\n __ccU = None\n\n # BMRB chemical shift statistics\n __csStat = None\n\n # NEFTranslator\n __nefT = None\n\n # Pairwise align\n __pA = None\n\n # coordinates information generated by ParserListenerUtil.coordAssemblyChecker()\n __polySeqModel = None\n __nonPolyModel = None\n __branchedModel = None\n __chemCompAtom = None\n\n __hasPolySeqModel = False\n __hasNonPolyModel = False\n __hasBranchedModel = False\n\n # polymer sequence of GROMACS parameter/topology file\n __polySeqPrmTop = None\n\n __seqAlign = None\n __chainAssign = None\n\n # system\n __system = None\n\n # atoms\n __atoms = []\n\n # molecules\n __molecules = []\n\n # collection of number selection\n numberSelection = []\n\n # GROMACS atom number dictionary\n __atomNumberDict = None\n\n # __cur_column_len = None\n __cur_word_len = None\n\n __f = None\n warningMessage = None\n\n def __init__(self, verbose=True, log=sys.stdout,\n representativeModelId=REPRESENTATIVE_MODEL_ID,\n mrAtomNameMapping=None,\n cR=None, caC=None, ccU=None, csStat=None, nefT=None):\n\n self.__mrAtomNameMapping = None if mrAtomNameMapping is None or len(mrAtomNameMapping) == 0 else mrAtomNameMapping\n\n if cR is not None:\n ret = coordAssemblyChecker(verbose, log, representativeModelId, cR, caC, None, fullCheck=True)\n self.__polySeqModel = ret['polymer_sequence']\n self.__nonPolyModel = ret['non_polymer']\n self.__branchedModel = ret['branched']\n self.__chemCompAtom = ret['chem_comp_atom']\n\n self.__hasPolySeqModel = self.__polySeqModel is not None and len(self.__polySeqModel) > 0\n self.__hasNonPolyModel = self.__nonPolyModel is not None and len(self.__nonPolyModel) > 0\n self.__hasBranchedModel = self.__branchedModel is not 
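The paired import blocks above keep the listener importable both from the installed wwpdb package tree and from a bare source checkout. The pattern, reduced to a single symbol:

try:
    # preferred path when the full package is installed
    from wwpdb.utils.nmr.ChemCompUtil import ChemCompUtil
except ImportError:
    # fallback for a standalone source tree rooted at nmr/
    from nmr.ChemCompUtil import ChemCompUtil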
None and len(self.__branchedModel) > 0\n\n # CCD accessing utility\n self.__ccU = ChemCompUtil(verbose, log) if ccU is None else ccU\n\n # BMRB chemical shift statistics\n self.__csStat = BMRBChemShiftStat(verbose, log, self.__ccU) if csStat is None else csStat\n\n # NEFTranslator\n self.__nefT = NEFTranslator(verbose, log, self.__ccU, self.__csStat) if nefT is None else nefT\n\n # Pairwise align\n if self.__hasPolySeqModel:\n self.__pA = PairwiseAlign()\n self.__pA.setVerbose(verbose)\n\n self.defaultStatements = 0\n self.moleculetypeStatements = 0\n self.atomtypesStatements = 0\n self.pairtypesStatements = 0\n self.bondtypesStatements = 0\n self.angletypesStatements = 0\n self.dihedraltypesStatements = 0\n self.constrainttypesStatements = 0\n self.nonbond_paramsStatements = 0\n self.atomsStatements = 0\n self.bondsStatements = 0\n self.pairsStatements = 0\n self.pairs_nbStatements = 0\n self.anglesStatements = 0\n self.dihedralsStatements = 0\n self.exclusionsStatements = 0\n self.constraintsStatements = 0\n self.settlesStatements = 0\n self.virtual_sites1Statements = 0\n self.virtual_sites2Statements = 0\n self.virtual_sites3Statements = 0\n self.virtual_sites4Statements = 0\n self.virtual_sitesnStatements = 0\n self.systemStatements = 0\n self.moleculesStatements = 0\n\n # Enter a parse tree produced by GromacsPTParser#gromacs_pt.\n def enterGromacs_pt(self, ctx: GromacsPTParser.Gromacs_ptContext): # pylint: disable=unused-argument\n self.__atomNumberDict = {}\n self.__polySeqPrmTop = []\n self.__f = []\n\n # Exit a parse tree produced by GromacsPTParser#gromacs_pt.\n def exitGromacs_pt(self, ctx: GromacsPTParser.Gromacs_ptContext): # pylint: disable=unused-argument\n\n try:\n\n if not self.__hasPolySeqModel:\n return\n\n if len(self.__atoms) == 0:\n return\n\n chainIndex = letterToDigit(self.__polySeqModel[0]['chain_id']) - 1 # set tentative chain_id from label_asym_id, which will be assigned to coordinate auth_asym_id\n chainId = indexToLetter(chainIndex)\n\n terminus = [atom['auth_atom_id'].endswith('T') for atom in self.__atoms]\n\n atomTotal = len(self.__atoms)\n if terminus[0]:\n terminus[0] = False\n for i in range(0, atomTotal - 1):\n j = i + 1\n if terminus[i] and terminus[j]:\n terminus[i] = False\n if terminus[-1]:\n terminus[-1] = False\n\n seqIdList = []\n compIdList = []\n retrievedAtomNumList = []\n\n NON_METAL_ELEMENTS = ('H', 'C', 'N', 'O', 'P', 'S')\n\n ancAtomName = prevAtomName = ''\n prevSeqId = prevCompId = None\n offset = 0\n for atom in self.__atoms:\n atomNum = atom['atom_number']\n atomName = atom['auth_atom_id']\n atomType = atom['atom_type']\n _seqId = atom['auth_seq_id']\n compId = atom['auth_comp_id']\n if compId not in monDict3 and self.__mrAtomNameMapping is not None and atomName[0] in protonBeginCode:\n _, compId, _atomName = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, _seqId, compId, atomName)\n if _atomName != atomName:\n atomName = _atomName\n retrievedAtomNumList.append(atomNum)\n\n if (terminus[atomNum - 1] and ancAtomName.endswith('T'))\\\n or (prevCompId is not None and (prevCompId.endswith('3') or self.__csStat.peptideLike(prevCompId)) and compId.endswith('5'))\\\n or (compId == atomName and compId.split('+')[0].title() in NAMES_ELEMENT)\\\n or (compId == atomName and compId.split('-')[0].title() in NAMES_ELEMENT)\\\n or (len(prevAtomName) > 0 and prevAtomName[0] not in NON_METAL_ELEMENTS and prevSeqId != _seqId):\n\n self.__polySeqPrmTop.append({'chain_id': chainId,\n 'seq_id': seqIdList,\n 'auth_comp_id': compIdList})\n seqIdList = 
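exitGromacs_pt above seeds its tentative chain naming with letterToDigit and indexToLetter from AlignUtil. Their real implementations are not shown here; a plausible single-letter version, offered purely as an assumption about the convention:

import string

def letter_to_digit(letter):
    # assumed: 'A' -> 1, 'B' -> 2, ...
    return string.ascii_uppercase.index(letter[0].upper()) + 1

def index_to_letter(index):
    # assumed: 0 -> 'A', 1 -> 'B', ... wrapping after 'Z'
    return string.ascii_uppercase[index % 26]

# round trip as used above: chainIndex = letter_to_digit(chain_id) - 1; chain_id = index_to_letter(chainIndex)
assert index_to_letter(letter_to_digit('B') - 1) == 'B'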
[]\n compIdList = []\n chainIndex += 1\n chainId = indexToLetter(chainIndex)\n offset = 1 - _seqId\n\n seqId = _seqId + offset\n if seqId not in seqIdList:\n seqIdList.append(seqId)\n compIdList.append(compId)\n self.__atomNumberDict[atomNum] = {'chain_id': chainId,\n 'seq_id': seqId,\n 'auth_comp_id': compId,\n 'auth_atom_id': atomName,\n 'atom_type': atomType}\n ancAtomName = prevAtomName\n prevAtomName = atomName\n prevSeqId = _seqId\n prevCompId = compId\n\n self.__polySeqPrmTop.append({'chain_id': chainId,\n 'seq_id': seqIdList,\n 'auth_comp_id': compIdList})\n\n nonPolyCompIdList = []\n if self.__hasNonPolyModel:\n for np in self.__nonPolyModel:\n compId = np['comp_id'][0]\n if compId not in nonPolyCompIdList:\n nonPolyCompIdList.append(compId)\n\n for ps in self.__polySeqPrmTop:\n chainId = ps['chain_id']\n compIdList = []\n for seqId, authCompId in zip(ps['seq_id'], ps['auth_comp_id']):\n authAtomIds = [translateToStdAtomName(atomNum['auth_atom_id'], atomNum['auth_comp_id'],\n ccU=self.__ccU, unambig=True)\n for atomNum in self.__atomNumberDict.values()\n if atomNum['chain_id'] == chainId\n and atomNum['seq_id'] == seqId\n and atomNum['auth_atom_id'][0] not in protonBeginCode]\n authCompId = translateToStdResName(authCompId, self.__ccU)\n if self.__ccU.updateChemCompDict(authCompId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n valid = True\n for _atomId in authAtomIds:\n if _atomId not in chemCompAtomIds:\n valid = False\n break\n if not valid:\n break\n if valid:\n compIdList.append(authCompId)\n for k, atomNum in self.__atomNumberDict.items():\n if atomNum['chain_id'] == chainId and atomNum['seq_id'] == seqId:\n atomNum['comp_id'] = authCompId\n\n if authCompId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, authCompId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n\n if atomId[0] not in protonBeginCode or atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n else:\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n\n else:\n compId = self.__csStat.getSimilarCompIdFromAtomIds([translateToStdAtomName(atomNum['auth_atom_id'],\n atomNum['auth_comp_id'],\n ccU=self.__ccU,\n unambig=True)\n for atomNum in self.__atomNumberDict.values()\n if atomNum['chain_id'] == chainId\n and atomNum['seq_id'] == seqId])\n if compId is not None:\n compIdList.append(compId + '?') # decide when coordinate is available\n chemCompAtomIds = None\n if self.__ccU.updateChemCompDict(compId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n for k, atomNum in self.__atomNumberDict.items():\n if atomNum['chain_id'] == chainId and atomNum['seq_id'] == seqId:\n atomNum['comp_id'] = compId\n\n if compId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, compId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, compId, chemCompAtomIds, ccU=self.__ccU, 
unambig=True)\n\n if chemCompAtomIds is not None and atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n elif chemCompAtomIds is not None:\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n else:\n compIdList.append('.')\n unknownAtomIds = [_atomId for _atomId in authAtomIds if _atomId not in chemCompAtomIds]\n self.__f.append(f\"[Unknown atom name] \"\n f\"{unknownAtomIds} are unknown atom names for {authCompId} residue.\")\n compIdList.append(f\"? {authCompId} {unknownAtomIds}\")\n else:\n compId = self.__csStat.getSimilarCompIdFromAtomIds([atomNum['auth_atom_id']\n for atomNum in self.__atomNumberDict.values()\n if atomNum['chain_id'] == chainId\n and atomNum['seq_id'] == seqId])\n if compId is not None:\n compIdList.append(compId + '?') # decide when coordinate is available\n chemCompAtomIds = None\n if self.__ccU.updateChemCompDict(compId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n for k, atomNum in self.__atomNumberDict.items():\n if atomNum['chain_id'] == chainId and atomNum['seq_id'] == seqId:\n atomNum['comp_id'] = compId\n\n if compId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, compId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, compId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n\n if chemCompAtomIds is not None and atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n elif chemCompAtomIds is not None:\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n else:\n compIdList.append('.')\n \"\"\" deferred to assignNonPolymer()\n self.__f.append(f\"[Unknown residue name] \"\n f\"{authCompId!r} is unknown residue name.\")\n \"\"\"\n\n ps['comp_id'] = compIdList\n\n for k, atomNum in self.__atomNumberDict.items():\n if 'atom_type' not in atomNum:\n continue\n if 'comp_id' in atomNum and atomNum['comp_id'] != atomNum['auth_comp_id']\\\n and 'atom_id' not in atomNum:\n compId = atomNum['comp_id']\n if self.__ccU.updateChemCompDict(compId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n\n if compId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, compId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, compId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n\n if atomId is not None and atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n elif atomNum['comp_id'] != atomNum['auth_comp_id']:\n authCompId = translateToStdResName(atomNum['auth_comp_id'], self.__ccU)\n if self.__ccU.updateChemCompDict(authCompId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n\n if authCompId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = 
retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, authCompId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n\n if atomId is not None and atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n else:\n authCompId = translateToStdResName(atomNum['auth_comp_id'], self.__ccU)\n if self.__ccU.updateChemCompDict(authCompId):\n\n if authCompId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomId = translateToStdAtomName(atomId, authCompId, ccU=self.__ccU, unambig=True)\n atomIds = self.__nefT.get_valid_star_atom_in_xplor(authCompId, atomId)[0]\n if len(atomIds) == 1:\n atomNum['atom_id'] = atomIds[0]\n if 'atom_type' in atomNum:\n del atomNum['atom_type']\n\n polySeqModel = copy.copy(self.__polySeqModel)\n if self.__hasBranchedModel:\n polySeqModel.extend(self.__branchedModel)\n\n self.__seqAlign, compIdMapping = alignPolymerSequence(self.__pA, polySeqModel, self.__polySeqPrmTop)\n\n if len(self.__seqAlign) == 0:\n for c in range(1, 5):\n self.__seqAlign, compIdMapping = alignPolymerSequenceWithConflicts(self.__pA, polySeqModel, self.__polySeqPrmTop, c)\n if len(self.__seqAlign) > 0:\n break\n\n if len(self.__seqAlign) == 0:\n len_cif_na = sum(len(ps_cif['seq_id']) for ps_cif in polySeqModel if 'identical_chain_id' in ps_cif and len(ps_cif['seq_id']) > 3)\n len_top_na = sum(len(ps_top['seq_id']) for ps_top in self.__polySeqPrmTop\n if len(ps_top['seq_id']) > 3 and any(compId in ('DA?', 'DT?', 'DG?', 'DC?', 'A?', 'U?', 'G?', 'C?') for compId in ps_top['comp_id']))\n if len_cif_na == len_top_na:\n chainIdList = []\n seqIdList = []\n authCompIdList = []\n for ps_top in self.__polySeqPrmTop:\n len_ps_cif_seq = len(ps_top['seq_id'])\n if len_ps_cif_seq > 3 and any(compId in ('DA?', 'DT?', 'DG?', 'DC?', 'A?', 'U?', 'G?', 'C?') for compId in ps_top['comp_id']):\n chainId = ps_top['chain_id']\n for seqId, compId in zip(ps_top['seq_id'], ps_top['auth_comp_id']):\n chainIdList.append(chainId)\n seqIdList.append(seqId)\n authCompIdList.append(compId)\n\n chainIndex = letterToDigit(self.__polySeqModel[0]['chain_id']) - 1\n idOffset = 0\n\n touched = []\n\n polySeqPrmTop = []\n for ps_cif in polySeqModel:\n len_ps_cif_seq = len(ps_cif['seq_id'])\n if 'identical_chain_id' in ps_cif and len_ps_cif_seq > 3:\n chainId = indexToLetter(chainIndex)\n polySeqPrmTop.append({'chain_id': chainId,\n 'seq_id': seqIdList[idOffset:idOffset + len_ps_cif_seq],\n 'comp_id': ps_cif['comp_id'],\n 'auth_comp_id': authCompIdList[idOffset:idOffset + len_ps_cif_seq]})\n\n for idx, (_chainId, _seqId) in enumerate(zip(chainIdList[idOffset:idOffset + len_ps_cif_seq],\n seqIdList[idOffset:idOffset + len_ps_cif_seq])):\n for k, atomNum in self.__atomNumberDict.items():\n if atomNum['chain_id'] == _chainId and atomNum['seq_id'] == _seqId:\n atomNum['chain_id'] = chainId\n atomNum['cif_comp_id'] = ps_cif['comp_id'][idx]\n touched.append(k)\n\n idOffset += len_ps_cif_seq\n chainIndex += 1\n\n for ps_top in self.__polySeqPrmTop:\n if len(ps_top['seq_id']) > 3 and any(compId in ('DA?', 'DT?', 'DG?', 'DC?', 'A?', 'U?', 'G?', 'C?') for compId in ps_top['comp_id']):\n 
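When the strict alignment returns nothing, the code above retries with a conflict budget that grows from 1 to 4. The control flow, factored out; align and align_with_conflicts stand in for the AlignUtil helpers:

def align_with_fallback(align, align_with_conflicts, ref_seqs, test_seqs):
    seq_align, mapping = align(ref_seqs, test_seqs)
    if len(seq_align) == 0:
        for conflicts in range(1, 5):          # tolerate 1..4 mismatched residues
            seq_align, mapping = align_with_conflicts(ref_seqs, test_seqs, conflicts)
            if len(seq_align) > 0:
                break                          # keep the loosest match that works
    return seq_align, mapping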
continue\n _chainId = copy.copy(ps_top['chain_id'])\n chainId = indexToLetter(chainIndex)\n ps_top['chain_id'] = chainId\n polySeqPrmTop.append(ps_top)\n\n for k, atomNum in self.__atomNumberDict.items():\n if k in touched:\n continue\n if atomNum['chain_id'] == _chainId:\n atomNum['chain_id'] = chainId\n touched.append(k)\n\n chainIndex += 1\n\n self.__polySeqPrmTop = polySeqPrmTop\n\n self.__seqAlign, compIdMapping = alignPolymerSequence(self.__pA, polySeqModel, self.__polySeqPrmTop)\n\n _seqAlign = copy.copy(self.__seqAlign)\n for sa in _seqAlign:\n if sa['ref_chain_id'] != sa['test_chain_id']:\n self.__seqAlign.remove(sa)\n\n # test chain assignment before applying comp_id mapping\n self.__chainAssign, message = assignPolymerSequence(self.__pA, self.__ccU, self.__file_type, self.__polySeqModel, self.__polySeqPrmTop, self.__seqAlign)\n\n for cmap in compIdMapping:\n if any(ca for ca in self.__chainAssign if ca['test_chain_id'] == cmap['chain_id']):\n for k, atomNum in self.__atomNumberDict.items():\n if atomNum['chain_id'] == cmap['chain_id'] and atomNum['seq_id'] == cmap['seq_id']:\n atomNum['comp_id'] = cmap['comp_id']\n atomNum['auth_comp_id'] = cmap['auth_comp_id']\n if 'atom_type' in atomNum:\n authCompId = cmap['auth_comp_id']\n if self.__ccU.updateChemCompDict(authCompId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n\n if authCompId in nonPolyCompIdList and self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n atomNum['atom_id'] = translateToStdAtomName(atomId, authCompId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n del atomNum['atom_type']\n\n for k, atomNum in self.__atomNumberDict.items():\n if 'atom_type' not in atomNum:\n continue\n if 'atom_id' not in atomNum:\n if 'comp_id' not in atomNum or atomNum['comp_id'] == atomNum['auth_comp_id']:\n authCompId = translateToStdResName(atomNum['auth_comp_id'], self.__ccU)\n\n if self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n if self.__ccU.updateChemCompDict(authCompId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n atomId = translateToStdAtomName(atomId, authCompId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n continue\n if self.__chemCompAtom is not None:\n if 'comp_id' in atomNum and atomNum['comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n if 'cif_comp_id' in atomNum and atomNum['cif_comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['cif_comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n self.__f.append(f\"[Unknown atom name] \"\n f\"{atomNum['auth_atom_id']!r} is not recognized as the atom name of {atomNum['auth_comp_id']!r} residue.\")\n elif self.__chemCompAtom is not None:\n if 'comp_id' in atomNum and atomNum['comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n if 'cif_comp_id' in atomNum and 
atomNum['cif_comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['cif_comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n else:\n authCompId = translateToStdResName(atomNum['auth_comp_id'], self.__ccU)\n\n if self.__mrAtomNameMapping is not None\\\n and atomNum['auth_atom_id'][0] in protonBeginCode and k not in retrievedAtomNumList:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, authCompId, atomNum['auth_atom_id'], None, True)\n else:\n atomId = atomNum['auth_atom_id']\n\n if self.__ccU.updateChemCompDict(authCompId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n atomId = translateToStdAtomName(atomId, authCompId, chemCompAtomIds, ccU=self.__ccU, unambig=True)\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n continue\n if self.__chemCompAtom is not None:\n if 'comp_id' in atomNum and atomNum['comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n if 'cif_comp_id' in atomNum and atomNum['cif_comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['cif_comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n atomNum['atom_id'] = atomNum['auth_atom_id']\n self.__f.append(f\"[Unknown atom name] \"\n f\"{atomNum['auth_atom_id']!r} is not recognized as the atom name of {atomNum['comp_id']!r} residue \"\n f\"(the original residue label is {atomNum['auth_comp_id']!r}).\")\n elif self.__chemCompAtom is not None:\n if 'comp_id' in atomNum and atomNum['comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n if 'cif_comp_id' in atomNum and atomNum['cif_comp_id'] in self.__chemCompAtom:\n if atomId in self.__chemCompAtom[atomNum['cif_comp_id']]:\n atomNum['atom_id'] = atomId\n continue\n\n self.__chainAssign, message = assignPolymerSequence(self.__pA, self.__ccU, self.__file_type, self.__polySeqModel, self.__polySeqPrmTop, self.__seqAlign)\n\n if len(message) > 0:\n self.__f.extend(message)\n\n if len(self.__seqAlign) == 0:\n mrFormatName = getRestraintFormatName(self.__file_type)\n _a_mr_format_name = 'the ' + mrFormatName\n\n ref_code = getOneLetterCodeSequence(self.__polySeqModel[0]['comp_id'])\n test_code = getOneLetterCodeSequence(self.__polySeqPrmTop[0]['comp_id'])\n\n hint = ''\n if abs(len(ref_code) - len(test_code)) < 20 and len(ref_code) > 40:\n hint = f\"For example, coordinates ({self.__polySeqModel[0]['auth_chain_id']}): {ref_code} vs topology: {test_code}. \"\n\n self.__f.append(f\"[Sequence mismatch] Polymer sequence between the coordinate and {_a_mr_format_name} data does not match. 
{hint}\"\n \"Please verify the two sequences and re-upload the correct file(s) if required.\")\n\n assi_ref_chain_ids = {}\n proc_test_chain_ids = []\n atom_nums = []\n delete_atom_nums = []\n\n def update_atom_num(seq_align, orphan):\n ref_chain_id = seq_align['ref_chain_id']\n test_chain_id = seq_align['test_chain_id']\n\n if ref_chain_id in assi_ref_chain_ids or test_chain_id in proc_test_chain_ids:\n return\n\n ps_cif = next(ps for ps in self.__polySeqModel if ps['auth_chain_id'] == ref_chain_id)\n\n if ref_chain_id not in assi_ref_chain_ids:\n assi_ref_chain_ids[ref_chain_id] = seq_align['length'] - seq_align['matched'] - seq_align['conflict']\n else:\n assi_ref_chain_ids[ref_chain_id] -= seq_align['matched'] + seq_align['conflict']\n proc_test_chain_ids.append(test_chain_id)\n\n offset = first_seq_id = None\n\n for atom_num, atomNum in self.__atomNumberDict.items():\n if atom_num in atom_nums:\n continue\n if atomNum['chain_id'] == test_chain_id:\n atom_nums.append(atom_num)\n\n test_seq_id = atomNum['seq_id']\n\n if first_seq_id is None:\n first_seq_id = test_seq_id\n\n if test_seq_id in seq_align['test_seq_id']:\n idx = seq_align['test_seq_id'].index(test_seq_id)\n if 'ref_auth_seq_id' in seq_align and idx < len(seq_align['ref_auth_seq_id']):\n ref_seq_id = seq_align['ref_auth_seq_id'][idx]\n elif offset is not None:\n ref_seq_id = test_seq_id + offset\n else:\n continue\n elif offset is not None:\n ref_seq_id = test_seq_id + offset\n else:\n continue\n\n if offset is None:\n offset = ref_seq_id - test_seq_id\n\n atomNum['chain_id'] = ref_chain_id\n atomNum['seq_id'] = ref_seq_id\n\n if ref_seq_id in ps_cif['auth_seq_id']:\n idx = ps_cif['auth_seq_id'].index(ref_seq_id)\n atomNum['comp_id'] = ps_cif['comp_id'][idx]\n\n if orphan and test_seq_id == first_seq_id and self.__csStat.getTypeOfCompId(atomNum['comp_id'])[0]:\n if self.__ccU.updateChemCompDict(atomNum['comp_id']):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n leavingAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList\n if cca[self.__ccU.ccaLeavingAtomFlag] == 'Y']\n if atomNum['atom_id'] not in chemCompAtomIds or atomNum['atom_id'] in leavingAtomIds:\n delete_atom_nums.append(atom_num)\n\n while True:\n\n orphanPolySeqPrmTop = []\n\n for ps in self.__polySeqPrmTop:\n test_chain_id = ps['chain_id']\n if test_chain_id in proc_test_chain_ids:\n continue\n try:\n ca = next(ca for ca in self.__chainAssign if ca['test_chain_id'] == test_chain_id)\n\n ref_chain_id = ca['ref_chain_id']\n sa = next((sa for sa in self.__seqAlign\n if sa['ref_chain_id'] == ref_chain_id and sa['test_chain_id'] == test_chain_id), None)\n\n if sa is not None: # and sa['conflict'] == 0:\n update_atom_num(sa, False)\n\n except StopIteration:\n orphanPolySeqPrmTop.append(ps)\n\n resolved = False\n\n if len(orphanPolySeqPrmTop) > 0:\n max_length = max(len(ps['seq_id']) for ps in orphanPolySeqPrmTop)\n __polySeqModel__ = [ps for ps in self.__polySeqModel\n if ps['auth_chain_id'] not in assi_ref_chain_ids\n or assi_ref_chain_ids[ps['auth_chain_id']] >= max_length]\n __seqAlign__, _ = alignPolymerSequence(self.__pA, __polySeqModel__, orphanPolySeqPrmTop)\n if len(__seqAlign__) > 0:\n for sa in __seqAlign__:\n if sa['conflict'] == 0:\n update_atom_num(sa, True)\n\n resolved = True\n\n if not resolved:\n for c in range(1, 5):\n __seqAlign__, _ = alignPolymerSequenceWithConflicts(self.__pA, __polySeqModel__, orphanPolySeqPrmTop, c)\n if len(__seqAlign__) > 0:\n for sa in __seqAlign__:\n if sa['conflict'] 
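update_atom_num above renumbers topology residues into coordinate numbering: aligned residues take their mapped number, and the first mapping fixes an offset used to extrapolate unaligned ones. The same idea on plain data; the names are mine:

def renumber(test_seq_ids, mapped):
    # mapped: {test_seq_id: ref_seq_id} pairs taken from the sequence alignment
    offset = None
    result = {}
    for sid in test_seq_ids:
        if sid in mapped:
            ref = mapped[sid]
            if offset is None:
                offset = ref - sid           # remember the first observed shift
        elif offset is not None:
            ref = sid + offset               # extrapolate past the aligned stretch
        else:
            continue                         # nothing known yet; skip this residue
        result[sid] = ref
    return result

assert renumber([1, 2, 3], {1: 11, 2: 12}) == {1: 11, 2: 12, 3: 13}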
<= c:\n update_atom_num(sa, True)\n\n resolved = True\n if resolved:\n break\n\n if not resolved:\n break\n\n for ps in self.__polySeqPrmTop:\n test_chain_id = ps['chain_id']\n\n if test_chain_id in proc_test_chain_ids:\n continue\n\n for cif_ps in self.__polySeqModel:\n ref_chain_id = cif_ps['auth_chain_id']\n\n if ref_chain_id in assi_ref_chain_ids:\n continue\n\n len_gap = abs(len(ps['seq_id']) - len(cif_ps['auth_seq_id']))\n\n if len_gap > 20:\n continue\n\n if len_gap == 0:\n offset = cif_ps['auth_seq_id'][0] - ps['seq_id'][0]\n\n for atomNum in self.__atomNumberDict.values():\n if atomNum['chain_id'] == test_chain_id:\n atomNum['chain_id'] = ref_chain_id\n if len_gap == 0:\n atomNum['seq_id'] += offset\n\n proc_test_chain_ids.append(test_chain_id)\n assi_ref_chain_ids[ref_chain_id] = len_gap\n\n if len(delete_atom_nums) > 0:\n for atom_num in sorted(delete_atom_nums, reverse=True):\n del self.__atomNumberDict[atom_num]\n\n if self.__chainAssign is not None:\n trimSequenceAlignment(self.__seqAlign, self.__chainAssign)\n\n if self.__hasNonPolyModel:\n\n # metal ion\n if any(ps for ps in self.__polySeqPrmTop\n if len(ps['seq_id']) == 1 and ps['comp_id'][0].title() in NAMES_ELEMENT):\n self.assignMetalIon()\n\n # other non-polymer\n nonPolyIndices = [idx for idx, ps in enumerate(self.__polySeqPrmTop)\n if not any(ca for ca in self.__chainAssign\n if ca['test_chain_id'] == ps['chain_id'])\n and len(set(ps['comp_id'])) > 0 and ps['comp_id'][0] == '.']\n\n if len(nonPolyIndices) > 0:\n self.assignNonPolymer(nonPolyIndices)\n\n for idx in sorted(nonPolyIndices, reverse=True):\n del self.__polySeqPrmTop[idx]\n\n if self.__hasNonPolyModel:\n compIdMapping = {}\n mappedSeqVal = []\n mappedAtomNum = []\n\n for np in self.__nonPolyModel:\n authChainId = np['auth_chain_id']\n authSeqId = np['auth_seq_id'][0]\n compId = np['comp_id'][0]\n\n for k, v in self.__atomNumberDict.items():\n if k in mappedAtomNum:\n continue\n if 'comp_id' in v and v['comp_id'] == compId:\n seqKey = (v['comp_id'], v['chain_id'], v['seq_id'])\n seqVal = (authChainId, authSeqId)\n if seqKey not in compIdMapping:\n if seqVal not in mappedSeqVal:\n compIdMapping[seqKey] = seqVal\n if seqKey in compIdMapping:\n v['chain_id'], v['seq_id'] = compIdMapping[seqKey]\n mappedSeqVal.append(seqVal)\n mappedAtomNum.append(k)\n\n finally:\n self.warningMessage = sorted(list(set(self.__f)), key=self.__f.index)\n\n def assignMetalIon(self):\n if not self.__hasNonPolyModel:\n return\n\n metals = collections.Counter(s2['comp_id'][0] for s2 in self.__polySeqPrmTop\n if len(s2['seq_id']) == 1 and s2['comp_id'][0].title() in NAMES_ELEMENT).most_common()\n\n for metal in metals:\n compId = metal[0]\n\n atomNums = [atomNum for atomNum in self.__atomNumberDict.values()\n if atomNum['auth_comp_id'] == compId and atomNum['auth_atom_id'] == compId]\n\n nonPolys = [nonPoly for nonPoly in self.__nonPolyModel\n if nonPoly['comp_id'][0] == compId]\n\n for atomNum, nonPoly in zip(atomNums, nonPolys):\n atomNum['chain_id'] = nonPoly['auth_chain_id']\n atomNum['seq_id'] = nonPoly['auth_seq_id'][0]\n\n def assignNonPolymer(self, nonPolyIndices):\n if not self.__hasNonPolyModel:\n return\n\n authCompIds = []\n\n for idx, ps in enumerate(self.__polySeqPrmTop):\n if idx not in nonPolyIndices:\n continue\n for authCompId, compId in zip(ps['auth_comp_id'], ps['comp_id']):\n if compId != '.':\n continue\n authCompIds.append(authCompId)\n\n nonPolyCompIds = collections.Counter(authCompIds).most_common()\n\n compIds = []\n for nonPoly in self.__nonPolyModel:\n 
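assignMetalIon above ranks candidate metal species with collections.Counter(...).most_common() and then pairs them with the model's non-polymer entities in frequency order. The counting step on toy data:

import collections

comp_ids = ['ZN', 'CA', 'ZN', 'MG', 'ZN']
metals = collections.Counter(comp_ids).most_common()
# -> [('ZN', 3), ('CA', 1), ('MG', 1)]: most frequent species first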
compId = nonPoly['comp_id'][0]\n if compId.title() in NAMES_ELEMENT:\n continue\n compIds.append(compId)\n\n refCompIds = collections.Counter(compIds).most_common()\n\n comp_id_mapping = {}\n\n for authCompId in nonPolyCompIds:\n refCompId = next((compId[0] for compId in refCompIds if compId[1] == authCompId[1] and compId[1] not in comp_id_mapping.values()), None)\n if refCompId is None:\n self.__f.append(f\"[Unknown residue name] \"\n f\"{authCompId[0]!r} is unknown residue name.\")\n continue\n comp_id_mapping[authCompId[0]] = refCompId\n\n for authCompId, compId in comp_id_mapping.items():\n chemCompAtomIds = None\n if self.__ccU.updateChemCompDict(compId):\n chemCompAtomIds = [cca[self.__ccU.ccaAtomId] for cca in self.__ccU.lastAtomList]\n\n authSeqKeys = []\n\n for idx, ps in enumerate(self.__polySeqPrmTop):\n if idx not in nonPolyIndices:\n continue\n _chainId = ps['chain_id']\n for _authCompId, _compId, _seqId in zip(ps['auth_comp_id'], ps['comp_id'], ps['seq_id']):\n if _authCompId != authCompId or _compId != '.':\n continue\n authSeqKeys.append((_chainId, _seqId))\n\n nonPolys = [nonPoly for nonPoly in self.__nonPolyModel\n if nonPoly['comp_id'][0] == compId]\n\n reported_auth_atom_id = []\n\n for authSeqKey, nonPoly in zip(authSeqKeys, nonPolys):\n atomNums = [atomNum for atomNum in self.__atomNumberDict.values()\n if atomNum['chain_id'] == authSeqKey[0] and atomNum['seq_id'] == authSeqKey[1]]\n authAtomNames = [atomNum['auth_atom_id'] for atomNum in self.__atomNumberDict.values()\n if atomNum['chain_id'] == authSeqKey[0] and atomNum['seq_id'] == authSeqKey[1]]\n\n for atomNum in atomNums:\n atomNum['chain_id'] = nonPoly['auth_chain_id']\n atomNum['seq_id'] = nonPoly['auth_seq_id'][0]\n atomNum['comp_id'] = compId\n authAtomId = atomNum['auth_atom_id']\n if chemCompAtomIds is not None and authAtomId in chemCompAtomIds:\n atomNum['atom_id'] = authAtomId\n else:\n dmpcNameSystemId = -1\n if compId == 'PX4':\n if 'OE' in authAtomNames:\n dmpcNameSystemId = 1\n elif 'OS31' in authAtomNames:\n dmpcNameSystemId = 2\n elif 'O21' in authAtomNames:\n if 'C314' in authAtomNames:\n dmpcNameSystemId = 3\n elif 'C114' in authAtomNames:\n dmpcNameSystemId = 4\n\n if dmpcNameSystemId != -1:\n atomId = translateToStdAtomNameOfDmpc(authAtomId, dmpcNameSystemId)\n else:\n atomId = translateToStdAtomName(authAtomId, compId, chemCompAtomIds, ccU=self.__ccU)\n\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n else:\n _, _, atomId = retrieveAtomIdentFromMRMap(self.__mrAtomNameMapping, None, compId, authAtomId, None, True)\n\n if atomId in chemCompAtomIds:\n atomNum['atom_id'] = atomId\n continue\n if authAtomId not in reported_auth_atom_id:\n atomNum['atom_id'] = atomNum['auth_atom_id']\n self.__f.append(f\"[Unknown atom name] \"\n f\"{authAtomId!r} is not recognized as the atom name of {compId!r} residue \"\n f\"(the original residue label is {authCompId!r}).\")\n reported_auth_atom_id.append(authAtomId)\n\n # Enter a parse tree produced by GromacsPTParser#default_statement.\n def enterDefault_statement(self, ctx: GromacsPTParser.Default_statementContext): # pylint: disable=unused-argument\n self.defaultStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#default_statement.\n def exitDefault_statement(self, ctx: GromacsPTParser.Default_statementContext):\n if ctx.Integer(0):\n return\n self.defaultStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#moleculetype_statement.\n def enterMoleculetype_statement(self, ctx: 
GromacsPTParser.Moleculetype_statementContext): # pylint: disable=unused-argument\n self.moleculetypeStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#moleculetype_statement.\n def exitMoleculetype_statement(self, ctx: GromacsPTParser.Moleculetype_statementContext):\n if ctx.moleculetype(0):\n return\n self.moleculetypeStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#moleculetype.\n def enterMoleculetype(self, ctx: GromacsPTParser.MoleculetypeContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#moleculetype.\n def exitMoleculetype(self, ctx: GromacsPTParser.MoleculetypeContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#atomtypes_statement.\n def enterAtomtypes_statement(self, ctx: GromacsPTParser.Atomtypes_statementContext): # pylint: disable=unused-argument\n self.atomtypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#atomtypes_statement.\n def exitAtomtypes_statement(self, ctx: GromacsPTParser.Atomtypes_statementContext):\n if ctx.atomtypes(0):\n return\n self.atomtypesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#atomtypes.\n def enterAtomtypes(self, ctx: GromacsPTParser.AtomtypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#atomtypes.\n def exitAtomtypes(self, ctx: GromacsPTParser.AtomtypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#pairtypes_statement.\n def enterPairtypes_statement(self, ctx: GromacsPTParser.Pairtypes_statementContext): # pylint: disable=unused-argument\n self.pairtypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#pairtypes_statement.\n def exitPairtypes_statement(self, ctx: GromacsPTParser.Pairtypes_statementContext):\n if ctx.pairtypes(0):\n return\n self.pairtypesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#pairtypes.\n def enterPairtypes(self, ctx: GromacsPTParser.PairtypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#pairtypes.\n def exitPairtypes(self, ctx: GromacsPTParser.PairtypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#bondtypes_statement.\n def enterBondtypes_statement(self, ctx: GromacsPTParser.Bondtypes_statementContext): # pylint: disable=unused-argument\n self.bondtypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#bondtypes_statement.\n def exitBondtypes_statement(self, ctx: GromacsPTParser.Bondtypes_statementContext):\n if ctx.bondtypes(0):\n return\n self.bondtypesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#bondtypes.\n def enterBondtypes(self, ctx: GromacsPTParser.BondtypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#bondtypes.\n def exitBondtypes(self, ctx: GromacsPTParser.BondtypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#angletypes_statement.\n def enterAngletypes_statement(self, ctx: GromacsPTParser.Angletypes_statementContext): # pylint: disable=unused-argument\n self.angletypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#angletypes_statement.\n def exitAngletypes_statement(self, ctx: GromacsPTParser.Angletypes_statementContext):\n if ctx.angletypes(0):\n return\n self.angletypesStatements -= 1\n\n 
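Every [ section ] of the topology gets the same enter/exit pair: enter counts the section, exit uncounts it again if the section turned out to hold no rows. The pattern in a minimal ANTLR4 listener; the section name is generic:

from antlr4 import ParseTreeListener

class SectionCounter(ParseTreeListener):
    def __init__(self):
        self.bondtypesStatements = 0

    def enterBondtypes_statement(self, ctx):  # pylint: disable=unused-argument
        self.bondtypesStatements += 1         # assume the section is populated

    def exitBondtypes_statement(self, ctx):
        if ctx.bondtypes(0):                  # at least one row parsed: keep the count
            return
        self.bondtypesStatements -= 1         # empty section: roll the count back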
# Enter a parse tree produced by GromacsPTParser#angletypes.\n def enterAngletypes(self, ctx: GromacsPTParser.AngletypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#angletypes.\n def exitAngletypes(self, ctx: GromacsPTParser.AngletypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#dihedraltypes_statement.\n def enterDihedraltypes_statement(self, ctx: GromacsPTParser.Dihedraltypes_statementContext): # pylint: disable=unused-argument\n self.dihedraltypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#dihedraltypes_statement.\n def exitDihedraltypes_statement(self, ctx: GromacsPTParser.Dihedraltypes_statementContext):\n if ctx.dihedraltypes(0):\n return\n self.dihedraltypesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#dihedraltypes.\n def enterDihedraltypes(self, ctx: GromacsPTParser.DihedraltypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#dihedraltypes.\n def exitDihedraltypes(self, ctx: GromacsPTParser.DihedraltypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#constrainttypes_statement.\n def enterConstrainttypes_statement(self, ctx: GromacsPTParser.Constrainttypes_statementContext): # pylint: disable=unused-argument\n self.constrainttypesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#constrainttypes_statement.\n def exitConstrainttypes_statement(self, ctx: GromacsPTParser.Constrainttypes_statementContext):\n if ctx.constrainttypes(0):\n return\n self.constrainttypesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#constrainttypes.\n def enterConstrainttypes(self, ctx: GromacsPTParser.ConstrainttypesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#constrainttypes.\n def exitConstrainttypes(self, ctx: GromacsPTParser.ConstrainttypesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#nonbonded_params_statement.\n def enterNonbonded_params_statement(self, ctx: GromacsPTParser.Nonbonded_params_statementContext): # pylint: disable=unused-argument\n self.nonbond_paramsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#nonbonded_params_statement.\n def exitNonbonded_params_statement(self, ctx: GromacsPTParser.Nonbonded_params_statementContext):\n if ctx.nonbonded_params(0):\n return\n self.nonbond_paramsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#nonbonded_params.\n def enterNonbonded_params(self, ctx: GromacsPTParser.Nonbonded_paramsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#nonbonded_params.\n def exitNonbonded_params(self, ctx: GromacsPTParser.Nonbonded_paramsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#atoms_statement.\n def enterAtoms_statement(self, ctx: GromacsPTParser.Atoms_statementContext): # pylint: disable=unused-argument\n self.atomsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#atoms_statement.\n def exitAtoms_statement(self, ctx: GromacsPTParser.Atoms_statementContext): # pylint: disable=unused-argument\n if ctx.atoms(0):\n return\n self.atomsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#atoms.\n def enterAtoms(self, ctx: GromacsPTParser.AtomsContext): # pylint: 
disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#atoms.\n def exitAtoms(self, ctx: GromacsPTParser.AtomsContext):\n\n try:\n\n nr = int(str(ctx.Integer(0)))\n seqId = int(str(ctx.Integer(1)))\n # cgnr = int(str(ctx.Integer(2)))\n\n atomType = str(ctx.Simple_name(0))\n compId = str(ctx.Simple_name(1))\n atomId = str(ctx.Simple_name(2))\n\n atom = {'atom_number': nr,\n 'auth_seq_id': seqId,\n 'auth_comp_id': compId,\n 'auth_atom_id': atomId,\n 'atom_type': atomType}\n\n if atom not in self.__atoms:\n self.__atoms.append(atom)\n\n except ValueError:\n pass\n finally:\n self.numberSelection.clear()\n\n # Enter a parse tree produced by GromacsPTParser#bonds_statement.\n def enterBonds_statement(self, ctx: GromacsPTParser.Bonds_statementContext): # pylint: disable=unused-argument\n self.bondsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#bonds_statement.\n def exitBonds_statement(self, ctx: GromacsPTParser.Bonds_statementContext):\n if ctx.bonds(0):\n return\n self.bondsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#bonds.\n def enterBonds(self, ctx: GromacsPTParser.BondsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#bonds.\n def exitBonds(self, ctx: GromacsPTParser.BondsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#pairs_statement.\n def enterPairs_statement(self, ctx: GromacsPTParser.Pairs_statementContext): # pylint: disable=unused-argument\n self.pairsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#pairs_statement.\n def exitPairs_statement(self, ctx: GromacsPTParser.Pairs_statementContext):\n if ctx.pairs(0):\n return\n self.pairsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#pairs.\n def enterPairs(self, ctx: GromacsPTParser.PairsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#pairs.\n def exitPairs(self, ctx: GromacsPTParser.PairsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#pairs_nb_statement.\n def enterPairs_nb_statement(self, ctx: GromacsPTParser.Pairs_nb_statementContext): # pylint: disable=unused-argument\n self.pairs_nbStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#pairs_nb_statement.\n def exitPairs_nb_statement(self, ctx: GromacsPTParser.Pairs_nb_statementContext):\n if ctx.pairs_nb(0):\n return\n self.pairs_nbStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#pairs_nb.\n def enterPairs_nb(self, ctx: GromacsPTParser.Pairs_nbContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#pairs_nb.\n def exitPairs_nb(self, ctx: GromacsPTParser.Pairs_nbContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#angles_statement.\n def enterAngles_statement(self, ctx: GromacsPTParser.Angles_statementContext): # pylint: disable=unused-argument\n self.anglesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#angles_statement.\n def exitAngles_statement(self, ctx: GromacsPTParser.Angles_statementContext):\n if ctx.angles(0):\n return\n self.anglesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#angles.\n def enterAngles(self, ctx: GromacsPTParser.AnglesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#angles.\n def exitAngles(self, ctx: 
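exitAtoms above reads one row of the GROMACS '[ atoms ]' section (columns: nr, type, resnr, residue, atom, cgnr, charge, mass) into a dict. The same mapping as a plain string parser, with an illustrative sample row:

def parse_atoms_row(line):
    nr, atom_type, resnr, residue, atom = line.split()[:5]
    return {'atom_number': int(nr),
            'auth_seq_id': int(resnr),
            'auth_comp_id': residue,
            'auth_atom_id': atom,
            'atom_type': atom_type}

row = parse_atoms_row('1  N3  1  ALA  N  1  -0.3  14.010')
assert row['auth_comp_id'] == 'ALA' and row['atom_type'] == 'N3'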
GromacsPTParser.AnglesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#dihedrals_statement.\n def enterDihedrals_statement(self, ctx: GromacsPTParser.Dihedrals_statementContext): # pylint: disable=unused-argument\n self.dihedralsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#dihedrals_statement.\n def exitDihedrals_statement(self, ctx: GromacsPTParser.Dihedrals_statementContext):\n if ctx.dihedrals(0):\n return\n self.dihedralsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#dihedrals.\n def enterDihedrals(self, ctx: GromacsPTParser.DihedralsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#dihedrals.\n def exitDihedrals(self, ctx: GromacsPTParser.DihedralsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#exclusions_statement.\n def enterExclusions_statement(self, ctx: GromacsPTParser.Exclusions_statementContext): # pylint: disable=unused-argument\n self.exclusionsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#exclusions_statement.\n def exitExclusions_statement(self, ctx: GromacsPTParser.Exclusions_statementContext):\n if ctx.exclusions(0):\n return\n self.exclusionsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#exclusions.\n def enterExclusions(self, ctx: GromacsPTParser.ExclusionsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#exclusions.\n def exitExclusions(self, ctx: GromacsPTParser.ExclusionsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#constraints_statement.\n def enterConstraints_statement(self, ctx: GromacsPTParser.Constraints_statementContext): # pylint: disable=unused-argument\n self.constraintsStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#constraints_statement.\n def exitConstraints_statement(self, ctx: GromacsPTParser.Constraints_statementContext):\n if ctx.constraints(0):\n return\n self.constraintsStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#constraints.\n def enterConstraints(self, ctx: GromacsPTParser.ConstraintsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#constraints.\n def exitConstraints(self, ctx: GromacsPTParser.ConstraintsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#settles_statement.\n def enterSettles_statement(self, ctx: GromacsPTParser.Settles_statementContext): # pylint: disable=unused-argument\n self.settlesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#settles_statement.\n def exitSettles_statement(self, ctx: GromacsPTParser.Settles_statementContext):\n if ctx.settles(0):\n return\n self.settlesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#settles.\n def enterSettles(self, ctx: GromacsPTParser.SettlesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#settles.\n def exitSettles(self, ctx: GromacsPTParser.SettlesContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites1_statement.\n def enterVirtual_sites1_statement(self, ctx: GromacsPTParser.Virtual_sites1_statementContext): # pylint: disable=unused-argument\n self.virtual_sites1Statements += 1\n\n # Exit a parse tree produced by 
GromacsPTParser#virtual_sites1_statement.\n def exitVirtual_sites1_statement(self, ctx: GromacsPTParser.Virtual_sites1_statementContext):\n if ctx.virtual_sites1(0):\n return\n self.virtual_sites1Statements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites1.\n def enterVirtual_sites1(self, ctx: GromacsPTParser.Virtual_sites1Context): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites1.\n def exitVirtual_sites1(self, ctx: GromacsPTParser.Virtual_sites1Context): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites2_statement.\n def enterVirtual_sites2_statement(self, ctx: GromacsPTParser.Virtual_sites2_statementContext): # pylint: disable=unused-argument\n self.virtual_sites2Statements += 1\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites2_statement.\n def exitVirtual_sites2_statement(self, ctx: GromacsPTParser.Virtual_sites2_statementContext):\n if ctx.virtual_sites2(0):\n return\n self.virtual_sites2Statements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites2.\n def enterVirtual_sites2(self, ctx: GromacsPTParser.Virtual_sites2Context): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites2.\n def exitVirtual_sites2(self, ctx: GromacsPTParser.Virtual_sites2Context): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites3_statement.\n def enterVirtual_sites3_statement(self, ctx: GromacsPTParser.Virtual_sites3_statementContext): # pylint: disable=unused-argument\n self.virtual_sites3Statements += 1\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites3_statement.\n def exitVirtual_sites3_statement(self, ctx: GromacsPTParser.Virtual_sites3_statementContext):\n if ctx.virtual_sites3(0):\n return\n self.virtual_sites3Statements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites3.\n def enterVirtual_sites3(self, ctx: GromacsPTParser.Virtual_sites3Context): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites3.\n def exitVirtual_sites3(self, ctx: GromacsPTParser.Virtual_sites3Context): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites4_statement.\n def enterVirtual_sites4_statement(self, ctx: GromacsPTParser.Virtual_sites4_statementContext): # pylint: disable=unused-argument\n self.virtual_sites4Statements += 1\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites4_statement.\n def exitVirtual_sites4_statement(self, ctx: GromacsPTParser.Virtual_sites4_statementContext):\n if ctx.virtual_sites4(0):\n return\n self.virtual_sites4Statements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sites4.\n def enterVirtual_sites4(self, ctx: GromacsPTParser.Virtual_sites4Context): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sites4.\n def exitVirtual_sites4(self, ctx: GromacsPTParser.Virtual_sites4Context): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sitesn_statement.\n def enterVirtual_sitesn_statement(self, ctx: GromacsPTParser.Virtual_sitesn_statementContext): # pylint: disable=unused-argument\n self.virtual_sitesnStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sitesn_statement.\n def exitVirtual_sitesn_statement(self, 
ctx: GromacsPTParser.Virtual_sitesn_statementContext):\n if ctx.virtual_sitesn(0):\n return\n self.virtual_sitesnStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#virtual_sitesn.\n def enterVirtual_sitesn(self, ctx: GromacsPTParser.Virtual_sitesnContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#virtual_sitesn.\n def exitVirtual_sitesn(self, ctx: GromacsPTParser.Virtual_sitesnContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#system_statement.\n def enterSystem_statement(self, ctx: GromacsPTParser.System_statementContext): # pylint: disable=unused-argument\n self.systemStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#system_statement.\n def exitSystem_statement(self, ctx: GromacsPTParser.System_statementContext):\n if ctx.Simple_name_AA(0):\n title = []\n i = 0\n while ctx.Simple_name_AA(i):\n title.append(str(ctx.Simple_name_AA(i)))\n i += 1\n\n self.__system = ' '.join(title)\n return\n self.systemStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#molecules_statement.\n def enterMolecules_statement(self, ctx: GromacsPTParser.Molecules_statementContext): # pylint: disable=unused-argument\n self.moleculesStatements += 1\n\n # Exit a parse tree produced by GromacsPTParser#molecules_statement.\n def exitMolecules_statement(self, ctx: GromacsPTParser.Molecules_statementContext):\n if ctx.molecules(0):\n return\n self.moleculesStatements -= 1\n\n # Enter a parse tree produced by GromacsPTParser#molecules.\n def enterMolecules(self, ctx: GromacsPTParser.MoleculesContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#molecules.\n def exitMolecules(self, ctx: GromacsPTParser.MoleculesContext):\n name = str(ctx.Simple_name())\n number = int(str(ctx.Integer()))\n if number > 0:\n self.__molecules.append({'molecule_name': name, 'number_of_copies': number})\n\n # Enter a parse tree produced by GromacsPTParser#number.\n def enterNumber(self, ctx: GromacsPTParser.NumberContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#number.\n def exitNumber(self, ctx: GromacsPTParser.NumberContext):\n \"\"\" not used the 'number' in the '[ atoms ]' statement so that pass through for performance\n if ctx.Real():\n self.numberSelection.append(float(str(ctx.Real())))\n\n elif ctx.Integer():\n self.numberSelection.append(float(str(ctx.Integer())))\n\n else:\n self.numberSelection.append(None)\n \"\"\"\n\n # Enter a parse tree produced by GromacsPTParser#position_restraints.\n def enterPosition_restraints(self, ctx: GromacsPTParser.Position_restraintsContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#position_restraints.\n def exitPosition_restraints(self, ctx: GromacsPTParser.Position_restraintsContext): # pylint: disable=unused-argument\n pass\n\n # Enter a parse tree produced by GromacsPTParser#position_restraint.\n def enterPosition_restraint(self, ctx: GromacsPTParser.Position_restraintContext): # pylint: disable=unused-argument\n pass\n\n # Exit a parse tree produced by GromacsPTParser#position_restraint.\n def exitPosition_restraint(self, ctx: GromacsPTParser.Position_restraintContext): # pylint: disable=unused-argument\n pass\n\n def getContentSubtype(self):\n \"\"\" Return content subtype of GROMACS parameter/topology file.\n \"\"\"\n\n contentSubtype = {'default': self.defaultStatements,\n 
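# one counter per recognized topology section; getContentSubtype reports only the sections that actually occurred (added note)\n                          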
'moleculetype': self.moleculetypeStatements,\n 'atomtypes': self.atomtypesStatements,\n 'pairtypes': self.pairtypesStatements,\n 'bondtypes': self.bondtypesStatements,\n 'angletypes': self.angletypesStatements,\n 'dihedraltypes': self.dihedraltypesStatements,\n 'constrainttypes': self.constrainttypesStatements,\n 'nonbond_params': self.nonbond_paramsStatements,\n 'atoms': self.atomsStatements,\n 'bonds': self.bondsStatements,\n 'pairs': self.pairsStatements,\n 'pairs_nb': self.pairs_nbStatements,\n 'angles': self.anglesStatements,\n 'dihedrals': self.dihedralsStatements,\n 'exclusions': self.exclusionsStatements,\n 'constraints': self.constraintsStatements,\n 'settles': self.settlesStatements,\n 'virtual_sites1': self.virtual_sites1Statements,\n 'virtual_sites2': self.virtual_sites2Statements,\n 'virtual_sites3': self.virtual_sites3Statements,\n 'virtual_sites4': self.virtual_sites4Statements,\n 'virtual_sitesn': self.virtual_sitesnStatements,\n 'system': self.systemStatements,\n 'molecules': self.moleculesStatements\n }\n\n return {k: 1 for k, v in contentSubtype.items() if v > 0}\n\n def getSystem(self):\n \"\"\" Return system name of GROMACS parameter/topology file.\n \"\"\"\n return self.__system\n\n def getMolecules(self):\n \"\"\" Return list of molecules and its number of copies in GROMACS parameter/topology file.\n \"\"\"\n return self.__molecules\n\n def getAtomNumberDict(self):\n \"\"\" Return GROMACS atomic number dictionary.\n \"\"\"\n return self.__atomNumberDict\n\n def getPolymerSequence(self):\n \"\"\" Return polymer sequence of GROMACS parameter/topology file.\n \"\"\"\n return None if self.__polySeqPrmTop is None or len(self.__polySeqPrmTop) == 0 else self.__polySeqPrmTop\n\n def getSequenceAlignment(self):\n \"\"\" Return sequence alignment between coordinates and GROMACS parameter/topology.\n \"\"\"\n return None if self.__seqAlign is None or len(self.__seqAlign) == 0 else self.__seqAlign\n\n def getChainAssignment(self):\n \"\"\" Return chain assignment between coordinates and GROMACS parameter/topology.\n \"\"\"\n return None if self.__chainAssign is None or len(self.__chainAssign) == 0 else self.__chainAssign\n\n# del GromacsPTParser\n","repo_name":"wwPDB/py-wwpdb_utils_nmr","sub_path":"wwpdb/utils/nmr/mr/GromacsPTParserListener.py","file_name":"GromacsPTParserListener.py","file_ext":"py","file_size_in_byte":77946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23558642404","text":"import plotElnod as pE\nimport plotAzscan as pA\nimport AM_functions as am\nimport pickle as pk\nimport pylab as pl\nimport os, sys\nimport numpy as np\nimport scipy.optimize as spo\nfrom time import perf_counter\nimport time\nimport datetime\nimport matplotlib.dates as mdates\nfrom math import *\nimport extract_mod_par as emp\nimport Read_BICEP_ts as bts\nfrom dateutil import parser\n\n\nx_am=am.AM_functions()\nx=pA.ReadAzscan()\nx_el=pE.ReadElnod()\nx_emp = emp.mod_parameters()\n\n#WVR options\n\nwvr_scan='20200503_170002_skyDip_fast.txt'\n\n\n\nclass T_zenith(object):\n\n def __init__(self, unit=None, verb=True):\n\n '''\n\n\n '''\n\n def dT_del55(self, wvr_scan, template='SPole_annual_50.amc',show_im=0):\n\n month = wvr_scan[:6]\n # \n # if not os.path.exists('wvr1_data/'+wvr_scan[:-9]):\n # x_emp.unzip(month)\n #\n # if not os.path.exists('wvr1_data/'+wvr_scan[:-9]+'/'+wvr_scan):\n # x_emp.unzip(wvr_scan[:-9])\n\n #2. 
Extract PWv atmogram from Temperatures\n path='am_datfiles/'+template[:-4]+'/'+wvr_scan[:-4]\n fn=pickle_fn=path+'/'+wvr_scan[:-4]+'_fitoutput_corr.txt'\n\n # if not os.path.exists(fn):\n # #x_am.fit_w_am_Az(wvr_scan, clean_method='import_model')\n # data=x_am.fit_w_am(wvr_scan)\n # else:\n # print(fn+' already exists.\\n')\n # answer = input(\"Do you want to overwrite it? \")\n # if answer == \"y\":\n # #x_am.fit_w_am_Az(wvr_scan, clean_method='import_model')\n # data=x_am.fit_w_am(wvr_scan)\n # elif answer == \"n\":\n # print('Converting PWV to Trj.')\n # else:\n # print(\"Please enter y or n.\")\n #\n #\n # #\n #\n # f = open(fn,'rb')\n # data=pk.load(f)\n # f.close()\n #\n # pl.plot(data['El'], data['pwv_tropo'], label='pwv_tropo')\n # pl.plot(data['El'], data['pwv_total'], label='pwv_total')\n # pl.plot(data['El'], data['pwv_los_total'], label='pwv_los_total')\n # pl.legend()\n # pl.show()\n # print(data.keys())\n\n print(path+'Trj_el_'+wvr_scan[:-9]+'_pk.txt')\n\n if os.path.exists(path+'/Trj_el_'+wvr_scan[:-9]+'_pk.txt'):\n print('File exists!')\n f=open(path+'/Trj_el_'+wvr_scan[:-9]+'_pk.txt',\"rb\")\n Trj_dict=pk.load(f)\n f.close()\n\n else:\n print('File doesnt exists!')\n Trj_dict=x_am.plot_Trj_skydip(wvr_scan)\n\n Trj=np.array(Trj_dict['Trj'])\n el=np.array(Trj_dict['el'])\n\n print(np.shape(Trj)[0])\n\n Trj_30=[]\n Trj_40=[]\n Trj_90=[]\n Trj_150=[]\n Trj_220=[]\n Trj_270=[]\n\n for i in range(np.shape(Trj)[0]):\n Trj_30.append(Trj[i][0])\n Trj_40.append(Trj[i][1])\n Trj_90.append(Trj[i][2])\n Trj_150.append(Trj[i][3])\n Trj_220.append(Trj[i][4])\n Trj_270.append(Trj[i][5])\n\n print(np.logical_and(el>54, el<56))\n\n mask=np.where(np.logical_and(el>54, el<56))\n\n print(mask)\n Trj_150=np.array(Trj_150)\n Trj_150_55=Trj_150[mask]\n el_55=el[mask]\n\n T150=np.polyfit(el_55, Trj_150_55, 1)\n T150_fit=np.poly1d(T150)\n T150_f=T150_fit(el)\n\n # pl.scatter(Trj_dict['el'], Trj_30, s=1, label='30GHz')\n # pl.scatter(Trj_dict['el'], Trj_40, s=1, label='40GHz')\n # pl.scatter(Trj_dict['el'], Trj_90, s=1, label='90GHz')\n pl.scatter(Trj_dict['el'], Trj_150, s=1, label='T_cmb - 150GHz')\n pl.plot(el, T150_f, c='r', label='dT_150/d(el)='+str(round(T150[0],4)))\n # pl.scatter(Trj_dict['el'], Trj_220, s=1, label='220GHz')\n # pl.scatter(Trj_dict['el'], Trj_270, s=1, label='270GHz')\n\n pl.title(wvr_scan[:-9])\n\n #pl.xlim(54,56)\n pl.legend()\n pl.savefig(path+'/dT150_del_at55deg.png')\n pl.savefig('plots/'+wvr_scan[:-9]+'dT150_del_at55deg.png')\n if show_im==1:\n pl.show()\n else:\n pl.close()\n\n return T150[0]\n","repo_name":"sofiaf-pixel/WVR-pipeline","sub_path":"zenith_PWV.py","file_name":"zenith_PWV.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30349719629","text":"n = int(input())\n\nmatrix = []\nfor _ in range(n):\n matrix.append(list(map(int, input().split(' '))))\n\ndp = [[0]*n for _ in range(n)]\ndp[0][0] = 1\n\ndef solution(n):\n for i in range(n):\n for j in range(n):\n if i == n-1 and j == n-1:\n return dp[n-1][n-1]\n\n if dp[i][j] > 0:\n if matrix[i][j] + j < n:\n dp[i][matrix[i][j] + j] += dp[i][j]\n\n if matrix[i][j] + i < n:\n dp[matrix[i][j] + i][j] += dp[i][j]\n\n\nprint(solution(n))","repo_name":"Kim-Young-Hoo/boj_algorithms","sub_path":"백준/Silver/1890. 
점프/점프.py","file_name":"점프.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"3927474394","text":"\"\"\"\nYou are given two non-empty linked lists representing two non-negative integers. The digits are\nstored in reverse order and each of their nodes contains a single digit. Add the two numbers and\nreturn the result as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nExample:\n\n    Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n    Output: 7 -> 0 -> 8\n    Explain: 342 + 465 = 807.\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        result = ListNode(0)\n        result_tail = result\n        carry = 0\n\n        while l1 or l2 or carry:\n            carry, out = divmod((l1.val if l1 else 0) + (l2.val if l2 else 0) + carry, 10)\n\n            result_tail.next = ListNode(out)\n            result_tail = result_tail.next\n\n            l1 = (l1.next if l1 else None)\n            l2 = (l2.next if l2 else None)\n\n        return result.next\n","repo_name":"macio-matheus/algorithms-and-data-structure-practices","sub_path":"leetcode_add_two_num.py","file_name":"leetcode_add_two_num.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"14027821827","text":"\"\"\"The Account class encapsulates the useful features of an AWS account.\"\"\"\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Account(object):\n    \"\"\"An AWS account.\"\"\"\n\n    def __init__(self, name):\n        \"\"\"Instantiate the AWS account.\n\n        :name: String.\n        :returns: Account object.\n        \"\"\"\n        self.name = name\n\n        logger.debug(\n            \"AWS account object (name: \\\"{}\\\", number: \\\"{}\\\") \"\n            \"created.\".format(\n                self.name, self.number)\n        )\n\n    @property\n    def number(self):\n        \"\"\"Provide the corresponding account number, given the account name.\n\n        :accountname: String. One of: 'prod', 'test'.\n
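        e.g. Account('prod').number -> '1234567890' (illustrative usage; the values come from the mapping below).\n        :returns: String. 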
Corresponding account number.\n \"\"\"\n accountname = self.name\n assert (accountname == 'prod' or accountname == 'test'), (\n \"This currently only works in the 'prod' or 'test' accounts.\"\n )\n accountnumber = {\n 'prod': '1234567890',\n 'test': '0123456789'\n }\n return accountnumber[accountname]\n","repo_name":"forestmonster/ssmshare","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33200611217","text":"from heapq import *\n\nn,k=map(int,input().split(\" \"))\n\n#idea cut the longest every time\ndef val(l,nums):\n unit=l//nums\n extra=l-unit*nums\n return (nums-extra)*unit*unit+extra*(unit+1)*(unit+1)\n \npq=[]\narr=list(map(int,input().split(\" \")))\n\ntotal=0\nfor x in range(n):\n total+=arr[x]*arr[x]\n heappush(pq,(-val(arr[x],1)+val(arr[x],2),arr[x],2))\n\nfor x in range(k-n):\n temp=heappop(pq)\n total+=temp[0]\n a,b=temp[1],temp[2]\n heappush(pq,(-val(a,b)+val(a,b+1),a,b+1))\n\nprint(total)\n","repo_name":"marcus-aurelianus/codeforce","sub_path":"RaifRound/carrot.py","file_name":"carrot.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"22366765337","text":"class Noeud :\n\n def __init__(self, indice, questionReponse) :\n \n self.texte = questionReponse\n self.numero = int(indice)\n self.filsGauche = None\n self.filsDroit = None\n \n def ajouterNoeud(self, indice, questionReponse) :\n \n if indice < self.numero :\n if self.filsGauche == None :\n self.filsGauche = Noeud(indice, questionReponse)\n else :\n self.filsGauche.ajouterNoeud(indice, questionReponse)\n else :\n if self.filsDroit == None :\n self.filsDroit = Noeud(indice, questionReponse)\n else :\n self.filsDroit.ajouterNoeud(indice, questionReponse)\n \n def afficher(self, noATrouver) :\n \n if noATrouver == self.numero :\n return self\n \n if int(noATrouver) < self.numero :\n if self.filsGauche == None :\n return \"No text found 1\"\n else :\n return self.filsGauche.afficher(int(noATrouver))\n else :\n if self.filsDroit == None :\n return \"No text found 2\"\n else :\n return self.filsDroit.afficher(int(noATrouver))\n \n return None\n \n def afficheArbreNiveau(self, niveau) :\n \n if (self.filsDroit != None) :\n self.filsDroit.afficheArbreNiveau(niveau + 1)\n \n for i in range (0, niveau * 5) :\n print(' ', end='')\n print(self.numero)\n \n if (self.filsGauche != None) :\n self.filsGauche.afficheArbreNiveau(niveau + 1)","repo_name":"VyynceeF/Jeu-Collaboratif-en-Anglais","sub_path":"Interface/Noeud.py","file_name":"Noeud.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12853335483","text":"from unittest.mock import Mock\n\nimport pytest\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message, User\nfrom pytest_mock import MockerFixture\n\nfrom jpoetry import bot as bot_module\nfrom jpoetry.answers import HELP_TEXT, WELCOME_TEXT\nfrom jpoetry.bot import bot, detect_and_send_poem, get_author, send_cheat_sheet, welcome_user\nfrom jpoetry.config import DEFAULT_AUTHOR, TOO_LONG_MESSAGE_FILE\nfrom jpoetry.image import TooLongTextError\nfrom jpoetry.poetry import detect_poems, iter_poems\n\n\n@pytest.fixture\ndef get_message(mocker):\n def get_message(\n text=\"test\",\n username=\"test\",\n full_name=\"test\",\n private=True,\n 
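# the remaining parameters model forwarded-message metadata; all default to None (added note)\n        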
forward_username=None,\n forward_sender_name=None,\n forward_full_name=None,\n ):\n message = mocker.Mock(\n spec=Message,\n text=text,\n message_id=123,\n chat_id=123,\n from_user=mocker.Mock(\n spec=User,\n id=123 if private else 321,\n full_name=full_name,\n username=username,\n ),\n )\n\n message.forward_sender_name = forward_sender_name\n if forward_username or forward_full_name:\n message.forward_from = mocker.Mock(\n spec=User, username=forward_username, full_name=forward_full_name\n )\n else:\n message.forward_from = None\n message.is_forward = lambda: bool(message.forward_from or message.forward_sender_name)\n\n return message\n\n return get_message\n\n\n@pytest.mark.parametrize(\n \"full_name,username,result\",\n (\n (\"Test Testov\", \"test\", \"Test Testov\"),\n (\"test\" * 11, \"test_username\", \"@test_username\"),\n (\"𝔓𝔢𝔫𝔤𝔲𝔦𝔫\", \"test_username\", \"@test_username\"),\n (\"test\" * 11, None, DEFAULT_AUTHOR),\n (\"𝔓𝔢𝔫𝔤𝔲𝔦𝔫\", None, DEFAULT_AUTHOR),\n ),\n)\ndef test_get_author_ordinal(full_name, username, result, get_message):\n message = get_message(\n username=username,\n full_name=full_name,\n )\n assert get_author(message) == result\n\n\n@pytest.mark.parametrize(\n \"fwd_full_name,fwd_username,fwd_sender_name,result\",\n (\n (\"Test Testov\", \"test\", None, \"Test Testov\"),\n (\"test\" * 11, \"test_username\", None, \"@test_username\"),\n (\"𝔓𝔢𝔫𝔤𝔲𝔦𝔫\", \"test_username\", None, \"@test_username\"),\n (\"test\" * 11, None, None, DEFAULT_AUTHOR),\n (None, None, \"Anonimus\", \"Anonimus\"),\n (None, None, \"Anonimus\" * 11, DEFAULT_AUTHOR),\n (None, None, \"𝔓𝔢𝔫𝔤𝔲𝔦𝔫\", DEFAULT_AUTHOR),\n ),\n)\ndef test_get_author_forward(fwd_full_name, fwd_username, fwd_sender_name, result, get_message):\n message = get_message(\n username=\"test_username\",\n full_name=\"test_full_name\",\n forward_full_name=fwd_full_name,\n forward_username=fwd_username,\n forward_sender_name=fwd_sender_name,\n )\n assert get_author(message) == result\n\n\nasync def test_welcome_user(get_message, call):\n message = get_message()\n await welcome_user(message)\n assert message.reply.mock_calls == [call(WELCOME_TEXT)]\n\n\nasync def test_send_cheat_sheet(get_message, call):\n message = get_message()\n await send_cheat_sheet(message)\n assert message.reply.mock_calls == [call(HELP_TEXT)]\n\n\nasync def test_detect_and_send_poem_positive(get_message, mocker: MockerFixture, call, hokku_text):\n message = get_message(hokku_text)\n detect_poems_mock = mocker.patch.object(\n bot_module, \"iter_poems\", Mock(return_value=detect_poems(hokku_text))\n )\n poem = next(iter_poems(hokku_text))\n detect_poems_mock = mocker.patch.object(\n bot_module, \"iter_poems\", Mock(return_value=iter([poem]))\n )\n\n create_poem_image_mock = mocker.patch.object(bot_module, \"get_poem_image\")\n input_file_mock = mocker.patch.object(bot_module, \"InputFile\")\n send_message_mock = mocker.patch.object(bot, \"send_photo\")\n\n await detect_and_send_poem(message)\n assert message.reply.mock_calls == []\n assert detect_poems_mock.mock_calls == [call(hokku_text)]\n assert create_poem_image_mock.mock_calls == [call(poem, \"test\")]\n assert input_file_mock.mock_calls == [\n call(create_poem_image_mock.return_value, filename=\"test — Хокку.png\")\n ]\n assert send_message_mock.mock_calls == [\n call(\n message.chat.id,\n input_file_mock.return_value,\n reply_to_message_id=message.message_id,\n reply_markup=InlineKeyboardMarkup().add(\n InlineKeyboardButton(text=\"Залить в канал\", callback_data=\"publish\")\n ),\n )\n ]\n\n\nasync def 
test_detect_and_send_poem_too_long(get_message, mocker: MockerFixture, call, hokku_text):\n    message = get_message(hokku_text)\n    poem = next(iter_poems(hokku_text))\n    detect_poems_mock = mocker.patch.object(\n        bot_module, \"iter_poems\", Mock(return_value=iter([poem]))\n    )\n\n    create_poem_image_mock = mocker.patch.object(\n        bot_module, \"get_poem_image\", side_effect=TooLongTextError\n    )\n    send_message_mock = mocker.patch.object(bot, \"send_photo\")\n\n    await detect_and_send_poem(message)\n    assert message.reply.mock_calls == []\n    assert detect_poems_mock.mock_calls == [call(hokku_text)]\n    assert create_poem_image_mock.mock_calls == [call(poem, \"test\")]\n\n    assert send_message_mock.mock_calls == [\n        call(\n            message.chat.id,\n            TOO_LONG_MESSAGE_FILE,\n            reply_to_message_id=message.message_id,\n            reply_markup=InlineKeyboardMarkup().add(\n                InlineKeyboardButton(text=\"Залить в канал\", callback_data=\"publish\")\n            ),\n        )\n    ]\n\n\nasync def test_detect_and_send_poem_negative(get_message, call, mocker):\n    message = get_message(\"Not a poem\")\n\n    detect_poems_mock = mocker.patch.object(\n        bot_module, \"iter_poems\", return_value=iter_poems(message.text)\n    )\n    await detect_and_send_poem(message)\n    assert detect_poems_mock.mock_calls == [call(message.text)]\n    assert message.reply.mock_calls == []\n","repo_name":"Bobronium/jpoetry","sub_path":"tests/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"36699725558","text":"def distance(x1, y1, x2, y2):\n    return (x1 - x2)**2 + (y1 - y2)**2\n\ndef solution(m, n, startX, startY, balls):\n    answer = []\n    \n    for ball in balls:\n        endX, endY = ball\n        \n        min_dist = float(\"inf\")\n\n        cases = [(-endX, endY), (2*m -endX, endY), (endX, -endY), (endX, 2*n - endY)]\n        for i, case in enumerate(cases):\n            modX, modY = case[0], case[1]\n\n            # skip reflections where the cue ball would hit the target before reaching the cushion\n            if startY == endY:\n                if i == 0 and startX > endX:\n                    continue\n                if i == 1 and startX < endX:\n                    continue\n            if startX == endX:\n                if i == 2 and startY > endY:\n                    continue\n                if i == 3 and startY < endY:\n                    continue\n            \n            # distance to the mirrored target coordinate\n            min_dist = min(min_dist, distance(startX, startY, modX, modY))\n\n        answer.append(round(min_dist))\n    \n    return answer\n\n\n# reference (intended) solution\n\ndef solution(m, n, startX, startY, balls):\n    answer = []\n    targets=[(-startX,startY),(startX,2*n-startY),(2*m-startX,startY),(startX,-startY)]\n    for ball in balls:\n        new_targets=[targets[0],targets[1],targets[2],targets[3]]\n        if startX==ball[0]:\n            new_targets=[targets[0],targets[1],targets[2]] if startY>ball[1] else [targets[0],targets[3],targets[2]]\n        if startY==ball[1]:\n            new_targets=[targets[1],targets[2],targets[3]] if startX>ball[0] else [targets[0],targets[1],targets[3]]\n        answer.append(min(list(map(lambda target:(ball[0]-target[0])**2+(ball[1]-target[1])**2,new_targets))))\n    return answer","repo_name":"jjweidon/exct","sub_path":"programmers/당구_연습.py","file_name":"당구_연습.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73172089812","text":"import dj_database_url\nimport os\n\nALLOWED_HOSTS = ['cargas-ar.herokuapp.com']\n\n# Application definition\n\nINSTALLED_APPS = [\n    'gdstorage',\n]\n\nMIDDLEWARE = [\n    'whitenoise.middleware.WhiteNoiseMiddleware',\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n
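# dj_database_url.config() builds the connection settings from the DATABASE_URL environment variable that Heroku provides; conn_max_age=600 keeps connections open for up to 10 minutes (added note).\nDATABASES = {\n    'default': 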
dj_database_url.config(conn_max_age=600)\n}\n\n# Google Drive Storage Settings\nGOOGLE_DRIVE_STORAGE_JSON_KEY_FILE = None\nGOOGLE_DRIVE_STORAGE_JSON_KEY_FILE_CONTENTS = os.getenv(\n 'GOOGLE_DRIVE_STORAGE_JSON_KEY_FILE_CONTENTS')\nGOOGLE_DRIVE_STORAGE_MEDIA_ROOT = 'cargas/media'\n\n# Simplified static file serving.\n# https://warehouse.python.org/project/whitenoise/\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# SSL Configuration\nSECURE_SSL_REDIRECT = True\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_SECONDS = 31536000\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_HSTS_PRELOAD = True\n# For session dictionary modifications between request\nSESSION_SAVE_EVERY_REQUEST = True\n\n# EMAIL SETTINGS\nEMAIL_HOST = os.getenv('MAILGUN_SMTP_SERVER')\nEMAIL_HOST_USER = os.getenv('MAILGUN_SMTP_LOGIN')\nEMAIL_HOST_PASSWORD = os.getenv('MAILGUN_SMTP_PASSWORD')\nEMAIL_PORT = os.getenv('MAILGUN_SMTP_PORT')\nEMAIL_USE_TLS = True\nEMAIL_SENDER_CREDENTIALS = os.getenv('EMAIL_SENDER_CREDENTIALS')\nEMAIL_RECEIVE_CREDENTIALS = os.getenv(\"EMAIL_RECEIVE_CREDENTIALS\")\nEMAIL_OWNER = os.getenv(\"EMAIL_OWNER\")\nMAILGUN_ACCESS_KEY = os.getenv('MAILGUN_API_KEY')\nMAILGUN_SERVER_NAME = os.getenv('MAILGUN_DOMAIN')\nEMAIL_BACKEND = 'django_mailgun.MailgunBackend'\n","repo_name":"ariel-brassesco/cargas","sub_path":"cargas/settings_production.py","file_name":"settings_production.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7039852744","text":"import pytest\n\nfrom .....order.models import Order\nfrom ....tests.utils import assert_no_permission, get_graphql_content\n\nDRAFT_ORDER_QUERY = \"\"\"\n query DraftOrdersQuery {\n draftOrders(first: 10) {\n edges {\n node {\n id\n number\n }\n }\n }\n }\n\"\"\"\n\n\n@pytest.fixture\ndef draft_orders_in_different_channels(\n draft_order_list, channel_USD, channel_JPY, channel_PLN\n):\n draft_order_list[0].channel = channel_USD\n draft_order_list[1].channel = channel_JPY\n draft_order_list[2].channel = channel_PLN\n\n Order.objects.bulk_update(draft_order_list, [\"channel\"])\n return draft_order_list\n\n\ndef test_draft_order_query(\n staff_api_client, permission_group_manage_orders, order, draft_order_list\n):\n # given\n permission_group_manage_orders.user_set.add(staff_api_client.user)\n\n # when\n response = staff_api_client.post_graphql(DRAFT_ORDER_QUERY)\n\n # then\n edges = get_graphql_content(response)[\"data\"][\"draftOrders\"][\"edges\"]\n\n assert len(edges) == Order.objects.drafts().count()\n\n\ndef test_query_draft_orders_by_user_with_access_to_all_channels(\n staff_api_client,\n permission_group_all_perms_all_channels,\n draft_orders_in_different_channels,\n):\n # given\n permission_group_all_perms_all_channels.user_set.add(staff_api_client.user)\n\n # when\n response = staff_api_client.post_graphql(DRAFT_ORDER_QUERY)\n\n # then\n edges = get_graphql_content(response)[\"data\"][\"draftOrders\"][\"edges\"]\n\n assert len(edges) == len(draft_orders_in_different_channels)\n\n\ndef test_query_draft_orders_by_user_with_restricted_access_to_channels(\n staff_api_client,\n permission_group_all_perms_channel_USD_only,\n draft_orders_in_different_channels,\n):\n # given\n permission_group_all_perms_channel_USD_only.user_set.add(staff_api_client.user)\n\n # when\n response = 
staff_api_client.post_graphql(DRAFT_ORDER_QUERY)\n\n # then\n content = get_graphql_content(response)\n\n assert len(content[\"data\"][\"draftOrders\"][\"edges\"]) == 1\n assert content[\"data\"][\"draftOrders\"][\"edges\"][0][\"node\"][\"number\"] == str(\n draft_orders_in_different_channels[0].number\n )\n\n\ndef test_query_draft_orders_by_user_with_restricted_access_to_channels_no_acc_channels(\n staff_api_client,\n permission_group_all_perms_without_any_channel,\n draft_orders_in_different_channels,\n):\n # given\n permission_group_all_perms_without_any_channel.user_set.add(staff_api_client.user)\n\n # when\n response = staff_api_client.post_graphql(DRAFT_ORDER_QUERY)\n\n # then\n content = get_graphql_content(response)\n assert len(content[\"data\"][\"draftOrders\"][\"edges\"]) == 0\n\n\ndef test_query_draft_orders_by_app(\n app_api_client, permission_manage_orders, draft_orders_in_different_channels\n):\n # when\n response = app_api_client.post_graphql(\n DRAFT_ORDER_QUERY, permissions=(permission_manage_orders,)\n )\n\n # then\n edges = get_graphql_content(response)[\"data\"][\"draftOrders\"][\"edges\"]\n\n assert len(edges) == len(draft_orders_in_different_channels)\n\n\ndef test_query_draft_orders_by_customer(\n user_api_client, draft_orders_in_different_channels\n):\n # when\n response = user_api_client.post_graphql(DRAFT_ORDER_QUERY)\n\n # then\n assert_no_permission(response)\n","repo_name":"saleor/saleor","sub_path":"saleor/graphql/order/tests/queries/test_draft_order.py","file_name":"test_draft_order.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"74795001172","text":"import socket\r\nfrom readJSON import readConfigFile\r\nfrom encryption.encrypt import decrypt_data, encrypt_data\r\n\r\n\r\ndef main():\r\n SENDER_HOST, SENDER_PORT, TCP_HOST, TCP_PORT, HOST_OUT, PORT_OUT, BUFSIZE, XOR_KEY = readConfigFile(\"config.json\")\r\n tunnel_client_address = (TCP_HOST, TCP_PORT)\r\n sender_address = (SENDER_HOST, SENDER_PORT)\r\n socketTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n socketTCP.connect(tunnel_client_address)\r\n\r\n socketUDP = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\r\n socketUDP.bind(sender_address)\r\n print(\"Listening...\")\r\n while True:\r\n try:\r\n # print(\"Server listening on port {:d}\".format(PORT))\r\n data_address = socketUDP.recvfrom(BUFSIZE)\r\n data = data_address[0]\r\n address = data_address[1]\r\n # data, address = socketUDP.recvfrom(BUFSIZE)\r\n socketTCP.sendall(encrypt_data(data.decode(), XOR_KEY).encode())\r\n print(\"Sent data to TCP\")\r\n data = socketTCP.recv(BUFSIZE)\r\n print(\"Received data from TCP\")\r\n # data decryption\r\n data = decrypt_data(data.decode(), XOR_KEY)\r\n # print(\"DATA received after crypting\", data)\r\n\r\n # print(\"MSG\", msg)\r\n # print(decryptOrEncrypt(msg.decode(), XOR_KEY))\r\n print(\"Sending data by UDP\")\r\n socketUDP.sendto(data.encode(), address)\r\n except KeyboardInterrupt:\r\n print()\r\n print(\"Program exiting\")\r\n quit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dam1508/PSI-project","sub_path":"tunnelclient.py","file_name":"tunnelclient.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26686797595","text":"import os\nimport shutil\nimport sys\nimport cv2\nfrom time import sleep\nfrom threading import Timer\nfrom PyQt5 
import QtCore,QtGui\nfrom PyQt5.QtCore import QThread,Qt\nfrom PyQt5.QtGui import QImage, QPixmap, QCursor\nfrom PyQt5.QtWidgets import QApplication, QWidget, QFileDialog, QMessageBox, QDesktopWidget, QInputDialog\nimport pynput\nimport numpy as np\nfrom PIL import Image\n\n#************ all paths below are stored without a trailing '/' **********\nroot_folder = os.path.dirname(os.path.realpath(__file__))  # directory containing this .py file\nparent_folder=os.path.dirname(root_folder)\nsys.path.append(parent_folder)\nsys.path.append(os.path.join(parent_folder,'gui'))\nsys.path.append(os.path.join(parent_folder,'direct'))\nsys.path.append(os.path.join(parent_folder,'OSVOS'))\nsys.path.append(os.path.join(parent_folder,'bubbleNets'))\nsys.path.append(os.path.join(parent_folder,'InteSeg'))\nsys.path.append(os.path.join(parent_folder,'Removal'))\n#************path**********\nfrom gui import osvos_window\nfrom osvos import finetuneThread\nfrom RGMP_thread import VosRGMP_thread\nfrom osvos_window import osvos_form\nfrom bubbleNets_logic import BubbleNetsForm\nfrom MgrHelper import MgrHelper\nfrom remove_logic import RemoveObjectForm\n#******************** classes and functions for model training *********************\n\n# signal class that re-emits standard output as a Qt signal\nclass EmittingStream(QtCore.QObject):\n    textWritten = QtCore.pyqtSignal(str)\n    def write(self,text):\n        self.textWritten.emit(str(text))\n\n#******************** classes and functions for model training *********************\n\n#******************** classes and functions for the left-hand segmentation-result display *********************\ndef removeTmpDir(rootdir):\n    filelist=os.listdir(rootdir)\n    if filelist:\n        for f in filelist:\n            filepath=os.path.join(rootdir,f)\n            if os.path.isfile(filepath):\n                os.remove(filepath)\n            elif os.path.isdir(filepath):\n                shutil.rmtree(filepath,True)\n    shutil.rmtree(rootdir,True)\n\ndef getResultsMaskImages(path):\n    results_images=[]\n    images=sorted(os.listdir(path))\n    img_num=len(images)\n    for i in range(img_num):\n        img=os.path.join(path,images[i])\n        results_images.append(img)\n    return results_images\n\ndef getSrcImages(path):\n    results_images=[]\n    images=sorted(os.listdir(path))\n    img_num=len(images)\n    for i in range(img_num):\n        img=os.path.join(path,images[i])\n        results_images.append(img)\n    return results_images\n\ndef preprocessShowImgae(srcImage,maskImage,scale):\n    overlay_color=[255,0,0]\n    transparency=0.6\n    img = np.array(Image.open(srcImage))\n    mask = np.array(Image.open(maskImage))\n    mask=mask//np.max(mask)\n    im_over=np.ndarray(img.shape)\n    nchannel = cv2.imread(srcImage).shape[2]\n\n    im_over[:, :, 0] = (1 - mask) * img[:, :, 0] + mask * (overlay_color[0] * transparency + (1 - transparency) * img[:, :, 0])\n    im_over[:, :, 1] = (1 - mask) * img[:, :, 1] + mask * (overlay_color[1] * transparency + (1 - transparency) * img[:, :, 1])\n    im_over[:, :, 2] = (1 - mask) * img[:, :, 2] + mask * (overlay_color[2] * transparency + (1 - transparency) * img[:, :, 2])\n    limg2=im_over.astype(np.uint8)\n\n    #limg2 = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n    timg = cv2.resize(limg2, (int(scale * limg2.shape[1]), int(scale* limg2.shape[0]))) #cv2.resize(w,h)\n    limage = QtGui.QImage(timg.data, timg.shape[1], timg.shape[0], nchannel * timg.shape[1], QtGui.QImage.Format_RGB888)\n    #print(\"preprocess image over:nchannel is {},width is {},height is {}\".format(nchannel, timg.shape[1], timg.shape[0]))\n    return limage\n\nclass workerThread(QThread):\n    updatedM=QtCore.pyqtSignal(int)  # signal; int is the type of the payload\n\n    def __init__(self,mw):\n        self.mw=mw\n        QThread.__init__(self)\n\n    def run(self):  # run() is invoked via .start()\n        itr=0\n        # call processEvents() during long-running work so the UI keeps refreshing and feels responsive\n        QApplication.processEvents()\n        while self.mw.isRun:\n            itr+=1\n
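\n            # advance only while playback is active, the previous frame has finished, and a new frame is due (added note)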
\n            if self.mw.isthreadActive and self.mw.isbusy==False and self.mw.frameID!=self.mw.currentID:\n                #print(itr)\n\n                if self.mw.timer is None:\n                    self.mw.frameID+=1\n                self.mw.isbusy=True\n                sf=self.mw.scaleFactor\n                if sf<=0:\n                    self.mw.isbusy=False\n                    continue\n                #if image is None or mask_image is None:\n                if self.mw.frameID>=self.mw.length:\n                    self.mw.ui.pushButton_left6.setEnabled(False)\n                    self.mw.ui.pushButton_left5.setEnabled(True)\n                    if not self.mw.timer is None:\n                        self.mw.timer.cancel()\n                    self.mw.isthreadActive=False\n                    self.mw.isbusy=False\n                    continue\n                self.mw.currentID = self.mw.frameID\n                src_img=self.mw.srcImages[self.mw.currentID]\n                mask_img=self.mw.maskImages[self.mw.currentID]\n                image=cv2.imread(src_img)\n                mask_image=cv2.imread(mask_img)\n                #self.maskImages=[]\n                self.mw.limg=image\n                if self.mw.isInit==True:\n                    self.mw.on_zoomfit_clicked()\n                    self.mw.isInit=False\n                limage=preprocessShowImgae(src_img,mask_img,self.mw.scaleFactor)\n                if self.mw.resizegoing==False:\n                    self.mw.ui.leftImage.setPixmap(QtGui.QPixmap(limage))\n                if not self.mw.sliderbusy and not self.mw.sliderbusy2:\n                    self.updatedM.emit(self.mw.frameID)\n                QApplication.processEvents()\n                self.mw.isbusy=False\n            else:\n                if self.mw.isthreadActive and self.mw.timer is None:\n                    self.mw.frameID+=1\n            sleep(1.0/50)\n\n#******************** classes and functions for the left-hand segmentation-result display *********************\n\n#******************** classes and functions for right-hand video playback *********************\ndef video_to_frames(result_path,video_path):\n    if not os.path.exists(result_path):\n        os.makedirs(result_path)\n    capture = cv2.VideoCapture(video_path)\n    num = 0\n    while True:\n        ret, img = capture.read()\n        if not ret:\n            break\n        picname = '{:05d}.jpg'.format(num)\n        cv2.imwrite(os.path.join(result_path, picname), img)\n        num += 1\n    capture.release()\n\n# timer that repeatedly invokes a function at a fixed interval\nclass perpetualTimer():\n    def __init__(self,t,hFunction):\n        self.t=t\n        self.hFunction=hFunction\n        self.thread=Timer(self.t,self.handle_function) # run handle_function every t seconds\n\n    def handle_function(self):\n        self.hFunction()\n        self.thread=Timer(self.t,self.handle_function)\n        self.thread.start()\n    def start(self):\n        self.thread.start()\n    def cancel(self):\n        self.thread.cancel()\n\n# convert a frame read from VideoCapture so it can be shown in a QLabel\ndef preprocessImage(img,scale):\n    frame = img\n    # self.on_zoomfit_clicked()\n    nchannel = frame.shape[2]\n    limg2 = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n    timg = cv2.resize(limg2, (int(scale * limg2.shape[1]), int(scale* limg2.shape[0])))\n    limage = QtGui.QImage(timg.data, timg.shape[1], timg.shape[0], nchannel * timg.shape[1], QtGui.QImage.Format_RGB888)\n    #print(\"preprocess image over:nchannel is {},width is {},height is {}\".format(nchannel, timg.shape[1], timg.shape[0]))\n    return limage\n\n# update the displayed image according to frameID\nclass workerThread1(QThread):\n    updatedM=QtCore.pyqtSignal(int)  # signal; int is the type of the payload\n\n    def __init__(self,mw):\n        self.mw=mw\n        QThread.__init__(self)\n\n    # def __del__(self):  # destructor, called when the object is destroyed\n    #     self.wait()\n\n    def run(self):  # run() is invoked via .start()\n        itr=0\n        # call processEvents() during long-running work so the UI keeps refreshing and feels responsive\n        QApplication.processEvents()\n        while self.mw.isRun1:\n            itr+=1\n\n            if self.mw.isthreadActive1 and self.mw.isbusy1==False and self.mw.frameID1!=self.mw.cap1.get(cv2.CAP_PROP_POS_FRAMES):\n                #print(itr)\n                if np.abs(self.mw.frameID1-self.mw.cap1.get(cv2.CAP_PROP_POS_FRAMES))>1:\n                    self.mw.cap1.set(cv2.CAP_PROP_POS_FRAMES,self.mw.frameID1)\n                if self.mw.timer1 is None:\n                    self.mw.frameID1+=1\n                self.mw.isbusy1=True\n                sf=self.mw.scaleFactor1\n                ret,image=self.mw.cap1.read()\n                self.mw.limg1=image\n                if sf<=0:\n                    self.mw.isbusy1=False\n                    continue\n                if ret==False:
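\n                    # end of stream: re-enable Play, disable Pause, and stop the playback timer (added note)\n                    if 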
self.mw.frameID1>=self.mw.length1:\n self.mw.ui.pushButton_right3.setEnabled(False)\n self.mw.ui.pushButton_right2.setEnabled(True)\n if not self.mw.timer1 is None:\n self.mw.timer1.cancel()\n self.mw.isthreadActive1=False\n self.mw.isbusy1=False\n continue\n limage=preprocessImage(image,self.mw.scaleFactor1)\n\n if self.mw.resizegoing1==False:\n self.mw.ui.rightImage.setPixmap(QtGui.QPixmap(limage))\n if not self.mw.sliderbusy_right and not self.mw.sliderbusy2_right:\n self.updatedM.emit(self.mw.frameID1)\n QApplication.processEvents()\n self.mw.isbusy1=False\n else:\n if self.mw.isthreadActive1 and self.mw.timer1 is None:\n self.mw.frameID1+=1\n sleep(1.0/50)\n#********************右边播放视频的类和函数*********************\n\nclass OsvosForm(QWidget):\n resized=QtCore.pyqtSignal() #缩放信号\n def __init__(self,workdir,direct_object):\n super(OsvosForm, self).__init__() # 继承的所有父类的初始化\n self.ui=osvos_form()\n self.ui.setupUi(self)\n #窗体居中显示\n screen_size=QDesktopWidget().screenGeometry()#获得屏幕的尺寸\n widget_size=self.geometry()#获得窗体的尺寸\n self.move((screen_size.width()-widget_size.width())/2,(screen_size.height()-widget_size.height())/2)\n self.setWindowTitle(u'视频目标分割')\n #sys.stdout=EmittingStream(textWritten=self.outputWritten)\n #sys.stderr=EmittingStream(textWritten=self.outputWritten)\n\n #self.setPalette(QPalette(QColor('#FFFFFF'))) # 背景颜色\n\n #*************path*******************\n self.model_folder = parent_folder + '/models' # models所在目录\n self.path_prefix = workdir # 工作目录\n self.direct_object=direct_object\n #**************path********************\n\n #*************右边播放视频的变量*********\n self.isFirstKey_right=False\n self.stframe1=0\n self.endframe1=0\n self.scaleFactor1 = 1.0 # 缩放比例\n self.length1=1\n self.frameID1 = 0 #第几帧\n self.isRun1 = True #未改变一直未TRUE\n self.resizegoing1 = False #当进行放缩调整时为True\n self.sliderbusy_right = False #进度条被按下时为True,放松时即无操作为False\n self.sliderbusy2_right = False #视频逐帧向前播放反过来让进度条值变化时为True\n self.ui.pushButton_right1.setEnabled(True)\n self.ui.pushButton_right2.setEnabled(False)\n self.ui.pushButton_right3.setEnabled(False)\n self.ui.pushButton_right1.clicked.connect(self.openButtonPressed1)\n self.ui.pushButton_right2.clicked.connect(self.startButtonPressed1)\n self.ui.horizontalSlider_right.sliderPressed.connect(self.horizontalSliderPressed1)\n self.ui.horizontalSlider_right.sliderReleased.connect(self.horizontalSliderReleased1)\n self.ui.horizontalSlider_right.valueChanged.connect(self.slider_value_changed1)\n #self.ui.rightImage.setScaledContents(True)#自适应图片大小\n self.ui.pushButton_right3.clicked.connect(self.pauseButtonPressed1)\n self.resized.connect(self._on_resized1)\n self.videoTIme=0\n self.startx1 = 0\n self.starty1 = 0\n self.isVideo1 = False #capture已经打开视频\n self.isbusy1 = 0\n self.frameHeight1 = 1\n self.frameWidth1 = 1\n self.limg1 = np.zeros((1, 1, 1))\n self.cap1 = None\n self.timer1 = None\n self.isthreadActive1 = False#开始播放视频\n self.wthread1=workerThread1(self)\n self.wthread1.updatedM.connect(self.horizontalSliderSet1)\n self.wthread1.start()\n self.klistener1_right=pynput.keyboard.Listener(on_press=self.on_release1)\n self.klistener2_right = pynput.keyboard.Listener(on_release=self.on_press1)\n self.ui.horizontalSlider_right.setEnabled(False)\n self.ui.pushButton_right2.setEnabled(False)\n # *************右边播放视频的变量*********\n\n\n # *************左边展示分割结果的变量*********\n self.isFirstKey_left=False\n #模型快速还是慢速\n self.ui.comboBox_model.addItems(['Fast', 'Slow'])\n # 选择形状的comboBox的值改变关联函数\n 
self.ui.comboBox_model.currentIndexChanged.connect(self.modelValueChanged)\n self.model='Fast'\n self.stframe=0\n self.endframe=0\n self.fps = 22\n self.scaleFactor = 1.0 # 缩放比例\n self.length = 1\n self.frameID = 0 # 第几帧\n self.currentID = 0 # 显示的第几帧\n self.isRun = True # 未改变一直为True\n self.resizegoing = False # 当进行放缩调整时为True\n self.sliderbusy = False # 进度条被按下时为True,放松时即无操作为False\n self.sliderbusy2 = False # 视频逐帧向前播放反过来让进度条值变化时为True\n self.ui.pushButton_left1.setEnabled(True)\n #self.ui.pushButton_left2.setEnabled(False)\n # self.ui.pushButton_left3.setEnabled(False)\n # self.ui.pushButton_left4.setEnabled(False)\n self.ui.pushButton_left5.setEnabled(False)\n self.ui.pushButton_left6.setEnabled(False)\n\n self.ui.pushButton_left5.clicked.connect(self.startButtonPressed)\n self.ui.pushButton_left6.clicked.connect(self.pauseButtonPressed)\n\n self.ui.horizontalSlider_left.sliderPressed.connect(self.horizontalSliderPressed)\n self.ui.horizontalSlider_left.sliderReleased.connect(self.horizontalSliderReleased)\n self.ui.horizontalSlider_left.valueChanged.connect(self.slider_value_changed)\n\n self.resized.connect(self._on_resized)\n\n self.startx = 0\n self.starty = 0\n self.isVideo = False # 已经显示图片\n self.isbusy = 0\n self.frameHeight = 1\n self.frameWidth = 1\n self.limg = np.zeros((1, 1, 1))\n # self.cap = None\n self.timer = None\n self.ui.textBrowser.setText('Finetune the model first!')\n print('\\n')\n self.isthreadActive = False # 开始播放视频\n self.wthread = workerThread(self)\n self.wthread.updatedM.connect(self.horizontalSliderSet)\n self.wthread.start()\n\n self.klistener1_left = pynput.keyboard.Listener(on_press=self.on_release)\n self.klistener2_left = pynput.keyboard.Listener(on_release=self.on_press)\n\n self.srcImages = []\n self.maskImages = []\n #self.initLeftImage()\n #存储分割结果的二值帧图\n self.result_dir = os.path.join(self.path_prefix, 'VosResults')\n self.vos_result_path = None\n self.ui.horizontalSlider_left.setEnabled(False)\n self.ui.pushButton_left5.setEnabled(False)\n # *************左边展示分割结果的变量*********\n\n # *************选择mask的变量*********\n self.ui.label_mask.openMaskSignal.connect(self.chooseMask) #双击打开文件夹选择mask\n self.resized.connect(self._on_resized2)\n self.curr_mask_img = None\n self.curr_mask_id = None\n self.mask_path=self.path_prefix\n #self.maskinfo = {}\n self.rgmpInfo = {}\n self.osvosInfo = {}\n # *************选择mask的变量*********\n\n # *************视频帧变量*********\n self.frames_path = os.path.join(self.path_prefix, 'JPEGImages')\n #self.file_list = sorted([name for name in os.listdir(self.frames_path) if self._is_img(name)])\n # *************视频帧变量*********\n\n #**************两个按键************\n #self.ui.commandLinkButton_next.clicked.connect(self.annoWindowExit)\n # **************两个按键************\n\n #*************训练模型的变量*********\n self.itertime=50\n self.cursor = self.ui.textBrowser.textCursor()\n self.cursor.movePosition(QtGui.QTextCursor.End)\n self.ui.textBrowser.setTextCursor(self.cursor)\n self.ui.textBrowser.ensureCursorVisible()\n self.ftThreadActive=False\n self.hasModel=False\n self.ui.pushButton_left1.clicked.connect(self.startFinetuneButtonPressed)\n self.ui.pushButton_left0.clicked.connect(self.startAnnotate)\n #self.ui.pushButton_left2.clicked.connect(self.stopFinetuneButtonPressed)\n #ivs\n self.ftThread_cnt=-1\n self.ftThreads_pool = [None, None, None, None, None]\n #ivs\n self.ImageSets_path=os.path.join(self.path_prefix,'ImageSets')\n # *************训练模型的变量*********\n self.adjustUI()\n\n def adjustUI(self):\n helper = MgrHelper.Instance()\n 
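# the 0xf015 (home) and 0xf061 (right-arrow) codepoints below look like Font Awesome glyphs (editor's assumption)\n        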
navButtonSize=25\n helper.setFontIcon(self.ui.pushButton_return,0xf015,navButtonSize)\n helper.setFontIcon(self.ui.pushButton_next,0xf061,navButtonSize)\n self.ui.pushButton_return.setCursor(QCursor(Qt.PointingHandCursor))\n self.ui.pushButton_next.setCursor(QCursor(Qt.PointingHandCursor))\n self.ui.pushButton_next.setStyleSheet(\"QPushButton{background-color:#4a93ca;\"\n #\"border-radius:5px\"\n \"border:none;\"\n #\"color:#ffffff;\"\n #\"font-size:12px;\"\n \"font-family:Microsoft Yahei;}\"\n \"QPushButton:hover{background-color:#ea5e00;}\"\n \"QPushButton:pressed{background-color:#002030;}\")\n self.ui.pushButton_return.setStyleSheet(\"QPushButton{background-color:#4a93ca;\"\n \"border:none;\"\n # \"color:#ffffff;\"\n # \"font-size:12px;\"\n \"font-family:Microsoft Yahei;}\"\n \"QPushButton:hover{background-color:#ea5e00;}\"\n \"QPushButton:pressed{background-color:#002030;}\")\n self.ui.label_title.setStyleSheet(\"QLabel{background-color:#4a93ca;\"\n \"border:none;\"\n \"color:#ffffff;\"\n \"font:bold;\"\n \"font-size:20px;\"\n \"font-family:Meiryo UI;\"\n \"qproperty-alignment:AlignCenter;}\")\n #顶部导航栏两个按钮\n self.ui.pushButton_return.clicked.connect(self.returnButtonPressed)\n self.ui.pushButton_next.clicked.connect(self.nextButtonPressed)\n\n def returnButtonPressed(self):\n self.direct_object.show()\n #direct_widget=DirectForm()\n #direct_widget.show()\n self.close()\n\n def nextButtonPressed(self):\n self.removal_widget=RemoveObjectForm(self.path_prefix,self.direct_object)\n self.removal_widget.show()\n self.close()\n\n def closeEvent(self, event):\n QWidget.closeEvent(self, event)\n # self.direct_object.close()\n self.close()\n\n #*************globle functions*********\n def resizeEvent(self,event):\n #print('resizeEvent')\n self.resized.emit()\n return super(OsvosForm,self).resizeEvent(event)\n\n def showCurrentTime(self, cnt, label):\n tsec = cnt / self.fps\n tmin = int(tsec / 60) # 视频播放到的帧的整数分钟\n ttsec = int(tsec - 60 * tmin) # 视频播放到的帧的整数秒钟\n ksec = tsec - 60 * tmin - ttsec # 视频播放到的帧的小数秒钟\n ksec=int (ksec*100)\n # if ksec > 0.5:\n # ttsec += 1\n label.setText(str(tmin).zfill(2) + ':' + str(ttsec).zfill(2) + ':' + str(ksec).zfill(2))\n #*************globle functions*********\n\n # *************选择mask的变量*********\n def callback(self,mask_list):\n print(mask_list)\n if mask_list==[]:\n return\n else:\n maskpic0=mask_list[0]\n self.mask_path=os.path.dirname(maskpic0)\n self.load_img(maskpic0)\n print('maskpic0:{}'.format(maskpic0))\n print('mask_path:{}'.format(self.mask_path))\n\n def _on_resized2(self):\n self.on_zoomfit_clicked2()\n\n def on_zoomfit_clicked2(self):\n sleep(0.2)\n if self.curr_mask_img!=None:\n self.load_img(self.curr_mask_img)\n\n # 加载图片并显示到label中\n def load_img(self, img_path):\n try:\n with open(img_path, 'rb') as f:\n img_data = f.read()\n except Exception as e:\n QMessageBox.warning(self, 'Warning', str(e))\n return\n\n img = QImage.fromData(img_data)\n if img.isNull():\n QMessageBox.warning(self, 'Warning', 'Invalid Image')\n return False\n\n pixmap = QPixmap.fromImage(img)\n pixmap = pixmap.scaled(int(self.ui.label_mask.width()),int(self.ui.label_mask.height()), Qt.KeepAspectRatio)\n # print(pixmap.size())\n self.ui.label_mask.setPixmap(pixmap)\n\n #设置mask参数\n self.curr_mask_img = img_path\n self.mask_path=os.path.dirname(self.curr_mask_img)\n picname = img_path.split('/')[-1].split('.')[0]\n if ('_' in picname) == True:\n self.curr_mask_id = int(picname.split('_')[0])\n else:\n self.curr_mask_id = int(picname)\n return True\n\n def chooseMask(self):\n 
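# reached via double-click on the mask label: open a file dialog in the last-used mask directory and load the selected image (added note)\n        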
filename_choose,filetype=QFileDialog.getOpenFileName(self,\"选取目标mask\",self.mask_path,\"Image files(*.jpg *.png *.jpeg)\")\n if filename_choose!=\"\":\n self.load_img(filename_choose)\n # *************选择mask的变量*********\n\n # *************左边展示结果的函数*********\n ########左右按键 #########\n\n def modelValueChanged(self):\n self.model = self.ui.comboBox_model.currentText()\n\n def on_release(self, key):\n #if self.isRun:\n if key == pynput.keyboard.Key.left:\n print('按键left')\n self.horizontalSliderIncrease(-1)\n elif key == pynput.keyboard.Key.right:\n print('按键right')\n self.horizontalSliderIncrease(1)\n\n ########左右按键 #########\n\n def on_press(self, key):\n if self.isRun:\n if pynput.keyboard.Key.space == key:\n if self.ui.pushButton_left6.isEnabled():\n self.pauseButtonPressed()\n else:\n while (self.sliderbusy == True or self.resizegoing == True):\n sleep(0.1)\n self.startButtonPressed()\n\n #进度条值改变为0\n def slider_value_changed(self):\n if not self.isthreadActive:\n #print(\"slidervalue change\")\n self.horizontalSliderIncrease(0)\n\n def horizontalSliderIncrease(self, val):\n if self.sliderbusy or self.resizegoing or self.stframe + self.ui.horizontalSlider_left.value()+val<0 or self.stframe + self.ui.horizontalSlider_left.value()+val>self.ui.horizontalSlider_left.maximum():\n return\n self.sliderbusy = True\n #print(\"current horizontalSlider_left value={}\".format(self.ui.horizontalSlider_left.value()))\n #print(\"current left frameId={}\".format(self.frameID))\n self.frameID = self.stframe + self.ui.horizontalSlider_left.value()+val\n #print(\"changed left frameId={}\".format(self.frameID))\n if self.ui.pushButton_left5.isEnabled():\n self.currentID=self.frameID\n # src_img = self.srcImages[self.currentID]\n # mask_img = self.maskImages[self.currentID]\n # image = cv2.imread(src_img)\n # self.on_zoomfit_clicked()\n # self.limg = image\n # limage = preprocessShowImgae(src_img, mask_img, self.scaleFactor)\n # self.ui.leftImage.setPixmap(QtGui.QPixmap(limage))\n self.on_zoomfit_clicked()\n self.showCurrentTime(self.currentID, self.ui.label_time1)\n self.ui.horizontalSlider_left.setValue(self.frameID)\n self.sliderbusy = False\n\n def horizontalSliderSet(self, cnt):\n if cnt-self.stframe>self.ui.horizontalSlider_left.maximum() or self.sliderbusy or self.resizegoing:\n return\n self.sliderbusy2 = True\n self.ui.horizontalSlider_left.setValue(cnt - self.stframe)\n self.showCurrentTime(cnt,self.ui.label_time1)\n self.sliderbusy2 = False\n\n def horizontalSliderPressed(self):\n if self.ui.horizontalSlider_left.value()==self.ui.horizontalSlider_left.maximum():\n self.isthreadActive=True\n self.sliderbusy=True\n\n def horizontalSliderReleased(self):\n self.frameID = self.stframe + self.ui.horizontalSlider_left.value()\n if self.ui.pushButton_left5.isEnabled():\n self.currentID = self.frameID\n # src_img = self.srcImages[self.currentID]\n # mask_img = self.maskImages[self.currentID]\n # image = cv2.imread(src_img)\n # self.limg = image\n # limage = preprocessShowImgae(src_img, mask_img, self.scaleFactor)\n # self.ui.leftImage.setPixmap(QtGui.QPixmap(limage))\n self.on_zoomfit_clicked()\n self.showCurrentTime(self.currentID, self.ui.label_time1)\n self.sliderbusy = False\n\n def updateFrame(self):\n self.frameID += 1\n\n def on_zoomfit_clicked(self):\n #print('on_zoomfit_clicked')\n self.resizegoing=True\n a=self.ui.leftImage.size()\n if a.width()/self.frameWidth=self.ui.horizontalSlider_left.maximum():\n return\n self.ui.pushButton_left5.setEnabled(False)\n 
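# playback works by bumping frameID every 1/fps seconds; workerThread notices the change and renders the frame (added note)\n        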
self.timer=perpetualTimer(1.0/self.fps,self.updateFrame)\n self.timer.start()\n self.ui.pushButton_left6.setEnabled(True)\n self.isthreadActive=True\n\n def pauseButtonPressed(self):\n if not self.isthreadActive:\n return\n self.ui.pushButton_left5.setEnabled(True)\n self.ui.pushButton_left6.setEnabled(False)\n if not self.timer is None:\n self.timer.cancel()\n self.isthreadActive=False\n\n def initLeftImage(self):\n #print('初始化左边结果展示...')\n self.isInit=True\n src_img_path=self.frames_path\n result_mask_path=self.vos_result_path\n self.srcImages=getSrcImages(src_img_path)\n self.maskImages=getResultsMaskImages(result_mask_path)\n if len(self.srcImages)==0 or len(self.maskImages)==0 or len(self.srcImages)!=len(self.maskImages):\n QMessageBox.about(self, 'Warning', '处理的结果图像未准备好!')\n return\n else:\n self.isVideo=True\n self.length=len(self.maskImages)\n #print(\"total frames:{} fps:{}\".format(self.length, self.fps))\n self.stframe=0\n self.endframe=self.length-1\n\n self.ui.horizontalSlider_left.setMaximum(self.endframe - self.stframe)\n #print(\"horiontalSLider_left maxMiun:{}\".format(self.endframe - self.stframe))\n current_img=self.srcImages[self.stframe]\n current_mask=self.maskImages[self.stframe]\n self.currentID=self.stframe\n [height, width, pix] = cv2.imread(current_img).shape\n self.frameWidth=width\n self.frameHeight=height\n #self.on_zoomfit_clicked()\n if self.isInit==True:\n self.scaleFactor=0.604167\n #print('init leftImage-width:{} height:{}'.format(self.ui.leftImage.size().width(),self.ui.leftImage.size().height()))\n #print('init resultImage-frameWidth:{} frameHeight:{} scaleFactor:{}'.format(self.frameWidth,self.frameHeight,self.scaleFactor))\n #print(\"执行on_zoonfit_clicked,scaleFactor is {}\".format(self.scaleFactor))\n\n limage = preprocessShowImgae(current_img,current_mask,self.scaleFactor)\n #print('第一帧显示成功!')\n #视频总时长\n self.showCurrentTime(self.length-1,self.ui.label_time2)\n #归0\n self.ui.label_time1.setText('00:00:00')\n self.horizontalSliderSet(0)\n self.ui.leftImage.setPixmap(QtGui.QPixmap(limage))\n #self.ui.textBrowser.setText('Ready to play segmentated video!')\n print('\\n')\n self.ui.pushButton_left5.setEnabled(True)\n self.ui.pushButton_left6.setEnabled(False)\n self.ui.horizontalSlider_left.setEnabled(True)\n\n # *************左边展示结果的函数*********\n\n # *************右边播放视频的函数*********\n\n ########左右按键——前进后退#########\n def on_release1(self, key):\n #if self.isRun:\n if key == pynput.keyboard.Key.page_up:\n print('page_up')\n self.horizontalSliderIncrease1(1)\n elif key == pynput.keyboard.Key.page_down:\n print('page_down')\n self.horizontalSliderIncrease1(-1)\n\n ########左右按键-前进后退#########\n ########空格按键-播放暂停#########\n def on_press1(self, key):\n if self.isRun1:\n if pynput.keyboard.Key.enter == key:\n if self.ui.pushButton_right3.isEnabled():\n self.pauseButtonPressed1()\n else:\n while (self.sliderbusy_right == True or self.resizegoing1 == True):\n sleep(0.1)\n self.startButtonPressed1()\n ########空格按键-播放暂停#########\n # 进度条值改变为0\n def slider_value_changed1(self):\n if not self.isthreadActive1:\n #print(\"slidervalue change\")\n self.horizontalSliderIncrease1(0)\n\n def horizontalSliderIncrease1(self, val):\n if self.sliderbusy_right or self.resizegoing1 or self.ui.horizontalSlider_right.value()+val+self.stframe1<0 or self.ui.horizontalSlider_right.value()+val+self.stframe1>self.ui.horizontalSlider_right.maximum():\n print('horizontalSliderIncrease1 return')\n return\n self.sliderbusy_right = True\n self.frameID1 = self.stframe1 + 
self.ui.horizontalSlider_right.value()+val\n #print(\"changed frameId={}\".format(self.frameID1))\n if self.ui.pushButton_right2.isEnabled():\n # self.cap1.set(cv2.CAP_PROP_POS_FRAMES, self.frameID1)\n # ret, frame = self.cap1.read()\n # limage = preprocessImage(frame,self.scaleFactor1)\n # self.ui.rightImage.setPixmap(QtGui.QPixmap(limage))\n self.on_zoomfit_clicked1()\n self.showCurrentTime(self.frameID1,self.ui.label_time3)\n self.ui.horizontalSlider_right.setValue(self.frameID1)\n self.sliderbusy_right = False\n\n def updateFrame1(self):\n self.frameID1 += 1\n\n def horizontalSliderSet1(self,cnt):\n if cnt-self.stframe1>self.ui.horizontalSlider_right.maximum() or self.sliderbusy_right or self.resizegoing1:\n return\n self.sliderbusy2_right=True\n self.ui.horizontalSlider_right.setValue(cnt-self.stframe1)\n self.showCurrentTime(cnt,self.ui.label_time3)\n #self.ui.statusbar.showMessage(\"Frame Time\"+str(tmin).zfill(2)+\":\"+str(ttsec).zfill(2)+\":\"+str(int(ksec*100)))\n self.sliderbusy2_right=False\n\n def horizontalSliderPressed1(self):\n if self.ui.horizontalSlider_right.value()==self.ui.horizontalSlider_right.maximum():\n self.isthreadActive1=True\n self.sliderbusy_right=True\n\n def horizontalSliderReleased1(self):\n self.frameID1=self.stframe1+self.ui.horizontalSlider_right.value()\n # tsec=self.frameID1/self.fps1\n # self.ui.label_time3.setText(str(tmin).zfill(2)+':'+str(ttsec).zfill(2))\n # if playback has not started yet\n if self.ui.pushButton_right2.isEnabled():\n # self.cap1.set(cv2.CAP_PROP_POS_FRAMES,self.frameID1)\n # ret,frame=self.cap1.read()\n # limage=preprocessImage(frame,self.scaleFactor1)\n # self.ui.rightImage.setPixmap(QtGui.QPixmap(limage))\n self.on_zoomfit_clicked1()\n self.showCurrentTime(self.frameID1, self.ui.label_time3)\n self.sliderbusy_right=False\n\n def _on_resized1(self):\n self.on_zoomfit_clicked1()\n\n def openButtonPressed1(self):\n if self.isthreadActive1 or self.isVideo1:\n #print(\"showing confirmation dialog\")\n reply=QMessageBox.question(self,'Message','Close the current video and open a new one?',QMessageBox.Yes|QMessageBox.No,QMessageBox.Yes)\n if reply==QMessageBox.Yes:\n self.curr_mask_id=None\n self.curr_mask_img=None\n if not self.timer1 is None:\n self.timer1.cancel()\n self.isthreadActive1 = False\n #self.ui.horizontalSlider_right.setValue(0)\n self.sliderbusy_right=False\n self.sliderbusy2_right=False\n self.timer1=None\n # self.ui.label_time3.setText('00:00:00')\n # self.ui.label_time4.setText('00:00:00')\n else:\n return\n\n #try:\n fileName=QFileDialog.getOpenFileName(None,caption=\"select video\",directory=self.path_prefix)# directory is the starting path\n # getOpenFileName returns (name, type)\n if len(fileName[0])>0:\n self.cap1=cv2.VideoCapture(fileName[0])\n self.isVideo1=True\n self.ui.horizontalSlider_right.setEnabled(True)\n if self.isFirstKey_right==False:\n self.klistener1_right.start()\n self.klistener2_right.start()\n self.isFirstKey_right = True\n else:\n #print(\"filename {} is empty\".format(fileName[0]))\n return\n # split the video into frames by default\n if not os.path.exists(self.frames_path) or not os.listdir(self.frames_path):\n video_to_frames(self.frames_path,fileName[0])\n self.length1=int(self.cap1.get(cv2.CAP_PROP_FRAME_COUNT))\n self.fps1=self.cap1.get(cv2.CAP_PROP_FPS)\n #print(\"videoName:{} total frames:{} fps:{}\".format(fileName[0],self.length1,self.fps1))\n self.stframe1=0\n self.endframe1=self.length1-1\n self.ui.horizontalSlider_right.setMaximum(self.endframe1-self.stframe1)\n #print(\"horizontalSlider maximum:{}\".format(self.endframe1-self.stframe1))\n 
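# NOTE: video_to_frames above is assumed to dump every frame of the selected\n # video into self.frames_path; it is not defined in this excerpt. A minimal\n # OpenCV sketch of such a helper (signature taken from the call above, body\n # assumed):\n #\n # def video_to_frames(frames_path, video_file):\n #     os.makedirs(frames_path, exist_ok=True)\n #     cap = cv2.VideoCapture(video_file)\n #     idx = 0\n #     while True:\n #         ret, frame = cap.read()\n #         if not ret:  # end of stream\n #             break\n #         cv2.imwrite(os.path.join(frames_path, '%05d.jpg' % idx), frame)\n #         idx += 1\n #     cap.release()\n 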
# total video duration\n self.showCurrentTime(self.length1-1,self.ui.label_time4)\n # reset to zero\n self.ui.horizontalSlider_right.setValue(0)\n self.ui.label_time3.setText('00:00:00')\n # tsec = self.length1 / self.fps1\n # self.ui.label_time4.setText(str(tmin).zfill(2)+':'+str(ttsec).zfill(2))\n self.cap1.set(1,self.stframe1)\n ret,frame=self.cap1.read()\n # if(ret==True):\n # print(\"cap read successfully!\")\n # else:\n # print(\"cap read failed!\")\n self.limg1=frame\n self.frameID1=self.stframe1\n self.frameHeight1=frame.shape[0]\n self.frameWidth1=frame.shape[1]\n self.on_zoomfit_clicked1()\n #print(\"running on_zoomfit_clicked1, scaleFactor1 is {}\".format(self.scaleFactor1))\n #print('init rightImage-width:{} height:{}'.format(self.ui.rightImage.size().width(),self.ui.rightImage.size().height()))\n #print('init videoImage-frameWidth1:{} frameHeight1:{} scaleFactor1:{}'.format(self.frameWidth1, self.frameHeight1,self.scaleFactor1))\n limage=preprocessImage(frame,self.scaleFactor1)\n self.ui.rightImage.setPixmap(QtGui.QPixmap(limage))\n #print(\"first frame of the opened video displayed successfully!\")\n #self.ui.textBrowser.setText(\"Ready to start or redefine video\")\n self.ui.textBrowser.clear()\n self.ui.textBrowser.setText('ENTER - play/pause, PgUp - forward, PgDn - backward')\n print('\\n')\n self.ui.pushButton_right2.setEnabled(True)\n self.ui.pushButton_right3.setEnabled(False)\n self.ui.horizontalSlider_right.setEnabled(True)\n\n def startButtonPressed1(self):\n if self.isthreadActive1 or self.ui.horizontalSlider_right.value()>=self.ui.horizontalSlider_right.maximum():\n return\n self.ui.pushButton_right2.setEnabled(False)\n self.timer1=perpetualTimer(1.0/self.fps1,self.updateFrame1)\n self.timer1.start()\n\n self.ui.pushButton_right3.setEnabled(True)\n self.isthreadActive1=True\n\n def pauseButtonPressed1(self):\n if not self.isthreadActive1:\n return\n self.ui.pushButton_right2.setEnabled(True)\n self.ui.pushButton_right3.setEnabled(False)\n if not self.timer1 is None:\n self.timer1.cancel()\n self.isthreadActive1=False\n\n def on_zoomfit_clicked1(self):\n self.resizegoing1=True\n a=self.ui.rightImage.size()\n if a.width()/self.frameWidth1 < a.height()/self.frameHeight1:\n self.scaleFactor1 = a.width()/self.frameWidth1\n else:\n self.scaleFactor1 = a.height()/self.frameHeight1\n limage = preprocessImage(self.limg1, self.scaleFactor1)\n self.ui.rightImage.setPixmap(QtGui.QPixmap(limage))\n self.resizegoing1=False\n\nclass Solution:\n def isStrobogrammatic(self, num: str) -> bool:\n ls1 = ['0', '1', '8']\n ls2 = ['6', '9']\n length = len(num)\n ls = []\n for i in num[::-1]:\n if i in ls1: ls.append(i)\n elif i in ls2:\n if(i == '6'): ls.append('9')\n if(i == '9'): ls.append('6')\n else: \n return False \n new = ''.join(ls)\n if(new == num): return True\n else: return False","repo_name":"YikunHan42/LeetCode","sub_path":"(246)Strobogrammatic Number.py","file_name":"(246)Strobogrammatic Number.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1291443788","text":"#2\nfrom datetime import datetime\nimport zmq\n\n\nhost = '127.0.0.1'\nport = 6789\ncontext = zmq.Context()\nserver = context.socket(zmq.REP)\nserver.bind(\"tcp://%s:%s\" % (host, port))\nprint(\"Server started\")\nwhile True:\n server.recv()\n reply_str = \"Now is: %s\" % datetime.utcnow()\n reply_bytes = bytes(reply_str, 'utf-8')\n server.send(reply_bytes)\n","repo_name":"AndreyNeveikov/Introducing_Python_by_Bill_Lubanovic_exercises","sub_path":"Chapter_17_Data_in_space-Networks/Chapter_17_task2_server.py","file_name":"Chapter_17_task2_server.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"70691594134","text":"import wandb\nfrom algorithms.train_tradaboost import train_tradaboost_nn\nfrom data_utils.datasets import load_preproc_data\n\n\ndef 
sweep_tradaboost_nn():\n config = dict(\n validation_split=0.1,\n # seed=list(range(5)),\n seed=4,\n cv=1,\n n_estimators=2,\n n_estimators_fs=2,\n mlp=dict(\n learning_rate=0.001,\n epochs=2,\n batch_size=64,\n early_stop_patience=15,\n hidden=[1024, 1024, 1024],\n l2_reg=1e-4,\n loss_function='mse',\n dropout_rate=0.1\n ),\n # train_dataset=['src', 'tar1', 'tar2', 'tar3'])\n train_dataset='bpm10_tar',\n src_dataset='bpm10_src'\n )\n\n wandb_init = dict(\n project='test2',\n entity='transfer-learning-tcn',\n reinit=False,\n config=config\n )\n\n wandb.init(**wandb_init)\n config = wandb.config\n print(config)\n wandb_init['config'] = config\n\n test_data_dict = dict()\n train_data_dict = dict()\n for name in [config.train_dataset, config.src_dataset]:\n data_dict = load_preproc_data(name=name)\n test_data_dict[name] = (data_dict['man_x_test'], data_dict['y_test'].reshape(-1, 1))\n train_data_dict[name] = (data_dict['man_x_train'], data_dict['y_train'].reshape(-1, 1))\n\n test_data_dict[\"test\"] = test_data_dict[config.train_dataset]\n src_x, src_y = train_data_dict[config.src_dataset]\n tar_x, tar_y = train_data_dict[config.train_dataset]\n train_tradaboost_nn(src_x, src_y, tar_x, tar_y, test_data_dict, wandb_init)\n\n\nif __name__ == '__main__':\n sweep_tradaboost_nn()","repo_name":"lpsilvestrin/mee-finetune","sub_path":"experiments/tradaboost_nn_sweep.py","file_name":"tradaboost_nn_sweep.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9997171421","text":"from openerp.osv import orm, osv, fields\nfrom openerp.tools.translate import _\n\nclass res_partner(osv.osv):\n \n _inherit = 'res.partner'\n\n def get_report(self, cr, uid, id, document_type, datas, context=None):\n\n assert len(id) == 1, 'This option should only be used for a single id at a time.'\n\n ids = self.pool.get('xx.partner.document.layout').search(cr, uid, [('document_type','=',document_type),('partner_id','in',id)])\n document_options = False\n if ids:\n document_layout = self.pool.get('xx.partner.document.layout').browse(cr, uid, ids[0])\n document_options = {'jasper_report': document_layout.jasper_report.report_name,\n 'nb_of_copies': document_layout.nb_of_copies,}\n else:\n ids = self.pool.get('xx.document.layout').search(cr, uid, [('document_type','=',document_type),('company_id','=',self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)])\n if ids:\n document_layout = self.pool.get('xx.document.layout').browse(cr, uid, ids[0])\n document_options = {'jasper_report': document_layout.jasper_report.report_name,\n 'nb_of_copies': document_layout.nb_of_copies,}\n\n if not document_options:\n raise osv.except_osv('Warning', _('Document Layout not specified for Document Type \"%s\" !') % (document_type))\n \n datas.update({'nb_of_copies':document_options.get('nb_of_copies')})\n return {'type': 'ir.actions.report.xml', 'report_name': document_options.get('jasper_report'), 'datas': datas, 'nodestroy': True}\n\n def get_email_template(self, cr, uid, id, document_type, context=None):\n\n assert len(id) == 1, 'This option should only be used for a single id at a time.'\n\n ids = self.pool.get('xx.partner.email.layout').search(cr, uid, [('document_type','=',document_type),('partner_id','=',id)])\n email_template = False\n if ids:\n email_layout = self.pool.get('xx.partner.email.layout').browse(cr, uid, ids[0])\n email_template = email_layout.email_template.id\n else:\n ids = 
self.pool.get('xx.email.layout').search(cr, uid, [('document_type','=',document_type),('company_id','=',self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)])\n if ids:\n email_layout = self.pool.get('xx.email.layout').browse(cr, uid, ids[0])\n email_template = email_layout.email_template.id\n\n return email_template\n\n _columns = {\n 'xx_partner_document_layouts': fields.one2many('xx.partner.document.layout','partner_id','Print Layouts'),\n 'xx_partner_email_layouts': fields.one2many('xx.partner.email.layout','partner_id','E-mail Layouts'),\n 'xx_communication_type': fields.selection( [('post','Post'),('mail','E-mail')],'Communication Type'), \n }\n\n _defaults = {\n 'xx_communication_type': 'post',\n }","repo_name":"Niboo/legal1","sub_path":"jasper_reports/model/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20383987862","text":"import sys, os.path, pygame, json, random, copy, threading, time\nfrom math import sin, cos, radians, degrees\nfrom pygame.locals import *\n\nmain_dir = os.path.split(os.path.abspath(__file__))[0]+\"\\\\resources\"\nprint(main_dir)\n# initialization\npygame.init()\nwith open('config.json') as config_file:\n data = json.load(config_file)\n\n# config\nWINDOW_HEIGHT = data[\"WINDOW_HEIGHT\"]\nWINDOW_WIDTH = data[\"WINDOW_WIDTH\"]\nBACKGROUND_COLOR = tuple(data[\"BACKGROUND_COLOR\"])\nPADDLE_COLOR = tuple(data[\"PADDLE_COLOR\"])\nPADDLE_SPEED = data[\"PADDLE_SPEED\"]\nPADDLE_SIZE = data[\"PADDLE_SIZE\"]\nBALL_SPEED = data[\"BALL_SPEED\"]\nBALL_COLOR = tuple(data[\"BALL_COLOR\"])\nBALL_SIZE = data[\"BALL_SIZE\"]\nBASIC_FONT = pygame.font.SysFont(data[\"BASIC_FONT\"][0], data[\"BASIC_FONT\"][1])\nBIG_FONT = pygame.font.SysFont(data[\"BASIC_FONT\"][0], data[\"BASIC_FONT\"][1])\nSMALL_FONT = pygame.font.SysFont(data[\"BASIC_FONT\"][0], data[\"BASIC_FONT\"][1])\nFONT_COLOR = tuple(data[\"FONT_COLOR\"])\nMAX_SCORE = data[\"MAX_SCORE\"]\nCOMPUTER_LEVEL = data[\"COMPUTER_LEVEL\"]\nGUN_CD = data[\"GUN_CD\"]\nGUN_DURATION = data[\"GUN_DURATION\"]\n\n# game initialization\nwindow = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32, pygame.DOUBLEBUF)\npygame.display.set_caption('Pong Game')\nbackground = pygame.Surface(window.get_size())\nbackground = background.convert()\nbackground.fill(BACKGROUND_COLOR)\nwindow.blit(background, (0, 0))\npygame.display.flip()\nclock = pygame.time.Clock()\ntick = 60\n\n# game constants\nSCORE = [0, 0]\nSCORED_NOW = False\nPAUSED = False\nGAME_OVER = False\nGAME_STARTED = False\nGAME_MODE = 1\nPERKS = [[0, 0, 0], [0, 0, 0]]\nLAST_TOUCH = 0\nONLINE = False\n\n\n\n# game elements loading functions\nclass DummySound:\n def play(self, *args): pass\n\n\nclass DummyImg(pygame.Surface):\n pass\n\n\ndef load_sound(file):\n if not pygame.mixer: return DummySound()\n file = os.path.join(main_dir, file)\n try:\n sound = pygame.mixer.Sound(file)\n return sound\n except pygame.error:\n print('Warning, unable to load, %s' % file)\n except FileNotFoundError:\n print('Warning, unable to load, %s' % file)\n return DummySound()\n\n\ndef load_image(file):\n file = os.path.join(main_dir, file)\n try:\n image = pygame.image.load(file)\n return image\n except pygame.error:\n print('Warning, unable to load, %s' % file)\n except FileNotFoundError:\n print('Warning, unable to load, %s' % file)\n return DummyImg([64, 64])\n\n\nclass Paddle(pygame.sprite.Sprite):\n \"\"\"\n Paddle 
class. Inherit init and update function. Update\n is called once per frame, sets paddle position and check\n if it collided with ball. If so it changes ball angle, based\n on collide point.\n \"\"\"\n direction = 0\n speed = PADDLE_SPEED\n moving = False\n pos = [0, 0]\n gun_cd = 0\n gun = False\n bigger = False\n faster = False\n\n def __init__(self, player_number):\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.player_number = player_number\n if PERKS[self.player_number][1] == 1:\n self.image = pygame.Surface([PADDLE_SIZE[0], PADDLE_SIZE[1] + 40])\n self.bigger = True\n else:\n self.image = pygame.Surface(PADDLE_SIZE)\n self.image.fill(PADDLE_COLOR)\n self.rect = self.image.get_rect()\n self.speed = PADDLE_SPEED\n if PERKS[self.player_number][0] == 1:\n self.speed += 4\n self.faster = True\n if PERKS[self.player_number][2] == 1:\n self.gun = True\n if self.player_number == 0:\n self.rect.centerx = window.get_rect().left\n self.rect.centerx += 50\n\n else:\n self.rect.centerx = window.get_rect().right\n self.rect.centerx -= 50\n self.rect.centery = window.get_rect().centery\n self.pos = [self.rect.x, self.rect.y]\n\n def update(self):\n global LAST_TOUCH\n if self.moving and not GAME_OVER and not PAUSED:\n if self.direction == 0 and self.rect.y > 5:\n self.rect.centery -= self.speed\n elif self.direction == 1 and self.rect.bottom < WINDOW_HEIGHT - 5:\n self.rect.centery += self.speed\n self.pos = [self.rect.x, self.rect.y]\n\n # shoot method can be used once per 5 seconds, so we need to use thread\n def shoot(self):\n if self.gun:\n if self.gun_cd == 0:\n shoot_music.play()\n Shot([self.rect.centerx, self.rect.centery])\n self.gun_cd = GUN_CD\n time.sleep(GUN_CD)\n self.gun_cd = 0\n\n def get_shot(self):\n get_shot_music.play()\n temp_speed = self.speed\n self.image.fill((0, 11, 30))\n self.speed = 1\n time.sleep(GUN_DURATION)\n self.image.fill(PADDLE_COLOR)\n self.speed = temp_speed\n\n\nclass ComputerPaddle(pygame.sprite.Sprite):\n \"\"\"\n Computer paddle class. 
Pretty much same as normal paddle, but\n when ball is coming ball it moves depending of y-axis difference\n between its and ball position and percentage chance(to make it beatable)\n \"\"\"\n ball = None\n direction = 0\n speed = PADDLE_SPEED\n pos = [0, 0]\n gun_cd = 0\n gun = False\n bigger = False\n faster = False\n\n def __init__(self, player_number, ball):\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.ball = ball\n self.player_number = player_number\n if PERKS[self.player_number][1] == 1:\n self.image = pygame.Surface([PADDLE_SIZE[0], PADDLE_SIZE[1] + 40])\n self.bigger = True\n else:\n self.image = pygame.Surface(PADDLE_SIZE)\n self.image.fill(PADDLE_COLOR)\n self.rect = self.image.get_rect()\n self.speed = PADDLE_SPEED\n if PERKS[self.player_number][0] == 1:\n self.speed += 4\n self.faster = True\n if PERKS[self.player_number][2] == 1:\n self.gun = True\n x = threading.Thread(target=self.shoot, daemon=True)\n x.start()\n if self.player_number == 0:\n self.rect.centerx = window.get_rect().left\n self.rect.centerx += 50\n else:\n self.rect.centerx = window.get_rect().right\n self.rect.centerx -= 50\n self.rect.centery = window.get_rect().centery\n self.pos = [self.rect.x, self.rect.y]\n\n def update(self, *args):\n global LAST_TOUCH\n if not GAME_OVER and not PAUSED and LAST_TOUCH == 0 and random.randint(0, 100) < COMPUTER_LEVEL:\n if (self.pos[1] - random.randint(0, 10) > ball.pos[1]) and self.rect.y > 5:\n self.pos = project(self.pos, radians(90), self.speed)\n elif (self.pos[1] + random.randint(0, 10) < ball.pos[1]) and self.rect.bottom < WINDOW_HEIGHT - 5:\n self.pos = project(self.pos, radians(270), self.speed)\n self.rect.center = self.pos\n\n def shoot(self):\n while self.gun:\n if random.randint(1, 3) == 2:\n shoot_music.play()\n Shot([self.rect.centerx, self.rect.centery])\n time.sleep(GUN_CD)\n\n def get_shot(self):\n get_shot_music.play()\n temp_speed = self.speed\n self.image.fill((0, 11, 30))\n self.speed = 1\n time.sleep(GUN_DURATION)\n self.image.fill(PADDLE_COLOR)\n self.speed = temp_speed\n\n\ndef project(pos, angle, distance):\n \"\"\"\n Returns tuple of pos projected distance at angle\n adjusted for pygame's y-axis.\n \"\"\"\n return (pos[0] + (cos(angle) * distance),\n pos[1] - (sin(angle) * distance))\n\n\nclass Ball(pygame.sprite.Sprite):\n \"\"\"\n Ball class. Inherited init and update function. 
Update\n is called once per frame and checks if ball collide with wall\n \"\"\"\n speed = BALL_SPEED\n pos = (window.get_rect().centerx, window.get_rect().centery)\n moving = True\n\n def __init__(self, angle):\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.angle = radians(angle)\n self.image = pygame.Surface(BALL_SIZE)\n self.image.fill(BALL_COLOR)\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n\n def update(self, *args):\n global SCORED_NOW\n if self.moving and not GAME_OVER and not PAUSED:\n # bal, hit horizontal wall\n if self.pos[1] < 2 or self.pos[1] > WINDOW_HEIGHT - 2:\n degree = degrees(self.angle)\n if (degree < 180):\n self.angle = radians(360 - degree)\n else:\n self.angle = radians(-degree)\n # ball hit vertical wall\n if self.pos[0] < 0:\n loose_music.play()\n # player 2 scores\n SCORE[1] += 1\n self.pos = (window.get_rect().centerx, window.get_rect().centery)\n self.angle = radians(180)\n SCORED_NOW = True\n if self.pos[0] > WINDOW_WIDTH:\n loose_music.play()\n # player 1 scores\n SCORE[0] += 1\n self.pos = (window.get_rect().centerx, window.get_rect().centery)\n self.angle = radians(0)\n SCORED_NOW = True\n self.pos = project(self.pos, self.angle, self.speed)\n self.rect.center = self.pos\n\n\nclass Shot(pygame.sprite.Sprite):\n speed = 25\n pos = [0, 0]\n\n def __init__(self, pos):\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.image = pygame.Surface((25, 10))\n self.image.fill((0, 11, 30))\n self.rect = self.image.get_rect()\n self.pos = pos\n if pos[0] < WINDOW_WIDTH / 2:\n self.angle = radians(0)\n self.rect.center = [pos[0] + 25, pos[1]]\n else:\n self.angle = radians(180)\n self.rect.center = [pos[0] - 25, pos[1]]\n\n def update(self):\n if self.pos[0] < 0:\n self.kill()\n if self.pos[0] > WINDOW_WIDTH:\n self.kill()\n self.pos = project(self.pos, self.angle, self.speed)\n self.rect.center = self.pos\n\n\nclass Score(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = BASIC_FONT.render(str(SCORE[0]) + \" \" + str(SCORE[1]), True, FONT_COLOR,\n BACKGROUND_COLOR)\n self.rect = self.image.get_rect()\n self.rect.centerx = window.get_rect().centerx\n self.rect.y = 10\n\n def update(self, *args):\n self.image = BASIC_FONT.render(str(SCORE[0]) + \" \" + str(SCORE[1]), True, FONT_COLOR,\n BACKGROUND_COLOR)\n\n\nclass Text(pygame.sprite.Sprite):\n def __init__(self, text, pos, font, text_color=FONT_COLOR, gamemode_text=0):\n pygame.sprite.Sprite.__init__(self)\n self.font = font\n self.text_color = text_color\n self.gamemode_text = gamemode_text\n self.pos = pos\n self.text = text\n self.image = font.render(text, True, text_color, BACKGROUND_COLOR)\n self.rect = self.image.get_rect()\n self.rect.centerx = pos[0]\n self.rect.centery = pos[1]\n\n def update(self, *args):\n self.image = self.font.render(self.text, True, self.text_color, BACKGROUND_COLOR)\n if 0 < self.gamemode_text == GAME_MODE:\n self.image = self.font.render(self.text, True, (139, 0, 0), BACKGROUND_COLOR)\n\n\nclass Image(pygame.sprite.Sprite):\n def __init__(self, img, pos, perk=[]):\n pygame.sprite.Sprite.__init__(self)\n self.pos = pos\n self.img = img\n self.perk = perk\n self.image = copy.copy(img)\n self.rect = self.image.get_rect()\n self.rect.centerx = pos[0]\n self.rect.centery = pos[1]\n\n def update(self, *args):\n self.image = copy.copy(self.img)\n if PERKS[self.perk[0]][self.perk[1]] == 1:\n self.image.blit(border_img, (0, 0))\n\n\n# menu def\ndef menu():\n global GAME_MODE, GAME_STARTED, PERKS\n # All 
the assets(text and images) for menu declaration\n gametype_text = Text(\"Choose gamemode\", [window.get_rect().centerx, window.get_rect().centery - 150], SMALL_FONT)\n choose_gametype1_text = Text(\"Player\", [window.get_rect().centerx - 170, window.get_rect().centery], SMALL_FONT,\n gamemode_text=1)\n player1_perk1 = Image(boots_img, [choose_gametype1_text.rect.centerx - 64, choose_gametype1_text.rect.centery + 70],\n [0, 0])\n player1_perk2 = Image(shield_img, [choose_gametype1_text.rect.centerx, choose_gametype1_text.rect.centery + 70],\n [0, 1])\n player1_perk3 = Image(sword_img, [choose_gametype1_text.rect.centerx + 64, choose_gametype1_text.rect.centery + 70],\n [0, 2])\n choose_gametype2_text = Text(\"Computer\", [window.get_rect().centerx + 130, window.get_rect().centery], SMALL_FONT,\n gamemode_text=2)\n player2_perk1 = Image(boots_img, [choose_gametype2_text.rect.centerx - 64, choose_gametype1_text.rect.centery + 70],\n [1, 0])\n player2_perk2 = Image(shield_img, [choose_gametype2_text.rect.centerx, choose_gametype1_text.rect.centery + 70],\n [1, 1])\n player2_perk3 = Image(sword_img, [choose_gametype2_text.rect.centerx + 64, choose_gametype1_text.rect.centery + 70],\n [1, 2])\n start_text = Text(\"START\", [window.get_rect().centerx, window.get_rect().centery + 200], BIG_FONT, (255, 215, 0))\n all.add(gametype_text)\n all.add(choose_gametype1_text)\n all.add(choose_gametype2_text)\n all.add(player1_perk1)\n all.add(player2_perk1)\n all.add(player1_perk2)\n all.add(player2_perk2)\n all.add(player1_perk3)\n all.add(player2_perk3)\n all.add(start_text)\n while not GAME_STARTED:\n all.clear(window, background)\n all.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n if choose_gametype1_text.rect.collidepoint(event.pos):\n GAME_MODE = 1\n if choose_gametype2_text.rect.collidepoint(event.pos):\n GAME_MODE = 2\n if start_text.rect.collidepoint(event.pos):\n GAME_STARTED = True\n all.empty()\n if player1_perk1.rect.collidepoint(event.pos):\n if PERKS[0][0] == 0:\n PERKS[0][0] = 1\n else:\n PERKS[0][0] = 0\n if player1_perk2.rect.collidepoint(event.pos):\n if PERKS[0][1] == 0:\n PERKS[0][1] = 1\n else:\n PERKS[0][1] = 0\n if player1_perk3.rect.collidepoint(event.pos):\n if PERKS[0][2] == 0:\n PERKS[0][2] = 1\n else:\n PERKS[0][2] = 0\n if player2_perk1.rect.collidepoint(event.pos):\n if PERKS[1][0] == 0:\n PERKS[1][0] = 1\n else:\n PERKS[1][0] = 0\n if player2_perk2.rect.collidepoint(event.pos):\n if PERKS[1][1] == 0:\n PERKS[1][1] = 1\n else:\n PERKS[1][1] = 0\n if player2_perk3.rect.collidepoint(event.pos):\n if PERKS[1][2] == 0:\n PERKS[1][2] = 1\n else:\n PERKS[1][2] = 0\n clock.tick(tick)\n dirty = all.draw(window)\n pygame.display.update(dirty)\n\n\n# assets loading (music + images)\nbackground_music = load_sound(\"background.ogg\")\nhit_music = load_sound(\"hit.ogg\")\ngame_over_music = load_sound(\"game_over.ogg\")\nloose_music = load_sound(\"loose.ogg\")\nshoot_music = load_sound(\"shoot.ogg\")\nget_shot_music = load_sound(\"get_shot.ogg\")\nborder_img = load_image(\"border.png\")\nsword_img = load_image(\"sword.png\")\nboots_img = load_image(\"boots.png\")\nshield_img = load_image(\"shield.png\")\n# Initialize Game Groups\npaddles = pygame.sprite.Group()\nballs = pygame.sprite.Group()\nshots = pygame.sprite.Group()\nall = pygame.sprite.RenderUpdates()\n# assign default groups to each sprite 
class\nPaddle.containers = all, paddles\nComputerPaddle.containers = all, paddles\nBall.containers = all, balls\nShot.containers = all, shots\nScore.containers = all\nText.containers = all\n# menu loop\nmenu()\n# game elements init\nall.add(Score())\nball = Ball(180)\npaddle1 = Paddle(0)\nif GAME_MODE == 1:\n paddle2 = Paddle(1)\nelse:\n paddle2 = ComputerPaddle(1, ball)\ntemp = pygame.mouse.get_pos()\nbackground_music.play(-1)\npygame.mouse.set_visible(False)\npygame.event.set_grab(True)\ngame_over_text = Text(\"GAME OVER\", [window.get_rect().centerx, window.get_rect().centery - 50], BIG_FONT)\nplayer1_win_text = Text(\"Player 1 Wins\", [game_over_text.rect.centerx, game_over_text.rect.centery + 75],\n BIG_FONT)\nplayer2_win_text = Text(\"Player 2 Wins\", [game_over_text.rect.centerx, game_over_text.rect.centery + 75],\n SMALL_FONT)\nreset_text = Text(\"Press (r) to reset\", [game_over_text.rect.centerx, game_over_text.rect.centery + 150],\n SMALL_FONT)\npaused_text = Text(\"GAME PAUSED\", [window.get_rect().centerx, 150], SMALL_FONT)\n# game loop\nwhile True:\n if ONLINE:\n pass\n if SCORED_NOW:\n if SCORE[0] > MAX_SCORE - 1:\n game_over_music.play()\n GAME_OVER = True\n all.add(game_over_text)\n all.add(player1_win_text)\n all.add(reset_text)\n elif SCORE[1] > MAX_SCORE - 1:\n game_over_music.play()\n GAME_OVER = True\n all.add(game_over_text)\n all.add(player2_win_text)\n all.add(reset_text)\n paddle1.rect.centery = window.get_rect().centery\n paddle2.rect.centery = window.get_rect().centery\n pygame.mouse.set_pos(window.get_rect().centerx, window.get_rect().centery)\n pygame.time.wait(1000)\n SCORED_NOW = False\n all.clear(window, background)\n all.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n # paddle 2 control\n if event.type == KEYDOWN:\n # temp variable, for key up to not block going other direction\n up_temp = False\n down_temp = False\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n if event.key == ord('p'):\n if PAUSED:\n PAUSED = False\n all.remove(paused_text)\n else:\n PAUSED = True\n all.add(paused_text)\n if event.key == ord('r'):\n SCORE = [0, 0]\n if GAME_OVER:\n GAME_OVER = False\n all.remove(game_over_text)\n all.remove(player1_win_text)\n all.remove(player2_win_text)\n all.remove(reset_text)\n if event.key == K_SPACE:\n x = threading.Thread(target=paddle2.shoot, daemon=True)\n x.start()\n if event.key == K_1:\n tick = 20\n if event.key == K_2:\n tick = 30\n if event.key == K_3:\n tick = 40\n if event.key == K_4:\n tick = 50\n if event.key == K_5:\n tick = 60\n if event.key == K_6:\n tick = 80\n if event.key == K_7:\n tick = 100\n if event.key == K_8:\n tick = 140\n if event.key == K_9:\n tick = 200\n if event.key == K_UP and not PAUSED and GAME_MODE == 1:\n paddle2.direction = 0\n paddle2.moving = True\n up_temp = True\n elif event.key == K_DOWN and not PAUSED and GAME_MODE == 1:\n paddle2.direction = 1\n paddle2.moving = True\n down_temp = True\n if event.type == KEYUP and GAME_MODE == 1:\n if event.key == K_UP:\n if not down_temp:\n up_temp = False\n paddle2.moving = False\n if event.key == K_DOWN:\n if not up_temp:\n down_temp = False\n paddle2.moving = False\n # Paddle 1 shooting\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n x = threading.Thread(target=paddle1.shoot)\n x.start()\n # paddle 1 control\n mouse_diff = temp[1] - pygame.mouse.get_pos()[1]\n # so mouse wont get out of border\n if (pygame.mouse.get_pos()[0] < 5 or pygame.mouse.get_pos()[1] < 5 or pygame.mouse.get_pos()[\n 0] > WINDOW_HEIGHT 
- 5 or pygame.mouse.get_pos()[1] < WINDOW_WIDTH - 5):\n pygame.mouse.set_pos(window.get_rect().centerx, window.get_rect().centery)\n if mouse_diff != 0 and not PAUSED:\n if mouse_diff > 0:\n paddle1.direction = 0\n paddle1.moving = True\n else:\n paddle1.direction = 1\n paddle1.moving = True\n else:\n paddle1.moving = False\n temp = pygame.mouse.get_pos()\n # detect collisions\n for paddle in paddles:\n for ball in pygame.sprite.spritecollide(paddle, balls, 0):\n hit_music.play()\n pos_hitted = ball.pos[1] - paddle.rect.centery\n if paddle.bigger:\n normalized_y = pos_hitted / ((PADDLE_SIZE[1] + 40) / 2)\n else:\n normalized_y = pos_hitted / (PADDLE_SIZE[1] / 2)\n bounce_angle = normalized_y * radians(75)\n # second paddle so we need to make ball go \"other\" way\n if paddle.rect.x > WINDOW_WIDTH / 2:\n bounce_angle = normalized_y * radians(75) + radians(180)\n ball.angle = bounce_angle\n LAST_TOUCH = paddle.player_number\n for bullet in pygame.sprite.spritecollide(paddle, shots, 0):\n bullet.kill()\n x = threading.Thread(target=paddle.get_shot)\n x.start()\n\n # draw the scene\n dirty = all.draw(window)\n pygame.display.update(dirty)\n pygame.display.flip()\n clock.tick(tick)\n","repo_name":"MasterGTFX/Pong-2.0","sub_path":"my_pong.py","file_name":"my_pong.py","file_ext":"py","file_size_in_byte":22831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21878436192","text":"import config.apiKeys as apiKeys\nimport urllib\nfrom utils.webHelpers import get_json_from_url\n\ngoogleKey = apiKeys.keys['googleBooks']['key']\ngoogleBooksURL = \"https://www.googleapis.com/books/v1/\" # volumes/zyTCAlFPjgYC?projection=lite&key=yourAPIKey\" # NOQA\nsampleSearch = \"https://www.googleapis.com/books/v1/volumes?q=flowers+inauthor:keyes&key=yourAPIKey\" # NOQA\n\n\ndef getVolumeInformation(volumeId, projection=\"lite\"):\n url = googleBooksURL + \"volumes/\" + volumeId + \"?projection=\" + \\\n projection + \"&key=\" + googleKey\n return get_json_from_url(url)\n\n\n# does a Google Books search based on an approximate name and an\n# approximate author if known and returns the first item found\ndef identifyBook(approxName, approxAuthor=None):\n items = getSearchResults(approxName,approxAuthor)\n if len(items) > 0:\n result = items[0]\n else:\n result = None\n return result\n\n\ndef getSearchResults(query, author=None):\n # https://www.googleapis.com/books/v1/volumes?q=flowers+inauthor:keyes&key=AIzaSyCcF4oL9AwBtPHV5nU1RQIELNBZ63lfsaA\n\n query = urllib.parse.quote(query)\n url = googleBooksURL + \"volumes?q=\" + query\n\n if author is not None:\n url = url + \"+inauthor:\" + author\n\n url = url + \"&key=\" + googleKey\n full_list = get_json_from_url(url)\n items = []\n if \"items\" in full_list:\n # list of dicts\n items = full_list[\"items\"]\n\n return items\n","repo_name":"CreateRandom/book_bot","sub_path":"utils/booksApi.py","file_name":"booksApi.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"21380568159","text":"#!/usr/bin/env python\n\n## -*-Pyth-*-\n # ###################################################################\n # FiPy - Python-based finite volume PDE solver\n #\n # FILE: \"explicitSourceTerm.py\"\n #\n # Author: Jonathan Guyer \n # Author: Daniel Wheeler \n # Author: James Warren \n # mail: NIST\n # www: http://www.ctcms.nist.gov/fipy/\n #\n # ========================================================================\n # This 
software was developed at the National Institute of Standards\n # and Technology, an agency of the Federal Government.\n # Pursuant to title 17 section 105 of the United States Code\n # this software is not subject to copyright\n # protection, and this software is considered to be in the public domain.\n # FiPy is an experimental system.\n # NIST assumes no responsibility whatsoever for its use by\n # other parties, and makes no guarantees, expressed or implied, about\n # its quality, reliability, or any other characteristic. We would\n # appreciate acknowledgement if the software is used.\n #\n # To the extent that NIST may hold copyright in countries other than the\n # United States, you are hereby granted the non-exclusive irrevocable and\n # unconditional right to print, publish, prepare derivative works and\n # distribute this software, in any medium, or authorize others to do so on\n # your behalf, on a royalty-free basis throughout the world.\n #\n # You may improve, modify, and create derivative works of the software or\n # any portion of the software, and you may copy and distribute such\n # modifications or works. Modified works should carry a notice stating\n # that you changed the software and should note the date and nature of any\n # such change. Please explicitly acknowledge the National Institute of\n # Standards and Technology as the original source.\n #\n # This software can be redistributed and/or modified freely provided that\n # any derivative works bear some notice that they are derived from it, and\n # any modified versions bear some notice that they have been modified.\n # ========================================================================\n #\n # ###################################################################\n ##\n\n__docformat__ = 'restructuredtext'\n\nfrom fipy.terms.explicitSourceTerm import _ExplicitSourceTerm\nfrom fipy.variables.cellVariable import CellVariable\n\n__all__ = [\"ResidualTerm\"]\n\nclass ResidualTerm(_ExplicitSourceTerm):\n r\"\"\"\n\n The `ResidualTerm` is a special form of explicit `SourceTerm` that adds the\n residual of one equation to another equation. Useful for Newton's method.
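\n A minimal usage sketch (``baseEq`` and ``linearizedEq`` are assumed names\n for illustration; only the ``ResidualTerm`` call itself reflects this\n module)::\n\n newtonEq = linearizedEq + ResidualTerm(equation=baseEq, underRelaxation=0.5)\n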
\n \"\"\"\n def __init__(self, equation, underRelaxation=1.):\n self.equation = equation\n self.underRelaxation = underRelaxation\n\n _ExplicitSourceTerm.__init__(self, var=None)\n\n def __repr__(self):\n return r\"$\\Delta$[\" + repr(self.equation) + \"]\"\n\n def _getGeomCoeff(self, var):\n return self.coeff\n\n def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None):\n vec = self.equation.justResidualVector(var=None,\n boundaryConditions=boundaryConditions,\n dt=dt)\n\n self.coeff = CellVariable(mesh=var.mesh, value=vec * self.underRelaxation)\n self.geomCoeff = None\n self.coeffVectors = None\n\n return _ExplicitSourceTerm._buildMatrix(self, var=var, SparseMatrix=SparseMatrix, boundaryConditions=boundaryConditions, dt=dt, transientGeomCoeff=transientGeomCoeff, diffusionGeomCoeff=diffusionGeomCoeff)\n","repo_name":"Rowin/fipy","sub_path":"fipy/terms/residualTerm.py","file_name":"residualTerm.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"13566290464","text":"'''\nCreated on Oct 25, 2016\n\n\n@author: jurgen\n'''\nimport datetime\nimport os\nimport re\nimport multiprocessing\nimport logging\nimport pprint\n\nfrom rinex_parser import constants as cc\n\nfrom rinex_parser.ext.convertdate.convertdate import year_doy\nfrom rinex_parser.logger import logger\nfrom rinex_parser.obs_header import Rinex2ObsHeader, Rinex3ObsHeader, RinexObsHeader\nfrom rinex_parser.obs_epoch import RinexEpoch\n\n# from celery.utils.log import get_task_logger\n\n\n# celery_logger = get_task_logger(__name__)\n# celery_logger.setLevel(logging.DEBUG)\ncelery_logger = logger\n\n__updated__ = \"2016-11-16\"\n\n\nclass RinexObsReader(object):\n \"\"\"\n Doc of Class RinexObsReader\n\n Args:\n datadict: {\n \"epochs\": [\n {\n \"id\": \"YYYY-mm-ddTHH:MM:SSZ\",\n \"satellites\": [\n {\n \"id\": \"\",\n \"observations\": {\n \"\": {\n \"value\": ..,\n \"lli\": ..,\n \"ss\": ..\n }\n }\n }, \n {\n \"id\": \"...\",\n \"observations\": {...}\n }\n ]\n }, \n {\n \"id\": \"...\"\n \"satellites\": [..]\n },\n {..}\n ]\n }\n \"\"\"\n\n RINEX_HEADER_CLASS = RinexObsHeader\n\n def __init__(self, **kwargs):\n self.header = self.RINEX_HEADER_CLASS()\n self.epochs = []\n self.datadict = {}\n self.backup_epochs = []\n self.rinex_obs_file = kwargs.get(\"rinex_obs_file\", \"\")\n self.rinex_epochs = kwargs.get(\"rinex_epochs\", [])\n self.rinex_date = kwargs.get(\n \"rinex_date\", datetime.datetime.now().date())\n\n @staticmethod\n def get_start_time(file_sequence):\n \"\"\"\n \"\"\"\n if file_sequence == \"0\":\n return datetime.time(0, 0)\n return datetime.time(ord(file_sequence.lower()) - 97, 0)\n\n @staticmethod\n def get_epochs_possible(file_sequence, interval):\n \"\"\"\n Get maximal epochs for rinex file sequence\n\n Args:\n file_sequence: str, [a-x0]\n interval: int, Rinex Epoch Interval\n\n Returns:\n int, Possible Epochs in File\n \"\"\"\n ef = datetime.datetime.combine(\n datetime.date.today(), Rinex2ObsReader.get_start_time(file_sequence))\n el = datetime.datetime.combine(\n datetime.date.today(), Rinex2ObsReader.get_end_time(file_sequence, interval))\n return int((el - ef).total_seconds() / interval) + 1\n\n @staticmethod\n def prepare_line(line):\n new_line = line.replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n if len(new_line) % 16 != 0:\n new_line += \" \" * (16 - len(new_line) % 16)\n return new_line\n
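# NOTE on the fixed-width RINEX2 layout handled by prepare_line above: each\n # observation occupies a 16-character field (value in columns 1-14, the\n # loss-of-lock indicator in column 15, signal strength in column 16), so the\n # line is padded to a multiple of 16 and sliced field by field in\n # read_satellite below. An illustrative example (digits made up):\n #\n # line = RinexObsReader.prepare_line(\" 23629347.915 8\")\n # assert len(line) % 16 == 0\n # value, lli, ss = line[0:14].strip(), line[14], line[15]\n 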
\n @staticmethod\n def get_end_time(file_sequence, interval):\n \"\"\"\n \"\"\"\n if file_sequence == \"0\":\n return datetime.time(23, 59, 60 - interval)\n return datetime.time(ord(file_sequence.lower()) - 97, 59, 60 - interval)\n\n @staticmethod\n def is_valid_filename(filename, rinex_version=2):\n \"\"\"\n Checks if filename is RINEX conformant\n \"\"\"\n rinex_version = float(rinex_version)\n if (rinex_version < 3) & (rinex_version >= 2):\n filename_regex = Rinex2ObsReader.RINEX_FILE_NAME_REGEX\n elif (rinex_version >= 3):\n filename_regex = Rinex3ObsReader.RINEX_FILE_NAME_REGEX\n else:\n return False\n return re.match(filename_regex, filename) is not None\n\n def correct_year2(self, year2):\n \"\"\"\n According to the RINEX Manual 2.10, chapter \"6.5 2-digit Years\"\n \"\"\"\n if year2 < 80:\n return year2 + 2000\n else:\n return year2 + 1900\n\n def do_thinning(self, interval):\n \"\"\"\n \"\"\"\n thinned_epochs = [epoch for epoch in self.rinex_epochs\n if epoch.get_day_seconds() % interval == 0]\n if len(self.backup_epochs) == 0:\n self.backup_epochs = self.rinex_epochs\n self.rinex_epochs = thinned_epochs\n\n def undo_thinning(self):\n \"\"\"\n \"\"\"\n self.rinex_epochs = self.backup_epochs\n self.backup_epochs = []\n\n def to_rinex2(self):\n \"\"\"\n\n \"\"\"\n out = \"\"\n for rinex_epoch in self.rinex_epochs:\n out += \"%s\\n\" % rinex_epoch.to_rinex2()\n return out\n\n def to_rinex3(self):\n \"\"\"\n\n \"\"\"\n out = \"\"\n for rinex_epoch in self.rinex_epochs:\n out += \"%s\\n\" % rinex_epoch.to_rinex3()\n return out\n\n def read_header(self, sort_obs_types=True):\n \"\"\"\n \"\"\"\n header = \"\"\n with open(self.rinex_obs_file, \"r\") as handler:\n for i, line in enumerate(handler):\n header += line\n if \"END OF HEADER\" in line:\n break\n self.header = self.RINEX_HEADER_CLASS.from_header(header_string=header)\n\n def add_satellite(self, satellite):\n \"\"\"\n Adds satellite to satellite list if not already added\n\n Args:\n satellite: Satid as str regexp '[GR][ \d]{2}'\n \"\"\"\n if satellite not in self.header.satellites:\n self.header.satellites[satellite] = 0\n self.header.satellites[satellite] += 1\n\n def has_satellite_system(self, sat_sys):\n \"\"\"\n Checks if a Satellite System is present or not\n\n Args:\n sat_sys: str, Satellite System \"GREJIS\"\n\n Returns: \n bool, True, if Satellite System is present, else False\n \"\"\"\n for epoch in self.rinex_epochs:\n if epoch.has_satellite_system(sat_sys):\n return True\n return False\n\n def add_epoch(self, epoch):\n \"\"\"\n Adds epoch to epoch list if not already added\n\n Args:\n epoch: Epoch as str '%Y-%m-%dT%H:%M:%SZ'\n \"\"\"\n if epoch not in self.epochs:\n self.epochs.append(epoch)\n\n\n def update_header_obs(self):\n \"\"\"\n Updates header information about first and last observation\n \"\"\"\n\n # First and Last Observation\n self.header.first_observation = self.rinex_epochs[0].timestamp\n self.header.last_observation = self.rinex_epochs[-1].timestamp\n\n def read_satellite(self, sat_id, line):\n raise NotImplementedError\n\n def read_data_to_dict(self):\n raise NotImplementedError\n\n\nclass Rinex2ObsReader(RinexObsReader):\n \"\"\"\n classdocs\n\n Args:\n datadict: {\n \"epochs\": [\n {\n \"id\": \"YYYY-mm-ddTHH:MM:SSZ\",\n \"satellites\": [\n {\n \"id\": \"[GR][0-9]{2},\n \"observations\": {\n \"[CLSPD][12]\": {\n \"value\": ..,\n \"lli\": ..,\n \"ss\": ..\n }\n }{1,}\n }\n ]\n }, \n {\n \"id\": \"...\"\n \"satellites\": [..]\n },\n {..}\n ]\n }\n \"\"\"\n RINEX_HEADER_CLASS = Rinex2ObsHeader\n RINEX_FILE_NAME_REGEX = 
r\"....\\d\\d\\d[a-x0]\\.\\d\\d[oO]\"\n RINEX_FORMAT = 2\n RINEX_DATELINE_REGEXP = cc.RINEX2_DATELINE_REGEXP\n RINEX_DATELINE_REGEXP_SHORT = cc.RINEX2_DATELINE_REGEXP_SHORT\n RINEX_SATELLITES_REGEXP = cc.RINEX2_SATELLITES_REGEXP\n\n def __init__(self, **kwargs):\n '''\n Constructor\n '''\n super(Rinex2ObsReader, self).__init__(**kwargs)\n\n def set_rinex_obs_file(self, rinex_obs_file):\n self.rinex_obs_file = rinex_obs_file\n self.station_doy_session = os.path.basename(\n self.rinex_obs_file).split(\".\")[0]\n assert self.__class__.is_valid_filename(\n os.path.basename(self.rinex_obs_file), self.header.format_version)\n self.station = self.station_doy_session[:4]\n self.doy = int(self.station_doy_session[4:7])\n year2 = int(self.rinex_obs_file.split(\".\")[-1][:2])\n self.year = self.correct_year2(year2)\n\n self.rinex_file_sequence = self.station_doy_session[7]\n self.epochs = []\n self.datadict = {}\n self.backup_epochs = []\n\n def read_satellite(self, sat_id, line):\n \"\"\"\n Parses trough rnx observation and creates dict. Referring to the RINEX Handbook 2.10\n there are only up to 5 observation types per line. This method parses any line length \n\n Args:\n sat_id: str satellite number/name\n line: str rnx line containing observations\n Returns:\n dict: {sat_id: {otk1: otv1, otk2: otv2, ... otkn: otvn}}\n \"\"\"\n\n sat_dict = {\"id\": sat_id, \"observations\": {}}\n for k in range(len(self.header.observation_types)):\n obs_type = self.header.observation_types[k]\n obs_col = line[(16 * k):(16 * (k + 1))]\n obs_val = obs_col[:14].strip()\n\n if obs_val == \"\":\n obs_val = None\n else:\n float(obs_val)\n\n if len(obs_col) < 15:\n obs_lli = 0\n else:\n obs_lli = obs_col[14].strip()\n if obs_lli == \"\":\n obs_lli = 0\n else:\n obs_lli = int(obs_lli)\n\n if len(obs_col) < 16:\n obs_ss = 0\n else:\n obs_ss = obs_col[15].strip()\n if obs_ss == \"\":\n obs_ss = 0\n else:\n obs_ss = int(obs_ss)\n\n sat_dict[\"observations\"].update(\n {\n obs_type + \"_value\": obs_val,\n obs_type + \"_lli\": obs_lli,\n obs_type + \"_ss\": obs_ss\n }\n )\n return sat_dict\n\n def read_data_to_dict(self):\n \"\"\"\n \"\"\"\n # SKIP HEADER\n with open(self.rinex_obs_file, \"r\") as handler:\n \n # for i, line in enumerate(handler):\n # if 'END OF HEADER' in line:\n # break\n # del i\n # i = 0\n rinex_obs = {\n \"epochs\": [], \n \"fileName\": self.rinex_obs_file,\n \"year4\": self.year,\n \"doy\": self.doy,\n \"markerName\": self.header.marker_name,\n \"epochInterval\": self.header.interval,\n \"epochFirst\": None,\n \"epochLast\": None\n } \n end_of_header = False\n #with open(self.rinex_obs_file, \"r\") as handler:\n if True:\n while True:\n\n # Check for END_OF_FILE\n line = handler.readline()\n if \"END OF HEADER\" in line:\n celery_logger.debug(\"End of Header Reached\")\n end_of_header = True\n if not end_of_header:\n continue\n if line == \"\":\n break\n\n # Get DateLine\n r = re.search(self.RINEX_DATELINE_REGEXP, line)\n if r is not None:\n timestamp = datetime.datetime(\n self.correct_year2(year2=int(r.group(\"year2\"))),\n int(r.group(\"month\")),\n int(r.group(\"day\")),\n int(r.group(\"hour\")),\n int(r.group(\"minute\")),\n int(float(r.group(\"second\")))\n )\n epoch = timestamp.strftime(\"%FT%TZ\")\n self.add_epoch(epoch)\n\n rnx_epoch = {\n \"id\": epoch,\n \"satellites\": [],\n }\n sats = r.group('sat1').strip()\n # Number of Satellites\n nos = int(r.group(\"nos\"))\n if nos == 0:\n continue\n\n additional_lines = int((nos-1)/12 % 12)\n for j in range(additional_lines):\n line = 
handler.readline()\n r2 = re.search(self.RINEX_DATELINE_REGEXP_SHORT, line)\n if r2 is not None:\n sats += r2.group('sat2').strip()\n\n # Get Observation Data\n for j in range(nos):\n # i += 1\n sat_num = sats[(3 * j):(3 * (j + 1))]\n self.add_satellite(sat_num)\n\n raw_obs = \"\"\n for k in range(1 + int(len(self.header.observation_types) / 5)):\n raw_obs = \"%s%s\" % (\n raw_obs, self.prepare_line(handler.readline()))\n\n rnx_epoch[\"satellites\"].append(\n self.read_satellite(\n sat_id=sat_num, line=raw_obs)\n )\n\n # Sort Satellites within epoch\n rnx_epoch[\"satellites\"] = sorted(\n rnx_epoch[\"satellites\"], key=lambda sat: sat[\"id\"])\n\n rinex_obs[\"epochs\"].append(rnx_epoch)\n rinex_epoch = RinexEpoch(\n timestamp=datetime.datetime.strptime(\n rnx_epoch[\"id\"], cc.RNX_FORMAT_DATETIME),\n observation_types=self.header.observation_types,\n satellites=rnx_epoch[\"satellites\"],\n rcv_clock_offset=self.header.rcv_clock_offset\n )\n if rinex_epoch.is_valid():\n self.rinex_epochs.append(rinex_epoch)\n\n if len(rinex_obs[\"epochs\"]) > 0:\n rinex_obs[\"epochFirst\"] = rinex_obs[\"epochs\"][0][\"id\"]\n rinex_obs[\"epochLast\"] = rinex_obs[\"epochs\"][-1][\"id\"]\n self.datadict = rinex_obs\n logger.debug(\"Successfully created data dict\")\n\n\nclass Rinex3ObsReader(RinexObsReader):\n\n \"\"\"\n classdocs\n\n Args:\n datadict: {\n \"epochs\": [\n {\n \"id\": \"YYYY-mm-ddTHH:MM:SSZ\",\n \"satellites\": [\n {\n \"id\": \"[GR][0-9]{2},\n \"observations\": {\n \"[CLSPD][1258][ACPQW]\": {\n \"value\": ..,\n \"lli\": ..,\n \"ss\": ..\n }\n }{1,}\n }\n ]\n }, \n {\n \"id\": \"...\"\n \"satellites\": [..]\n },\n {..}\n ]\n }\n \"\"\"\n\n RINEX_FORMAT = 3\n RINEX_HEADER_CLASS = Rinex3ObsHeader\n RINEX_DATELINE_REGEXP = cc.RINEX3_DATELINE_REGEXP\n RINEX_DATELINE_REGEXP_SHORT = cc.RINEX3_DATELINE_REGEXP\n RINEX_SATELLITES_REGEXP = cc.RINEX3_SATELLITES_REGEXP\n RINEX_FILE_NAME_REGEX = cc.RINEX3_FORMAT_FILE_NAME\n\n def __init__(self, **kwargs):\n '''\n Constructor, use the same as Rinex2ObsReader\n '''\n super(Rinex3ObsReader, self).__init__(**kwargs)\n # assert self.is_valid_filename(\n # os.path.basename(self.rinex_obs_file), self.header.format_version)\n # m = re.match(self.RINEX_FILE_NAME_REGEX, os.path.basename(self.rinex_obs_file))\n\n # d = m.groupdict()\n # self.station = d[\"station\"]\n # self.doy = int(d[\"doy\"])\n # self.year = int(d[\"year4\"])\n # self.file_period = d[\"file_period\"]\n # self.rinex_file_sequence = -1 # g[6]\n\n def set_rinex_obs_file(self, rinex_obs_file):\n self.rinex_obs_file = rinex_obs_file\n\n assert self.is_valid_filename(\n os.path.basename(self.rinex_obs_file), self.header.format_version)\n m = re.match(self.RINEX_FILE_NAME_REGEX, os.path.basename(self.rinex_obs_file))\n\n d = m.groupdict()\n self.station = d[\"station\"]\n self.doy = int(d[\"doy\"])\n self.year = int(d[\"year4\"])\n self.file_period = d[\"file_period\"]\n self.rinex_file_sequence = -1 # g[6]\n \n self.rinex_obs_file = rinex_obs_file\n\n self.epochs = []\n self.datadict = {}\n self.backup_epochs = []\n\n @staticmethod\n def is_valid_filename(filename, rinex_version=3):\n \"\"\"\n Checks if filename is rinex conform\n \"\"\"\n rinex_version = float(rinex_version)\n if rinex_version >= 3.0:\n filename_regex = Rinex3ObsReader.RINEX_FILE_NAME_REGEX\n else:\n return False\n m = re.match(filename_regex, filename)\n return m is not None\n\n def read_data_to_dict(self):\n \"\"\"\n \"\"\"\n # SKIP HEADER\n with open(self.rinex_obs_file, \"r\") as handler:\n for i, line in enumerate(handler):\n if 
'END OF HEADER' in line:\n break\n # del i\n i = 0\n rinex_obs = {\n \"epochs\": [], \n \"fileName\": self.rinex_obs_file,\n \"year4\": self.year,\n \"doy\": self.doy,\n \"markerName\": self.station,\n \"epochPeriod\": self.file_period,\n \"epochInterval\": self.header.interval,\n \"epochFirst\": None,\n \"epochLast\": None\n } \n with open(self.rinex_obs_file, \"r\") as handler:\n while True:\n\n # Check for END_OF_FILE\n line = handler.readline()\n if line == \"\":\n break\n # Get DateLine\n r = re.search(self.RINEX_DATELINE_REGEXP, line)\n if r is not None:\n # logger.debug(\"Found Date\")\n timestamp = datetime.datetime(\n int(r.group(\"year4\")),\n int(r.group(\"month\")),\n int(r.group(\"day\")),\n int(r.group(\"hour\")),\n int(r.group(\"minute\")),\n int(float(r.group(\"second\")))\n )\n epoch = timestamp.strftime(\"%FT%TZ\")\n self.add_epoch(epoch)\n\n rnx_epoch = {\n \"id\": epoch,\n \"satellites\": [],\n }\n\n epoch_flag = r.group(\"epoch_flag\")\n if epoch_flag not in [\"0\", \"1\"]:\n logger.info(\"Special event: {}\".format(epoch_flag))\n\n # Number of Satellites\n nos = int(r.group(\"num_of_sats\"))\n # celery_logger.debug(\"Number of Sats: {}\".format(nos))\n # epoch_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n # epoch_satellites = [handler.readline() for j in range(nos)]\n\n # pool_results = epoch_pool.map(self.read_epoch_satellite, [handler.readline() for j in range(nos)])\n # for result in pool_results:\n # celery_logger.debug(result)\n # if result:\n # self.add_satellite(result[\"sat_num\"])\n # rnx_epoch[\"satellites\"].append(\n # result[\"sat_data\"]\n # )\n\n for j in range(nos):\n line = handler.readline()\n epoch_sat = self.read_epoch_satellite(line)\n if epoch_sat:\n self.add_satellite(\"sat_num\")\n rnx_epoch[\"satellites\"].append(\n epoch_sat[\"sat_data\"]\n )\n else:\n celery_logger.debug(\"No Data\")\n\n # Sort Satellites within epoch\n rnx_epoch[\"satellites\"] = sorted(\n rnx_epoch[\"satellites\"], key=lambda sat: sat[\"id\"])\n\n rinex_obs[\"epochs\"].append(rnx_epoch)\n\n if len(rinex_obs[\"epochs\"]) > 0:\n rinex_obs[\"epochFirst\"] = rinex_obs[\"epochs\"][0][\"id\"]\n rinex_obs[\"epochLast\"] = rinex_obs[\"epochs\"][-1][\"id\"]\n self.datadict = rinex_obs\n logger.debug(\"Successfully created data dict\")\n\n def read_epoch_satellite(self, line):\n \"\"\"\n\n \"\"\"\n sat_data = re.search(cc.RINEX3_DATA_OBSEVATION_REGEXP, line)\n # Get Observation Data\n if sat_data is not None:\n sat_num = sat_data.group(\"sat_num\")\n self.add_satellite(sat_num)\n return {\n \"sat_num\": sat_num, \n \"sat_data\": self.read_satellite(sat_id=sat_num, line=line)\n }\n return {}\n\n def read_satellite(self, sat_id, line):\n \"\"\"\n Parses trough rnx observation and creates dict. Referring to the RINEX Handbook 3.03\n \n Args:\n sat_id: str satellite number/name\n line: str rnx line containing observations\n Returns:\n dict: {sat_id: {otk1: otv1, otk2: otv2, ... 
otkn: otvn}}\n \"\"\"\n\n all_obs = []\n m = re.match(cc.RINEX3_DATA_OBSEVATION_REGEXP, line)\n if m:\n regexp_dict = m.groupdict()\n if \"first_o\" in regexp_dict and regexp_dict[\"first_o\"] is not None:\n keys = [\"value\", \"lli\", \"ss\"]\n for n in re.finditer(cc.RINEX3_MULTIPLE_OBS_REGEXP, line):\n d = {}\n n_filter = n.groups()[1:]\n for i in range(len(n_filter)):\n vs = n_filter[i].strip()\n v = None if vs == \"\" else float(vs)\n k = keys[i]\n d.update({k: v})\n all_obs.append(d)\n if \"last_o\" in regexp_dict and regexp_dict[\"last_o\"] is not None:\n d = {\n \"lli\": None, \n \"ss\": None,\n \"value\": float(regexp_dict[\"last_o\"])\n }\n all_obs.append(d)\n\n sat_dict = {\"id\": sat_id, \"observations\": {}}\n d = {}\n\n sat_sys = sat_dict[\"id\"][0]\n\n for i in range(len(all_obs)):\n obs_descriptor = self.header.sys_obs_types[sat_sys][\"obs_types\"][i]\n for k in [\"value\", \"lli\", \"ss\"]:\n d[\"{}_{}\".format(obs_descriptor, k)] = all_obs[i][k]\n\n sat_dict[\"observations\"].update(d)\n return sat_dict\n","repo_name":"gjliesen/AEM417-Project-3","sub_path":"venv/Lib/site-packages/rinex_parser/obs_reader.py","file_name":"obs_reader.py","file_ext":"py","file_size_in_byte":23751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72039277655","text":"import csv\nimport os\nfrom typing import Tuple, List\n\nfrom pip._internal.models.scheme import SCHEME_KEYS\nfrom pip._internal.operations.install.wheel import (\n _fs_to_record_path,\n is_within_directory,\n chain,\n InstallationError\n)\n\nfrom wheel_axle.runtime._common import Installer\nfrom wheel_axle.runtime.constants import SYMLINKS_FILE\n\nSymlink = Tuple[str, str, bool]\nSymlinks = List[Symlink]\n\n\ndef write_symlinks_file(symlinks_file: str, symlinks: Symlinks) -> None:\n with open(symlinks_file, \"w\") as f:\n writer = csv.writer(f)\n writer.writerows([symlink[0].replace(os.sep, \"/\"),\n symlink[1].replace(os.sep, \"/\"),\n int(symlink[2])]\n for symlink in symlinks)\n\n\ndef read_symlinks_file(symlinks_file: str) -> Symlinks:\n with open(symlinks_file, \"r\") as f:\n reader = csv.reader(f)\n results = []\n for row in reader:\n results.append((row[0].replace(\"/\", os.sep),\n row[1].replace(\"/\", os.sep),\n bool(int(row[2]))))\n return results\n\n\nclass SymlinksInstaller(Installer):\n def install(self) -> None:\n symlinks_path = os.path.join(self.dist_info_dir, SYMLINKS_FILE)\n\n if not os.path.exists(symlinks_path):\n return\n\n symlinks = read_symlinks_file(symlinks_path)\n self._install_symlinks(symlinks)\n\n def _install_symlinks(self, symlinks: Symlinks):\n scheme_paths = {key: getattr(self.scheme, key) for key in SCHEME_KEYS}\n symlinking_paths = list(set(chain((self.lib_dir,), scheme_paths.values())))\n real_symlinking_paths = list(map(os.path.realpath, symlinking_paths))\n\n def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None:\n if not is_within_directory(dest_dir_path, target_path):\n message = (\n \"The distribution {!r} has a file {!r} trying to install\"\n \" outside the target directory {!r}\"\n )\n raise InstallationError(\n message.format(self.dist_meta.project_name, target_path, dest_dir_path)\n )\n\n def assert_no_path_symlinking(symlink_path: str, symlink_target: str) -> None:\n for real_symlinking_path in real_symlinking_paths:\n if is_within_directory(real_symlinking_path, symlink_target):\n return\n\n message = (\n \"The distribution {!r} has a symlink {!r} trying to link to {!r}\"\n \" outside the allowed 
target directories {!r}\"\n )\n raise InstallationError(\n message.format(self.dist_meta.project_name, symlink_path, symlink_target,\n \",\".join(real_symlinking_paths))\n )\n\n def make_root_scheme_path(dest: str, record_path: str) -> Tuple[str, str]:\n normed_path = os.path.normpath(record_path)\n dest_path = os.path.join(dest, normed_path)\n assert_no_path_traversal(dest, dest_path)\n return dest, dest_path\n\n def make_data_scheme_path(record_path: str) -> Tuple[str, str]:\n normed_path = os.path.normpath(record_path)\n try:\n _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)\n except ValueError:\n message = (\n \"Unexpected file in {}: {!r}. .data directory contents\"\n \" should be named like: '/'.\"\n ).format(self.dist_meta.project_name, record_path)\n raise InstallationError(message)\n\n try:\n scheme_path = scheme_paths[scheme_key]\n except KeyError:\n valid_scheme_keys = \", \".join(sorted(scheme_paths))\n message = (\n \"Unknown scheme key used in {}: {} (for file {!r}). .data\"\n \" directory contents should be in subdirectories named\"\n \" with a valid scheme key ({})\"\n ).format(self.dist_meta.project_name, scheme_key, record_path, valid_scheme_keys)\n raise InstallationError(message)\n\n dest_path = os.path.join(scheme_path, dest_subpath)\n assert_no_path_traversal(scheme_path, dest_path)\n return scheme_path, dest_path\n\n def is_data_scheme_path(path: str) -> bool:\n return path.split(\"/\", 1)[0].endswith(\".data\")\n\n for symlink in symlinks:\n symlink_path, symlink_target, is_symlink_dir = symlink\n if is_data_scheme_path(symlink_path):\n # Data scheme\n scheme_path, norm_symlink_path = make_data_scheme_path(symlink_path)\n else:\n # Root scheme\n scheme_path, norm_symlink_path = make_root_scheme_path(self.lib_dir, symlink_path)\n\n # This has to be done in order one by one, because previously created symlinks\n # will affect normpath resolution\n real_symlink_target = os.path.realpath(\n os.path.join(os.path.dirname(norm_symlink_path), symlink_target))\n\n # Here we will compare real paths\n assert_no_path_symlinking(norm_symlink_path, real_symlink_target)\n\n # os.path.exists doesn't work with broken symlinks, returns False\n # so we check if something is link or it otherwise exists\n if os.path.islink(norm_symlink_path) or os.path.exists(norm_symlink_path):\n os.unlink(norm_symlink_path)\n\n # We preserve symlink target as de-normalized, while checking traversal as real\n os.symlink(symlink_target, norm_symlink_path, is_symlink_dir)\n self.record_installed(norm_symlink_path, norm_symlink_path, False)\n\n def record_installed(self,\n srcfile: str,\n destfile: str,\n modified: bool = False\n ) -> None:\n \"\"\"Map archive RECORD paths to installation RECORD paths.\"\"\"\n newpath = _fs_to_record_path(destfile, self.lib_dir)\n self.installed[newpath] = newpath\n if modified:\n self.changed.add(_fs_to_record_path(destfile))\n","repo_name":"karellen/wheel-axle-runtime","sub_path":"src/main/python/wheel_axle/runtime/_symlinks.py","file_name":"_symlinks.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20322524051","text":"from remark.lib.spreadsheets import (\n CurrencyCell,\n DateCell,\n find_col,\n find_row,\n FloatCell,\n IntCell,\n row_range,\n StrCell,\n)\n\nfrom .base import ProjectExcelImporter\n\nfrom datetime import timedelta\n\n\ndef find(predicate):\n return find_row(\"OUTPUT!A\", predicate, target=\"B\")\n\n\ndef model(predicate):\n return 
find_col(\"MODEL!2\", predicate)\n\n\nclass ModelingImporter(ProjectExcelImporter):\n expected_type = \"model\"\n expected_version = 1\n\n OUTPUT_SCHEMA = {\n \"name\": StrCell(find(\"model name\")),\n \"dates\": {\n \"start\": DateCell(find(\"start date\")),\n \"end\": DateCell(find(\"end date\")),\n },\n \"property\": {\n \"average_monthly_rent\": CurrencyCell(find(\"average rent\")),\n \"lowest_monthly_rent\": CurrencyCell(find(\"lowest rent\")),\n \"cost_per_exe_vs_rent\": FloatCell(find(\"cost per exe vs rent\")),\n \"total_units\": IntCell(find(\"total units\")),\n \"leasing\": {\n \"change\": IntCell(find(\"leasing change\")),\n \"cds\": IntCell(find(\"cancels & denials\")),\n \"cd_rate\": FloatCell(find(\"cd rate\")),\n \"renewal_notices\": IntCell(find(\"renewal notices\")),\n \"renewals\": IntCell(find(\"renewals\")),\n \"renewal_rate\": FloatCell(find(\"renewal rate\")),\n \"resident_decisions\": IntCell(find(\"resident decisions\")),\n \"vacation_notices\": IntCell(find(\"vacation notices\")),\n \"rate\": FloatCell(find(\"leasing rate\")),\n \"units\": IntCell(find(\"lease units\")),\n },\n \"occupancy\": {\n \"move_ins\": IntCell(find(\"move ins\")),\n \"move_outs\": IntCell(find(\"move outs\")),\n \"rate\": FloatCell(find(\"occupancy rate\")),\n \"units\": IntCell(find(\"occupancy units\")),\n \"occupiable\": IntCell(find(\"occupiable units\")),\n },\n },\n \"funnel\": {\n \"volumes\": {\n \"usv\": IntCell(find(\"usv volume\")),\n \"inq\": IntCell(find(\"inq volume\")),\n \"tou\": IntCell(find(\"tou volume\")),\n \"app\": IntCell(find(\"app volume\")),\n \"exe\": IntCell(find(\"exe volume\")),\n },\n \"costs\": {\n \"usv\": CurrencyCell(find(\"usv cost\")),\n \"inq\": CurrencyCell(find(\"inq cost\")),\n \"tou\": CurrencyCell(find(\"tou cost\")),\n \"app\": CurrencyCell(find(\"app cost\")),\n \"exe\": CurrencyCell(find(\"exe cost\")),\n },\n \"conversions\": {\n \"usv_inq\": FloatCell(find(\"usv conversions\")),\n \"inq_tou\": FloatCell(find(\"inq conversions\")),\n \"tou_app\": FloatCell(find(\"tou conversions\")),\n \"app_exe\": FloatCell(find(\"app conversions\")),\n \"usv_exe\": FloatCell(find(\"usv_exe conversions\")),\n },\n },\n \"four_week_funnel_averages\": {\n \"usv\": IntCell(find(\"usv 4 week\")),\n \"inq\": IntCell(find(\"inq 4 week\")),\n \"tou\": IntCell(find(\"tou 4 week\")),\n \"app\": IntCell(find(\"app 4 week\")),\n \"exe\": IntCell(find(\"exe 4 week\")),\n },\n \"investment\": {\n \"acquisition\": {\n \"expenses\": {\n \"demand_creation\": CurrencyCell(\n find(\"acquisition demand creation\")\n ),\n \"leasing_enablement\": CurrencyCell(\n find(\"acquisition leasing enablement\")\n ),\n \"market_intelligence\": CurrencyCell(\n find(\"acquisition market intelligence\")\n ),\n \"reputation_building\": CurrencyCell(\n find(\"acquisition reputation building\")\n ),\n },\n \"total\": CurrencyCell(find(\"acquisition total\")),\n \"romi\": IntCell(find(\"acquisition romi\")),\n \"estimated_revenue_gain\": CurrencyCell(\n find(\"acquisition revenue gain\")\n ),\n },\n \"retention\": {\n \"expenses\": {\n \"demand_creation\": CurrencyCell(find(\"retention demand creation\")),\n \"leasing_enablement\": CurrencyCell(\n find(\"retention leasing enablement\")\n ),\n \"market_intelligence\": CurrencyCell(\n find(\"retention market intelligence\")\n ),\n \"reputation_building\": CurrencyCell(\n find(\"retention reputation building\")\n ),\n },\n \"total\": CurrencyCell(find(\"retention total\")),\n \"romi\": IntCell(find(\"retention romi\")),\n \"estimated_revenue_gain\": 
CurrencyCell(find(\"retention revenue gain\")),\n },\n \"total\": {\n \"total\": CurrencyCell(find(\"total total\")),\n \"romi\": IntCell(find(\"total romi\")),\n \"estimated_revenue_gain\": CurrencyCell(find(\"total revenue gain\")),\n },\n },\n }\n\n MODEL_SCHEMA = {\n \"start\": DateCell(model(\"week start\")),\n \"target_leased_rate\": FloatCell(model(\"lease up %\")),\n \"target_lease_applications\": IntCell(model(\"apps\")),\n \"target_leases_executed\": IntCell(model(\"exe\")),\n \"target_lease_renewal_notices\": IntCell(model(\"notice to renew\")),\n \"target_lease_renewals\": IntCell(model(\"renewals\")),\n \"target_lease_vacation_notices\": IntCell(model(\"notice to vacate\")),\n \"target_lease_cds\": IntCell(model(\"c/d\")),\n \"target_delta_leases\": IntCell(model(\"weekly delta leased units\")),\n \"target_move_ins\": IntCell(model(\"move ins\")),\n \"target_move_outs\": IntCell(model(\"move outs\")),\n \"target_occupied_units\": IntCell(model(\"occupied units\")),\n \"target_acq_expenses\": {\n \"demand_creation\": CurrencyCell(model(\"aqc demand\")),\n \"leasing_enablement\": CurrencyCell(model(\"aqc leasing\")),\n \"market_intelligence\": CurrencyCell(model(\"aqc market\")),\n \"reputation_building\": CurrencyCell(model(\"aqc reputation\")),\n },\n \"target_ret_expenses\": {\n \"demand_creation\": CurrencyCell(model(\"ret demand\")),\n \"leasing_enablement\": CurrencyCell(model(\"ret leasing\")),\n \"market_intelligence\": CurrencyCell(model(\"ret market\")),\n \"reputation_building\": CurrencyCell(model(\"ret reputation\")),\n },\n \"target_usvs\": IntCell(model(\"usvs\")),\n \"target_inquiries\": IntCell(model(\"inqs\")),\n \"target_tours\": IntCell(model(\"tou\")),\n }\n\n def clean_output_data(self):\n return self.schema(self.OUTPUT_SCHEMA)\n\n def clean_model_targets(self):\n start_row = 4\n end_row = self.schema(\n IntCell(find_row(\"INPUTS!A\", \"Model End Row\", target=\"B\"))\n )\n raw_targets = self.schema_list(\n schema=self.MODEL_SCHEMA, start=start_row, end=end_row\n )\n\n # Add the end dates for each of the targets\n for target in raw_targets:\n start = target[\"start\"]\n target[\"end\"] = start + timedelta(days=7)\n\n # Fix up the total investment targets\n for raw_target in raw_targets:\n for category in [\"acq\", \"ret\"]:\n raw_target[f\"target_{category}_investment\"] = (\n raw_target[f\"target_{category}_expenses\"][\"demand_creation\"]\n + raw_target[f\"target_{category}_expenses\"][\"leasing_enablement\"]\n + raw_target[f\"target_{category}_expenses\"][\"market_intelligence\"]\n + raw_target[f\"target_{category}_expenses\"][\"reputation_building\"]\n )\n\n # Drop the extraneous period.\n return raw_targets\n\n def clean(self, ctx):\n super().clean(ctx)\n\n self.cleaned_data = self.clean_output_data()\n self.cleaned_data[\"targets\"] = self.clean_model_targets()\n","repo_name":"konaindev/React-Django","sub_path":"remark/projects/spreadsheets/importers/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30407651467","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 18 23:05:30 2015\n@author: Brenda_Brandy\n\"\"\"\nfrom __future__ import division, print_function\nfrom numpy import array, arange, sqrt, log, arcsin, std\nfrom pylab import plot, show, axhline\nfrom math import pi\n\nx = []\ny = []\nz = []\nt_i \t= \t0.0\nt_f \t= \t4000.0\nN \t\t= \t200000\nh \t\t=\t(t_f-t_i) / N\ntpoints = 
\tarange(t_i,t_f,h)\ncutoff = 5.0/6.0\n#Takes indecies of two points on the attractor\ndef dist(a,b):\n return sqrt((x[a]-x[b])**2 + (y[a]-y[b])**2 + (z[a]-z[b])**2)\n\n#Takes vectors v1 and v2\ndef crossmag(x1,x2,y1,y2,z1,z2):\n i = y1*z2 - z1*y2\n j = z1*x2 - x1*z2\n k = x1*y2 - y1*x2\n crossprod = sqrt(i**2 + j**2 + k**2)\n v2 = sqrt(x2**2 + y2**2 + z2**2)\n v1 = sqrt(x1**2 + y1**2 + z1**2)\n if v1 == 0 or v2 ==0:\n return 0\n else:\n return(crossprod / (v1*v2))\n \n#takes index of the compared point, returns index of closest point not on the same cycle, j\ndef test(index): \n d = 1.0\n final_crit = 100\n for j in range (0 , index - 1000):\n distance= dist(index,j)\n theta = arcsin(crossmag(x[index]-x[j],vx,y[index]-y[j],vy,z[index]-z[j],vz)) \n criteria = distance * (theta**2)\n if (dist(index,j) < d and theta < (pi/6)):\n if criteria < final_crit:\n d = dist(index,j)\n n = j\n for j in range (index + 1000 , int(N*cutoff)):\n distance= dist(index,j)\n theta = arcsin(crossmag(x[index]-x[j],vx,y[index]-y[j],vy,z[index]-z[j],vz)) \n criteria = distance * (theta**2)\n if (dist(index,j) < d and theta < (pi/6)):\n if criteria < final_crit:\n d = dist(index,j)\n n = j\n #print(crossmag(x[index]-x[n],vx,y[index]-y[n],vy,z[index]-z[n],vz))\n return n\n\t\ndef g(v):\n\tif v > -1 and v < 1:\n\t\treturn -0.8* v\n\telif v > 1: #if v is bigger than 1\n\t\treturn -0.8 - 0.5*(v-1.0)\n\telse: #if v is less than -1\n\t\treturn 0.8 - 0.5*(v+1.0)\n\t\n\t\nr_array = array([ -1.7713,0.0527854,1.74606 ],float)\nc1 = 1.0 / 9.0\nc2 = 1.0 \nG = 0.7\nL = 1.0 / 7.0\ndef diff(r_array,t):\n x \t= r_array[0]\n y \t= r_array[1]\n z \t= r_array[2]\n fx \t= (G*(y-x) - g(x) ) / c1\t#dx/dt\n fy \t= (G*(x - y) + z) / c2\t\t#dy/dt\n fz \t= - y / L\t\t\t\t\t#dz/dt\n return array([fx,fy,fz],float)\n#fourth order RK loop\nfor t in tpoints:\n\tx.append(r_array[0])\n\ty.append(r_array[1])\n\tz.append(r_array[2])\n\tk1 = h*diff(r_array,t)\n\tk2 = h*diff(r_array+0.5*k1, t+0.5*h)\n\tk3 = h*diff(r_array+0.5*k2, t+0.5*h)\n\tk4 = h*diff(r_array+k3, t+h)\n\t \n\tr_array += (k1+2*k2+2*k3+k4)/6.0\n\t\n#plot (x,y)\n#show()\n(xmin,xmax,ymin,ymax,zmin,zmax) = min(x),max(x),min(y),max(y),min(z),max(z)\nx_range = xmax - xmin\ny_range = ymax - ymin\nz_range = zmax - zmin\nmax_d = sqrt(x_range**2 + y_range**2 + z_range**2)\nlim_d = 0.05 * max_d\nprint(lim_d)\nseparation = []\nrunning_average_list = []\nd_test = 1000\nk = test(1000)\nvx = x[1000] - x[k]\nvy = y[1000] - y[k]\nvz = z[1000] - z[k]\nL = 0\ndi = dist(1000,k)\nti = 1000*h\n\n\nfor i in range(1000,int(cutoff*N)):\n d_test = dist(i,k) #check distance between fudicial and comparison point\n if d_test > lim_d: #if distance between two vectors bigger than limit distance\n tf = i*h\n df = dist(i,k)\n vx = x[i] - x[k]\n vy = y[i] - y[k]\n vz = z[i] - z[k]\n L = L + log(df/di)\n running_average = L / tf\n k = test(i)\n di = dist(i,k)\n ti = i*h\n #print(\"df: \",df)\n print(\"\")\n print(\"init_pt: \",i)\n print(\"comp_pt: \",k)\n running_average_list.append(running_average)\n print(\"running_average: \",running_average)\n #print(\"di: \", di)\n #print(\"df/di: \",df/di)\n separation.append(dist(i,k))\n k += 1\n\naverage = L/((cutoff*t_f)-1000*h)\nprint(\"Average Lambda is \", average)\nplot(running_average_list)\naxhline(y=average)\naxhline(y=0.23, color = 
'r')\nshow()","repo_name":"brendabrandy/chuacircuit","sub_path":"chuacircuit_lambda2.py","file_name":"chuacircuit_lambda2.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"3618395937","text":"'''Debugging'''\n\n\n# General Tips\n# -----------------------------------------------------------------------------\n# Though not very advanced, dropping print() statements and other reflection-\n# related functions can tell you a lot about what's going on. Beyond that:\n\n# print the values of your local variables with:\n\nprint(vars())\n\n# If you run your program with an -i flag, it will drop you into the\n# interactive interpreter if the program fails:\n\n# $ python3 -i myfile.py\n\n\n# Standard Library: pdb module\n# -----------------------------------------------------------------------------\n# Bug test: read a file of countries and their capital cities, separated by a\n# comma, and write them out as capital, country. In addition:\n# – Make sure they're capitalized correctly\n# – Remove any extra spaces\n# – Stop the program if we find the word quit (uppercase or lowercase)\n\n# Here's a sample data file data/cities.csv:\n'''\nFrance, Paris\n venuzuela,caracas\n LithuniA,vilnius\n argentina,buenos aires\n bolivia,la paz\n brazil,brasilia\n chile,santiago\n colombia,Bogotá\n ecuador,quito\n falkland islands,stanley\nfrench guiana,cayenne\nguyana,georgetown\n paraguay,Asunción\n peru,lima\nsuriname,paramaribo\nuruguay,montevideo\nvenezuela,caracas\n quit\n'''\n\n# Here's pseudocode:\n'''\nfor each line in the text file:\n    read the line\n    strip leading and trailing spaces\n    if 'quit' occurs in a lowercase copy of the line:\n        stop\n    else:\n        split the country and capital by the comma character\n        trim any leading and trailing spaces\n        convert the country and capital to titlecase\n        print the capital, a comma, and the country\n'''\n\n\n# Here's the actual code:\ndef process_cities(filename):\n    with open(filename, 'rt') as file:\n        for line in file:\n            line = line.strip()\n            if 'quit' in line.lower():  # should be if 'quit' == line.lower():\n                return\n            country, city = line.split(',')\n            city = city.strip()\n            country = country.strip()\n            print(city.title(), country.title(), sep=',')\n\n\n# import pdb; pdb.set_trace()  # this will launch the debugger\nbreakpoint()  # this will launch the debugger as of Python 3.7\n\nprocess_cities('data/cities.csv')\n\n\n# pdb Commands\n# -----------------------------------------------------------------------------\n# Documented commands (type help <topic>):\n\n# EOF    c          d        h         list      q        rv       undisplay\n# a      cl         debug    help      ll        quit     s        unt\n# alias  clear      disable  ignore    longlist  r        source   until\n# args   commands   display  interact  n         restart  step     up\n# b      condition  down     j         next      return   tbreak   w\n# break  cont       enable   jump      p         retval   u        whatis\n# bt     continue   exit     l         pp        run      unalias  where\n\n# type 'c' to continue (the program will run until it ends normally or via\n# an Error/Exception).
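#\n# For example (an editor's sketch -- the session transcript below is\n# illustrative, not captured from a real run):\n#\n#   $ python3 -m pdb debugging.py\n#   (Pdb) b 68        <- set a breakpoint at line 68 (inside the loop)\n#   (Pdb) c           <- continue until the breakpoint is hit\n#   (Pdb) p line      <- print the current value of the variable 'line'\n#   'ecuador,quito'\n#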
# If it ends normally, we know it's a logic error\n# rather than a syntax error.\n# type 's' to single step through lines and into functions/methods\n# (this will include any modules you might be using like sys)\n# type 'n' to step past a function/method (execute and move onto the next).\n# Generally speaking you would use 'n' to step past any library code.\n# type 'l' to see the next few lines at once\n# type 'll' to see even more of the next few lines at once\n# type 'l' and a line number to see lines from that point onward\n# type 'b' and a line number to insert a breakpoint\n# type 'b' alone to see all your breakpoints\n# type 'cl' and a number to clear a breakpoint\n\n# NOTE: Your program will ONLY stop at a breakpoint if it has a chance to\n# actually get to that spot in the code.\n\n# type 'u' to go up (back)\n# type 'p' to print the value of something, e.g. p line\n# type a variable name and it will output its current value\n# type '?' or help when in the pdb to see all the commands.\n# type 'help' and a command name to get help on that one.\n\n# NOTE: when viewing lines, 'B' indicates a breakpoint you've inserted and\n# '->' indicates your current line\n\n# To use the debugger from the command line, import the pdb module by typing:\n# $ python3 -m pdb myfile.py\n\n# For the example above, if we drop a breakpoint at line 68 and continue our\n# program, then when it stops we can type 'line' to see its current value and\n# spot our problem: ecuador,quito.\n\n\n# breakpoint(*args, **kws)\n# ------------------------------------------------------------------------------\n# New to Python 3.7, this function drops you into the debugger wherever you\n# call it in your code. Specifically, it calls sys.breakpointhook(), passing\n# *args and **kws straight through. By default, sys.breakpointhook() calls\n# pdb.set_trace() expecting no arguments. 
It is purely a convenience function\n# so you don’t have to explicitly import pdb or type as much code to enter the\n# debugger.\n\nimages = ['pickle.png', 'dog.jpg', 'car.png', 'apple.gif', 'baloon.psd']\ntodo = []\n\nfor i in images:\n if not i.endswith('.png'):\n breakpoint()\n todo.append(i)\n\nprint(todo)\n","repo_name":"jessicarush/python-notes","sub_path":"debugging.py","file_name":"debugging.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"441772250","text":"import time\nimport pytest\nfrom pages.product_page import ProductPage\nfrom pages.login_page import LoginPage\nfrom pages.basket_page import BasketPage\n\n\ndef test_guest_should_see_login_link_on_product_page(browser):\n link = \"http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city\" \\\n \"-and-the-stars_95/ \"\n page = ProductPage(browser, link)\n page.open()\n page.should_be_login_link()\n\n\n@pytest.mark.need_review\ndef test_guest_can_go_to_login_page_from_product_page(browser):\n link = \"http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city\" \\\n \"-and-the-stars_95/ \"\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n login_page.should_be_login_page()\n\n\n@pytest.mark.need_review\ndef test_guest_can_add_product_to_basket(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders\" \\\n \"-handbook_209/?promo=newYear \"\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n page.solve_quiz_and_get_code()\n page.check_alert_success()\n page.check_cart_price_is_equal_product_price()\n\n\noffer_list = range(0, 10)\n\n\n@pytest.mark.parametrize('promo', [*range(7),\n pytest.param(7, marks=pytest.mark.xfail),\n *range(8, 10)])\ndef test_guest_can_add_product_to_basket_offers(browser, promo):\n link = f'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n f'-work_207/?promo=offer{promo}'\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n page.solve_quiz_and_get_code()\n page.check_alert_success()\n page.check_cart_price_is_equal_product_price()\n\n\n@pytest.mark.xfail(reason='msg appears')\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(\n browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n page.should_not_be_success_message()\n\n\ndef test_guest_cant_see_success_message(browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n\n@pytest.mark.xfail(reason='msg should be closed')\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n page.should_disappear_success_message()\n\n\n@pytest.mark.need_review\ndef test_guest_cant_see_product_in_basket_opened_from_product_page(browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.go_to_basket_page()\n basket_page = BasketPage(browser, browser.current_url)\n basket_page.should_be_empty_basket()\n\n\nclass TestUserAddToBasketFromProductPage:\n 
@pytest.fixture(scope='function')\n def setup(self, browser):\n link = 'http://selenium1py.pythonanywhere.com/en-gb/accounts/login/'\n login_page = LoginPage(browser, link)\n email = str(time.time()) + \"@fakemail.org\"\n password = str(time.time())\n login_page.register_new_user(email, password)\n login_page.should_be_authorized_user()\n\n def test_user_cant_see_success_message(self, browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self, browser):\n link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at' \\\n '-work_207/ '\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n page.check_alert_success()\n page.check_cart_price_is_equal_product_price()\n","repo_name":"Tashunya/autotest-final-work","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73234750934","text":"# error handling\n\nwhile True:\n try:\n x = int(input(\"x: \"))\n y = int(input(\"y: \"))\n print(x/y)\n\n# except ZeroDivisionError:\n# print(\"You cannot give 0 to y\")\n# except ValueError:\n# print(\"You should give numeric values to both x and y\")\n\n# except (ZeroDivisionError, ValueError) as e:\n# print(\"What you gave was wrong!\")\n# print(e)\n\n except Exception as ex:\n print(\"You gave wrond values \", ex)\n else:\n # print(\"Everything is fine!\")\n break\n finally:\n print(\"try-except has done!\")\n","repo_name":"yakuplacin/python_fundamental","sub_path":"Errors and Handling/handling.py","file_name":"handling.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21155705817","text":"def add(x, y):\n \"\"\"\n This function adds two numbers.\n \"\"\"\n return x + y\n\ndef subtract(x, y):\n \"\"\"\n This function subtracts y from x.\n \"\"\"\n return x - y\n\ndef multiply(x, y):\n \"\"\"\n This function multiplies two numbers.\n \"\"\"\n return x * y\n\ndef divide(x, y):\n \"\"\"\n This function divides x by y.\n If y is 0, it returns a message indicating division by zero is not allowed.\n \"\"\"\n if y == 0:\n return \"Cannot divide by zero\"\n return x / y\n\nif __name__ == \"__main__\":\n while True:\n print(\"Options:\")\n print(\"Enter 'add' for addition\")\n print(\"Enter 'subtract' for subtraction\")\n print(\"Enter 'multiply' for multiplication\")\n print(\"Enter 'divide' for division\")\n print(\"Enter 'quit' to end the program\")\n\n user_input = input(\": \")\n\n if user_input == \"quit\":\n break\n elif user_input in [\"add\", \"subtract\", \"multiply\", \"divide\"]:\n num1 = float(input(\"Enter first number: \"))\n num2 = float(input(\"Enter second number: \"))\n\n if user_input == \"add\":\n print(\"Result:\", add(num1, num2))\n elif user_input == \"subtract\":\n print(\"Result:\", subtract(num1, num2))\n elif user_input == \"multiply\":\n print(\"Result:\", multiply(num1, num2))\n elif user_input == \"divide\":\n result = divide(num1, num2)\n print(\"Result:\", result)\n else:\n print(\"Invalid 
input\")\n","repo_name":"SubramanyaPReddy/Project2","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"28477264428","text":"# Functions for removing, changing and creating files\nimport json\n\ndef remove_ext(input_file):\n    '''\n    Removes the extension of a file. Works similarly to os.path.splitext\n    Args: the file to remove the extension of (full or relative paths can be given; also works on bare filenames)\n    Returns: the filename without extension\n    '''\n    filename = input_file.split(\"/\")\n    filename = filename[-1].split(\".\")\n    filename.pop(-1)\n    filename = (\".\").join(filename)\n    return filename\n\ndef filename(js_file, make_json):\n    '''\n    Gets the name of the file, removes all 'junk' (i.e. path and extension) and makes a JSON filename\n    /home/user/test/to/given/path/test.js will become test.JSON\n    Args: full path to file with filename and extension\n    Returns: The name of the file as a JSON file\n    '''\n    filename = remove_ext(js_file)\n    if make_json == \"Yes\":\n        filename = filename+\".JSON\"\n    return filename\n\ndef make_output_files(location, name, text):\n    '''\n    Creates files from output of the website\n    Args: location: The place to save the output file to\n          name: Name of the output file\n          text: The content of the output file\n    Returns: 1 on success\n    '''\n    filename = location+name\n    with open(filename, \"w\") as results:\n        json.dump(text, results, indent=4, sort_keys=True)\n        results.close()\n    return 1\n\ndef json_prettify(input_file, output_file):\n    '''\n    Makes indents on a json file\n    json_file: Path/to/file\n    Returns the path of the prettified file\n    '''\n    with open(input_file, \"r\") as messy_file:\n        text = json.load(messy_file)\n        print(\"File content read\")\n        messy_file.close()\n    with open(output_file, \"w\") as pretty_file:\n        doc = json.dump(text, pretty_file, indent=4)\n        print(\"File prettified\")\n        pretty_file.close()\n    return output_file\n","repo_name":"qodIIbop/js-browsercheck","sub_path":"filemod.py","file_name":"filemod.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
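A quick usage sketch for the filemod helpers above (an editor's illustration, not part of the original repo: the paths are invented, and it assumes the module is importable as filemod per its sub_path):

    from filemod import remove_ext, filename, make_output_files

    remove_ext("/home/user/app.min.js")    # -> 'app.min' (only the last dot is dropped)
    filename("/tmp/report.js", "Yes")      # -> 'report.JSON' (note: the flag is the string 'Yes')
    make_output_files("/tmp/", "out.JSON", {"status": "ok"})    # writes indented JSON, returns 1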
{"seq_id":"25506721569","text":"def replace(frase,velha,nova):\n    palavra = ''\n    i = 0\n    while i 0 and field[i - 1][j] == 0:\n            field[i - 1][j] = True\n            o[1].append(str(i - 1) + '_' + str(j))\n            is_infected = True\n\n        if i < n - 1 and field[i + 1][j] == 0:\n            field[i + 1][j] = True\n            o[1].append(str(i + 1) + '_' + str(j))\n            is_infected = True\n\n        if j > 0 and field[i][j - 1] == 0:\n            field[i][j - 1] = True\n            o[1].append(str(i) + '_' + str(j - 1))\n            is_infected = True\n\n        if j < m - 1 and field[i][j + 1] == 0:\n            field[i][j + 1] = True\n            o[1].append(str(i) + '_' + str(j + 1))\n            is_infected = True\n\n        o[0] = o[1]\n\n        if is_infected:\n            quants += 1\n\nprint(quants)\n\nfin.close()\n# breadth-first search(!)\n# bytearray -- just for fun (individual bytes can be modified in place)","repo_name":"kokosda/sport-programming","sub_path":"src/acmp.ru/task_x_951.py","file_name":"task_x_951.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37570908820","text":"import sys\n\ntry:\n    if sys.argv[1].lower() == 'f':\n        temp = float(sys.argv[2]) * (9/5) + 32.0\n    elif sys.argv[1].lower() == 'c':\n        temp = (float(sys.argv[2]) - 32.0) * (5/9)\n    else:\n        print('ERROR: Please input C or F as argument 1')\n        quit()\n\n    temp = round(temp)\n    print(f'{temp}°{sys.argv[1].title()}')\n\nexcept ValueError:\n    print('ERROR: Please input a number for argument 2')\n\nexcept IndexError:\n    print('ERROR: Please enter 2 arguments')\n","repo_name":"sbowles22/CS550","sub_path":"HW2/C-to-F-Conversion.py","file_name":"C-to-F-Conversion.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"2388030359","text":"#!/usr/bin/python3\n\n'''Reddit api'''\n\nimport requests\n\n\ndef top_ten(subreddit):\n    '''Return top 10 posts in a subreddit'''\n\n    url = 'https://reddit.com/r/{}/hot.json'.format(subreddit)\n    response = requests.get(url=url)\n\n    if response.status_code != 200:\n        print(None)\n        return\n\n    children = response.json()['data']['children']\n\n    for post in range(10):\n        print(children[post]['data']['title'])\n","repo_name":"SamuelFelipe/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"27881039378","text":"\"\"\"\nAdvent of Code - Day 18\n\"\"\"\n\nimport re\n\n\nlocation = __file__\ninput_ = open(location.replace('.py', '_input.txt')).read()\n\n\ndef calculate(string):\n    \"\"\"Parses the numbers from left to right, applying the\n    operators from left to right.\"\"\"\n    digits = []\n    operators = []\n    for token in string.split(' '):\n        if token.isnumeric():\n            digits.append(int(token))\n        elif token in ['+', '*']:\n            operators.append(token)\n\n    # Initialize the return value as the first operation.\n    # Then update the value for each additional operator.\n    if operators[0] == '*':\n        value = digits[0] * digits[1]\n    else:\n        value = digits[0] + digits[1]\n    \n    for i, op in enumerate(operators):\n        if i == 0:\n            continue\n        if op == '*':\n            value *= digits[i+1]\n        else:\n            value += digits[i+1]\n    \n    return value\n\n\ndef eval_expression(formula):\n    exp = formula\n    while '(' in exp:\n        # extract the inner group of operators, solve, and update the expression.\n        result = re.search(\"\\(([0-9+* ]+)\\)\", exp)\n        substring = result.group(1)\n        evaluated = calculate(substring)\n        exp = exp.replace(f'({substring})', str(evaluated), 1)\n    return calculate(exp)\n\n\n# Part 1 Solution\nprint(sum(eval_expression(x) for x in input_.splitlines()))\n\n\n# Part 2 ----------------------------------------------------------------------\ndef calculate2(formula):\n    \"\"\"Apply Addition Before Multiplication.\"\"\"\n    exp = formula\n\n    # Resolve each Addition Operation and update the Expression.\n    while '+' in exp:\n        result = re.search(\"(\\d+[ ]\\+[ ]\\d+)\", exp)\n        substring = result.group(1)\n        value = eval(substring)\n        exp = exp.replace(f'{substring}', str(value), 1)\n    \n    # after resolving all the additions, we can multiply all\n    # the remaining numbers together.\n    exp = exp.replace('*', ' ')\n    x = 1\n    numbers = [int(x) for x in exp.split()]\n    for n in numbers:\n        x *= n\n    return x\n\n\ndef eval_expression2(formula):\n    exp = formula\n    while '(' in exp:\n        # extract the inner group of operators.\n        result = re.search(\"\\(([0-9+* ]+)\\)\", exp)\n        substring = result.group(1)\n        evaluated = calculate2(substring)\n        exp = exp.replace(f'({substring})', str(evaluated), 1)\n    return calculate2(exp)\n\n# Part 2 solve\nprint(sum(eval_expression2(x) for x in 
input_.splitlines()))\n","repo_name":"techartorg/Advent_of_Code_2020","sub_path":"rob_kovach/puzzle_18.py","file_name":"puzzle_18.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"14896177901","text":"import random\nfrom graph import Edge, Graph\nimport pylab\nimport matplotlib.lines as lines\n\n\n# TODO: understand how it works\nclass Point(object):\n\n def __init__(self, x, y, ident=-1):\n self.x = x\n self.y = y\n self.id = ident\n\n def __str__(self):\n return str(self.id) + \"(\" + str(self.x) + \",\" + str(self.y) + \")\"\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __hash__(self):\n return hash(self.id)\n\n\nclass Triangle(object):\n\n def __init__(self, points):\n assert len(points) == 3\n self.points = points\n\n def has_vertex(self, vertex):\n return self.points[0] == vertex or \\\n self.points[1] == vertex or \\\n self.points[2] == vertex\n\n def circum_circle_contains(self, vertex):\n ab = self.points[0].x ** 2 + self.points[0].y ** 2\n cd = self.points[1].x ** 2 + self.points[1].y ** 2\n ef = self.points[2].x ** 2 + self.points[2].y ** 2\n\n circum_x = (ab * (self.points[2].y - self.points[1].y) + cd * (self.points[0].y - self.points[2].y) +\n ef * (self.points[1].y - self.points[0].y)) / \\\n (self.points[0].x * (self.points[2].y - self.points[1].y) + self.points[1].x *\n (self.points[0].y - self.points[2].y) + self.points[2].x * (self.points[1].y - self.points[0].y)) / 2\n\n circum_y = (ab * (self.points[2].x - self.points[1].x) + cd * (self.points[0].x - self.points[2].x) +\n ef * (self.points[1].x - self.points[0].x)) / \\\n (self.points[0].y * (self.points[2].x - self.points[1].x) + self.points[1].y *\n (self.points[0].x - self.points[2].x) + self.points[2].y * (self.points[1].x - self.points[0].x)) / 2\n\n circum_radius = (((self.points[0].x - circum_x) ** 2) + ((self.points[0].y - circum_y) ** 2)) ** 0.5\n\n dist = ((vertex.x - circum_x) ** 2 + (vertex.y - circum_y) ** 2) ** 0.5\n return dist <= circum_radius\n\n def has_edge(self, edge: Edge) -> bool:\n v = edge.vertices\n assert v[0] != v[1]\n\n return v[0] in self.points and v[1] in self.points\n\n def get_edges(self):\n return [Edge(self.points[0], self.points[1]),\n Edge(self.points[1], self.points[2]),\n Edge(self.points[2], self.points[0])]\n\n def __str__(self):\n return \"<\" + \"\".join([str(i) for i in self.points]) + \">\"\n\n\ndef generate_random_points(num, low_range, high_range):\n points = []\n for i in range(num):\n points.append(Point(random.randint(low_range, high_range),\n random.randint(low_range, high_range), i))\n # TODO: check for repeats\n return points\n\n\ndef __get_max_min_points(points):\n\n assert len(points) > 0\n\n x_min = points[0].x\n x_max = points[0].x\n y_min = points[0].y\n y_max = points[0].y\n\n for i in range(len(points)):\n x_min = min(points[i].x, x_min)\n x_max = max(points[i].x, x_max)\n\n y_min = min(points[i].y, y_min)\n y_max = max(points[i].y, y_max)\n\n return [x_min, x_max, y_min, y_max]\n\n\ndef __get_triangle_around_points(points):\n x_min, x_max, y_min, y_max = __get_max_min_points(points)\n\n # x_median = (x_min + x_max) / 2\n # y_coord = y_max * 10\n #\n # # Fount top point of super triangle\n # top = Point(x_median, y_coord, -1)\n #\n # m1 = (y_coord - y_max) / (x_median - x_min)\n # x2 = (y_min - y_coord + m1 * x_median) / m1\n # # Found leftmost point of triangle\n # left = Point(x2, y_min, -1)\n #\n # m2 = (y_coord - 
y_max) / (x_median - x_max)\n # x3 = (y_min - y_coord + m2 * x_median) / m2\n #\n # right = Point(x3, y_min, -1)\n #\n # return Triangle((top, left, right))\n\n dx = x_max - x_min\n dy = y_max - y_min\n delta_max = max(dx, dy)\n x_mid = (x_min + x_max) / 2\n y_mid = (y_min + y_max) / 2\n\n p1 = Point(x_mid - 20 * delta_max, y_mid - delta_max)\n p2 = Point(x_mid, y_mid + 20 * delta_max)\n p3 = Point(x_mid + 20 * delta_max, y_mid - delta_max)\n return Triangle((p1, p2, p3))\n\n\ndef __check_if_edge_in_triangles(edge, triangles) -> bool:\n count = 0\n for triangle in triangles:\n if triangle.has_edge(edge):\n count += 1\n return count > 1\n\n\ndef __get_graph_from_triangulation(triangulation):\n g = Graph()\n included_edges = []\n for triangle in triangulation:\n for edge in triangle.get_edges():\n if edge not in included_edges:\n included_edges.append(edge)\n g.add_edge(Edge(edge.vertices[0].id, edge.vertices[1].id))\n return g\n\niteration_count = 0\n\n\ndef generate_random_graph(num, return_points=False):\n global iteration_count\n assert num > 0\n points = generate_random_points(num, -num * 20, num * 20)\n\n # points = [Point(p[0],p[1], i) for i, p in enumerate([(15,32), (58,17), (42,-31), (-96,-90), (75,-95)])]\n print([str(i) for i in points])\n\n # Initialize empty list of triangles\n triangulation = []\n\n # Create super triangle(which contains all points inside) and add it to the list\n super_triangle = __get_triangle_around_points(points)\n triangulation.append(super_triangle)\n\n for point in points:\n bad_triangles = []\n polygon = []\n\n iteration_count += 1\n print(iteration_count)\n\n for triangle in triangulation:\n if triangle.circum_circle_contains(point):\n bad_triangles.append(triangle)\n for edge in triangle.get_edges():\n polygon.append(edge)\n\n for triangle in bad_triangles:\n triangulation.remove(triangle)\n\n bad_edges = []\n\n #processed = []\n # polygon = list(set(polygon))\n for i in range(len(polygon)):\n for j in range(len(polygon)):\n if i == j:\n continue\n if polygon[i] == polygon[j]:\n #if (j, i) in processed:\n # continue\n #processed.append((i, j))\n bad_edges.append(polygon[i])\n bad_edges.append(polygon[j])\n\n for edge in bad_edges:\n if edge in polygon:\n polygon.remove(edge)\n\n for edge in polygon:\n triangulation.append(Triangle((edge.vertices[0], edge.vertices[1], point)))\n\n for triangle in triangulation[:]:\n for p in super_triangle.points:\n if triangle.has_vertex(p):\n triangulation.remove(triangle)\n break\n\n g = __get_graph_from_triangulation(triangulation)\n if return_points:\n return g, points\n else:\n return g\n\n\ndef get_random_non_planar_graph(size, p=0.5):\n g = Graph()\n for i in range(size):\n for j in range(i, size):\n if i != j:\n if random.random() < p:\n g.add_edge(Edge(i, j))\n return g\n\ndef show_graph(g, p, static_graph=False):\n ax = pylab.subplot()\n bx = pylab.subplot()\n\n x = list(map(lambda w: w.x, p))\n y = list(map(lambda w: w.y, p))\n num = list(range(len(x)))\n\n tmp_lines = []\n for edge in g.get_edges():\n if static_graph:\n e1 = edge.data.vertices[0]\n e2 = edge.data.vertices[1]\n else:\n e1 = edge.vertices[0]\n e2 = edge.vertices[1]\n tmp_lines.append([(x[e1], y[e1]), (x[e2], y[e2])])\n\n for i, n in enumerate(num):\n bx.annotate(n, (x[i], y[i]))\n\n converted_lines = []\n for line in tmp_lines:\n tmp = list(zip(*line))\n print(line, tmp)\n ax.add_line(lines.Line2D(tmp[0], tmp[1], linewidth=1, color='b'))\n\n pylab.scatter(x, y, s=5, marker='o', c='b')\n pylab.plot()\n\n pylab.show()\n\n print(g)\n\n\nif 
__name__ == \"__main__\":\n    g, p = generate_random_graph(100, return_points=True)\n    show_graph(g, p)","repo_name":"ilyastepykin/GraphEmbedding","sub_path":"random_graph_generation.py","file_name":"random_graph_generation.py","file_ext":"py","file_size_in_byte":7894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"38273434668","text":"import csv\n\n# Define an empty dictionary to store the user's expenses by category\nexpenses = {}\n\n# Prompt the user to enter their monthly income\nmonthly_income = float(input(\"What is your monthly income? \"))\n\n# Load existing expenses from a file if available\ntry:\n    with open('expenses.csv', mode='r') as file:\n        reader = csv.reader(file)\n        for row in reader:\n            category, amount = row\n            expenses[category] = float(amount)\n    print(\"Existing expenses loaded.\")\nexcept FileNotFoundError:\n    print(\"No existing expenses found.\")\n\n# Define a function to add a new expense to the expenses dictionary\ndef add_expense():\n    while True:\n        category = input(\"Enter a category for the expense (or 'done' to finish): \")\n        if category == \"done\":\n            break\n        else:\n            amount = float(input(\"Enter the monthly expense amount: \"))\n            if category in expenses:\n                expenses[category] += amount\n            else:\n                expenses[category] = amount\n\n# Define a function to edit an existing expense in the expenses dictionary\ndef edit_expense():\n    category = input(\"Enter the category of the expense you want to edit: \")\n    if category in expenses:\n        new_amount = float(input(\"Enter the new monthly expense amount: \"))\n        expenses[category] = new_amount\n    else:\n        print(\"Expense not found.\")\n\n# Define a function to delete an existing expense from the expenses dictionary\ndef delete_expense():\n    category = input(\"Enter the category of the expense you want to delete: \")\n    if category in expenses:\n        del expenses[category]\n    else:\n        print(\"Expense not found.\")\n\n# Define a function to save the expenses dictionary to the CSV file\ndef save_expenses():\n    with open('expenses.csv', mode='w', newline='') as file:\n        writer = csv.writer(file)\n        for category, amount in expenses.items():\n            writer.writerow([category, amount])\n    print(\"Expenses saved.\")\n\n# Allow the user to add, edit, delete, summarize or save expenses\nwhile True:\n    action = input(\"Enter an action (add, edit, delete, summary, save, or quit): \")\n    if action == \"add\":\n        add_expense()\n    elif action == \"edit\":\n        edit_expense()\n    elif action == \"delete\":\n        delete_expense()\n    elif action == \"save\":\n        save_expenses()\n    elif action == \"quit\":\n        break\n    elif action == \"summary\":\n        # Calculate the user's total monthly expenses and savings\n        total_expenses = sum(expenses.values())\n        monthly_savings = monthly_income - total_expenses\n\n        # Print a summary of the user's finances\n        print(\"\\nMonthly Income: ${:.2f}\".format(monthly_income))\n        print(\"Monthly Expenses:\")\n        for category, amount in expenses.items():\n            print(\"- {}: ${:.2f}\".format(category, amount))\n        print(\"Total Expenses: ${:.2f}\".format(total_expenses))\n        print(\"Monthly Savings: ${:.2f}\".format(monthly_savings))\n","repo_name":"KingFin743/Finance-Manager","sub_path":"finance-manager.py","file_name":"finance-manager.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37739101140","text":"import numpy as np\n\nfrom abc import ABC, abstractmethod\n\n# Add base directory of project to 
path.\nimport os\nimport sys\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(dir_path + \"/..\")\n\nfrom supervised_learning.internal.decision_tree_split_algorithms import _ID3_Algorithm, _CART_Algorithm\nfrom util.data_operation import entropy, mean_square_error\n\n# TODO: Later will have categorical=\"none\", then can be changed to \"all\" or\n# a 1d array\n# TODO: Add regularization - max depth, few observations in node, entropy change is small,\n# cross-validation entropy starts to increase.\nclass DecisionTree(ABC):\n \"\"\"\n Generates a decision tree to predict future data using the provided examples\n with categorical data.\n \n Determines feature to split on by minimizing uncertainty.\n \n Parameters\n ---------\n algorithm_to_use : string\n What decision tree generation algorithm should be used. Currently only\n ID3 and CART are supported.\n \n max_depth : numeric\n If provided, will limit the depth that the tree can reach. May help\n prevent overfitting.\n \n min_node_samples : numeric\n If provided, any nodes that have less samples than the min will\n be forced to be a leaf.\n \n tuning_param\n How should the cost-complexity pruning proceed. If 'find', \n \n Theory\n --------\n - Decision Trees are helpful when need to understand how different\\\n values will change the expected result.\n - Only examine one feature at a time, so creates rectangular estimation boxes.\n - Very high variance, structure may heavily change due to sampling.\n - Splitting is locally greedy due to minimizing uncertainty at current step, and\\\n is likely to reach a local optimum.\n - Due to the greedy splitting, will prefer shorter trees, but will often\\\n not pick the shortest possible tree.\n - No way to get a confidence measurement (although this can be added when using\\\n GINI index)\n \n References\n --------\n T. Hastie, R. Tibshirani and J. Friedman. 
\"Elements of Statistical\n Learning\", Springer, 2009.\n \"\"\"\n \n def __init__(self, algorithm_to_use='ID3', max_depth=None, min_node_samples=None):\n if algorithm_to_use == 'ID3':\n self._split_algorithm = _ID3_Algorithm()\n elif algorithm_to_use == 'CART':\n self._split_algorithm = _CART_Algorithm()\n else:\n raise ValueError(\"algorithm_to_use value '{}' is not valid, only ID3 is supported\".format(algorithm_to_use))\n \n self._max_depth = max_depth\n self._min_node_samples = min_node_samples\n \n class LeafNode(object):\n def __init__(self, estimate, values_and_counts):\n self._estimate = estimate\n self._values_and_counts = values_and_counts\n \n def predict(self, row):\n return self._estimate\n \n def print_tree(self, offset):\n \"\"\"\n Parameters\n --------\n \n offset : string\n All spaces that should be printed before printing out any content\n from the node.\n \"\"\"\n print(offset + \"Estimate:\", str(self._estimate), \"had value-count dist\", list(self._values_and_counts))\n \n class CategoricalNode(object):\n \"\"\"\n Splits the tree based on the different values possible for the given feature\n \"\"\"\n def __init__(self, default_estimate, values_and_counts, feature_index_split_on, feature_value_to_group_map,\n group_split_explanation, groups_dict):\n self._default_estimate = default_estimate\n self._values_and_counts = values_and_counts\n self._feature_index_split_on = feature_index_split_on\n self._feature_value_to_group_map = feature_value_to_group_map\n self._group_split_explanation = group_split_explanation\n self._groups_dict = groups_dict\n \n def predict(self, row):\n feature_val = row[self._feature_index_split_on]\n group = self._feature_value_to_group_map(feature_val)\n if group in self._groups_dict:\n return self._groups_dict[group].predict(row)\n else:\n # The group wasn't encountered, so go with default.\n return self._default_estimate\n \n def print_tree(self, offset):\n \"\"\"\n Parameters\n --------\n offset : string\n All spaces that should be printed before printing out any content\n from the node.\n \"\"\"\n print(offset + \"Split on feature\", self._feature_index_split_on,\n self._group_split_explanation,\n \"had value-count dist\", list(self._values_and_counts),\n \"and default\", self._default_estimate)\n value_offset = offset + \" \"\n child_offset = offset + \" \"\n for feature_value in self._groups_dict:\n print(value_offset + \"group\", str(feature_value) + \":\")\n self._groups_dict[feature_value].print_tree(child_offset)\n \n def fit(self, X, y):\n \"\"\"\n Fit internal parameters to minimize MSE on given X y dataset.\n \n Parameters\n ---------\n X : array-like, shape [n_samples, n_features]\n Input array of features.\n \n y : array-like, shape [n_samples,]\n Input array of expected results.\n \"\"\"\n self._split_algorithm.check_data_is_valid(X, y)\n \n available_features = [i for i in range(0, X.shape[1])]\n \n features_values = [np.unique(column) for column in X.T]\n examples = np.append(X, np.reshape(y, (np.shape(X)[0], 1)), axis=1)\n \n self._base_node = self._fit_node(examples, available_features, features_values,\n 1) # First will have depth of 1\n\n def _fit_node(self, examples, available_features, features_values, depth):\n \"\"\"\n Returns the sub-tree generated by fitting the examples using some (or all)\n of the available features.\n \n Parameters\n ---------\n examples : array-like, shape [n_samples, n_features + 1]\n Input array of samples with features and expected value. 
The expected value\n must have been appended to end of rows.\n \n available_features : array-like, shape [n_available_features]\n The index for each feature in examples that can still be used. Element\n at index i corresponds to features_values at index i.\n \n features_values : array-like, shape [n_available_features]\n All possible values for each feature that can still be used.\n Element at index i corresponds to features_values at index i.\n \n depth : numeric\n How many nodes (including node about to be created) are there to root\n of tree.\n \"\"\"\n # There should always be at least one example.\n assert(len(examples) > 0)\n unique_values, values_counts = self._get_unique_values_and_counts(examples)\n best_estimate = self._get_best_estimate(unique_values, values_counts)\n \n # Force as leaf if only 1 value, no features, hit max depth, or doesn't have enough samples.\n if len(unique_values) == 1 or len(available_features) == 0 or\\\n self._reached_max_depth(depth) or\\\n not self._has_enough_samples_for_node(examples.shape[0]):\n return DecisionTree.LeafNode(best_estimate, zip(unique_values, values_counts))\n \n best_uncertainty = None\n \n best_feature_index_in_available = None\n best_feature_value_to_group_map = None\n best_group_split_explanation = None\n best_examples_by_group = None\n \n for feature_index_in_available in range(len(available_features)):\n for feature_value_to_group_map, examples_by_group, group_split_explanation in\\\n self._split_algorithm.create_splits_from_feature_values(\n examples, available_features[feature_index_in_available],\n features_values[feature_index_in_available]):\n \n uncertainty = self._uncertainty(examples_by_group)\n \n if best_uncertainty is None or best_uncertainty > uncertainty:\n best_uncertainty = uncertainty\n best_feature_index_in_available = feature_index_in_available\n best_examples_by_group = examples_by_group\n best_feature_value_to_group_map = feature_value_to_group_map\n best_group_split_explanation = group_split_explanation\n \n \n chosen_feature_index_in_examples = available_features[best_feature_index_in_available]\n \n # Calculate for the groups\n groups_dict = {}\n for group in best_examples_by_group:\n examples_in_group = best_examples_by_group[group]\n \n next_available_features = available_features\n next_features_values = features_values\n \n feature_values_orig = next_features_values[best_feature_index_in_available]\n \n # Only bother to update the feature values group if it won't be immediately\n # removed\n if not self._split_algorithm.should_remove_feature_after_use():\n next_features_values[best_feature_index_in_available] =\\\n np.unique(examples_in_group[:, best_feature_index_in_available])\n \n # Should not be used, or only has one value left.\n if self._split_algorithm.should_remove_feature_after_use() or\\\n len(next_features_values[best_feature_index_in_available]) == 1:\n # Remove the chosen feature from available features and their possible values\n # if the split algorithm says that is fine.\n next_available_features =\\\n available_features[:best_feature_index_in_available] +\\\n available_features[(best_feature_index_in_available +1):]\n next_features_values =\\\n features_values[:best_feature_index_in_available] +\\\n features_values[(best_feature_index_in_available +1):]\n \n groups_dict[group] = self._fit_node(\n examples_in_group, next_available_features,\n next_features_values, depth + 1)\n \n # Restore the original features_values\n if not self._split_algorithm.should_remove_feature_after_use():\n 
features_values[best_feature_index_in_available] = feature_values_orig\n \n return DecisionTree.CategoricalNode(best_estimate, zip(unique_values, values_counts),\n chosen_feature_index_in_examples, best_feature_value_to_group_map,\n best_group_split_explanation,\n groups_dict)\n \n @abstractmethod\n def _get_best_estimate(self, unique_values, values_counts):\n \"\"\"\n Given all of the different unique values and their count, return\n the estimate that will minimize the error.\n \n Parameters\n ---------\n unique_values : array-like, shape[num_unique_values]\n All of the different unique values at the current node.\n \n values_counts : array-like, shape[num_unique_values]\n The number of occurrences for each value in unique_values.\n \"\"\"\n pass\n \n def _reached_max_depth(self, depth):\n return self._max_depth is not None and depth >= self._max_depth\n \n def _has_enough_samples_for_node(self, num_samples):\n return self._min_node_samples is None or num_samples >= self._min_node_samples\n \n def predict(self, X):\n \"\"\"\n Predict the value(s) associated with each row in X.\n \n X must have the same size for n_features as the input this instance was\n trained on.\n \n Parameters\n ---------\n \n X : array-like, shape [n_samples, n_features]\n Input array of features.\n \n Returns\n ------\n array-like, shape [n_samples]\n Estimate predicted by tree for each sample.\n \"\"\"\n return np.apply_along_axis(self._base_node.predict,\n axis=1, arr=X)\n \n @abstractmethod\n def _uncertainty(self, examples_split_by_groups):\n \"\"\"\n Given how the examples are separated into different groups, will\n calculate the uncertainty for the groups distribution.\n \n Currently only entropy is used as the measure of uncertainty.\n \n Parameters\n ---------\n examples_split_by_groups : map from group to examples in group\n Contains all of the samples\n \n Returns\n -------\n Non-negative value for the entropy that would result from splitting the data\n on the given split.\n \"\"\"\n pass\n \n def print_tree(self):\n self._base_node.print_tree(\"\")\n \n def _get_unique_values_and_counts(self, examples):\n \"\"\"\n Given a set of rows, will return all of the unique values and their counts.\n \n Parameters\n ---------\n examples : array-like, shape [n_samples, n_features + 1]\n Input array of samples with features and expected value. The expected value\n must have been appended to end of rows.\n \n Returns\n ----------\n (unique_values, counts) where counts[i] is number of occurrences of\n unique_values[i].\n \"\"\"\n return np.unique(examples[:, -1], return_counts=True)\n \n def get_feature_params(self):\n pass\n\nclass DecisionTreeClassifier(DecisionTree):\n \"\"\"\n Generates a decision tree to predict the class of future data using provided\n features.\n \n Determines feature to split on by minimizing uncertainty (currently just entropy).\n \n Parameters\n ---------\n algorithm_to_use : string\n What decision tree generation algorithm should be used. Currently only\n ID3 and CART are supported.\n \n max_depth : numeric\n If provided, will limit the depth that the tree can reach. 
May help\n prevent overfitting.\n \n min_node_samples : numeric\n If provided, any nodes that have less samples than the min will\n be forced to be a leaf.\n \"\"\"\n def __init__(self, algorithm_to_use='ID3', max_depth=None, min_node_samples=None):\n super().__init__(algorithm_to_use=algorithm_to_use, max_depth=max_depth,\n min_node_samples=min_node_samples)\n \n def _get_best_estimate(self,\n unique_values, values_counts):\n # Just return the most common class\n return unique_values[np.argmax(values_counts)]\n \n \n def _uncertainty(self, examples_split_by_groups):\n \"\"\"\n Given how the examples are separated into different groups, will\n calculate the uncertainty for the groups distribution.\n \n Currently only entropy is used as the measure of uncertainty.\n \n Parameters\n ---------\n examples_split_by_groups : map from group to examples in group\n Contains all of the samples\n \n Returns\n -------\n Non-negative value for the entropy that would result from splitting the data\n on the given split.\n \"\"\"\n penalty = 0\n for group in examples_split_by_groups:\n examples_in_group = examples_split_by_groups[group]\n _, counts = self._get_unique_values_and_counts(examples_in_group)\n # TODO: Be able to customize the penalty used.\n # Note that I should also update the comments to reflect this.\n penalty += entropy(counts)\n \n return penalty\n\n\nclass DecisionTreeRegression(DecisionTree):\n \"\"\"\n Generates a decision tree to predict the value of future data using provided\n features.\n \n Determines feature to split on by minimizing uncertainty (currently just MSE).\n \n Parameters\n ---------\n algorithm_to_use : string\n What decision tree generation algorithm should be used. Currently only\n ID3 and CART are supported.\n \n max_depth : numeric\n If provided, will limit the depth that the tree can reach. 
May help\n        prevent overfitting.\n        \n    min_node_samples : numeric\n        If provided, any nodes that have fewer samples than the min will\n        be forced to be a leaf.\n    \"\"\"\n    def __init__(self, algorithm_to_use='ID3', max_depth=None, min_node_samples=None):\n        super().__init__(algorithm_to_use=algorithm_to_use, max_depth=max_depth,\n                         min_node_samples=min_node_samples)\n    \n    def _get_best_estimate(self,\n                           unique_values, values_counts):\n        # Select (weighted) average\n        return np.average(unique_values, weights=values_counts)\n    \n    def _uncertainty(self, examples_split_by_groups):\n        \"\"\"\n        Given how the examples are separated into different groups, will\n        calculate the uncertainty for the groups distribution.\n        \n        The uncertainty within a group is the MSE from the best estimate (mean of all\n        samples) to each sample's value.\n        \n        Parameters\n        ---------\n        examples_split_by_groups : map from group to examples in group\n            Contains all of the samples\n        \n        Returns\n        -------\n        Sum of the MSE for each group.\n        \"\"\"\n        penalty = 0\n        for group in examples_split_by_groups:\n            examples_in_group = examples_split_by_groups[group]\n            all_y_values = examples_in_group[:, -1]\n            # For each group, best value for estimate would be the mean\n            best_estimate = np.mean(all_y_values)\n            \n            penalty += mean_square_error(all_y_values, best_estimate)\n        \n        return penalty\n\n","repo_name":"Diusrex/Machine-Learning-Implementations","sub_path":"supervised_learning/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":18444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"8080619193","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport pyexcel\nfrom collections import OrderedDict\nfrom youtube_dl import YoutubeDL\nurl = \"https://www.apple.com/itunes/charts/songs\"\n\nconn = urlopen(url)\nraw_data = conn.read()\ncontent = raw_data.decode(\"utf8\")\n\nsoup = BeautifulSoup(content, \"html.parser\")\nul_songs = soup.find(\"section\",\"section chart-grid\")\n# print(soup.prettify())\n# print(ul_songs)\nli_list = ul_songs.find_all(\"li\")\nsongs_list = []\nfor li in li_list:\n    h3 = li.h3.a\n    h4 = li.h4.a\n    name = h3.string\n    artists = h4.string\n    songs = OrderedDict({\n        \"Song's name\": name,\n        \"Artists\": artists,\n    })\n    songs_list.append(songs)\n# print(songs_list)\npyexcel.save_as(records=songs_list, dest_file_name=\"iTunes top songs.xlsx\")\noptions = {\n    'default_search': 'ytsearch',\n    'max_downloads': 1\n    }\ndl = YoutubeDL(options)\n\nfor k in songs_list:\n    # YoutubeDL.download() expects a list of URLs/search queries, not a bare string\n    dl.download([k[\"Song's name\"]])","repo_name":"dinhquanghuy97/dinhquanghuy_fundamentals_c4e25","sub_path":"lab2/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
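A minimal usage sketch for the decision-tree classes above (an editor's illustration: it assumes the module is importable as decision_tree, per its sub_path, that the default ID3 split helper behaves as its name suggests, and that the toy data below is cleanly separable on feature 0):

    import numpy as np
    from decision_tree import DecisionTreeClassifier

    # Two categorical features; the label simply copies feature 0.
    X = np.array([[0, 1], [0, 0], [1, 1], [1, 0]])
    y = np.array([0, 0, 1, 1])

    clf = DecisionTreeClassifier(max_depth=2)
    clf.fit(X, y)
    print(clf.predict(np.array([[1, 1], [0, 0]])))   # expected: [1 0]
    clf.print_tree()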
{"seq_id":"20610284045","text":"import random\n\n# declaration of lists and variables\nboard = []\nmines = []\nboard_row = 0\nadj = 0\nwrong = 0\n\n# start board-size loop\nwhile True:\n    board_row = int(input('How many rows? (4 minimum): '))\n\n    if board_row < 4:\n        print('That is too small, (4 is minimum)!')\n    else:\n        break\n#\nif board_row >= 8:\n    for i in range(15):\n        mines.append([random.randint(0, board_row - 1), random.randint(0, board_row - 1)])\nelif board_row >= 5:\n    for i in range(10):\n        mines.append([random.randint(0, board_row - 1), random.randint(0, board_row - 1)])\n\nfor i in range(board_row):\n    board.append(['X'] * board_row)\n\n\ndef draw_board(board):\n    for j in board:\n        print(' '.join(j))\n\n\ndef check_ans():\n    global wrong  # write to the module-level flag instead of a new local\n    if row >= board_row or col >= board_row:\n        print('That number is too high. The order goes 0 to ', board_row - 1)\n        wrong = 1\n    else:\n        wrong = 0\n\n\ndef adjacent_mines(r, c, adj):\n    # count the mines in all eight neighbouring cells\n    if [r - 1, c - 1] in mines:\n        adj += 1\n    if [r - 1, c] in mines:\n        adj += 1\n    if [r - 1, c + 1] in mines:\n        adj += 1\n    if [r, c - 1] in mines:\n        adj += 1\n    if [r, c + 1] in mines:\n        adj += 1\n    if [r + 1, c - 1] in mines:\n        adj += 1\n    if [r + 1, c] in mines:\n        adj += 1\n    if [r + 1, c + 1] in mines:\n        adj += 1\n\n    return adj\n\n\ndraw_board(board)\n\nwhile True:\n    print('Input where you want to mine')\n    row = int(input('Row: '))\n    col = int(input('Column: '))\n\n    check_ans()\n\n    if wrong != 1:\n        if [row, col] in mines:\n            break\n        else:\n            board[row][col] = str(adjacent_mines(row, col, 0))\n\n    # draw board if it did not hit mine\n    draw_board(board)\n\nprint('Sorry but you have blown up!')\n","repo_name":"Mafika-Mahlobo/Python-practice","sub_path":"wethink_project(group)/game2.py","file_name":"game2.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
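The eight explicit membership tests above are easy to get wrong (the original repeated the [r + 1, c + 1] offset seven times); a loop over offsets is harder to mistype. A hedged sketch keeping the same signature and the same global mines list-of-[row, col] representation:

    def adjacent_mines(r, c, adj):
        # visit every cell around (r, c), skipping the centre itself
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if (dr, dc) != (0, 0) and [r + dr, c + dc] in mines:
                    adj += 1
        return adj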
you're using Conda), you can still use this script by typing\n# , but <./import_brani_grid.py> will not work.\n\n\"\"\"\nTake the grid of template spectra used by Brani's RV code, and turn it into a spectrum library for use in 4MOST 4GP.\n\nTo run this script, you need to have a copy of the file which is part of Brani's RV code.\n\"\"\"\n\nimport argparse\nimport itertools\nimport logging\nimport os\nfrom os import path as os_path\n\nimport numpy as np\nfrom fourgp_speclib import SpectrumLibrarySqlite, Spectrum\n\n# Path to where we find Brani's <4MOST_forward_modeling>\nour_path = os_path.split(os_path.abspath(__file__))[0]\ndefault_brani_code_path = os_path.join(our_path, \"../../../../forwardModelling/4MOST_forward_modeling\")\n\n# Read input parameters\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument('--brani-code-path',\n required=False,\n default=default_brani_code_path,\n dest=\"brani_code_path\",\n help=\"Specify the path where we can find the original data files for Brani's RV code.\")\nargs = parser.parse_args()\n\n# Path to Brani's template wavelength grid and templates\nwavelength_raster_path = os_path.join(args.brani_code_path, \"LAMBDA_RAV.DAT\")\ntemplate_spectra_path = os_path.join(args.brani_code_path, \"templates.npy\")\n\n# Start logger\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',\n datefmt='%d/%m/%Y %H:%M:%S')\nlogger = logging.getLogger(__name__)\nlogger.info(\"Importing grid of template spectra for Brani's RV code\")\n\n# Set path to workspace where we create libraries of spectra\nworkspace = os_path.join(our_path, \"../../../workspace\")\ntarget_library_name = \"brani_rv_grid\"\nos.system(\"mkdir -p {}\".format(workspace))\n\n# Load pickled collection of templates\nwavelength_raster = np.loadtxt(wavelength_raster_path)\nflux_templates = np.load(template_spectra_path)\n\n# logger.info(\"Full wavelength array shape: {}\".format(wavelength_raster.shape))\n# logger.info(\"Flux template array shape: {}\".format(flux_templates.shape))\n\n# Filter wavelength range\nwavelength_filter = (wavelength_raster > 3670) & (wavelength_raster < 9530)\nwavelength_raster = wavelength_raster[wavelength_filter]\n\n# logger.info(\"After filtering, wavelength array shape: {}\".format(wavelength_raster.shape))\n\n# The stellar parameters which each grid axis samples are not specified in Brani's file.\n# They are as follows...\ngrid_axes = [[\"Teff\", (4000, 8250, 250)],\n [\"[Fe/H]\", (0.5, 3.0, 0.5)],\n [\"logg\", (1.5, 5.5, 0.5)]\n ]\n\ngrid_axis_values = [np.arange(axis[1][0], axis[1][1], axis[1][2]) for axis in grid_axes]\ngrid_axis_indices = [list(range(int((axis[1][1] - axis[1][0]) / axis[1][2]))) for axis in grid_axes]\ngrid_axis_index_combinations = itertools.product(*grid_axis_indices)\n\n# Turn Brani's set of templates into a spectrum library with path specified above\nlibrary_path = os_path.join(workspace, target_library_name)\nlibrary = SpectrumLibrarySqlite(path=library_path, create=True)\n\n# Brani's template spectra do not have any error vectors associated with them, so add an array of zeros\nerrors_dummy = np.zeros_like(wavelength_raster)\n\n# Import each template spectrum in turn\nfor i, axis_indices in enumerate(grid_axis_index_combinations):\n filename = \"template{:06d}\".format(i)\n metadata = {\"Starname\": filename}\n item = flux_templates\n for axis_counter, index in enumerate(axis_indices):\n metadata_key = grid_axes[axis_counter][0]\n metadata_value = 
grid_axis_values[axis_counter][index]\n metadata[metadata_key] = metadata_value\n metadata[metadata_key + \"_index\"] = index\n item = item[index]\n\n # Turn data into a Spectrum object\n spectrum = Spectrum(wavelengths=wavelength_raster,\n values=item,\n value_errors=errors_dummy,\n metadata=metadata)\n\n # Import spectrum into our SpectrumLibrary\n library.insert(spectra=spectrum, filenames=filename)\n","repo_name":"dcf21/4most-4gp-scripts","sub_path":"src/scripts/import_spectra/import_brani_grid.py","file_name":"import_brani_grid.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12723220113","text":"from typing import Tuple, Union\nimport autograd.numpy as anp\nimport autograd.scipy.linalg as aspl\nimport numpy as np\nfrom autograd.builtins import isinstance\nfrom autograd.tracer import getval\nfrom numpy.random import RandomState\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants import (\n NOISE_VARIANCE_LOWER_BOUND,\n MIN_POSTERIOR_VARIANCE,\n MIN_CHOLESKY_DIAGONAL_VALUE,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.custom_op import (\n AddJitterOp,\n cholesky_factorization,\n flatten_and_concat,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.mean import (\n MeanFunction,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.kernel import (\n KernelFunction,\n)\n\n\nKernelFunctionWithCovarianceScale = Union[\n KernelFunction, Tuple[KernelFunction, np.ndarray]\n]\n\n\ndef _extract_kernel_and_scale(kernel: KernelFunctionWithCovarianceScale):\n if isinstance(kernel, tuple):\n return kernel[0], anp.reshape(kernel[1], (1, 1))\n else:\n return kernel, 1.0\n\n\ndef cholesky_computations(\n features,\n targets,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n noise_variance,\n debug_log: bool = False,\n):\n \"\"\"\n Given input matrix X (features), target matrix Y (targets), mean and kernel\n function, compute posterior state {L, P}, where L is the Cholesky factor\n of\n k(X, X) + sigsq_final * I\n and\n L P = Y - mean(X)\n Here, sigsq_final >= noise_variance is minimal such that the Cholesky\n factorization does not fail.\n\n :param features: Input matrix X (n, d)\n :param targets: Target matrix Y (n, m)\n :param mean: Mean function\n :param kernel: Kernel function, or tuple\n :param noise_variance: Noise variance (may be increased)\n :param debug_log: Debug output during add_jitter CustomOp?\n :return: L, P\n \"\"\"\n _kernel, covariance_scale = _extract_kernel_and_scale(kernel)\n kernel_mat = _kernel(features, features) * covariance_scale\n # Add jitter to noise_variance (if needed) in order to guarantee that\n # Cholesky factorization works\n sys_mat = AddJitterOp(\n flatten_and_concat(kernel_mat, noise_variance),\n initial_jitter_factor=NOISE_VARIANCE_LOWER_BOUND,\n debug_log=\"true\" if debug_log else \"false\",\n )\n chol_fact = cholesky_factorization(sys_mat)\n centered_y = targets - anp.reshape(mean(features), (-1, 1))\n pred_mat = aspl.solve_triangular(chol_fact, centered_y, lower=True)\n return chol_fact, pred_mat\n\n\ndef predict_posterior_marginals(\n features,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n chol_fact,\n pred_mat,\n test_features,\n):\n \"\"\"\n Computes posterior means and variances for test_features.\n If pred_mat is a matrix, so will be posterior_means, but not\n posterior_variances. 
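(A single variance vector is shared by all columns of pred_mat.) 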
Reflects the fact that for GP regression and fixed\n hyperparameters, the posterior mean depends on the targets y, but the\n posterior covariance does not.\n\n :param features: Training inputs\n :param mean: Mean function\n :param kernel: Kernel function, or tuple\n :param chol_fact: Part L of posterior state\n :param pred_mat: Part P of posterior state\n :param test_features: Test inputs\n :return: posterior_means, posterior_variances\n \"\"\"\n _kernel, covariance_scale = _extract_kernel_and_scale(kernel)\n k_tr_te = _kernel(features, test_features) * covariance_scale\n linv_k_tr_te = aspl.solve_triangular(chol_fact, k_tr_te, lower=True)\n posterior_means = anp.matmul(anp.transpose(linv_k_tr_te), pred_mat) + anp.reshape(\n mean(test_features), (-1, 1)\n )\n posterior_variances = _kernel.diagonal(test_features) * covariance_scale - anp.sum(\n anp.square(linv_k_tr_te), axis=0\n )\n return posterior_means, anp.reshape(\n anp.maximum(posterior_variances, MIN_POSTERIOR_VARIANCE), (-1,)\n )\n\n\ndef sample_posterior_marginals(\n features,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n chol_fact,\n pred_mat,\n test_features,\n random_state: RandomState,\n num_samples: int = 1,\n):\n \"\"\"\n Draws num_sample samples from the product of marginals of the posterior\n over input points test_features. If pred_mat is a matrix with m columns,\n the samples returned have shape (n_test, m, num_samples).\n\n :param features: Training inputs\n :param mean: Mean function\n :param kernel: Kernel function, or tuple\n :param chol_fact: Part L of posterior state\n :param pred_mat: Part P of posterior state\n :param test_features: Test inputs\n :param num_samples: Number of samples to draw\n :return: Samples, shape (n_test, num_samples) or (n_test, m, num_samples)\n \"\"\"\n post_means, post_vars = predict_posterior_marginals(\n features, mean, kernel, chol_fact, pred_mat, test_features\n )\n post_means = anp.expand_dims(post_means, axis=-1) # (n_test, m, 1)\n post_stds = anp.sqrt(anp.reshape(post_vars, (-1, 1, 1))) # (n_test, 1, 1)\n n01_vecs = [\n random_state.normal(size=getval(post_means.shape)) for _ in range(num_samples)\n ]\n n01_mat = anp.concatenate(n01_vecs, axis=-1)\n samples = anp.multiply(n01_mat, post_stds) + post_means\n\n if samples.shape[1] == 1:\n n_test = getval(samples.shape)[0]\n samples = anp.reshape(samples, (n_test, -1)) # (n_test, num_samples)\n\n return samples\n\n\ndef sample_posterior_joint(\n features,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n chol_fact,\n pred_mat,\n test_features,\n random_state: RandomState,\n num_samples: int = 1,\n):\n \"\"\"\n Draws num_sample samples from joint posterior distribution over inputs\n test_features. This is done by computing mean and covariance matrix of\n this posterior, and using the Cholesky decomposition of the latter. 
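(Jitter is added to the posterior covariance before factorization, which keeps the decomposition stable for nearly singular matrices.) 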
If\n pred_mat is a matrix with m columns, the samples returned have shape\n (n_test, m, num_samples).\n\n :param features: Training inputs\n :param mean: Mean function\n :param kernel: Kernel function, or tuple\n :param chol_fact: Part L of posterior state\n :param pred_mat: Part P of posterior state\n :param test_features: Test inputs\n :param num_samples: Number of samples to draw\n :return: Samples, shape (n_test, num_samples) or (n_test, m, num_samples)\n \"\"\"\n _kernel, covariance_scale = _extract_kernel_and_scale(kernel)\n k_tr_te = _kernel(features, test_features) * covariance_scale\n linv_k_tr_te = aspl.solve_triangular(chol_fact, k_tr_te, lower=True)\n posterior_mean = anp.matmul(anp.transpose(linv_k_tr_te), pred_mat) + anp.reshape(\n mean(test_features), (-1, 1)\n )\n posterior_cov = _kernel(test_features, test_features) * covariance_scale - anp.dot(\n anp.transpose(linv_k_tr_te), linv_k_tr_te\n )\n jitter_init = anp.ones((1,)) * (1e-5)\n sys_mat = AddJitterOp(\n flatten_and_concat(posterior_cov, jitter_init),\n initial_jitter_factor=NOISE_VARIANCE_LOWER_BOUND,\n )\n lfact = cholesky_factorization(sys_mat)\n # Draw samples\n # posterior_mean.shape = (n_test, m), where m is number of cols of pred_mat\n # Reshape to (n_test, m, 1)\n n_test = getval(posterior_mean.shape)[0]\n posterior_mean = anp.expand_dims(posterior_mean, axis=-1)\n n01_vecs = [\n random_state.normal(size=getval(posterior_mean.shape))\n for _ in range(num_samples)\n ]\n n01_mat = anp.reshape(anp.concatenate(n01_vecs, axis=-1), (n_test, -1))\n samples = anp.reshape(anp.dot(lfact, n01_mat), (n_test, -1, num_samples))\n samples = samples + posterior_mean\n\n if samples.shape[1] == 1:\n samples = anp.reshape(samples, (n_test, -1)) # (n_test, num_samples)\n\n return samples\n\n\ndef _compute_lvec(features, chol_fact, kernel, covariance_scale, feature):\n kvec = anp.reshape(kernel(features, feature), (-1, 1)) * covariance_scale\n return anp.reshape(aspl.solve_triangular(chol_fact, kvec, lower=True), (1, -1))\n\n\ndef cholesky_update(\n features,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n chol_fact,\n pred_mat,\n noise_variance,\n feature,\n target,\n lvec=None,\n):\n \"\"\"\n Incremental update of posterior state (Cholesky factor, prediction\n matrix), given one datapoint (feature, target).\n\n Note: noise_variance is the initial value, before any jitter may have\n been added to compute chol_fact. Here, we add the minimum amount of\n jitter such that the new diagonal entry of the Cholesky factor is\n >= MIN_CHOLESKY_DIAGONAL_VALUE. This means that if cholesky_update is\n used several times, we in fact add a diagonal (but not spherical)\n jitter matrix.\n\n :param features: Shape (n, d)\n :param chol_fact: Shape (n, n)\n :param pred_mat: Shape (n, m)\n :param mean:\n :param kernel:\n :param noise_variance:\n :param feature: Shape (1, d)\n :param target: Shape (1, m)\n :param lvec: If given, this is the new column of the Cholesky factor\n except the diagonal entry. 
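(Shape (1, n), as returned by _compute_lvec.) 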
If not, this is computed here\n :return: chol_fact_new (n+1, n+1), pred_mat_new (n+1, m)\n \"\"\"\n _kernel, covariance_scale = _extract_kernel_and_scale(kernel)\n if lvec is None:\n lvec = _compute_lvec(features, chol_fact, _kernel, covariance_scale, feature)\n kscal = anp.reshape(_kernel.diagonal(feature) * covariance_scale, (1,))\n noise_variance = anp.reshape(noise_variance, (1,))\n lsqscal = anp.maximum(\n kscal + noise_variance - anp.sum(anp.square(lvec)),\n MIN_CHOLESKY_DIAGONAL_VALUE**2,\n )\n lscal = anp.reshape(anp.sqrt(lsqscal), (1, 1))\n mscal = anp.reshape(mean(feature), (1, 1))\n pvec = target - mscal\n pvec = anp.divide(pvec - anp.matmul(lvec, pred_mat), lscal)\n pred_mat_new = anp.concatenate([pred_mat, pvec], axis=0)\n zerovec = anp.zeros((getval(lvec.size), 1))\n chol_fact_new = anp.concatenate(\n [\n anp.concatenate([chol_fact, lvec], axis=0),\n anp.concatenate([zerovec, lscal], axis=0),\n ],\n axis=1,\n )\n\n return chol_fact_new, pred_mat_new\n\n\n# Specialized routine, used in IncrementalUpdateGPPosteriorState.\n# The idea is to share the computation of lvec between sampling a new target\n# value and incremental Cholesky update.\n# If mean_impute_mask is given, it is a boolean vector of size m (number\n# columns of pred_mat). Columns j of target, where mean_impute_ mask[j] is\n# true, are set to the predictive mean (instead of being sampled).\ndef sample_and_cholesky_update(\n features,\n mean: MeanFunction,\n kernel: KernelFunctionWithCovarianceScale,\n chol_fact,\n pred_mat,\n noise_variance,\n feature,\n random_state: RandomState,\n mean_impute_mask=None,\n):\n _kernel, covariance_scale = _extract_kernel_and_scale(kernel)\n # Draw sample target. Also, lvec is reused below\n lvec = _compute_lvec(features, chol_fact, _kernel, covariance_scale, feature)\n pred_mean = anp.dot(lvec, pred_mat) + anp.reshape(mean(feature), (1, 1))\n # Note: We do not add noise_variance to the predictive variance\n pred_std = anp.reshape(\n anp.sqrt(\n anp.maximum(\n _kernel.diagonal(feature) * covariance_scale\n - anp.sum(anp.square(lvec)),\n MIN_POSTERIOR_VARIANCE,\n )\n ),\n (1, 1),\n )\n n01mat = random_state.normal(size=getval(pred_mean.shape))\n if mean_impute_mask is not None:\n assert len(mean_impute_mask) == pred_mat.shape[1]\n n01mat[0, mean_impute_mask] = 0\n target = pred_mean + anp.multiply(n01mat, pred_std)\n chol_fact_new, pred_mat_new = cholesky_update(\n features=features,\n mean=mean,\n kernel=kernel,\n chol_fact=chol_fact,\n pred_mat=pred_mat,\n noise_variance=noise_variance,\n feature=feature,\n target=target,\n lvec=lvec,\n )\n features_new = anp.concatenate([features, feature], axis=0)\n\n return chol_fact_new, pred_mat_new, features_new, target\n\n\ndef negative_log_marginal_likelihood(chol_fact, pred_mat):\n \"\"\"\n The marginal likelihood is only computed if pred_mat has a single column\n (not for fantasy sample case).\n \"\"\"\n assert (\n pred_mat.ndim == 1 or pred_mat.shape[1] == 1\n ), \"Multiple target vectors are not supported\"\n sqnorm_predmat = anp.sum(anp.square(pred_mat))\n logdet_cholfact = 2.0 * anp.sum(anp.log(anp.abs(anp.diag(chol_fact))))\n n_samples = getval(pred_mat.size)\n part1 = 0.5 * (n_samples * anp.log(2 * anp.pi) + logdet_cholfact)\n part2 = 0.5 * sqnorm_predmat\n return part1 + 
part2\n","repo_name":"awslabs/syne-tune","sub_path":"syne_tune/optimizer/schedulers/searchers/bayesopt/gpautograd/posterior_utils.py","file_name":"posterior_utils.py","file_ext":"py","file_size_in_byte":12663,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"10768087829","text":"# SPDX-License-Identifier: GPL-3.0-or-later\nimport json\nimport os\nfrom unittest import mock\n\nimport pytest\n\nfrom cachito.common.paths import RequestBundleDir as BaseRequestBundleDir\nfrom cachito.errors import FileAccessError, InvalidRepoStructure\nfrom cachito.workers.paths import RequestBundleDir\nfrom cachito.workers.tasks import npm\n\n\ndef test_verify_npm_files(tmpdir):\n app_dir = tmpdir.mkdir(\"temp\").mkdir(\"1\").mkdir(\"app\")\n app_dir.join(\"package.json\").write(b\"{}\")\n app_dir.join(\"package-lock.json\").write(b\"{}\")\n bundle_dir = BaseRequestBundleDir(1, str(tmpdir))\n\n npm._verify_npm_files(bundle_dir, [\".\"])\n\n\ndef test_verify_npm_files_no_lock_file(tmpdir):\n app_dir = tmpdir.mkdir(\"temp\").mkdir(\"1\").mkdir(\"app\").mkdir(\"client\")\n app_dir.join(\"package.json\").write(b\"{}\")\n bundle_dir = BaseRequestBundleDir(1, str(tmpdir))\n\n expected = (\n \"The client/npm-shrinkwrap.json or client/package-lock.json file must be present for the \"\n \"npm package manager\"\n )\n with pytest.raises(InvalidRepoStructure, match=expected):\n npm._verify_npm_files(bundle_dir, [\"client\"])\n\n\ndef test_verify_npm_files_no_package_json(tmpdir):\n app_dir = tmpdir.mkdir(\"temp\").mkdir(\"1\").mkdir(\"app\").mkdir(\"client\")\n app_dir.join(\"package-lock.json\").write(b\"{}\")\n bundle_dir = BaseRequestBundleDir(1, str(tmpdir))\n\n expected = \"The client/package.json file must be present for the npm package manager\"\n with pytest.raises(InvalidRepoStructure, match=expected):\n npm._verify_npm_files(bundle_dir, [\"client\"])\n\n\ndef test_verify_npm_files_node_modules(tmpdir):\n app_dir = tmpdir.mkdir(\"temp\").mkdir(\"1\").mkdir(\"app\").mkdir(\"client\")\n app_dir.join(\"package.json\").write(b\"{}\")\n app_dir.join(\"package-lock.json\").write(b\"{}\")\n app_dir.mkdir(\"node_modules\")\n bundle_dir = BaseRequestBundleDir(1, str(tmpdir))\n\n expected = \"The client/node_modules directory cannot be present in the source repository\"\n with pytest.raises(InvalidRepoStructure, match=expected):\n npm._verify_npm_files(bundle_dir, [\"client\"])\n\n\n@mock.patch(\"cachito.workers.tasks.npm.nexus.execute_script\")\ndef test_cleanup_npm_request(mock_exec_script):\n npm.cleanup_npm_request(3)\n\n expected_payload = {\"repository_name\": \"cachito-npm-3\", \"username\": \"cachito-npm-3\"}\n mock_exec_script.assert_called_once_with(\"js_cleanup\", expected_payload)\n\n\n# The package.json and package-lock.json mock values are not actually valid,\n# they just need to be valid JSON\n@pytest.mark.parametrize(\"package_json\", (None, {\"name\": \"han-solo\"}))\n@pytest.mark.parametrize(\"lock_file\", (None, {\"dependencies\": []}))\n@pytest.mark.parametrize(\"ca_file\", (None, \"some CA file contents\"))\n@pytest.mark.parametrize(\n \"package_subpath, subpath_as_path_component, reverse_path_component\",\n [(None, \"\", \"\"), (\".\", \"\", \"\"), (\"some/path\", \"some/path/\", 
\"../../\")],\n)\n@mock.patch(\"cachito.workers.paths.get_worker_config\")\n@mock.patch(\"cachito.workers.tasks.npm._verify_npm_files\")\n@mock.patch(\"cachito.workers.tasks.npm.set_request_state\")\n@mock.patch(\"cachito.workers.tasks.npm.get_request\")\n@mock.patch(\"cachito.workers.tasks.npm.prepare_nexus_for_js_request\")\n@mock.patch(\"cachito.workers.tasks.npm.resolve_npm\")\n@mock.patch(\"cachito.workers.tasks.npm.finalize_nexus_for_js_request\")\n@mock.patch(\"cachito.workers.tasks.npm.nexus.get_ca_cert\")\n@mock.patch(\"cachito.workers.tasks.npm.generate_npmrc_content\")\n@mock.patch(\"cachito.workers.tasks.npm.update_request_with_config_files\")\n@mock.patch(\"cachito.workers.tasks.npm.update_request_env_vars\")\ndef test_fetch_npm_source(\n mock_update_request_env_vars,\n mock_urwcf,\n mock_gnc,\n mock_gcc,\n mock_fnfjr,\n mock_rn,\n mock_pnfjr,\n mock_get_request,\n mock_srs,\n mock_vnf,\n get_worker_config,\n ca_file,\n lock_file,\n package_json,\n package_subpath,\n subpath_as_path_component,\n reverse_path_component,\n task_passes_state_check,\n tmpdir,\n):\n get_worker_config.return_value = mock.Mock(cachito_bundles_dir=tmpdir)\n request_id = 6\n request = {\"id\": request_id}\n mock_get_request.return_value = request\n package = {\"name\": \"han-solo\", \"type\": \"npm\", \"version\": \"5.0.0\"}\n deps = [\n {\"dev\": False, \"name\": \"@angular/animations\", \"type\": \"npm\", \"version\": \"8.2.14\"},\n {\"dev\": False, \"name\": \"tslib\", \"type\": \"npm\", \"version\": \"1.11.1\"},\n ]\n mock_rn.return_value = {\n \"deps\": deps,\n \"downloaded_deps\": {\"@angular/animations@8.2.14\", \"tslib@1.11.1\"},\n \"lock_file\": lock_file,\n \"lock_file_name\": \"package-lock.json\",\n \"package\": package,\n \"package.json\": package_json,\n }\n username = f\"cachito-npm-{request_id}\"\n password = \"asjfhjsdfkwe\"\n mock_fnfjr.return_value = password\n mock_gcc.return_value = ca_file\n mock_gnc.return_value = \"some npmrc\"\n\n if package_subpath:\n package_configs = [{\"path\": package_subpath}]\n else:\n package_configs = None\n\n npm.fetch_npm_source(request_id, package_configs=package_configs)\n\n bundle_dir = RequestBundleDir(request_id)\n mock_vnf.assert_called_once_with(bundle_dir, [package_subpath or \".\"])\n assert mock_srs.call_count == 3\n assert mock_get_request.called_once_with(request_id)\n mock_pnfjr.assert_called_once_with(\"cachito-npm-6\")\n lock_file_path = str(bundle_dir.app_subpath(package_subpath or \".\").source_dir)\n mock_rn.assert_called_once_with(lock_file_path, request, skip_deps=set())\n if ca_file:\n mock_gnc.assert_called_once_with(\n \"http://nexus:8081/repository/cachito-npm-6/\",\n username,\n password,\n custom_ca_path=f\"{reverse_path_component}registry-ca.pem\",\n )\n else:\n mock_gnc.assert_called_once_with(\n \"http://nexus:8081/repository/cachito-npm-6/\", username, password, custom_ca_path=None\n )\n\n expected_config_files = []\n if package_json:\n expected_config_files.append(\n {\n \"content\": \"ewogICJuYW1lIjogImhhbi1zb2xvIgp9\",\n \"path\": f\"app/{subpath_as_path_component}package.json\",\n \"type\": \"base64\",\n }\n )\n\n if lock_file:\n expected_config_files.append(\n {\n \"content\": \"ewogICJkZXBlbmRlbmNpZXMiOiBbXQp9\",\n \"path\": f\"app/{subpath_as_path_component}package-lock.json\",\n \"type\": \"base64\",\n }\n )\n\n if ca_file:\n expected_config_files.append(\n {\n \"content\": \"c29tZSBDQSBmaWxlIGNvbnRlbnRz\",\n \"path\": \"app/registry-ca.pem\",\n \"type\": \"base64\",\n }\n )\n\n expected_config_files.append(\n 
{\n \"content\": \"c29tZSBucG1yYw==\",\n \"path\": f\"app/{subpath_as_path_component}.npmrc\",\n \"type\": \"base64\",\n }\n )\n mock_urwcf.assert_called_once_with(request_id, expected_config_files)\n\n mock_update_request_env_vars.assert_called_once_with(\n request_id,\n {\n \"CHROMEDRIVER_SKIP_DOWNLOAD\": {\"value\": \"true\", \"kind\": \"literal\"},\n \"SKIP_SASS_BINARY_DOWNLOAD_FOR_CI\": {\"value\": \"true\", \"kind\": \"literal\"},\n },\n )\n\n pkg_info = package.copy()\n pkg_info[\"dependencies\"] = deps\n if package_subpath and package_subpath != os.curdir:\n pkg_info[\"path\"] = package_subpath\n assert {\"packages\": [pkg_info]} == json.loads(bundle_dir.npm_packages_data.read_bytes())\n\n\n@mock.patch(\"cachito.workers.tasks.npm.RequestBundleDir\")\n@mock.patch(\"cachito.workers.tasks.npm._verify_npm_files\")\n@mock.patch(\"cachito.workers.tasks.npm.set_request_state\")\n@mock.patch(\"cachito.workers.tasks.npm.get_request\")\n@mock.patch(\"cachito.workers.tasks.npm.prepare_nexus_for_js_request\")\n@mock.patch(\"cachito.workers.tasks.npm.resolve_npm\")\n@mock.patch(\"cachito.workers.tasks.npm.finalize_nexus_for_js_request\")\n@mock.patch(\"cachito.workers.tasks.npm.nexus.get_ca_cert\")\n@mock.patch(\"cachito.workers.tasks.npm.generate_npmrc_content\")\n@mock.patch(\"cachito.workers.tasks.npm.update_request_with_config_files\")\n@mock.patch(\"cachito.workers.tasks.npm.update_request_env_vars\")\ndef test_fetch_npm_source_multiple_paths(\n mock_update_request_env_vars,\n mock_urwcf,\n mock_gnc,\n mock_gcc,\n mock_fnfjr,\n mock_rn,\n mock_pnfjr,\n mock_get_request,\n mock_srs,\n mock_vnf,\n mock_rbd,\n task_passes_state_check,\n):\n request_id = 6\n request = {\"id\": request_id}\n mock_get_request.return_value = request\n package = {\"name\": \"han-solo\", \"type\": \"npm\", \"version\": \"5.0.0\"}\n package_two = {\"name\": \"han-solo\", \"type\": \"npm\", \"version\": \"6.0.0\"}\n deps = [\n {\"dev\": False, \"name\": \"@angular/animations\", \"type\": \"npm\", \"version\": \"8.2.14\"},\n {\"dev\": False, \"name\": \"tslib\", \"type\": \"npm\", \"version\": \"1.11.1\"},\n ]\n # The package.json and package-lock.json mock values are not actually valid,\n # they just need to be valid JSON\n mock_rn.side_effect = [\n {\n \"deps\": deps,\n \"downloaded_deps\": {\"@angular/animations@8.2.14\", \"tslib@1.11.1\"},\n \"lock_file\": {\"dependencies\": []},\n \"lock_file_name\": \"package-lock.json\",\n \"package\": package,\n \"package.json\": {\"name\": \"han-solo\", \"version\": \"5.0.0\"},\n },\n {\n \"deps\": deps,\n \"downloaded_deps\": {\"@angular/animations@8.2.14\", \"tslib@1.11.1\"},\n \"lock_file\": {\"dependencies\": []},\n \"lock_file_name\": \"package-lock.json\",\n \"package\": package_two,\n \"package.json\": {\"name\": \"han-solo\", \"version\": \"6.0.0\"},\n },\n ]\n ca_file = \"some CA file contents\"\n mock_gcc.return_value = ca_file\n mock_gnc.return_value = \"some npmrc\"\n\n npm.fetch_npm_source(request_id, [{\"path\": \"old-client\"}, {\"path\": \"new-client/client\"}])\n\n mock_vnf.assert_called_once_with(mock_rbd.return_value, [\"old-client\", \"new-client/client\"])\n mock_pnfjr.assert_called_once()\n mock_rn.assert_has_calls(\n (\n mock.call(\n str(mock_rbd().app_subpath(\"old-client\").source_dir), request, skip_deps=set()\n ),\n mock.call(\n str(mock_rbd().app_subpath(\"new-client/client\").source_dir),\n request,\n skip_deps={\"@angular/animations@8.2.14\", \"tslib@1.11.1\"},\n ),\n )\n )\n mock_gnc.assert_has_calls(\n (\n mock.call(mock.ANY, mock.ANY, 
mock.ANY, custom_ca_path=\"../registry-ca.pem\"),\n mock.call(mock.ANY, mock.ANY, mock.ANY, custom_ca_path=\"../../registry-ca.pem\"),\n )\n )\n\n expected_config_files = [\n {\n \"content\": \"ewogICJuYW1lIjogImhhbi1zb2xvIiwKICAidmVyc2lvbiI6ICI1LjAuMCIKfQ==\",\n \"path\": \"app/old-client/package.json\",\n \"type\": \"base64\",\n },\n {\n \"content\": \"ewogICJkZXBlbmRlbmNpZXMiOiBbXQp9\",\n \"path\": \"app/old-client/package-lock.json\",\n \"type\": \"base64\",\n },\n {\n \"content\": \"ewogICJuYW1lIjogImhhbi1zb2xvIiwKICAidmVyc2lvbiI6ICI2LjAuMCIKfQ==\",\n \"path\": \"app/new-client/client/package.json\",\n \"type\": \"base64\",\n },\n {\n \"content\": \"ewogICJkZXBlbmRlbmNpZXMiOiBbXQp9\",\n \"path\": \"app/new-client/client/package-lock.json\",\n \"type\": \"base64\",\n },\n {\n \"content\": \"c29tZSBDQSBmaWxlIGNvbnRlbnRz\",\n \"path\": \"app/registry-ca.pem\",\n \"type\": \"base64\",\n },\n {\"content\": \"c29tZSBucG1yYw==\", \"path\": \"app/old-client/.npmrc\", \"type\": \"base64\"},\n {\"content\": \"c29tZSBucG1yYw==\", \"path\": \"app/new-client/client/.npmrc\", \"type\": \"base64\"},\n ]\n\n mock_urwcf.assert_called_once_with(request_id, expected_config_files)\n\n mock_update_request_env_vars.assert_called_once_with(\n request_id,\n {\n \"CHROMEDRIVER_SKIP_DOWNLOAD\": {\"kind\": \"literal\", \"value\": \"true\"},\n \"SKIP_SASS_BINARY_DOWNLOAD_FOR_CI\": {\"kind\": \"literal\", \"value\": \"true\"},\n },\n )\n\n\n@mock.patch(\"cachito.workers.tasks.npm.RequestBundleDir\")\n@mock.patch(\"cachito.workers.tasks.npm._verify_npm_files\")\n@mock.patch(\"cachito.workers.tasks.npm.set_request_state\")\n@mock.patch(\"cachito.workers.tasks.npm.get_request\")\n@mock.patch(\"cachito.workers.tasks.npm.prepare_nexus_for_js_request\")\n@mock.patch(\"cachito.workers.tasks.npm.resolve_npm\")\ndef test_fetch_npm_source_resolve_fails(\n mock_rn, mock_pnfjr, mock_get_request, mock_srs, mock_vnf, mock_rbd, task_passes_state_check\n):\n request_id = 6\n request = {\"id\": request_id}\n mock_get_request.return_value = request\n mock_rn.side_effect = FileAccessError(\"Some error\")\n\n with pytest.raises(FileAccessError, match=\"Some error\"):\n npm.fetch_npm_source(request_id)\n\n assert mock_srs.call_count == 2\n mock_get_request.assert_called_once_with(request_id)\n mock_pnfjr.assert_called_once_with(\"cachito-npm-6\")\n\n\n@mock.patch(\"cachito.workers.tasks.npm.nexus.get_ca_cert\")\n@mock.patch(\"cachito.workers.tasks.npm.make_base64_config_file\")\n@mock.patch(\"cachito.workers.tasks.npm.generate_npmrc_content\")\n@pytest.mark.parametrize(\"has_ca_cert\", [True, False])\ndef test_generate_npmrc_config_files(\n mock_generate_content, mock_make_config_file, mock_get_cert, has_ca_cert\n):\n url = \"http://example.org\"\n username = \"nicola\"\n password = \"tesla\"\n subpaths = [\".\", \"foo\", \"foo/bar\"]\n\n npmrc_contents = [mock.Mock(), mock.Mock(), mock.Mock()]\n mock_generate_content.side_effect = npmrc_contents\n\n expected_configs = [mock.Mock(), mock.Mock(), mock.Mock()]\n expected_make_cfg_calls = [\n mock.call(npmrc_contents[0], \"app/.npmrc\"),\n mock.call(npmrc_contents[1], \"app/foo/.npmrc\"),\n mock.call(npmrc_contents[2], \"app/foo/bar/.npmrc\"),\n ]\n\n if has_ca_cert:\n mock_get_cert.return_value = \"some CA cert\"\n expected_ca_pem = mock.Mock()\n expected_configs.insert(0, expected_ca_pem)\n expected_make_cfg_calls.insert(\n 0, mock.call(mock_get_cert.return_value, \"app/registry-ca.pem\")\n )\n expected_content_calls = [\n mock.call(url, username, password, 
custom_ca_path=\"registry-ca.pem\"),\n mock.call(url, username, password, custom_ca_path=\"../registry-ca.pem\"),\n mock.call(url, username, password, custom_ca_path=\"../../registry-ca.pem\"),\n ]\n else:\n mock_get_cert.return_value = None\n expected_content_calls = [\n mock.call(url, username, password, custom_ca_path=None),\n mock.call(url, username, password, custom_ca_path=None),\n mock.call(url, username, password, custom_ca_path=None),\n ]\n\n mock_make_config_file.side_effect = expected_configs\n\n rv = npm.generate_npmrc_config_files(url, username, password, subpaths)\n assert rv == expected_configs\n\n mock_get_cert.assert_called_once()\n mock_generate_content.assert_has_calls(expected_content_calls)\n mock_make_config_file.assert_has_calls(expected_make_cfg_calls)\n","repo_name":"containerbuildsystem/cachito","sub_path":"tests/test_workers/test_tasks/test_npm.py","file_name":"test_npm.py","file_ext":"py","file_size_in_byte":15122,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"67"} +{"seq_id":"25173191088","text":"from .base import ResourceRecord\nfrom ..domains.domain import Domain\n\n\nclass MX(ResourceRecord):\n class _Binary(ResourceRecord._Binary):\n\n @property\n def full(self):\n result = bin(int(self.resource_record.preference))[2:].zfill(16)\n result += Domain.sub_encode(self.resource_record.exchange.label)\n return result\n\n id = 15\n repr = ['preference', 'exchange']\n\n @classmethod\n def parse_bytes(cls, answer, read_len):\n instance = cls(answer)\n instance.preference = answer.message.stream.read(f'uint:{2 * 8}')\n instance.exchange = Domain.decode(answer.message)\n return instance\n\n @classmethod\n def parse_dict(cls, answer, data):\n instance = cls(answer)\n instance.preference = data.get('preference')\n instance.exchange = Domain(data.get('exchange'), None)\n return instance\n\n @property\n def __dict__(self):\n return {'preference': self.preference,\n 'exchange': self.exchange.label}\n\n @classmethod\n def from_json(cls, answer, data):\n instance = cls(answer)\n instance.preference = data.get('preference')\n instance.exchange = Domain(data.get('exchange'), None)\n return instance\n","repo_name":"Yurzs/triton","sub_path":"triton/dns/message/rdata/mx.py","file_name":"mx.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17456355811","text":"import unittest\nimport os\nimport sys\nimport numpy as np\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../domopt/')))\n\nfrom surrogates import PolySurrogate\n\nclass TestSurrogate(unittest.TestCase):\n\n def testPolySurrogate(self):\n\n def fun(x):\n return x**3 + x**2\n\n poly1 = PolySurrogate(dimensions=1, order=3, poly_type='hermite')\n u = poly1.getQuadraturePoints()\n poly1.train([fun(ui) for ui in u])\n\n self.assertAlmostEqual(poly1.predict(1), fun(1))\n\n poly2 = PolySurrogate(dimensions=1, order=3, poly_type=['legendre'])\n # exercise the second surrogate (poly1 was already trained above;\n # poly2 was previously created but never used)\n poly2.train(fun)\n\n self.assertAlmostEqual(poly2.predict(2), fun(2))\n","repo_name":"lwcook/dominance-optimizers","sub_path":"tests/test_surrogate.py","file_name":"test_surrogate.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"70305119574","text":"def good_vs_evil(good, evil):\n\n vim_good = [1, 2, 3, 3, 4, 10]\n vim_evil = [1, 2, 2, 2, 3, 5, 10]\n\n forces_good = sum([x*int(y) for x, y in zip(vim_good, 
good.split())])\n forces_evil = sum([x*int(y) for x, y in zip(vim_evil, evil.split())])\n\n if forces_good > forces_evil:\n return \"Battle Result: Good triumphs over Evil\"\n elif forces_good < forces_evil:\n return \"Battle Result: Evil eradicates all trace of Good\"\n else:\n return \"Battle Result: No victor on this battle field\"\n","repo_name":"Aleksey-Lyap/codewars_tasks","sub_path":"good_vs_evil.py","file_name":"good_vs_evil.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5299382462","text":"import math\nimport random\n\nimport numpy as np\nfrom pandas import DataFrame\n\nfrom lifelike.constants import CHROMOSOME_LEN\nfrom maze.rulestring import Rulestring\n\nEVAL_ITER = 5\n\nclass Population:\n def __init__(self, pop_size, path_len_bias, elitism, mutation):\n self.pop_size = pop_size\n self.path_len_bias = path_len_bias\n self.elitism = elitism\n self.mutation = mutation\n self.elite_n = math.floor(elitism * pop_size)\n self.child_n = pop_size - self.elite_n\n self.inds = np.array([Rulestring() for _ in range(self.elite_n)])\n\n def iterate(self):\n # Selection\n mean_dead_ends, mean_path_lens = self.select()\n\n # Crossover\n self.crossover()\n\n # Mutate\n self.mutate()\n # best_diversity = 0\n # best_mutation = self.inds.copy()\n # for _ in range(self.novelty):\n # self.mutate(self.elite_n // 5)\n # d = self.diversity()\n # if d > best_diversity:\n # best_diversity = d\n # best_mutation = self.inds.copy()\n # self.inds = best_mutation\n\n return mean_dead_ends, mean_path_lens\n\n def select(self):\n scores, mean_dead_ends, mean_path_lens = self.evaluate()\n # sorted_scores = np.sort(scores)[::-1]\n self.inds = self.inds[(-scores).argsort()]\n self.inds = self.inds[:self.elite_n]\n # self.inds = random.choices(self.inds, scores, k=self.elite_n)\n return mean_dead_ends, mean_path_lens\n # return mean_dead_ends, mean_path_lens, sorted_scores[:self.elite_n]\n\n def crossover(self):\n children = []\n for _ in range(self.child_n // 2):\n cpoint = random.randint(1, CHROMOSOME_LEN - 1)\n parents = np.random.choice(self.inds, 2, replace=False)\n a, b = parents[0].get_rstring(), parents[1].get_rstring()\n left_a, right_a = a[:cpoint], a[cpoint:]\n left_b, right_b = b[:cpoint], b[cpoint:]\n child1 = Rulestring(int(left_a + right_b, 2))\n child2 = Rulestring(int(left_b + right_a, 2))\n children.append(child1)\n children.append(child2)\n self.inds = np.append(self.inds, np.array(children))\n\n\n def mutate(self):\n for ind in self.inds:\n ind.mutate(self.mutation)\n\n def evaluate(self):\n dead_ends = []\n path_lens = []\n for r in self.inds:\n dead_end, path_len, reachable = r.evaluate(n_iters=EVAL_ITER)\n dead_ends.append(dead_end)\n path_lens.append(path_len)\n\n dead_ends = np.array(dead_ends)\n path_lens = np.array(path_lens)\n\n n = len(self.inds)\n dead_ixs = np.argsort(dead_ends)\n path_ixs = np.argsort(path_lens)\n\n dead_ranks = np.empty(n)\n dead_ranks[dead_ixs] = np.linspace(0, 1, num=n)\n path_ranks = np.empty(n)\n path_ranks[path_ixs] = np.linspace(0, 1, num=n)\n # scores1 = ((1 - self.path_len_bias) * dead_ixs) + (self.path_len_bias * path_ixs)\n scores2 = ((1 - self.path_len_bias) * dead_ranks) + (self.path_len_bias * path_ranks)\n # scores1 = np.where(path_lens == 0, 0, scores1)\n scores2 = np.where(path_lens == 0, 0, scores2)\n return scores2, np.mean(dead_ends[dead_ends != 0]), np.mean(path_lens[path_lens != 0])\n\n def diversity_to(self, rstring):\n return 
np.mean([hamming(rstring, i.rstring) for i in self.inds])\n\n def diversity(self):\n return np.mean([hamming(i.rstring, j.rstring) for i in self.inds for j in self.inds])\n\n def __str__(self):\n res = \"[\"\n for i in self.inds:\n res += f\"{i.get_rstring()}, \"\n res += \"]\"\n return res\n\ndef hamming(a, b):\n # Returns hamming distance between a and b\n c = a ^ b\n count = 0\n while c:\n c &= c - 1\n count += 1\n return count\n\n# if __name__ == \"__main__\":\n# pop = Population(100, 0.5, 0.5, 0.05)\n# s1, s2, _, _= pop.evaluate()\n# obj = {k:None for k in range(30)}\n# rel = {k:None for k in range(30)}\n# obj[0] = s1\n# rel[0] = s2\n# for epoch in range(1, 31):\n# print(\"Epoch:\", epoch)\n# _, _, s1, s2 = pop.iterate()\n# obj[epoch] = s1\n# rel[epoch] = s2\n# DataFrame.from_dict(obj).to_csv(\"obj.csv\")\n# DataFrame.from_dict(rel).to_csv(\"rel.csv\")\n\n\n","repo_name":"manuj-mishra/imperial-thesis","sub_path":"src/maze/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33118848525","text":"#format_list_0 = []\n#format_list_1 = []\n#format_list_2 = []\n\ndef default(level = 0):\n return []\n\ndef number_outline(L, plugin = default, prefix = ()):\n #global format_list_0 \n format_list_0 = plugin(0)\n #global format_list_1 \n format_list_1 = plugin(1)\n #global format_list_2 \n format_list_2 = plugin(2)\n if type(L) in {list, tuple}:\n i = 0\n for v in L:\n if type(v) not in {list, tuple}:\n i += 1\n number_outline(v, plugin, prefix + (i,))\n else:\n if format_list_0 == []:\n s = ' ' * 4 * (len(prefix) - 1)\n s += '.'.join(map(str, prefix))\n s += '. ' + L\n print(s)\n elif format_list_0 == ():\n s = ' ' * 4 * (len(prefix) - 1)\n if len(prefix) == 1:\n s += 'Chapter '\n s += '.'.join(map(str, prefix))\n elif len(prefix) == 2:\n s += 'Section '\n s += '.'.join(map(str, prefix))\n elif len(prefix) == 3:\n s += '.'.join(map(str, prefix))\n else:\n s += '.'.join(map(str, prefix))\n s += '. ' + L\n print(s)\n\n else:\n s = ' ' * 4 * (len(prefix) - 1)\n if len(prefix) == 1:\n s += format_list_0[prefix[0] - 1]\n elif len(prefix) == 2:\n s += format_list_1[prefix[1] - 1]\n elif len(prefix) == 3:\n s += format_list_2[prefix[2] - 1]\n else:\n prefix = list(prefix)\n prefix.pop(0)\n prefix.pop(0)\n prefix = tuple(prefix)\n s += '.'.join(map(str, prefix))\n s += '. 
' + L\n print(s)\n \n\ndef my_outline_format_function(level = 0):\n if level == 0:\n return ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X']\n elif level == 1:\n return [chr(i) for i in range(65, 75)]\n else:\n return [str(i) for i in range(1, 11)]\n\ndef my_thesis_format_function(level = 0):\n return ()\n\n\n","repo_name":"Jack24658735/Introduction-to-Programming-in-Python","sub_path":"self_check/HW09/9_bonus_number_outline.py","file_name":"9_bonus_number_outline.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72199791252","text":"from __future__ import print_function\n\nimport datetime\nimport os\nimport select\nimport signal\nimport socket\nimport sys\nimport time\nimport threading\nimport struct\n\nDELAY = 0.5\nTHREADS = []\n\n\ndef log(msg):\n print(datetime.datetime.now().strftime(\"%d-%b-%Y %H:%M:%S.%f \") + msg)\n\n\ndef sigterm(*_):\n log(\"SIGTERM received, shutting down\")\n for thread in THREADS:\n thread.close()\n thread.join()\n os.remove(\"ans.pid\")\n sys.exit(0)\n\n\nclass TCPDelayer(threading.Thread):\n \"\"\"For a given TCP connection conn we open a connection to (ip, port),\n and then we delay each incoming packet by DELAY by putting it in a\n queue.\n In the pipelined test TCP should not be used, but it's here for\n completeness.\n \"\"\"\n\n def __init__(self, conn, ip, port):\n threading.Thread.__init__(self)\n self.conn = conn\n self.cconn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.cconn.connect((ip, port))\n self.queue = []\n self.running = True\n\n def close(self):\n self.running = False\n\n def run(self):\n while self.running:\n curr_timeout = 0.5\n try:\n # indexing an empty list raises IndexError, not StopIteration\n curr_timeout = self.queue[0][0] - time.time()\n except IndexError:\n pass\n # mirror UDPDelayer: only skip select when a packet is already overdue\n if curr_timeout >= 0:\n if curr_timeout == 0:\n curr_timeout = 0.5\n rfds, _, _ = select.select(\n [self.conn, self.cconn], [], [], curr_timeout\n )\n if self.conn in rfds:\n data = self.conn.recv(65535)\n if not data:\n return\n self.queue.append((time.time() + DELAY, data))\n if self.cconn in rfds:\n data = self.cconn.recv(65535)\n # recv returns bytes; `not data == 0` was always true and\n # killed the thread on the first response\n if not data:\n return\n self.conn.send(data)\n try:\n while self.queue[0][0] - time.time() < 0:\n _, data = self.queue.pop(0)\n self.cconn.send(data)\n except IndexError:\n pass\n\n\nclass UDPDelayer(threading.Thread):\n \"\"\"Every incoming UDP packet is put in a queue for DELAY time, then\n it's sent to (ip, port). 
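This emulates a high-latency path to the real server. 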
We remember the query id to send the\n response we get to a proper source, responses are not delayed.\n \"\"\"\n\n def __init__(self, usock, ip, port):\n threading.Thread.__init__(self)\n self.sock = usock\n self.csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.dst = (ip, port)\n self.queue = []\n self.qid_mapping = {}\n self.running = True\n\n def close(self):\n self.running = False\n\n def run(self):\n while self.running:\n curr_timeout = 0.5\n if self.queue:\n curr_timeout = self.queue[0][0] - time.time()\n if curr_timeout >= 0:\n if curr_timeout == 0:\n curr_timeout = 0.5\n rfds, _, _ = select.select(\n [self.sock, self.csock], [], [], curr_timeout\n )\n if self.sock in rfds:\n data, addr = self.sock.recvfrom(65535)\n if not data:\n return\n self.queue.append((time.time() + DELAY, data))\n qid = struct.unpack(\">H\", data[:2])[0]\n log(\"Received a query from %s, queryid %d\" % (str(addr), qid))\n self.qid_mapping[qid] = addr\n if self.csock in rfds:\n data, addr = self.csock.recvfrom(65535)\n if not data:\n return\n qid = struct.unpack(\">H\", data[:2])[0]\n dst = self.qid_mapping.get(qid)\n if dst is not None:\n self.sock.sendto(data, dst)\n log(\n \"Received a response from %s, queryid %d, sending to %s\"\n % (str(addr), qid, str(dst))\n )\n while self.queue and self.queue[0][0] - time.time() < 0:\n _, data = self.queue.pop(0)\n qid = struct.unpack(\">H\", data[:2])[0]\n log(\"Sending a query to %s, queryid %d\" % (str(self.dst), qid))\n self.csock.sendto(data, self.dst)\n\n\ndef main():\n signal.signal(signal.SIGTERM, sigterm)\n signal.signal(signal.SIGINT, sigterm)\n\n with open(\"ans.pid\", \"w\") as pidfile:\n print(os.getpid(), file=pidfile)\n\n listenip = \"10.53.0.5\"\n serverip = \"10.53.0.2\"\n\n try:\n port = int(os.environ[\"PORT\"])\n except KeyError:\n port = 5300\n\n log(\"Listening on %s:%d\" % (listenip, port))\n\n usock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n usock.bind((listenip, port))\n thread = UDPDelayer(usock, serverip, port)\n thread.start()\n THREADS.append(thread)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((listenip, port))\n sock.listen(1)\n sock.settimeout(1)\n\n while True:\n try:\n (clientsock, _) = sock.accept()\n log(\"Accepted connection from %s\" % clientsock)\n thread = TCPDelayer(clientsock, serverip, port)\n thread.start()\n THREADS.append(thread)\n except socket.timeout:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NetBSD/src","sub_path":"external/mpl/bind/dist/bin/tests/system/pipelined/ans5/ans.py","file_name":"ans.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","stars":589,"dataset":"github-code","pt":"67"} +{"seq_id":"23775421502","text":"from app.db import db\nfrom app.controller.ctrl_base import CtrlBase\nfrom app.db.feature_func import Feature\n\n\nclass CtrlFeatureBase(CtrlBase):\n def __init__(self):\n CtrlBase.__init__(self)\n self.sub_feature_list = []\n\n def get_feature(self, feature_id, ver):\n \"\"\"Fetch a single feature by feature_id and ver.\"\"\"\n q = (db.session.query(Feature)\n .filter(Feature.feature_id == feature_id)\n .filter(Feature.ver == ver).first())\n return q\n\n def add_feature(self, feature_dict):\n \"\"\"Add a single feature.\"\"\"\n feature_id = self.get_common_key_id(type=\"Feature\")\n feature_dict[Feature.feature_id.name] = feature_id\n new_feature = Feature(**feature_dict)\n 
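# stage the new record; committing the session is presumably left to the caller\n 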
db.session.add(new_feature)\n return feature_id\n\n","repo_name":"clearloveyin/Cararote","sub_path":"spider/spider_server/app/controller/ctrl_feature_base.py","file_name":"ctrl_feature_base.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39333047259","text":"import os\nimport requests\nimport pandas as pd\n\ndf = pd.read_csv('pokemon.csv')\n\nif not os.path.exists('sprites'):\n os.mkdir('sprites')\n\ndef fix_trunc_zeros(val):\n # Convert the value to a string\n val_str = str(val)\n \n # Check the length of the string\n if len(val_str) < 3:\n # Add leading zeros to the string until it reaches three characters\n val_str = val_str.zfill(3)\n \n # Return the corrected value\n return val_str\n\nfor poke in df['#']:\n\n url = r'https://www.pokexperto.net/3ds/sprites/icon/{}.png'.format(fix_trunc_zeros(poke))\n response = requests.get(url)\n # Save the image\n file = open('sprites/poke_{}.png'.format(poke), \"wb\")\n file.write(response.content)\n file.close()","repo_name":"AlbertGallegoJimenez/Kaggle-Projects","sub_path":"1_Pokemon/script_scraping_sprites.py","file_name":"script_scraping_sprites.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10220600126","text":"import requests\nfrom bs4 import BeautifulSoup\nimport inquire_all_route as iar\nimport bus_tools as bt\n\n# Chuncheon city code: 32010\n\nurl = 'http://openapi.tago.go.kr/openapi/service/BusLcInfoInqireService/getRouteAcctoBusLcList'\np_key = iar.init_bus.p_key\np_city = iar.init_bus.p_city\n\ndef get_bus_location (route_id):\n\n #print(route_id)\n res = requests.get(url+'?'+p_key+'&'+p_city+'&'+'routeId='+route_id)\n res_parse = BeautifulSoup(res.text,\"html.parser\")\n ret = []\n for key,val in dict(zip(bt.remove_tags(res_parse.find_all(\"nodenm\")),bt.remove_tags(res_parse.find_all(\"vehicleno\")))).items():\n ret.append(key + \" [\" + val + \"]\" )\n return ret\n \ndef search_bus (entered): \n ids=dict(filter(lambda item: entered in item[0], iar.bus_dict.items()))\n # ids = [key:val for key, val in iar.bus_dict.items() if entered in key] \n res=''\n #print(ids)\n for key,val in ids.items():\n res += \"\\n\" + key + \"\\n- \"\n loc = get_bus_location(val)\n if loc:\n res += '\\n- '.join(loc) + \"\\n\"\n else:\n res += \"No bus dispatch information available.\\n\"\n # print (res)\n return res\n \n \n#print(search_bus('서면'))\n \n\n#print (get_bus_location(\"300\"))\n#print (get_bus_location(\"100\"))\n\n\n","repo_name":"PaengE/KakaoChatBot-KangBot","sub_path":"where_is_bus.py","file_name":"where_is_bus.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"1112768610","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom neorl import DE, ES, MFO\nfrom neorl.benchmarks.reactivity_model import ReactivityModel\nfrom neorl.benchmarks.qpower_model import QPowerModel\n\n#import models from other files in repo\nrm = ReactivityModel()\npm = QPowerModel()\n\n#define unscaled objectives\ndef hatfc(x):\n thetas = np.zeros(8)\n thetas[0] = x[0]\n thetas[2:] = x[1:]\n react = rm.eval(thetas)\n return np.abs(react - 0.03308)\n\ndef hatfp(x):\n thetas = np.zeros(8)\n thetas[0] = x[0]\n thetas[2:] = x[1:]\n powers = pm.eval(thetas)\n targets = np.zeros(4)+0.25\n return np.abs(powers - targets).sum()\n\ndef hatfd(x):\n return 
np.max(np.abs(x))\n\n#define objective scaling parameters\nfc_max = 0.03308\nfc_min = 0\n\nfp_max = 0.0345\nfp_min = 0\n\nfd_max = np.pi\nfd_min = 0\n\n#define scaling objectives\nfc = lambda x : (hatfc(x) - fc_min)/(fc_max - fc_min)\nfp = lambda x : (hatfp(x) - fp_min)/(fp_max - fp_min)\nfd = lambda x : (hatfd(x) - fd_min)/(fd_max - fd_min)\n\n#define function weights\nwc = 0.5\nwp = 0.4\nwd = 0.1\n\n#define single objective function\nF = lambda x : wc*fc(x) + wp*fp(x) + wd*fd(x)\n\n#define drum rotation bounds\nBOUNDS = {\"x%i\"%i : [\"float\", -1.*np.pi, 1.*np.pi] for i in range(1, 8)}\n\n#run de optimization\nnpop = 20\nF_de = 0.4\nCR = 0.3\nde = DE(mode = \"min\", bounds = BOUNDS, fit = F, npop = npop, F = F_de, CR = CR, seed = 1)\nde_x, de_y, de_hist = de.evolute(100, verbose = True)\n\n#run es optimization\nmu = 25\ncxpb = 0.6\nmutpb = 0.3\nes = ES(mode = \"min\", bounds = BOUNDS, fit = F, lambda_ = 50, mu = mu, cxpb = 0.6,\n mutpb = 0.3, seed = 1)\nes_x, es_y, es_hist = es.evolute(100, verbose = True)\n\n#run mfo optimization\nnmoths = 55\nmfo = MFO(mode = \"min\", bounds = BOUNDS, fit = F, nmoths = nmoths, b = 1, seed = 1)\nmfo_x, mfo_y, mfo_hist = mfo.evolute(100, verbose = True)\n\nplt.plot(de_hist[\"global_fitness\"], label = \"DE\")\nplt.plot(es_hist[\"global_fitness\"], label = \"ES\")\nplt.plot(mfo_hist[\"global_fitness\"], label = \"MFO\")\n\nplt.xlabel(\"Generation\")\nplt.ylabel(\"Fitness\")\nplt.legend()\nplt.show()\n\nprint(\"MFO fc hat\")\nprint(hatfc(mfo_x))\nprint(\"MFO fp hat\")\nprint(hatfp(mfo_x))\nprint(\"MFO fd hat\")\nprint(hatfd(mfo_x))\n","repo_name":"mradaideh/neorl","sub_path":"examples/ex11_microreactor.py","file_name":"ex11_microreactor.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"67"} +{"seq_id":"29383172974","text":"import asyncio\n\nimport pandas as pd\n\nfrom ..data.base import RealTimeProvider\nfrom .base import TradableAsset, USStockMixin\n\n\nclass CSVProvider(RealTimeProvider):\n def __init__(self, asset: TradableAsset, path: str) -> None:\n super().__init__(asset)\n self._path = path\n bars = pd.read_csv(self._path)\n bars[\"start\"] = pd.DatetimeIndex(pd.to_datetime(bars[\"start\"], utc=True))\n self._df = self.asset.localize(bars.set_index(\"start\"))\n self._curr_idx = -1\n self._task: asyncio.Task = None\n\n async def _retrieve(self, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:\n return self._df.iloc[: self._curr_idx + 1].loc[start:end]\n\n async def _subscribe(self) -> None:\n async def start():\n while True:\n self._curr_idx += 1\n df = self._df.iloc[self._curr_idx : self._curr_idx + 1]\n if df.empty:\n break\n self.add(df)\n while self._new_value_event.is_set():\n await asyncio.sleep(0)\n\n self._task = asyncio.create_task(start())\n\n\nclass CSVAsset(TradableAsset):\n def __init__(self, symbol: str, path: str) -> None:\n super().__init__(symbol)\n self._bars = CSVProvider(self, path)\n self._trader = None\n\n @property\n def bars(self) -> CSVProvider:\n return self._bars\n\n\nclass CSVUSStock(CSVAsset, USStockMixin):\n pass\n","repo_name":"dfonnegra/quantrion","sub_path":"quantrion/asset/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14323191835","text":"#!/usr/bin/env python3\n################################################################################\n# This file contains tests for the 
L5_1_exercises.py file. #\n################################################################################\n\nimport sys; sys.path.append('..')\nfrom TestRun import TestRun, Redirect, MultiRedirect\n\nimport os\nif not os.path.exists('test_files'):\n os.mkdir('test_files')\n\nclass L5Tests(TestRun):\n def __init__(self):\n ''' Initialise this test suite. '''\n super().__init__()\n\n # create input file\n self._inputs = ['exit','10','exit'] # immediate exit (on 2, not 0)\n self._inputs += ['Name','10','1','2','3','exit'] # random exit\n self._inputs += ['Name','10','1','2','3','4'] # finish questions\n self._in_file = 'test_files/in.txt'\n with open(self._in_file,'w') as in_file:\n in_file.write('\\n'.join(self._inputs) + '\\n')\n \n # create question file\n self._qs = ['Hmm?', 'And I should care why?', '???', 'No...?']\n self._q_file = 'test_files/qs.txt'\n with open(self._q_file,'w') as q_file:\n q_file.write('\\n'.join(self._qs) + '\\n')\n\n # store file names for logging and printed output\n self._out_file = 'test_files/out.txt'\n self._log_file = 'test_files/log.txt'\n\n # helper methods\n def _run_qs(self, n):\n ''' Runs the question_asker n times with redirected IO. '''\n # redirect standard IO\n IO = MultiRedirect(Redirect(sys.stdin, open(self._in_file)),\n Redirect(sys.stdout, open(self._out_file,'w'), maintain=False))\n\n try:\n for i in range(n):\n question_asker(self._q_file, self._log_file)\n finally:\n IO.close()\n\n def _general_exit(self, n):\n ''' Tests that exiting occurs correctly for n runs of input. '''\n num = [7, 7+13, 7+13+14] # expected number of lines of printing\n self._run_qs(n)\n\n with open(self._out_file) as out:\n num_lines = len(out.readlines())\n assert num_lines == num[n-1], 'question_asker should exit ' +\\\n \"after the first 'exit' command received after the \" +\\\n 'first two questions. (Look at qs.txt for test-cases)'\n\n def _general_logging(self, n, r, e):\n ''' Logging test for n runs, r response offset, and e+2 questions.\n\n A 'question' is counted as one which should validly be logged (not\n one which is responded to with 'exit').\n\n '''\n self._run_qs(n)\n\n with open(self._log_file) as log:\n name_q = log.readline()\n assert name_q == 'What is your name?\\n', 'First question should ' +\\\n \"be 'What is your name?', not '{}'.\".format(name_q.rstrip())\n\n self._response(log.readline(), r)\n \n age_q = log.readline()\n assert age_q == 'How old are you?\\n', 'Second question should ' +\\\n \"be 'How old are you?', not '{}'.\".format(age_q.rstrip()) +\\\n \"\\n('exit' should only work after the first two questions)\"\n\n self._response(log.readline(), r+1)\n\n for i in range(e):\n self._q(log.readline(), r+2+i)\n self._response(log.readline(), r+2+i)\n\n assert log.readline() == '', 'Question and response should not ' +\\\n \"be logged for an 'exit' response.\"\n\n def _response(self, response, n):\n ''' Checks if nth input == response. '''\n assert response == self._inputs[n] + '\\n', 'Log file should log ' +\\\n 'all questions and their responses (before a valid exit).'\n\n def _q(self, question, n):\n ''' Checks if question is expected (from qs.txt). '''\n assert question != '\\n', 'Log file should not contain empty lines.'\n assert question.rstrip() in self._qs, 'Question listed does not ' +\\\n 'match any expected questions ({!r}).'.format(question.rstrip())\n\n @staticmethod\n def line_between(line):\n ''' Checks if line is empty. Raises AssertionError on failure. 
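\n\n line_between(str) -> None\n\n 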
'''\n assert line == '\\n', 'Responses should be on the next line to ' +\\\n 'the question, with an empty line before the next question.'\n\n # test functions\n def test_startup(self):\n ''' Tests the printed output for qs 1 and 2, and the confirmation. '''\n self._run_qs(1)\n\n with open(self._out_file) as out:\n name_q = out.readline()\n assert name_q == 'What is your name?\\n', 'First question should ' +\\\n \"be 'What is your name?', not '{}'.\".format(name_q.rstrip())\n \n self.line_between(out.readline())\n\n age_q = out.readline()\n assert age_q == 'How old are you?\\n', 'Second question should ' +\\\n \"be 'How old are you?', not '{}'.\".format(age_q.rstrip()) +\\\n \"\\n('exit' should only work after the first two questions)\"\n\n self.line_between(out.readline())\n\n confirm = out.readline()\n correct = 'Your name is {} and you are {} years old.\\n'.format(\n self._inputs[0], self._inputs[1])\n assert confirm == correct, 'Confirmation should take the form: ' +\\\n \"{!r}, not {!r}\".format(correct.strip(), confirm.strip())\n\n def test_exit_1(self):\n ''' Tests printed output for the immediate exit case. '''\n self._general_exit(1)\n\n def test_exit_2(self):\n ''' Tests printed output for a general exit case. '''\n self._general_exit(2)\n\n def test_exit_3(self):\n ''' Tests printed output for a completion case (no more questions). '''\n self._general_exit(3)\n\n def test_logging_1(self):\n ''' Tests logging for the immediate exit case. '''\n self._general_logging(1,0,0)\n\n def test_logging_2(self):\n ''' Tests logging for a general exit case. '''\n self._general_logging(2,3,3)\n\n def test_logging_3(self):\n ''' Tests logging for a completion case (no more questions). '''\n self._general_logging(3,9,4)\n \n\n \n\n\n#------------------------------ Example Solutions -----------------------------#\n\nif __name__ == '__main__':\n\n from random import randrange\n\n def question_asker(q_file, log_file):\n ''' Asks questions found in q_file and records answers in log_file.\n\n 'q_file': a filename specifying a file of 1 or more questions, with\n one question per line and no repetitions.\n 'log_file': a filename for where to log asked questions and their\n recorded responses.\n\n Asks for name and age, before printing a formatted string of form:\n 'Your name is __ and you are __ years old.'\n Subsequently, asks questions at random from q_file (with no repeats)\n until no questions remain.\n\n Responding to a question with 'exit' quits the questioning session.\n\n question_asker(str'filename.txt', str'filename.txt') -> None\n \n '''\n questions = ['What is your name?\\n', 'How old are you?\\n']\n with open(q_file) as qs:\n # extract the questions and randomise their order, add to list\n questions += shuffle(qs.readlines())\n\n with open(log_file, 'w') as log:\n responses = []\n for question in questions:\n responses += [input(question)]\n if len(responses) > 2 and responses[-1] == 'exit':\n break\n log.write(question + responses[-1] + '\\n')\n if len(responses) == 2:\n print('\\nYour name is {} and you are {} years old.'.format(\n responses[0], responses[1]))\n \n print()\n\n def shuffle(shuffle_list):\n ''' Returns the inputted list in a random order.\n\n shuffle(list) -> list\n\n '''\n out_list = []\n while len(shuffle_list) > 0:\n out_list += [shuffle_list.pop(randrange(0,len(shuffle_list)))]\n return out_list\n\n Tests = L5Tests()\n 
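# run the whole suite; verbose mode presumably reports each test's result\n 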
Tests.run_tests(verbose=True)\n","repo_name":"ES-Alexander/intro-to-monty","sub_path":"Lessons/L5/L5_2_exercise_checker.py","file_name":"L5_2_exercise_checker.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42174071929","text":"\n\n\nfrom hashlib import new\n\nfrom matplotlib.pyplot import close\nfrom numpy import Infinity\nfrom .graphclasses import Node\nfrom .google_api_and_graph_functions import get_travel_times\nfrom .graphclasses import WeightedEdge\nfrom alive_progress import alive_bar\n\ndef add_hueristics_to_graph(T_graph,start,end):\n for node in T_graph.nodes:\n if node == start:\n continue\n edges = T_graph.get_edges_for_node(node)\n for edge in edges:\n if edge.get_destination() == end:\n node_to_end_edge = edge\n node.add_h_value(node_to_end_edge.get_time())\n \n \n return T_graph\n\ndef return_path(current_node):\n path = []\n current = current_node\n while current is not None:\n #print(current, 'in returnpath function')\n path.append(current)\n current = current.get_parent_node()\n return path[::-1]\n\ndef get_f_value(node):\n return node.get_f_value()\n\ndef get_best_path_a_star(digraph, start, end, path, best_time,best_path,paths):\n \"\"\"\n \n \"\"\"\n open_list = []\n closed_list = []\n open_list.append(start)\n i = 0\n while len(open_list) > 0:\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.get_f_value() < current_node.get_f_value():\n current_node = item\n current_index = index\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n print(current_node, i)\n\n #current_node = sorted(open_list,key=get_f_value)[0]\n # open_list.remove(current_node)\n # closed_list.append(current_node)\n print('openlist', open_list)\n print('current',current_node)\n \n if current_node == end and i == len(digraph.get_nodes()): # and len(return_path(current_node)) == (len(digraph.get_nodes())) :\n print('at end',current_node,end)\n return return_path(current_node),0\n \n children = {}\n for edge in digraph.get_edges_for_node(current_node):\n new_node = edge.get_destination()\n new_node.set_parent_node(current_node)\n children[new_node] = edge.get_time()\n #print(current_node, children)\n for child in children:\n\n if len([closed_child for closed_child in closed_list if closed_child == child]) > 0:\n continue\n\n child.add_g_value(current_node.g_value + children[child])\n \n if len([open_node for open_node in open_list if child.get_name() == open_node.get_name() and child.g_value > open_node.g_value]) > 0:\n continue\n print('child loop',child,child.h_value)\n open_list.append(child)\n i +=1","repo_name":"booker1997/FindPathGMaps","sub_path":"utils/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70208162455","text":"from django.conf.urls.defaults import patterns, url\nfrom rosters import views\n\nurlpatterns = patterns('',\n url(r'^$', views.view_all_rosters),\n url(r'^create/$', views.create_roster),\n url(r'^manage/(?P\\d+)/$', views.manage_roster),\n url(r'^add_player/', views.add_player),\n url(r'^remove_player/', 
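# get_best_path_a_star above re-sorts a plain list on every iteration and
# mutates parent pointers unconditionally; for comparison, a minimal
# heap-based A* sketch (the toy graph and heuristic values are illustrative,
# not taken from the repo):
import heapq

def a_star(graph, h, start, goal):
    """graph: {node: [(neighbor, cost), ...]}; h: admissible heuristic dict."""
    open_heap = [(h[start], 0, start, None)]   # entries are (f, g, node, parent)
    best_g, parents = {start: 0}, {}
    while open_heap:
        f, g, node, parent = heapq.heappop(open_heap)
        if g > best_g.get(node, float("inf")):
            continue                           # stale queue entry, skip it
        parents[node] = parent
        if node == goal:                       # rebuild the path from parents
            path = [node]
            while parents[path[-1]] is not None:
                path.append(parents[path[-1]])
            return path[::-1], g
        for nbr, cost in graph[node]:
            ng = g + cost
            if ng < best_g.get(nbr, float("inf")):
                best_g[nbr] = ng
                heapq.heappush(open_heap, (ng + h[nbr], ng, nbr, node))
    return None, float("inf")

print(a_star({"A": [("B", 1), ("C", 4)], "B": [("C", 1)], "C": []},
             {"A": 2, "B": 1, "C": 0}, "A", "C"))   # (['A', 'B', 'C'], 2)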
views.remove_player)\n)\n","repo_name":"DanielNill/ballz","sub_path":"ballz/rosters/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"26515245614","text":"\"\"\"Base de dados para problemas de química, Gabriel Braun, 2022\n\nEsse módulo implementa uma classe para os tópicos.\n\"\"\"\nimport logging\nfrom datetime import datetime\nfrom multiprocessing import Pool\nfrom pathlib import Path\n\nimport frontmatter\nfrom pydantic import BaseModel\n\nimport braunchem.utils.latex as latex\nimport braunchem.utils.text as text\nfrom braunchem.latex.document import Document\nfrom braunchem.problem import ProblemSet\nfrom braunchem.utils.text import Text\n\nlogger = logging.getLogger(__name__)\n\n\nclass Section(BaseModel):\n \"\"\"Seção.\n\n Atributos:\n id_ (str): Identificador único.\n title (str): Título do tópico.\n content (Text): Conteúdo teórico.\n \"\"\"\n\n id_: str\n title: str\n content: Text\n\n\nclass Topic(BaseModel):\n \"\"\"Tópico.\n\n Atributos:\n id_ (str): Identificador único.\n path (Path): Endereço do arquivo `.md` do tópico\n title (str): Título do tópico.\n author (str): Autor da teoria.\n content (Text): Conteúdo teórico.\n sections (list[str]): Títulos das seções.\n problem_sets (list[ProblemSet]): Listas de problemas.\n \"\"\"\n\n id_: str\n path: Path\n date: datetime\n title: str\n author: str = \"Gabriel Braun\"\n affiliation: str = \"Colégio e Curso Pensi, Coordenação de Química\"\n sections: list[Section] | None\n content: Text | None\n problem_sets: dict = None\n\n def problem_collections(self, problem_db: ProblemSet):\n \"\"\"Retorna uma lista de `ProblemSets` a partir dos ids dos problemas.\"\"\"\n if not self.problem_sets:\n return\n\n for i, (title, problem_ids) in enumerate(self.problem_sets.items()):\n problem_set_id = self.id_ + str(i + 1)\n\n yield problem_db.filter(problem_set_id, title, problem_ids)\n\n def tex_problems(self, problem_db: ProblemSet):\n \"\"\"Retorna os problemas do tópico em LaTeX.\"\"\"\n if not self.problem_sets:\n return \"\"\n\n tex_statements = \"\"\n tex_answers = latex.section(\"Gabarito\", level=0, numbered=False)\n\n previous_len = 1\n for problem_collection in self.problem_collections(problem_db):\n tex_statements = \"\\n\".join(\n [tex_statements, problem_collection.tex_statements()]\n )\n tex_answers = \"\\n\".join(\n [tex_answers, problem_collection.tex_answers(start=previous_len)]\n )\n previous_len += len(problem_collection)\n\n return \"\\n\".join([tex_statements, tex_answers])\n\n def tex_solutions(self, problem_db: ProblemSet):\n \"\"\"Retorna os problemas do tópico em LaTeX.\"\"\"\n if not self.problem_sets:\n return \"\"\n\n tex_solutions = \"\"\n\n previous_len = 1\n for problem_collection in self.problem_collections(problem_db):\n tex_solutions = \"\\n\".join(\n [tex_solutions, problem_collection.tex_solutions()]\n )\n previous_len += len(problem_collection)\n\n return tex_solutions\n\n def tex(self, problem_db: ProblemSet):\n \"\"\"Retorna o conteúdo do tópico em LaTeX.\"\"\"\n if not self.content:\n return self.tex_problems(problem_db)\n\n return self.content.tex + self.tex_problems(problem_db)\n\n def tex_document(self, problem_db: ProblemSet):\n \"\"\"Cria o arquivo `pdf` do tópico.\"\"\"\n return Document(\n id_=self.id_,\n path=self.path.parent,\n title=self.title,\n author=self.author,\n affiliation=self.affiliation,\n template=\"braun, twocolumn=true\",\n toc=True if self.content is 
not None else False,\n contents=self.tex(problem_db),\n )\n\n def tex_solutions_document(self, problem_db: ProblemSet):\n \"\"\"Cria o arquivo `pdf` do tópico.\"\"\"\n return Document(\n id_=self.id_ + \"_gabarito\",\n path=self.path.parent,\n title=\"Gabarito: \" + self.title,\n author=\"Daniel Sahadi, Renan Romariz, e Gabriel Braun\",\n affiliation=self.affiliation,\n template=\"braun, twocolumn=true\",\n toc=False,\n contents=self.tex_solutions(problem_db),\n )\n\n def write_pdf(self, problem_db: ProblemSet, tmp_dir: Path, out_dir: Path):\n \"\"\"Cria o arquivo `.pdf` do tópico.\"\"\"\n self.tex_document(problem_db).write_pdf(\n tmp_dir.joinpath(self.id_),\n out_dir,\n )\n\n def write_solutions_pdf(self, problem_db: ProblemSet, tmp_dir: Path, out_dir: Path):\n \"\"\"Cria o arquivo `.pdf` do tópico.\"\"\"\n self.tex_solutions_document(problem_db).write_pdf(\n tmp_dir.joinpath(self.id_ + \"_gabarito\"),\n out_dir,\n )\n\n @classmethod\n def parse_mdfile(cls, topic_path: Path):\n \"\"\"Cria um `Topic` a partir de um arquivo `.md`.\"\"\"\n logger.info(f\"Atualizando tópico em {topic_path}.\")\n\n try:\n metadata, content = frontmatter.parse(topic_path.read_text())\n except:\n raise Exception(f\"Problema no tópico {topic_path}\")\n\n # informações básicas\n topic = {\n \"id_\": topic_path.stem,\n \"path\": topic_path.resolve(),\n \"date\": datetime.utcfromtimestamp(topic_path.stat().st_mtime),\n }\n\n # extrair os metadados do arquivo `.md`\n topic.update(metadata)\n\n # extrai as seções\n soup = text.md2soup(content)\n\n split_content = text.soup_split_header(soup, \"h1\", topic[\"path\"])\n\n sections = []\n try:\n for index, (title, section_content) in enumerate(split_content, start=1):\n section = Section.parse_obj(\n {\n \"id_\": f\"{topic_path.stem}{index:02d}\",\n \"title\": title,\n \"content\": section_content,\n }\n )\n sections.append(section)\n topic[\"sections\"] = sections\n except RuntimeError:\n topic[\"sections\"] = None\n\n # conteúdo\n topic[\"content\"] = Text.parse_md(content, topic[\"path\"])\n\n return cls.parse_obj(topic)\n\n\nclass TopicSet(BaseModel):\n \"\"\"Conjunto de tópicos.\n\n Atributos:\n date (datetime): Data.\n topics (list[Topic]): Conjuntos de tópicos.\n \"\"\"\n\n id_: str\n date: datetime\n title: str\n topics: list[Topic]\n\n def __len__(self):\n return len(self.topics)\n\n def __iter__(self):\n return iter(self.topics)\n\n def __getitem__(self, key: str) -> Topic:\n return next(filter(lambda topic: topic.id_ == key, self), None)\n\n def filter(self, topic_set_id: str, title: str, topic_ids: list[str]):\n \"\"\"Cria um subconjunto da lista problemas.\n\n Args:\n topic_set_id (str): Identificador da lista de tópicos.\n title (str): Título da lista de problemas.\n problem_ids (list[str]): Lista com os `id_` desejados.\n\n Retorna:\n ProblemSet: Subconjunto de dados com os `id_` selecionados.\n \"\"\"\n if not topic_ids:\n return None\n\n topics = []\n for topic_id in topic_ids:\n try:\n topics.append(self[topic_id])\n except KeyError:\n logger.warning(f\"O tópico com ID {topic_id} não existe.\")\n\n date = min(topic.date for topic in self)\n\n return TopicSet(id_=topic_set_id, title=title, date=date, topics=topics)\n\n def update_topics(self, topic_paths: list[Path]):\n \"\"\"Atualiza os problemas do `ProblemSet`.\"\"\"\n updated_topics = []\n\n for topic_path in topic_paths:\n topic_id = topic_path.stem\n topic_date = datetime.utcfromtimestamp(topic_path.stat().st_mtime)\n\n topic = self[topic_id]\n\n if not topic:\n topic = 
Topic.parse_mdfile(topic_path)\n\n elif topic.date < topic_date:\n topic = Topic.parse_mdfile(topic_path)\n\n logger.debug(f\"Tópico '{topic_id}' mantido.\")\n updated_topics.append(topic)\n\n self.topics = updated_topics\n\n def tex_documents(self, problem_db: ProblemSet):\n return map(lambda topic: topic.tex_document(problem_db), self.topics)\n\n def write_pdfs(self, problem_db: ProblemSet, tmp_dir, out_dir):\n \"\"\"Cria o arquivo `pdf` para todos os tópicos.\"\"\"\n for topic in self.topics:\n topic.write_pdf(problem_db, tmp_dir, out_dir)\n\n @classmethod\n def parse_paths(cls, topic_paths: list[Path]):\n \"\"\"Cria um `TopicSet` com os endereços de tópicos fornecidos.\"\"\"\n with Pool() as pool:\n topics = list(pool.imap_unordered(Topic.parse_mdfile, topic_paths))\n\n return cls(id_=\"root\", title=\"ROOT\", date=datetime.now(), topics=topics)\n\n @classmethod\n def parse_database(cls, topics_dir: Path, force_update: bool = False):\n \"\"\"Atualiza a base de dados\"\"\"\n topic_json_path = topics_dir.joinpath(\"topics.json\")\n\n topic_paths = text.get_database_paths(topics_dir)\n\n if not topic_json_path.exists() or force_update:\n topic_db = cls.parse_paths(topic_paths)\n topic_json_path.write_text(\n topic_db.json(indent=2, ensure_ascii=False), encoding=\"utf-8\"\n )\n return topic_db\n\n logger.info(f\"Lendo base de dados no arquivo: {topic_json_path}.\")\n\n topic_db = cls.parse_file(topic_json_path)\n topic_db.update_topics(topic_paths)\n\n topic_json_path.write_text(\n topic_db.json(indent=2, ensure_ascii=False), encoding=\"utf-8\"\n )\n\n return topic_db\n","repo_name":"gpbraun/braunchem","sub_path":"src/braunchem/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":9698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"21155490618","text":"from dash import html\nfrom dash import dcc\nfrom dash.dependencies import Input, Output\n\nfrom app import app\nfrom apps import form_example, list_data, app2, stock_graph\n\napp.layout = html.Div([\n dcc.Location(id='url', refresh=False),\n html.Nav(id='page-header', children=[\n html.Ul(style={'display': 'inline-block', 'padding': '2px 10px'}, children=[\n html.Li(style={'display': 'inline-block', 'padding': '2px 10px'}, children=[\n dcc.Link('Form Example', href='/apps/form_example')\n ]),\n html.Li(style={'display': 'inline-block', 'padding': '2px 10px'}, children=[\n dcc.Link('Go to App 2', href='/apps/app2')\n ]),\n html.Li(style={'display': 'inline-block', 'padding': '2px 10px'}, children=[\n dcc.Link('Stock Graph', href='/apps/stock_graph')\n ])\n ])\n ]),\n html.Div(id='page-content')\n])\n\n@app.callback(Output('page-content', 'children'),\n Input('url', 'pathname'))\ndef display_page(pathname):\n if pathname == '/apps/form_example':\n return form_example.layout\n elif pathname == '/apps/list_data':\n return list_data.layout\n elif pathname == '/apps/app2':\n return app2.layout\n elif pathname == '/apps/stock_graph':\n return stock_graph.layout\n else:\n return ''\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"marcosmuto/PythonDashTest","sub_path":"source/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74535965652","text":"import torch.nn as nn\nfrom util import *\nimport wandb\n\nclass DnCNN(nn.Module):\n def __init__(self, in_ch=1, out_ch=1, depth=18, ch=64):\n super(DnCNN, 
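# Topic.parse_mdfile above leans on frontmatter.parse to split YAML metadata
# from the Markdown body; a minimal sketch of that call, assuming the
# python-frontmatter package (the sample document below is made up):
import frontmatter

sample = """---
title: Termoquimica
author: Gabriel Braun
---
# Primeira Lei

Conteudo do topico...
"""

metadata, content = frontmatter.parse(sample)
print(metadata["title"])            # -> Termoquimica
print(content.splitlines()[0])      # -> # Primeira Lei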
self).__init__()\n # in layer\n self.loss_func = nn.MSELoss()\n self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=ch, kernel_size=3, padding=1, padding_mode='replicate', bias=False)\n self.relu1 = nn.ReLU(inplace=True)\n # hidden layers\n hidden_layers = []\n for i in range(depth):\n hidden_layers.append(nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, padding_mode='replicate', bias=False))\n hidden_layers.append(nn.GroupNorm(4, ch))\n hidden_layers.append(nn.ReLU(inplace=True))\n self.mid_layer = nn.Sequential(*hidden_layers)\n # out layer\n self.conv3 = nn.Conv2d(in_channels=ch, out_channels=out_ch, kernel_size=3, padding=1, padding_mode='replicate', bias=False)\n\n\n def forward_base(self, x):\n out = self.relu1(self.conv1(x))\n out = self.mid_layer(out)\n out = self.conv3(out)\n return out\n\n def forward(self, x, y):\n noise = self.forward_base(x)\n img = x - noise\n loss = self.loss_func(img, y)\n return noise, img, loss\n\n def predict(self, x):\n with torch.no_grad():\n noise = self.forward_base(x)\n img = x - noise\n return img\n\nclass DnCNN_OHE(DnCNN):\n def __init__(self, in_ch=1, out_ch=1, depth=18, ch=64):\n super(DnCNN_OHE, self).__init__(in_ch, out_ch, depth, ch)\n\n def forward(self, x, y, post_proc=False):\n y_prep = one_hot_y(y).cuda()\n noise = self.forward_base(x)\n one_hot_pred = torch.softmax(noise[:, 1:], dim=1)\n loss_one_hot = self.loss_func(one_hot_pred, y_prep)\n img = x - noise[:, 0].unsqueeze(1)\n loss_noise = (((img - y)**2) * one_hot_pred[:, -1].detach()).mean()\n loss = 5000 * loss_noise + loss_one_hot\n one_hot_round = torch.round(one_hot_pred)\n wandb.log({'oh': loss_one_hot,\n 'noise': loss_noise})\n if post_proc:\n img = img[:, 0]\n img[one_hot_round[:, 0]==1] = 0\n img[one_hot_round[:, 1]==1] = 0.194\n img[one_hot_round[:, 2]==1] = 0.233\n loss_pred = self.loss_func(img, y[:, 0])\n wandb.log({'loss_pred': loss_pred.item()**0.5})\n img = img.unsqueeze(1)\n return one_hot_round, img, loss\n\n def predict(self, x, y=None):\n with torch.no_grad():\n noise = self.forward_base(x)\n one_hot_pred = torch.softmax(noise[:, 1:], dim=1)\n img = x - noise[:, 0].unsqueeze(1)\n one_hot_round = torch.round(one_hot_pred)\n img = img[:, 0]\n img[one_hot_round[:, 0]==1] = 0\n img[one_hot_round[:, 1]==1] = 0.194\n img[one_hot_round[:, 2]==1] = 0.233\n if y:\n loss_pred = self.loss_func(img, y[:, 0])\n return img, loss_pred\n return img\n\nclass DnCNN_OHE_res(DnCNN_OHE):\n def __init__(self, in_ch=1, out_ch=1, p=1, k=3, depth=18, ch=64):\n super(DnCNN_OHE_res, self).__init__(in_ch, out_ch, depth, ch)\n self.convs = nn.ModuleList()\n self.norms = nn.ModuleList()\n for i in range(depth):\n self.convs.append(nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=k, padding=p, padding_mode='replicate', bias=False))\n self.norms.append(nn.BatchNorm2d(ch))\n\n def forward_base(self, x):\n out = self.relu1(self.conv1(x))\n for i, (conv, norm) in enumerate(zip(self.convs, self.norms)):\n out = self.relu1(norm(out + conv(out)))\n out = self.conv3(out)\n return out\n\nclass DnCNN_Pure_OHE(DnCNN_OHE_res):\n def __init__(self, in_ch=1, out_ch=4, p=1, k=3, depth=18, ch=64):\n super(DnCNN_Pure_OHE, self).__init__(in_ch, out_ch, p, k, depth, ch)\n\n def forward(self, x, y, post_proc=False):\n y_prep = one_hot_y(y).cuda()\n out = self.forward_base(x)\n one_hot_pred = torch.softmax(out, dim=1)\n loss_one_hot = self.loss_func(one_hot_pred, y_prep)\n # loss_one_hot_true = self.loss_func(one_hot_pred, y_prep)\n wandb.log({'oh_error': loss_one_hot})\n return 
loss_one_hot, y_prep, one_hot_pred\n","repo_name":"stke9/DL-Sparse-View","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22740212515","text":"'''\nGiven the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the number of ways it can be decoded.\n\nFor example, the message '111' would give 3, since it could be decoded as 'aaa', 'ka', and 'ak'.\n\nYou can assume that the messages are decodable. For example, '001' is not allowed.\n'''\n\nchar_map = {}\nchar_map['1'] = 'a'\nchar_map['2'] = 'b'\nchar_map['3'] = 'c'\nchar_map['4'] = 'd'\nchar_map['5'] = 'e'\nchar_map['6'] = 'f'\nchar_map['7'] = 'g'\nchar_map['8'] = 'h'\nchar_map['9'] = 'i'\nchar_map['10'] = 'j'\nchar_map['11'] = 'k'\nchar_map['12'] = 'l'\nchar_map['13'] = 'm'\nchar_map['14'] = 'n'\nchar_map['15'] = 'o'\nchar_map['16'] = 'p'\nchar_map['17'] = 'q'\nchar_map['18'] = 'r'\nchar_map['19'] = 's'\nchar_map['20'] = 't'\nchar_map['21'] = 'u'\nchar_map['22'] = 'v'\nchar_map['23'] = 'w'\nchar_map['24'] = 'x'\nchar_map['25'] = 'y'\nchar_map['26'] = 'z'\n\n\nmemo_dict = {} # stores the 'code => key' and 'number of ways => value' eg. '11'=> 2 ways(aa,k)\ndef count_ways(s):\n str_len = s.__len__()\n if s in memo_dict:\n return memo_dict[s]\n\n if str_len == 0:\n return 1\n\n if s[0] == '0':\n return 0\n\n\n if str_len > 0 and s[0] in char_map:\n memo_dict[s] = count_ways(s[1:])\n if str_len > 1 and s[:2] in char_map:\n memo_dict[s] += count_ways(s[2:])\n\n return memo_dict[s]\n\n\ndef coding_problem_7(s):\n \"\"\"\n Given the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the number of ways it can be decoded.\n Examples:\n >>> coding_problem_7('111') # possible interpretations: 'aaa', 'ka', 'ak'\n 3\n >>> coding_problem_7('2626') # 'zz', 'zbf', 'bfz', 'bfbf'\n 4\n \"\"\"\n symbols = map(str, range(1, 27))\n if not s:\n return 1\n\n matches = filter(lambda symbol: s.startswith(symbol), symbols)\n encodings = [coding_problem_7(s[len(m):]) for m in matches]\n return sum(encodings)\n\n\nif __name__ == '__main__':\n s =input()\n print(\"\\n\")\n print(count_ways(s))\n print(coding_problem_7(s))","repo_name":"RafayAK/CodingPrep","sub_path":"DailyCodingProblem/7_Facebook_Decoding_porblem.py","file_name":"7_Facebook_Decoding_porblem.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"2498062053","text":"#!/usr/bin/env python3\n\"\"\" Alta3 Research | BCopeland\n For - Using a file's lines as a source for the for-loop \"\"\"\n\ndef main():\n \n with open(\"dnsservers.txt\", \"r\") as dnsfile:\n for svr in dnsfile:\n print(svr, end=\"\")\nmain()\n","repo_name":"BrandonCope/mycode","sub_path":"fact/looping02.py","file_name":"looping02.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29963206984","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom honglingchuangtou.items import HonglingchuangtouItem\nimport urllib\nimport urllib2\nimport os\nimport re\n\nclass HonglingchuangtouSpider(CrawlSpider):\n name = 'honglingchuangtou'\n allowd_domain = ['my089.com']\n\n 
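# count_ways above memoizes a top-down recursion over string suffixes; an
# equivalent bottom-up sketch (mine, not the author's) that needs no global
# memo dict:
def count_decodings(s):
    if not s or s[0] == "0":
        return 0
    ways = [1, 1]                         # ways[i]: decodings of first i chars
    for i in range(2, len(s) + 1):
        cur = ways[i - 1] if s[i - 1] != "0" else 0
        if "10" <= s[i - 2:i] <= "26":    # valid two-digit symbol
            cur += ways[i - 2]
        ways.append(cur)
    return ways[-1]

assert count_decodings("111") == 3        # 'aaa', 'ka', 'ak'
assert count_decodings("2626") == 4       # 'zz', 'zbf', 'bfz', 'bfbf'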
#deleting old output files: start\n    #if (os.path.exists(\"content.txt\")): os.remove(\"content.txt\")\n    #if (os.path.exists(\"content_index.txt\")): os.remove(\"content_index.txt\")\n    #deleting old output files: end\n\n    url_list = []; #initialize the url_list array\n\n    download_delay = 3 #seconds to wait between requests\n\n    #start of for loop: visit the pages of the product list\n    for i in range(1,10) :\n        url_js = 'https://www.my089.com/Loan/default.aspx?pid=' + str(i) #page link; str(i) is the page number\n        wp = urllib2.urlopen(url_js) #open the connection\n        content = wp.read() #read the page content\n        content_productid = re.findall('Detail.aspx'r'[\\S]*', content) #grab (\"productid\":) and the id that follows it\n        content_url = [content_index.replace('Detail.aspx',\n                                 'https://www.my089.com/Loan/Detail.aspx')\n                          for content_index in content_productid] #rewrite the url\n        content_url2 = [content_index2.replace('\\\"',\n                                 '')\n                          for content_index2 in content_url] #strip the trailing quote from each link\n\n        #write the urls to a file\n        #fp = open(\"content.txt\",'a')\n        #fp.write(content)\n        #record the productid positions\n        #fp = open(\"content_index.txt\",'a')\n        #fp.write(str(content_url2) + '\\n')\n        url_list.extend(content_url2) #append the urls from content_url2 into url_list\n        #end of for loop\n\n    start_urls = set(url_list) #assign start_urls, removing duplicates\n\n\n    def parse(self, response):\n        item = HonglingchuangtouItem()\n        sel = Selector(response)\n        #item['link'] = sel.xpath('//a[@class=\\\"viewBtn\\\"]/@href').extract()\n        #title1 = title[0]\n        #title2 = title1.extract().split(\",\")[1].split(\"-\")[0]\n        item['name'] = sel.xpath('//span[@class=\\\"bt_txt\\\"]/text()').extract()[0]\n        item['link'] = response.url\n        item['amount'] = sel.xpath('//li[@class=\\\"jine\\\"]/span/b/text()').extract()[0][1:]\n        item['min_amount'] = sel.xpath('//div[@class=\\\"biao_info\\\"]/ul/li/span/b/text()').extract()[4].split('~')[0]\n        #item['amount'] = amount.split(\" \")[0] #take the first field before the space\n        item['income_rate'] = sel.xpath('//div[@class=\\\"biao_info\\\"]/ul/li/span/b[@class=\\\"number\\\"]/text()').extract()[0]\n        item['term'] = sel.xpath('//div[@class=\\\"biao_info\\\"]/ul/li/span/b[@class=\\\"number\\\"]/text()').extract()[1]\n        item['area'] = ''\n        item['transfer_claim'] = ''\n        item['repay_type'] = sel.xpath('//div[@class=\\\"biao_info\\\"]/ul/li/span/text()').extract()[4]\n        item['reward'] = sel.xpath('//div[@class=\\\"Bid_Reward\\\"]/div/text()').extract()[0].strip()\n        item['protect_mode'] = ''\n        item['description'] = sel.xpath('//div[@class=\\\"textbox\\\"]/text()').extract()[0].strip()\n        item['process'] = sel.xpath('//div[@class=\\\"Loading\\\"]/span[@class=\\\"lf\\\"]/text()').extract()[0]\n\n        #[0].encode('utf-8')\n        #[n.encode('utf-8') for n in title]\n\n        yield item\n","repo_name":"yfjelley/scrapy_1214049153","sub_path":"crawl/honglingchuangtou/honglingchuangtou/spiders/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"5863289779","text":"from keras import backend\nfrom keras import ops\nfrom keras.api_export import keras_export\nfrom keras.layers.input_spec import InputSpec\nfrom keras.layers.layer import Layer\nfrom keras.utils import argument_validation\n\n\n@keras_export(\"keras.layers.UpSampling2D\")\nclass UpSampling2D(Layer):\n    \"\"\"Upsampling layer for 2D inputs.\n\n    The implementation uses interpolative resizing, given the resize method\n    (specified by the `interpolation` argument). 
Use `interpolation=nearest`\n to repeat the rows and columns of the data.\n\n Examples:\n\n >>> input_shape = (2, 2, 1, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n >>> y = keras.layers.UpSampling2D(size=(1, 2))(x)\n >>> print(y)\n [[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]]\n\n Args:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else `\"channels_last\"`.\n Defaults to `\"channels_last\"`.\n interpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`, `\"lanczos3\"`,\n `\"lanczos5\"`, `\"nearest\"`.\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`\n \"\"\"\n\n def __init__(\n self, size=(2, 2), data_format=None, interpolation=\"nearest\", **kwargs\n ):\n super().__init__(**kwargs)\n self.data_format = backend.config.standardize_data_format(data_format)\n self.size = argument_validation.standardize_tuple(size, 2, \"size\")\n self.interpolation = interpolation.lower()\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n if self.data_format == \"channels_first\":\n height = (\n self.size[0] * input_shape[2]\n if input_shape[2] is not None\n else None\n )\n width = (\n self.size[1] * input_shape[3]\n if input_shape[3] is not None\n else None\n )\n return (input_shape[0], input_shape[1], height, width)\n else:\n height = (\n self.size[0] * input_shape[1]\n if input_shape[1] is not None\n else None\n )\n width = (\n self.size[1] * input_shape[2]\n if input_shape[2] is not None\n else None\n )\n return (input_shape[0], height, width, input_shape[3])\n\n def call(self, inputs):\n return self._resize_images(\n inputs,\n self.size[0],\n self.size[1],\n self.data_format,\n interpolation=self.interpolation,\n )\n\n def get_config(self):\n config = {\n \"size\": self.size,\n \"data_format\": self.data_format,\n \"interpolation\": self.interpolation,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n\n def _resize_images(\n self,\n x,\n height_factor,\n width_factor,\n data_format,\n interpolation=\"nearest\",\n ):\n \"\"\"Resizes the images contained in a 4D tensor.\n\n Args:\n x: Tensor or variable to resize.\n height_factor: Positive integer.\n width_factor: Positive integer.\n data_format: One of `\"channels_first\"`, `\"channels_last\"`.\n interpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`,\n `\"lanczos3\"`, `\"lanczos5\"`, or `\"nearest\"`.\n\n Returns:\n A tensor.\n \"\"\"\n if data_format == \"channels_first\":\n rows, cols = 2, 3\n elif data_format == 
\"channels_last\":\n rows, cols = 1, 2\n else:\n raise ValueError(f\"Invalid `data_format` argument: {data_format}\")\n\n if data_format == \"channels_first\":\n x = ops.transpose(x, [0, 2, 3, 1])\n # https://github.com/keras-team/keras/issues/294\n # Use `ops.repeat` for `nearest` interpolation\n if interpolation == \"nearest\":\n x = ops.repeat(x, height_factor, axis=1)\n x = ops.repeat(x, width_factor, axis=2)\n else:\n # multiply the height and width factor on each dim\n # by hand (versus using element-wise multiplication\n # by np.array([height_factor, width_factor]) then\n # list-ifying the tensor by calling `.tolist()`)\n # since when running under torchdynamo, `new_shape`\n # will be traced as a symbolic variable (specifically\n # a `FakeTensor`) which does not have a `tolist()` method.\n new_shape = (\n x.shape[rows] * height_factor,\n x.shape[cols] * width_factor,\n )\n x = ops.image.resize(x, new_shape, interpolation=interpolation)\n if data_format == \"channels_first\":\n x = ops.transpose(x, [0, 3, 1, 2])\n\n return x\n","repo_name":"keras-team/keras","sub_path":"keras/layers/reshaping/up_sampling2d.py","file_name":"up_sampling2d.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":59773,"dataset":"github-code","pt":"67"} +{"seq_id":"25198041383","text":"#นายธัญสิทธิ์ อัครนราธิวัฒน์\r\n#B6022488\r\nimport numpy as np\r\nnp.set_printoptions(precision=2, suppress=True)\r\nimport matplotlib.pyplot as plt\r\nL1=10\r\nL2=10\r\nr=10\r\na=10\r\nb=0\r\nx=np.linspace(20,10,11)\r\ny=-x+20\r\n#print(y)\r\ndef q2(x,y):\r\n cq2=(x**2+y**2-L1**2-L2**2)/(2*L1*L2)\r\n sq2=np.sqrt(1-cq2**2)\r\n q2=np.arctan2(sq2,cq2)\r\n return q2\r\nprint('q2 =')\r\nprint(q2(x,y)*180/np.pi)\r\ndef q1(x,y,q2):\r\n k1=L1+L2*np.cos(q2(x,y))\r\n k2=L2*np.sin(q2(x,y))\r\n q1=np.arctan2(y,x)-np.arctan2(k2,k1)\r\n return q1\r\nprint('q1 =')\r\nprint(q1(x,y,q2)*180/np.pi)\r\nq1 = q1(x,y,q2)\r\nq2 = q2(x,y)\r\n#print(q1,'\\n','\\n',q2)\r\np=L1*np.cos(q1)+L2*np.cos(q1+q2)\r\ns=L1*np.sin(q1)+L2*np.sin(q1+q2)\r\nplt.xlabel('x')\r\nplt.ylabel('y')\r\nplt.title('Linear')\r\nplt.plot(p,s)\r\nplt.show()\r\n","repo_name":"Thanyasit/Code_Python","sub_path":"2019/Introduction Robot/B6022488-2.py","file_name":"B6022488-2.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9393928300","text":"# SAC training code reference\n# https://github.com/vitchyr/rlkit/blob/master/rlkit/torch/sac/sac.py\n\nimport copy\nfrom math import ceil\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom gym import spaces\n\nfrom rl.dataset import ReplayBuffer, RandomSampler, ReplayBufferIterableDatasetDisk, ReplayBufferIterableDataset\nfrom rl.base_agent import BaseAgent\nfrom rl.planner_agent import PlannerAgent\nfrom util.logger import logger\nfrom util.mpi import mpi_average\nfrom util.pytorch import (\n optimizer_cuda,\n count_parameters,\n sync_networks,\n sync_grads,\n to_tensor,\n)\nfrom util.gym import action_size, observation_size\nfrom rl.policies.utils import BC_Visual_Policy\nfrom util.gym import observation_size, action_size, goal_size, box_size, robot_state_size\n\n\nclass AAC_DDPG_Agent:\n def __init__(\n self,\n config,\n ob_space,\n ac_space,\n actor,\n critic,\n non_limited_idx=None,\n ref_joint_pos_indexes=None,\n joint_space=None,\n is_jnt_limited=None,\n jnt_indices=None,\n ac_scale=None,\n ):\n 
self._config = config\n self._ob_space = ob_space\n self._ac_space = ac_space\n self._jnt_indices = jnt_indices\n self._ref_joint_pos_indexes = ref_joint_pos_indexes\n self._joint_space = joint_space\n self._is_jnt_limited = is_jnt_limited\n self._ac_scale = ac_scale\n if joint_space is not None:\n self._jnt_minimum = joint_space[\"default\"].low\n self._jnt_maximum = joint_space[\"default\"].high\n\n self.bc_mse_loss = nn.MSELoss()\n self.q_mse_loss = nn.MSELoss()\n\n # build up networks\n self._build_actor(actor)\n self._build_critic(critic)\n self._network_cuda(config.device)\n\n # Freeze target networks with respect to optimizers (only update via polyak averaging)\n for p in self._actor_target.parameters():\n p.requires_grad = False\n for p in self._critic_target.parameters():\n p.requires_grad = False\n\n self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.lr_actor)\n self._critic_optim = optim.Adam(self._critic.parameters(), lr=config.lr_critic)\n\n # parameters specific to DDPG\n self.gamma = config.gamma\n\n buffer_keys = [\"ob\", \"ac\", \"meta_ac\", \"done\", \"rew\"]\n if config.mopa or config.expand_ac_space:\n buffer_keys.append(\"intra_steps\")\n\n if config.parallel_dataloading:\n if config.parallel_dataloading_mode == 'disk':\n def collate_transitions(batch):\n transitions, ob_images, ob_next_images = zip(*batch)\n transitions[0]['ob']['image'] = ob_images[0]\n transitions[0]['ob_next']['image'] = ob_next_images[0]\n return transitions[0]\n self._buffer = ReplayBufferIterableDatasetDisk(buffer_keys, config.buffer_size, config.batch_size)\n self._expert_buffer = ReplayBufferIterableDatasetDisk(buffer_keys, config.buffer_size, config.batch_size)\n self._buffer_loader = torch.utils.data.DataLoader(self._buffer, batch_size=1, num_workers=0, collate_fn=collate_transitions)\n self._expert_buffer_loader = torch.utils.data.DataLoader(self._expert_buffer, batch_size=1, num_workers=0, collate_fn=collate_transitions)\n elif config.parallel_dataloading_mode == 'ram':\n self._buffer = ReplayBufferIterableDataset(buffer_keys, config.buffer_size, config.batch_size)\n self._expert_buffer = ReplayBufferIterableDataset(buffer_keys, config.buffer_size, config.batch_size)\n self._buffer_loader = torch.utils.data.DataLoader(self._buffer, batch_size=1, num_workers=0, collate_fn=lambda batch: batch[0])\n self._expert_buffer_loader = torch.utils.data.DataLoader(self._expert_buffer, batch_size=1, num_workers=0, collate_fn=lambda batch: batch[0])\n else:\n sampler = RandomSampler()\n self._buffer = ReplayBuffer(\n buffer_keys, config.buffer_size, sampler.sample_func\n )\n self._expert_buffer = ReplayBuffer(\n buffer_keys, config.buffer_size, sampler.sample_func\n )\n\n self._log_creation()\n\n self._planner = None\n self._is_planner_initialized = False\n\n # create and load expert BC Visual policy\n if config.env == 'PusherObstacle-v0':\n # observation (excluding goal information)\n input_dim = observation_size(ob_space) - goal_size(ob_space) - box_size(ob_space)\n elif 'Sawyer' in config.env:\n input_dim = robot_state_size(ob_space)\n else:\n raise NotImplementedError\n self.expert_policy = BC_Visual_Policy(robot_state=input_dim, num_classes=action_size(ac_space), img_size=config.env_image_size, device=config.device, env=config.env)\n self.expert_policy.load_bc_weights(config.bc_checkpoint)\n self.expert_policy.eval()\n self.expert_policy.cuda()\n print('load pretrained BC weights to BC model from {}'.format(self._config.bc_checkpoint))\n\n def _log_creation(self):\n if 
self._config.is_chef:\n logger.info(\"creating a AAC-DDPG agent\")\n logger.info(\"the actor has %d parameters\", count_parameters(self._actor))\n logger.info(\"the critic has %d parameters\", count_parameters(self._critic))\n\n def _build_actor(self, actor):\n self._actor = actor(self._config, self._ob_space, self._ac_space, self._config.tanh_policy, self._ac_scale)\n # build up target network\n self._actor_target = actor(self._config, self._ob_space, self._ac_space, self._config.tanh_policy, self._ac_scale)\n self._actor_target.load_state_dict(self._actor.state_dict())\n\n def _build_critic(self, critic):\n self._critic = critic(self._config, self._ob_space, self._ac_space)\n # build up target networks\n self._critic_target = critic(self._config, self._ob_space, self._ac_space)\n self._critic_target.load_state_dict(self._critic.state_dict())\n\n def store_episode(self, rollouts):\n self._buffer.store_episode(rollouts)\n \n def store_episode_expert(self, rollouts):\n self._expert_buffer.store_episode(rollouts)\n\n def valid_action(self, ac):\n return np.all(ac[\"default\"] >= -1.0) and np.all(ac[\"default\"] <= 1.0)\n\n def clip_qpos(self, curr_qpos):\n tmp_pos = curr_qpos.copy()\n if np.any(\n curr_qpos[self._is_jnt_limited[self._jnt_indices]]\n < self._jnt_minimum[self._jnt_indices][\n self._is_jnt_limited[self._jnt_indices]\n ]\n ) or np.any(\n curr_qpos[self._is_jnt_limited[self._jnt_indices]]\n > self._jnt_maximum[self._jnt_indices][\n self._is_jnt_limited[self._jnt_indices]\n ]\n ):\n new_curr_qpos = np.clip(\n curr_qpos.copy(),\n self._jnt_minimum[self._jnt_indices] + self._config.joint_margin,\n self._jnt_maximum[self._jnt_indices] - self._config.joint_margin,\n )\n new_curr_qpos[np.invert(self._is_jnt_limited[self._jnt_indices])] = tmp_pos[\n np.invert(self._is_jnt_limited[self._jnt_indices])\n ]\n curr_qpos = new_curr_qpos\n return curr_qpos\n\n def state_dict(self):\n return {\n \"actor_state_dict\": self._actor.state_dict(),\n \"critic_state_dict\": self._critic.state_dict(),\n \"actor_optim_state_dict\": self._actor_optim.state_dict(),\n \"critic_optim_state_dict\": self._critic_optim.state_dict(),\n }\n\n def load_state_dict(self, ckpt):\n self._actor.load_state_dict(ckpt[\"actor_state_dict\"])\n self._critic.load_state_dict(ckpt[\"critic_state_dict\"])\n\n self._actor_target.load_state_dict(self._actor.state_dict())\n self._critic_target.load_state_dict(self._critic.state_dict())\n\n self._network_cuda(self._config.device)\n\n self._actor_optim.load_state_dict(ckpt[\"actor_optim_state_dict\"])\n self._critic_optim.load_state_dict(ckpt[\"critic_optim_state_dict\"])\n\n optimizer_cuda(self._actor_optim, self._config.device)\n optimizer_cuda(self._critic_optim, self._config.device)\n\n def _network_cuda(self, device):\n self._actor.to(device)\n self._critic.to(device)\n self._actor_target.to(device)\n self._critic_target.to(device)\n \n def concat_transitions_helper(self, t1, t2, batch_size_expert, batch_size_policy):\n if isinstance(t1, dict):\n sub_out = dict()\n for k, v in t1.items():\n sub_out[k] = self.concat_transitions_helper(t1[k], t2[k], batch_size_expert, batch_size_policy)\n return sub_out\n elif type(t1) is np.ndarray:\n if batch_size_expert is not None and batch_size_policy is not None:\n return np.concatenate((t1[:batch_size_expert], t2[:batch_size_policy]), axis=0)\n else:\n return np.concatenate((t1, t2), axis=0)\n else:\n raise NotImplementedError\n\n def concat_transitions(self, t1, t2, batch_size_expert=None, batch_size_policy=None):\n out = dict()\n # 
dict_keys(['ob', 'ac', 'meta_ac', 'done', 'rew', 'intra_steps', 'ob_next'])\n for k, v in t1.items():\n out[k] = self.concat_transitions_helper(t1[k], t2[k], batch_size_expert, batch_size_policy)\n return out\n\n def train(self):\n # config.percent_expert_batch_size from expert trajectories and [1-config.percent_expert_batch_size] from policy\n batch_size_expert = int(self._config.batch_size * self._config.percent_expert_batch_size)\n batch_size_policy = self._config.batch_size - batch_size_expert\n for i in range(self._config.num_batches):\n if self._config.parallel_dataloading:\n transitions_expert = next(iter(self._expert_buffer_loader))\n transitions_policy = next(iter(self._buffer_loader))\n transitions = self.concat_transitions(transitions_expert, transitions_policy, batch_size_expert=batch_size_expert, batch_size_policy=batch_size_policy)\n else:\n transitions_expert = self._expert_buffer.sample(batch_size_expert)\n transitions_policy = self._buffer.sample(batch_size_policy)\n transitions = self.concat_transitions(transitions_expert, transitions_policy)\n\n train_info = self._update_network(transitions, i)\n transitions_expert.clear()\n transitions_policy.clear()\n transitions.clear()\n return train_info\n\n def train_expert(self): \n for i in range(self._config.num_batches):\n if self._config.parallel_dataloading:\n transitions = next(iter(self._expert_buffer_loader))\n else:\n transitions = self._expert_buffer.sample(self._config.batch_size)\n train_info = self._update_network(transitions, i)\n transitions.clear()\n return train_info\n\n def act(self, ob, is_train=True, return_stds=False, random_exploration=False, collect_expert_trajectories=False):\n if collect_expert_trajectories:\n ac = self.expert_policy.act_expert(ob)\n return ac, None, None\n \n if is_train:\n ac = self._actor.act(ob, self._config.act_noise)\n else:\n ac = self._actor.act(ob)\n\n return ac, None, None\n\n def load_env_image_if_needed(self, o):\n img_arr = []\n if 'image' in o.keys() and isinstance(o['image'][0], str):\n for img_path in o['image']:\n img_arr.append(np.load(img_path)[0])\n o['image'] = np.array(img_arr)\n return None\n\n # Set up function for computing DDPG Q-loss\n def compute_loss_q(self, o, o_next, ac, done, rew, info):\n q = self._critic(o, ac)\n\n # Bellman backup for Q function\n with torch.no_grad():\n q_pi_targ = self._critic_target(o_next, self._actor_target(o_next))\n backup = rew + self.gamma * (1 - done) * q_pi_targ\n\n # MSE loss against Bellman backup\n loss_q = self.q_mse_loss(q, backup)\n\n info['sum_rewards'] = torch.sum(rew).cpu().item()\n info['loss_q'] = loss_q.cpu().item()\n\n return loss_q\n\n def compute_loss_pi(self, o):\n q_pi = self._critic(o, self._actor(o))\n return -q_pi.mean()\n\n def _update_network(self, transitions, step=0):\n info = {}\n\n # pre-process observations\n _to_tensor = lambda x: to_tensor(x, self._config.device)\n o, o_next = transitions[\"ob\"], transitions[\"ob_next\"]\n bs = len(transitions[\"done\"])\n\n if not self._config.parallel_dataloading: \n self.load_env_image_if_needed(o)\n self.load_env_image_if_needed(o_next)\n\n o = _to_tensor(o)\n o_next = _to_tensor(o_next)\n ac = _to_tensor(transitions[\"ac\"])\n done = _to_tensor(transitions[\"done\"]).reshape(bs, 1)\n rew = _to_tensor(transitions[\"rew\"]).reshape(bs, 1)\n\n # First run one gradient descent step for Q.\n self._critic_optim.zero_grad()\n # calculate q loss\n loss_q = self.compute_loss_q(o, o_next, ac, done, rew, info)\n loss_q.backward()\n # 
torch.nn.utils.clip_grad_norm_(self._critic.parameters(), 1) # clip gradient\n self._critic_optim.step()\n\n # Freeze Q-network so you don't waste computational effort \n # computing gradients for it during the policy learning step.\n for p in self._critic.parameters():\n p.requires_grad = False\n\n # Next run one gradient descent step for pi.\n self._actor_optim.zero_grad()\n loss_pi = self.compute_loss_pi(o)\n\n # BC mse loss between expert BC visual policy and the actor\n bc_visual_expert_ac = self.expert_policy.get_predicted_ac(o)\n actor_prediced_ac = self._actor(o)\n bc_mse_loss = self.bc_mse_loss(actor_prediced_ac, bc_visual_expert_ac)\n info[\"loss_pi\"] = loss_pi.cpu().item()\n info[\"bc_mse_loss\"] = bc_mse_loss.cpu().item()\n loss_pi_bc = loss_pi + bc_mse_loss\n loss_pi_bc.backward()\n # torch.nn.utils.clip_grad_norm_(self._actor.parameters(), 1) # clip gradient\n self._actor_optim.step()\n\n # Unfreeze Q-network so you can optimize it at next DDPG step.\n for p in self._critic.parameters():\n p.requires_grad = True \n\n # Finally, update target networks by polyak averaging.\n with torch.no_grad():\n for p, p_targ in zip(self._actor.parameters(), self._actor_target.parameters()):\n # NB: We use an in-place operations \"mul_\", \"add_\" to update target\n # params, as opposed to \"mul\" and \"add\", which would make new tensors.\n p_targ.data.mul_(self._config.polyak)\n p_targ.data.add_((1 - self._config.polyak) * p.data)\n \n for p, p_targ in zip(self._critic.parameters(), self._critic_target.parameters()):\n # NB: We use an in-place operations \"mul_\", \"add_\" to update target\n # params, as opposed to \"mul\" and \"add\", which would make new tensors.\n p_targ.data.mul_(self._config.polyak)\n p_targ.data.add_((1 - self._config.polyak) * p.data)\n\n return info\n","repo_name":"clvrai/mopa-pd","sub_path":"rl/aac_ddpg_agent.py","file_name":"aac_ddpg_agent.py","file_ext":"py","file_size_in_byte":15608,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"38896210089","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n###\n### Core > Exploit \n###\nimport re\nimport sys\nimport subprocess\n\nfrom lib.core.Command import Command\nfrom lib.core.Config import *\nfrom lib.core.Exceptions import CommandException\nfrom lib.core.Logger import logger\nfrom lib.core.ScriptTemplates import *\nfrom lib.utils.NetUtils import NetUtils\n\nclass Exploit:\n\n def __init__(self, \n name, \n product,\n description, \n type_, \n detection_rawcmd, \n detection_success,\n exploit_rawcmd,\n exploit_rce_output,\n exploit_success):\n self.name = name\n self.product = product\n self.description = description\n self.type = type_\n self.detection_rawcmd = detection_rawcmd\n self.detection_success = detection_success\n self.exploit_rawcmd = exploit_rawcmd\n self.exploit_rce_output = exploit_rce_output\n self.exploit_success = exploit_success\n self.directory = TOOL_BASEPATH + '/exploits/' + self.name.lower()\n self.output = ''\n\n\n def is_mode_supported(self, mode):\n \"\"\"\n Check is specified mode is supported (either \"detect\" or \"exploit\")\n :param str mode: Requested mode\n \"\"\"\n if mode == 'detect':\n return (self.detection_rawcmd is not None and len(self.detection_rawcmd) > 0)\n elif mode == 'exploit':\n return (self.exploit_rawcmd is not None and len(self.exploit_rawcmd) > 0)\n else:\n return False\n\n\n def run(self, target, mode, rce_command=''):\n \"\"\"\n :param Target targer: Target instance\n :param str mode: mode can be 
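# The target-network updates above are standard polyak averaging; an isolated
# PyTorch sketch of the same in-place pattern (toy linear nets, made up here):
import torch
import torch.nn as nn

net, target = nn.Linear(4, 2), nn.Linear(4, 2)
target.load_state_dict(net.state_dict())      # start target at the same point

polyak = 0.995
with torch.no_grad():
    for p, p_targ in zip(net.parameters(), target.parameters()):
        p_targ.data.mul_(polyak)              # in-place ops keep target storage
        p_targ.data.add_((1 - polyak) * p.data)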
either \"detect\" or \"exploit\"\n :param str rce_command: RCE command to run when running exploit (requires mode=exploit)\n \"\"\"\n try:\n if mode == 'detect':\n self.command = Command(self.detection_rawcmd, self.type)\n elif mode == 'exploit':\n self.command = Command(self.exploit_rawcmd, self.type, self.exploit_rce_output)\n except CommandException as e:\n logger.error(e)\n return None\n\n # Build script to run\n if mode == 'exploit':\n if self.type == 'rce':\n if not self.exploit_rce_output:\n logger.warning('The exploit will attempt to execute command on remote system but no '\n 'output will be available !')\n # For RCE exploit without command output in test mode (no rce_command provided):\n # Use script template that check for reverse connection with ICMP ping and HTTP requests\n if len(rce_command) == 0:\n logger.warning('WARNING: This attack box must be reachable from the target !')\n logger.info('No command supplied to run through RCE, automatic exploit test will be started...')\n logger.info('If target is vulnerable, exploit will try to ping (ICMP Echo request) and '\n 'to send HTTP request to local IP = {localip} from target'.format(\n localip=NetUtils.get_local_ip_address()))\n\n cmdline = self.command.get_cmdline(target)\n print(cmdline)\n\n script = SCRIPT_RCE_BLIND.format(\n exploit_dir=self.directory,\n command=cmdline)\n else:\n cmdline = self.command.get_cmdline(target, rce_command)\n print(cmdline)\n script = 'cd {exploit_dir}; '.format(exploit_dir=self.directory)\n script += cmdline\n else:\n if len(rce_command) == 0:\n logger.info('No command supplied to run through RCE, automatic exploit test will be started...')\n logger.info('If target is vulnerable, exploit will try to run an echo command on target')\n cmdline = self.command.get_cmdline(target)\n print(cmdline)\n script = 'cd {exploit_dir}; '.format(exploit_dir=self.directory)\n script += cmdline\n else:\n cmdline = self.command.get_cmdline(target, rce_command)\n print(cmdline)\n script = 'cd {exploit_dir}; '.format(exploit_dir=self.directory)\n script += cmdline \n\n else:\n cmdline = self.command.get_cmdline(target)\n print(cmdline)\n script = 'cd {exploit_dir}; '.format(exploit_dir=self.directory)\n script += cmdline\n else:\n logger.warning('The script will attempt to detect if remote system is vulnerable without '\n 'actually exploiting the vulnerability.')\n logger.warning('WARNING: False Positive is possible !')\n cmdline = self.command.get_cmdline(target)\n script = 'cd {exploit_dir}; '.format(exploit_dir=self.directory)\n script += cmdline \n\n # Run subprocess\n try:\n logger.info('{script} will be run from directory: {directory}'.format(\n script='Exploit' if mode == 'exploit' else 'Detection script',\n directory=self.directory))\n\n proc = subprocess.Popen(script, \n shell=True, \n executable='/bin/bash',\n stdout=subprocess.PIPE, \n stderr=subprocess.STDOUT)\n\n # Agressivelly get the output\n while True:\n out = proc.stdout.read(1)\n # We put that inside try block to avoid utf8 decoding error\n try:\n out = out.decode(sys.stdout.encoding)\n sys.stdout.write(out)\n self.output += out\n except:\n pass\n\n # Break if process has finished\n if out == '' and proc.poll() != None:\n break\n\n except Exception as e:\n logger.error('Error when trying to run command: {exception}'.format(\n exception=e))\n return None\n\n return self.output\n\n\n def check_success(self, mode):\n \"\"\"\n Check vuln detection success or exploit success when run in automatic test (i.e. 
\n no command provided via --cmd)\n\n :param str mode: mode can be either \"detect\" or \"exploit\"\n \"\"\"\n m = None\n if mode == 'detect':\n m = re.search(self.detection_success, self.output, re.IGNORECASE)\n if not m:\n if self.detection_success.lower() in self.output.lower():\n m = self.detection_success\n elif mode == 'exploit':\n if self.type == 'rce':\n if self.exploit_rce_output:\n # RCE with command output: use success match string provided in settings\n m = re.search(self.exploit_success, self.output, re.IGNORECASE)\n if not m:\n if self.exploit_success.lower() in self.output.lower():\n m = self.exploit_success\n else:\n # RCE without command output: use built-in match strings to detect either\n # ICMP echo reply or received HTTP request\n m = re.search(MATCHING_PATTERN_RCE_BLIND_ICMP, self.output, re.IGNORECASE)\n if not m:\n m = re.search(MATCHING_PATTERN_RCE_BLIND_HTTP, self.output, re.IGNORECASE)\n else:\n # Other exploit type (e.g. sqli)\n m = re.search(self.exploit_success, self.output, re.IGNORECASE)\n if not m:\n if self.exploit_success.lower() in self.output.lower():\n m = self.exploit_success\n\n return (m is not None)\n \n","repo_name":"koutto/jok3r-pocs","sub_path":"lib/core/Exploit.py","file_name":"Exploit.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"67"} +{"seq_id":"32531812596","text":"import os\nfrom uuid import uuid4\n\nimport firebase_admin\nimport telegram\nfrom telegram import InlineKeyboardButton, InlineQueryResultArticle, InlineKeyboardMarkup, InputTextMessageContent, \\\n ParseMode\nfrom telegram.ext import Updater, InlineQueryHandler, CallbackQueryHandler, Dispatcher\n\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\nscore=-999\npointscollection=None\n\ndef inlinequery(bot, update):\n \"\"\"Handle the inline query.\"\"\"\n query = update.inline_query.query\n newscore = getNumber(query.strip())\n score=getscore()\n results=[]\n if(newscore):\n keyboard = [\n [InlineKeyboardButton(\"Confirm\", callback_data='1#'+str(score+newscore)),\n InlineKeyboardButton(\"Cancel\", callback_data='2#')]]\n results.append(\n InlineQueryResultArticle(\n id=uuid4(),\n title='Request '+str(score)+\"+\"+str(newscore)+'='+str(score+newscore),\n reply_markup=InlineKeyboardMarkup(keyboard),\n input_message_content=InputTextMessageContent(\n 'Request for'+str(score)+\"+\"+str(newscore)+'='+str(score+newscore)+' points')),\n )\n results.append( InlineQueryResultArticle(\n id=uuid4(),\n title='Points: '+str(score),\n input_message_content=InputTextMessageContent(\n 'Total points: '+str(score))))\n\n\n update.inline_query.answer(results,cache_time=5)\n\ndef firebaseSetup():\n print('Firebase setup')\n global pointscollection\n # Use a service account\n cred = credentials.Certificate('serviceaccount.json')\n firebase_admin.initialize_app(cred)\n\n db = firestore.client()\n print('Firebase initialized')\n pointscollection = db.collection(u'points')\n print(getFirebaseScore())\n\ndef getFirebaseScore():\n print('Get score ')\n if not pointscollection:\n firebaseSetup()\n docs = pointscollection.stream()\n pointsvalue = []\n for doc in docs:\n pointsvalue.append(doc.to_dict())\n # docs[0].set(values)\n print('score '+str(pointsvalue[0][\"value\"]))\n return pointsvalue[0][\"value\"]\n\ndef writeFirebaseScore(score):\n if not pointscollection:\n firebaseSetup()\n pointscollection.document('1').set({'value': score})\ndef getNumber(score):\n try:\n return 
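# Exploit.run above drains proc.stdout one byte at a time and decodes by hand;
# a simpler line-buffered sketch that streams and captures merged output (the
# shell command is a placeholder):
import subprocess
import sys

proc = subprocess.Popen(
    "echo probing target; sleep 1; echo done",
    shell=True,
    executable="/bin/bash",
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    text=True,                  # decode bytes to str automatically
    bufsize=1,                  # line buffered
)
output = ""
for line in proc.stdout:        # yields lines as the process emits them
    sys.stdout.write(line)
    output += line
proc.wait()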
int(score)\n except ValueError:\n pass\n\ndef button(bot, update):\n query = update.callback_query\n if query.data.startswith(\"1#\"):\n newscore=query.data[2:].strip()\n score = getNumber(newscore)\n if(score):\n writescore(score)\n query.edit_message_text(text=\"Total points : \"+str(score))\n elif query.data.startswith(\"2#\"):\n query.edit_message_text(text=\"Request Rejected\")\n\ndef writescore(newscore):\n global score\n score = newscore\n # file = os.open(\"score\", \"w\")\n # file.write(file,str(score))\n # file.close\n writeFirebaseScore(score)\n\ndef getscore():\n global score\n # if score!=-999:\n # return score\n score= getFirebaseScore()\n return score\n\n\ndef sboss(request):\n if request.method == \"POST\":\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n dispatcher.process_update(update)\n # chat_id = update.message.chat.id\n # Reply with the same message\n # bot.sendMessage(chat_id=chat_id, text=update.message.text+'1')\n return \"ok\"\n\nbot = telegram.Bot(token=os.environ[\"TELEGRAM_TOKEN\"])\ndispatcher = Dispatcher(bot, None, workers=0)\ndispatcher.add_handler(InlineQueryHandler(inlinequery))\ndispatcher.add_handler(CallbackQueryHandler(button))\nfirebaseSetup()\n# writeFirebaseScore(100)","repo_name":"thiyagab/sboss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"2157922378","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport os\n\nlink = \"http://suninjuly.github.io/file_input.html\"\n\nwith open(\"test.txt\", \"w\") as file:\n file.write(\"Test creating file and uploud it\")\n\ntry:\n browser = webdriver.Chrome()\n browser.get(link)\n\n input_first_name = browser.find_element(By.NAME, \"firstname\")\n input_first_name.send_keys(\"sdfdf\")\n\n input_last_name = browser.find_element(By.NAME, \"lastname\")\n input_last_name.send_keys(\"sdfdf\")\n\n input_email = browser.find_element(By.NAME, \"email\")\n input_email.send_keys(\"sdfdf@mail.ru\")\n\n current_dir = os.path.abspath(os.path.dirname(__file__))\n file_name = \"test.txt\"\n file_path = os.path.join(current_dir, file_name)\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(file_path)\n\n submit_button = browser.find_element(By.CSS_SELECTOR, \"[type='submit']\")\n submit_button.click()\nfinally:\n time.sleep(10)\n browser.quit()\n","repo_name":"Eldar-Adzhiev/Python_automation","sub_path":"stepik_selenium_course/section2_selenium_methods/lesson2_step8_task_for_uploading_files.py","file_name":"lesson2_step8_task_for_uploading_files.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5862447809","text":"from keras import backend\nfrom keras import layers\nfrom keras.api_export import keras_export\nfrom keras.applications import imagenet_utils\nfrom keras.models import Functional\nfrom keras.ops import operation_utils\nfrom keras.utils import file_utils\n\nBASE_WEIGHTS_PATH = (\n \"https://storage.googleapis.com/tensorflow/keras-applications/resnet/\"\n)\nWEIGHTS_HASHES = {\n \"resnet50\": (\n \"2cb95161c43110f7111970584f804107\",\n \"4d473c1dd8becc155b73f8504c6f6626\",\n ),\n \"resnet101\": (\n \"f1aeb4b969a6efcfb50fad2f0c20cfc5\",\n \"88cf7a10940856eca736dc7b7e228a21\",\n ),\n \"resnet152\": (\n \"100835be76be38e30d865e96f2aaae62\",\n 
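# getFirebaseScore/writeFirebaseScore above stream the whole collection just to
# read one value; a direct document round trip, assuming the same
# serviceaccount.json credentials and 'points' collection (needs a reachable
# Firestore project):
import firebase_admin
from firebase_admin import credentials, firestore

cred = credentials.Certificate("serviceaccount.json")
firebase_admin.initialize_app(cred)
db = firestore.client()

doc = db.collection("points").document("1")
doc.set({"value": 100})                    # write the score
print(doc.get().to_dict()["value"])        # read it back -> 100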
\"ee4c566cf9a93f14d82f913c2dc6dd0c\",\n ),\n \"resnet50v2\": (\n \"3ef43a0b657b3be2300d5770ece849e0\",\n \"fac2f116257151a9d068a22e544a4917\",\n ),\n \"resnet101v2\": (\n \"6343647c601c52e1368623803854d971\",\n \"c0ed64b8031c3730f411d2eb4eea35b5\",\n ),\n \"resnet152v2\": (\n \"a49b44d1979771252814e80f8ec446f9\",\n \"ed17cf2e0169df9d443503ef94b23b33\",\n ),\n \"resnext50\": (\n \"67a5b30d522ed92f75a1f16eef299d1a\",\n \"62527c363bdd9ec598bed41947b379fc\",\n ),\n \"resnext101\": (\n \"34fb605428fcc7aa4d62f44404c11509\",\n \"0f678c91647380debd923963594981b3\",\n ),\n}\n\n\ndef ResNet(\n stack_fn,\n preact,\n use_bias,\n model_name=\"resnet\",\n include_top=True,\n weights=\"imagenet\",\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n classifier_activation=\"softmax\",\n):\n \"\"\"Instantiates the ResNet, ResNetV2, and ResNeXt architecture.\n\n Args:\n stack_fn: A function that returns output tensor for the\n stacked residual blocks.\n preact: Whether to use pre-activation or not. `True` for ResNetV2,\n `False` for ResNet and ResNeXt.\n use_bias: Whether to use biases for convolutional layers or not.\n `True` for ResNet and ResNetV2, `False` for ResNeXt.\n model_name: Name of the model.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: One of `None` (random initialization),\n `\"imagenet\"` (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is `False` (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or `(3, 224, 224)` (with `\"channels_first\"` data format). It\n should have exactly 3 inputs channels.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is `True`,\n and if no `weights` argument is specified.\n classifier_activation: A `str` or callable. The activation\n function to use on the \"top\" layer. Ignored unless\n `include_top=True`. Set `classifier_activation=None` to\n return the logits of the \"top\" layer. When loading\n pretrained weights, `classifier_activation` can only be\n `None` or `\"softmax\"`.\n\n Returns:\n A Model instance.\n \"\"\"\n\n if not (weights in {\"imagenet\", None} or file_utils.exists(weights)):\n raise ValueError(\n \"The `weights` argument should be either \"\n \"`None` (random initialization), 'imagenet' \"\n \"(pre-training on ImageNet), \"\n \"or the path to the weights file to be loaded. Received: \"\n f\"weights={weights}\"\n )\n\n if weights == \"imagenet\" and include_top and classes != 1000:\n raise ValueError(\n \"If using `weights='imagenet'` with `include_top=True`, \"\n \"`classes` should be 1000. 
\"\n f\"Received classes={classes}\"\n )\n\n # Determine proper input shape\n input_shape = imagenet_utils.obtain_input_shape(\n input_shape,\n default_size=224,\n min_size=32,\n data_format=backend.image_data_format(),\n require_flatten=include_top,\n weights=weights,\n )\n\n if input_tensor is None:\n img_input = layers.Input(shape=input_shape)\n else:\n if not backend.is_keras_tensor(input_tensor):\n img_input = layers.Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n if backend.image_data_format() == \"channels_last\":\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name=\"conv1_pad\")(\n img_input\n )\n x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name=\"conv1_conv\")(x)\n\n if not preact:\n x = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=\"conv1_bn\"\n )(x)\n x = layers.Activation(\"relu\", name=\"conv1_relu\")(x)\n\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=\"pool1_pad\")(x)\n x = layers.MaxPooling2D(3, strides=2, name=\"pool1_pool\")(x)\n\n x = stack_fn(x)\n\n if preact:\n x = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=\"post_bn\"\n )(x)\n x = layers.Activation(\"relu\", name=\"post_relu\")(x)\n\n if include_top:\n x = layers.GlobalAveragePooling2D(name=\"avg_pool\")(x)\n\n # Validate activation for the classifier layer\n imagenet_utils.validate_activation(classifier_activation, weights)\n\n x = layers.Dense(\n classes, activation=classifier_activation, name=\"predictions\"\n )(x)\n else:\n if pooling == \"avg\":\n x = layers.GlobalAveragePooling2D(name=\"avg_pool\")(x)\n elif pooling == \"max\":\n x = layers.GlobalMaxPooling2D(name=\"max_pool\")(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = operation_utils.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n # Create model.\n model = Functional(inputs, x, name=model_name)\n\n # Load weights.\n if (weights == \"imagenet\") and (model_name in WEIGHTS_HASHES):\n if include_top:\n file_name = model_name + \"_weights_tf_dim_ordering_tf_kernels.h5\"\n file_hash = WEIGHTS_HASHES[model_name][0]\n else:\n file_name = (\n model_name + \"_weights_tf_dim_ordering_tf_kernels_notop.h5\"\n )\n file_hash = WEIGHTS_HASHES[model_name][1]\n weights_path = file_utils.get_file(\n file_name,\n BASE_WEIGHTS_PATH + file_name,\n cache_subdir=\"models\",\n file_hash=file_hash,\n )\n model.load_weights(weights_path)\n elif weights is not None:\n model.load_weights(weights)\n\n return model\n\n\ndef residual_block_v1(\n x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None\n):\n \"\"\"A residual block for ResNet*_v1.\n\n Args:\n x: Input tensor.\n filters: No of filters in the bottleneck layer.\n kernel_size: Kernel size of the bottleneck layer. Defaults to `3`.\n stride: Stride of the first layer. Defaults to `1`.\n conv_shortcut: Use convolution shortcut if `True`, otherwise\n use identity shortcut. 
Defaults to `True`\n name(optional): Name of the block\n\n Returns:\n Output tensor for the residual block.\n \"\"\"\n\n if backend.image_data_format() == \"channels_last\":\n bn_axis = 3\n else:\n bn_axis = 1\n\n if conv_shortcut:\n shortcut = layers.Conv2D(\n 4 * filters, 1, strides=stride, name=name + \"_0_conv\"\n )(x)\n shortcut = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=name + \"_0_bn\"\n )(shortcut)\n else:\n shortcut = x\n\n x = layers.Conv2D(filters, 1, strides=stride, name=name + \"_1_conv\")(x)\n x = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=name + \"_1_bn\"\n )(x)\n x = layers.Activation(\"relu\", name=name + \"_1_relu\")(x)\n\n x = layers.Conv2D(\n filters, kernel_size, padding=\"SAME\", name=name + \"_2_conv\"\n )(x)\n x = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=name + \"_2_bn\"\n )(x)\n x = layers.Activation(\"relu\", name=name + \"_2_relu\")(x)\n\n x = layers.Conv2D(4 * filters, 1, name=name + \"_3_conv\")(x)\n x = layers.BatchNormalization(\n axis=bn_axis, epsilon=1.001e-5, name=name + \"_3_bn\"\n )(x)\n\n x = layers.Add(name=name + \"_add\")([shortcut, x])\n x = layers.Activation(\"relu\", name=name + \"_out\")(x)\n return x\n\n\ndef stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None):\n \"\"\"A set of stacked residual blocks.\n\n Args:\n x: Input tensor.\n filters: Number of filters in the bottleneck layer in a block.\n blocks: Number of blocks in the stacked blocks.\n stride1: Stride of the first layer in the first block. Defaults to `2`.\n name: Stack label.\n\n Returns:\n Output tensor for the stacked blocks.\n \"\"\"\n\n x = residual_block_v1(x, filters, stride=stride1, name=name + \"_block1\")\n for i in range(2, blocks + 1):\n x = residual_block_v1(\n x, filters, conv_shortcut=False, name=name + \"_block\" + str(i)\n )\n return x\n\n\ndef residual_block_v2(\n x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None\n):\n \"\"\"A residual block for ResNet*_v2.\n\n Args:\n x: Input tensor.\n filters: No of filters in the bottleneck layer.\n kernel_size: Kernel size of the bottleneck layer. Defaults to `3`.\n stride: Stride of the first layer. Defaults to `1`.\n conv_shortcut: Use convolution shortcut if `True`, otherwise\n use identity shortcut. 
Defaults to `False`\n        name(optional): Name of the block\n\n    Returns:\n        Output tensor for the residual block.\n    \"\"\"\n\n    if backend.image_data_format() == \"channels_last\":\n        bn_axis = 3\n    else:\n        bn_axis = 1\n\n    preact = layers.BatchNormalization(\n        axis=bn_axis, epsilon=1.001e-5, name=name + \"_preact_bn\"\n    )(x)\n    preact = layers.Activation(\"relu\", name=name + \"_preact_relu\")(preact)\n\n    if conv_shortcut:\n        shortcut = layers.Conv2D(\n            4 * filters, 1, strides=stride, name=name + \"_0_conv\"\n        )(preact)\n    else:\n        shortcut = (\n            layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x\n        )\n\n    x = layers.Conv2D(\n        filters, 1, strides=1, use_bias=False, name=name + \"_1_conv\"\n    )(preact)\n    x = layers.BatchNormalization(\n        axis=bn_axis, epsilon=1.001e-5, name=name + \"_1_bn\"\n    )(x)\n    x = layers.Activation(\"relu\", name=name + \"_1_relu\")(x)\n\n    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + \"_2_pad\")(x)\n    x = layers.Conv2D(\n        filters,\n        kernel_size,\n        strides=stride,\n        use_bias=False,\n        name=name + \"_2_conv\",\n    )(x)\n    x = layers.BatchNormalization(\n        axis=bn_axis, epsilon=1.001e-5, name=name + \"_2_bn\"\n    )(x)\n    x = layers.Activation(\"relu\", name=name + \"_2_relu\")(x)\n\n    x = layers.Conv2D(4 * filters, 1, name=name + \"_3_conv\")(x)\n    x = layers.Add(name=name + \"_out\")([shortcut, x])\n    return x\n\n\ndef stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None):\n    \"\"\"A set of stacked residual blocks.\n\n    Args:\n        x: Input tensor.\n        filters: Number of filters in the bottleneck layer in a block.\n        blocks: Number of blocks in the stacked blocks.\n        stride1: Stride of the first layer in the first block. Defaults to `2`.\n        name: Stack label.\n\n    Returns:\n        Output tensor for the stacked blocks.\n    \"\"\"\n\n    x = residual_block_v2(x, filters, conv_shortcut=True, name=name + \"_block1\")\n    for i in range(2, blocks):\n        x = residual_block_v2(x, filters, name=name + \"_block\" + str(i))\n    x = residual_block_v2(\n        x, filters, stride=stride1, name=name + \"_block\" + str(blocks)\n    )\n    return x\n\n\n@keras_export(\n    [\n        \"keras.applications.resnet50.ResNet50\",\n        \"keras.applications.resnet.ResNet50\",\n        \"keras.applications.ResNet50\",\n    ]\n)\ndef ResNet50(\n    include_top=True,\n    weights=\"imagenet\",\n    input_tensor=None,\n    input_shape=None,\n    pooling=None,\n    classes=1000,\n    classifier_activation=\"softmax\",\n):\n    \"\"\"Instantiates the ResNet50 architecture.\"\"\"\n\n    def stack_fn(x):\n        x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name=\"conv2\")\n        x = stack_residual_blocks_v1(x, 128, 4, name=\"conv3\")\n        x = stack_residual_blocks_v1(x, 256, 6, name=\"conv4\")\n        return stack_residual_blocks_v1(x, 512, 3, name=\"conv5\")\n\n    return ResNet(\n        stack_fn,\n        False,\n        True,\n        \"resnet50\",\n        include_top,\n        weights,\n        input_tensor,\n        input_shape,\n        pooling,\n        classes,\n        classifier_activation=classifier_activation,\n    )\n\n\n@keras_export(\n    [\n        \"keras.applications.resnet.ResNet101\",\n        \"keras.applications.ResNet101\",\n    ]\n)\ndef ResNet101(\n    include_top=True,\n    weights=\"imagenet\",\n    input_tensor=None,\n    input_shape=None,\n    pooling=None,\n    classes=1000,\n    classifier_activation=\"softmax\",\n):\n    \"\"\"Instantiates the ResNet101 architecture.\"\"\"\n\n    def stack_fn(x):\n        x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name=\"conv2\")\n        x = stack_residual_blocks_v1(x, 128, 4, name=\"conv3\")\n        x = stack_residual_blocks_v1(x, 256, 23, name=\"conv4\")\n        return stack_residual_blocks_v1(x, 512, 3, name=\"conv5\")\n\n    return ResNet(\n        stack_fn,\n        False,\n        
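# (added note, not in the original source) the bare positional flags passed to ResNet() here are preact=False (above) and use_bias=True (below); see the ResNet() signature earlier in this file.\n        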
True,\n \"resnet101\",\n include_top,\n weights,\n input_tensor,\n input_shape,\n pooling,\n classes,\n classifier_activation=classifier_activation,\n )\n\n\n@keras_export(\n [\n \"keras.applications.resnet.ResNet152\",\n \"keras.applications.ResNet152\",\n ]\n)\ndef ResNet152(\n include_top=True,\n weights=\"imagenet\",\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n classifier_activation=\"softmax\",\n):\n \"\"\"Instantiates the ResNet152 architecture.\"\"\"\n\n def stack_fn(x):\n x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name=\"conv2\")\n x = stack_residual_blocks_v1(x, 128, 8, name=\"conv3\")\n x = stack_residual_blocks_v1(x, 256, 36, name=\"conv4\")\n return stack_residual_blocks_v1(x, 512, 3, name=\"conv5\")\n\n return ResNet(\n stack_fn,\n False,\n True,\n \"resnet152\",\n include_top,\n weights,\n input_tensor,\n input_shape,\n pooling,\n classes,\n classifier_activation=classifier_activation,\n )\n\n\n@keras_export(\n [\n \"keras.applications.resnet50.preprocess_input\",\n \"keras.applications.resnet.preprocess_input\",\n ]\n)\ndef preprocess_input(x, data_format=None):\n return imagenet_utils.preprocess_input(\n x, data_format=data_format, mode=\"caffe\"\n )\n\n\n@keras_export(\n [\n \"keras.applications.resnet50.decode_predictions\",\n \"keras.applications.resnet.decode_predictions\",\n ]\n)\ndef decode_predictions(preds, top=5):\n return imagenet_utils.decode_predictions(preds, top=top)\n\n\npreprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(\n mode=\"\",\n ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,\n error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,\n)\ndecode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__\n\nDOC = \"\"\"\n\nReference:\n- [Deep Residual Learning for Image Recognition](\n https://arxiv.org/abs/1512.03385) (CVPR 2015)\n\nFor image classification use cases, see [this page for detailed examples](\n https://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\n https://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor ResNet, call `keras.applications.resnet.preprocess_input` on your\ninputs before passing them to the model. `resnet.preprocess_input` will convert\nthe input images from RGB to BGR, then will zero-center each color channel with\nrespect to the ImageNet dataset, without scaling.\n\nArgs:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n `\"imagenet\"` (pre-training on ImageNet), or the path to the weights\n file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified if `include_top`\n is `False` (otherwise the input shape has to be `(224, 224, 3)`\n (with `\"channels_last\"` data format) or `(3, 224, 224)`\n (with `\"channels_first\"` data format). It should have exactly 3\n inputs channels, and width and height should be no smaller than 32.\n E.g. 
`(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be the 4D tensor\n output of the last convolutional block.\n - `avg` means that global average pooling will be applied to the output\n of the last convolutional block, and thus the output of the\n model will be a 2D tensor.\n - `max` means that global max pooling will be applied.\n classes: optional number of classes to classify images into, only to be\n specified if `include_top` is `True`, and if no `weights` argument is\n specified.\n classifier_activation: A `str` or callable. The activation function to\n use on the \"top\" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the \"top\" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `\"softmax\"`.\n\nReturns:\n A Model instance.\n\"\"\"\n\nsetattr(ResNet50, \"__doc__\", ResNet50.__doc__ + DOC)\nsetattr(ResNet101, \"__doc__\", ResNet101.__doc__ + DOC)\nsetattr(ResNet152, \"__doc__\", ResNet152.__doc__ + DOC)\n","repo_name":"keras-team/keras","sub_path":"keras/applications/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":18978,"program_lang":"python","lang":"en","doc_type":"code","stars":59773,"dataset":"github-code","pt":"67"} +{"seq_id":"28453234515","text":"import sys\nfrom collections import deque\n#1012\ninput = sys.stdin.readline\n\ndx = [1,-1,0,0]\ndy = [0,0,1,-1]\n\ndef bfs(x,y):\n q = deque()\n global result\n q.append([x,y])\n graph[x][y]=0\n\n while q:\n a,b = q.popleft()\n for i in range(4):\n ax = a + dy[i]\n ay = b + dx[i]\n if 0 <= ax < N and 0<= ay < M and graph[ax][ay]==1:\n q.append([ax,ay])\n graph[ax][ay] = 0\n result += 1\n\nT = int(input())\n\nfor m in range(T):\n M,N,K = map(int,input().split())\n result = 0\n graph = [[0]*(M) for i in range(N)]\n\n for i in range(K):\n a,b = map(int,input().split())\n graph[b][a] = 1\n\n for i in range(N):\n for j in range(M):\n if graph[i][j] == 1:\n bfs(i,j)\n print(result)\n","repo_name":"ChuiUpGiWon/algorithm_study","sub_path":"BOJ_SoonGyu/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70451156055","text":"from hal.network import NetworkConnectionInterface\nfrom .bms_interface import BmsInterface\nfrom hal import get_interval\nfrom config import Config\nfrom mqtt import MQTTClient # type: ignore\nimport json\nfrom typing import Union\ntry:\n import uasyncio as asyncio # type: ignore\nexcept ImportError:\n import asyncio\n\n\nclass MqttOutput:\n def __init__(self, config: Config, bms: BmsInterface, network: NetworkConnectionInterface) -> None:\n self._config = config\n self.enabled = self._config.mqtt_enabled is True\n self.connected = False\n if self.enabled:\n self._bms = bms\n self._network = network\n self._client = MQTTClient(\"pyBms\", self._config.mqtt_host)\n self._interval = get_interval()\n self._interval.set(self._config.mqtt_output_interval)\n\n def _connect(self) -> None:\n if not self.connected and self._network.connected:\n try:\n print(f\"Connecting to MQTT server: {self._config.mqtt_host}\")\n self._client.connect()\n print(\"Connected to MQTT\")\n self.connected = True\n except OSError:\n self.connected = False\n print(\"Failed to connect to MQTT\")\n\n def _publish(self):\n if self._interval.ready and self._network.connected:\n 
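# (added note, not in the original source) _connect() is a no-op once self.connected is True, so calling it on every publish tick lazily re-establishes a dropped MQTT session before publishing.\n            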
self._connect()\n self._interval.reset()\n if self.connected:\n self._publish_topic(\"/voltage\", self._bms.battery_pack.voltage)\n self._publish_topic(\"/soc\", self._bms.state_of_charge)\n for module_index, module in enumerate(self._bms.battery_pack.modules):\n self._publish_topic(\n f\"/modules/{module_index}/voltage\", module.voltage)\n self._publish_topic(f\"/modules/{module_index}/fault\", int(module.fault))\n self._publish_topic(f\"/modules/{module_index}/alert\", int(module.alert))\n for temp_index, temp in enumerate(module.temperatures):\n self._publish_topic(\n f\"/modules/{module_index}/temperature/{temp_index}\", temp)\n for cell_index, cell in enumerate(module.cells):\n self._publish_topic(\n f\"/modules/{module_index}/cells/{cell_index}/voltage\", cell.voltage)\n self._publish_topic(\n f\"/modules/{module_index}/cells/{cell_index}/fault\", int(cell.fault))\n self._publish_topic(\n f\"/modules/{module_index}/cells/{cell_index}/alert\", int(cell.alert))\n\n def _publish_topic(self, topic: str, value: Union[int, bool, float]) -> None:\n self._client.publish(f\"{self._config.mqtt_topic_prefix}{topic}\", json.dumps(\n {\"value\": value}))\n\n async def main(self):\n while True:\n if self.enabled and self._network.connected:\n self._publish()\n await asyncio.sleep_ms(1)\n if self.connected:\n self._client.check_msg()\n","repo_name":"bjpirt/pyBMS","sub_path":"bms/mqtt_output.py","file_name":"mqtt_output.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"70632751573","text":"from datetime import datetime\nfrom operator import itemgetter\nfrom os.path import dirname, join\n\nimport pytest # noqa\nfrom city_scrapers_core.constants import BOARD, PASSED\nfrom city_scrapers_core.utils import file_response\nfrom freezegun import freeze_time\nfrom scrapy.http import XmlResponse\n\nfrom city_scrapers.spiders.cle_metro_school_district import CleMetroSchoolDistrictSpider\n\ninit_test_response = file_response(\n join(dirname(__file__), \"files\", \"cle_metro_school_district.xml\"),\n url=\"https://www.boarddocs.com/oh/cmsd/board.nsf/XML-ActiveMeetings\",\n)\ntest_response = XmlResponse(\n url=init_test_response.url,\n request=init_test_response.request,\n body=init_test_response.body,\n)\nspider = CleMetroSchoolDistrictSpider()\n\nfreezer = freeze_time(\"2019-09-09\")\nfreezer.start()\n\nparsed_items = sorted(\n [item for item in spider.parse(test_response)], key=itemgetter(\"start\")\n)\n\nfreezer.stop()\n\n\ndef test_count():\n assert len(parsed_items) == 250\n\n\ndef test_title():\n assert parsed_items[0][\"title\"] == \"2009 Organization Meeting/Work Session\"\n\n\ndef test_description():\n assert parsed_items[0][\"description\"] == \"\"\n\n\ndef test_start():\n assert parsed_items[0][\"start\"] == datetime(2009, 1, 13, 18, 30)\n\n\ndef test_end():\n assert parsed_items[0][\"end\"] is None\n\n\ndef test_time_notes():\n assert parsed_items[0][\"time_notes\"] == \"\"\n\n\ndef test_id():\n assert (\n parsed_items[0][\"id\"]\n == \"cle_metro_school_district/200901131830/x/2009_organization_meeting_work_session\" # noqa\n )\n\n\ndef test_status():\n assert parsed_items[0][\"status\"] == PASSED\n\n\ndef test_location():\n assert parsed_items[0][\"location\"] == {\n \"name\": \"Board of Education Administration Building Board Room\",\n \"address\": \"1111 Superior Ave E, Cleveland, OH 44114\",\n }\n assert parsed_items[-1][\"location\"] == {\n \"address\": \"1111 Superior Avenue, 4th Floor 
Conference Room, Cleveland, OH 44114\", # noqa\n \"name\": \"Cleveland Municipal School District Administrative Offices\",\n }\n\n\ndef test_source():\n assert (\n parsed_items[-1][\"source\"]\n == \"http://go.boarddocs.com/oh/cmsd/Board.nsf/goto?open&id=B7CTJX75F93D\"\n )\n\n\ndef test_links():\n assert parsed_items[-1][\"links\"] == [\n {\n \"href\": \"http://go.boarddocs.com/oh/cmsd/Board.nsf/goto?open&id=B7CTJX75F93D\", # noqa\n \"title\": \"Agenda\",\n }\n ]\n\n\ndef test_classification():\n assert parsed_items[0][\"classification\"] == BOARD\n\n\ndef test_all_day():\n assert parsed_items[0][\"all_day\"] is False\n","repo_name":"City-Bureau/city-scrapers-cle","sub_path":"tests/test_cle_metro_school_district.py","file_name":"test_cle_metro_school_district.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"9569509915","text":"#!/usr/bin/env python\n# file test_helpers.py\n# author Florent Guiotte \n# version 0.0\n# date 24 août 2020\n\"\"\"Abstract\n\ndoc.\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom idefix import helpers, io\n\n@pytest.fixture\ndef ma_raster():\n rs = np.random.RandomState(42)\n raster = rs.random((10,10))\n raster = np.ma.array(raster, mask=raster<.1)\n return raster\n\n@pytest.mark.parametrize('method', \n ['nearest', 'linear', 'cubic', 'idw'])\ndef test_interpolate(ma_raster, method):\n helpers.interpolate(ma_raster, method)\n\ndef _data_pc(datadir, set_id):\n path = datadir.join('pc{}.txt'.format(set_id))\n data = io.load_txt(path, 'x y z i'.split())\n return data\n\n@pytest.mark.parametrize('params', [\n {},\n {'cell_size': 2.},\n {'last': True}])\ndef test_dsm(datadir, params):\n pc = _data_pc(datadir, 0)\n dsm = helpers.dsm(pc, **params)\n\n assert dsm is not None, 'Did not return anything...'\n assert not np.isnan(dsm).any(), 'Some missing values in DSM'\n\ndef test_dtm(ma_raster):\n dtm = helpers.dtm_dh_filter(ma_raster)\n\n assert dtm is not None, 'Did not return anything...'\n\n@pytest.mark.parametrize('params', [\n {},\n {'bin_structure': 'pixel'},\n {'out_dir': True, 'crs': 'EPSG:26910'}])\ndef test_rasterize(datadir, params):\n # Workaround for out_dir with pytest\n if 'out_dir' in params:\n params['out_dir'] = datadir\n\n raster = helpers.rasterize(datadir.join('test.npz'), **params)\n\n assert raster is not None, 'Did not return anything...'\n","repo_name":"fguiotte/idefix","sub_path":"test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72951386772","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom functools import partial\n\nEPS = 1e-8\n\n@torch.no_grad()\ndef quantize_symmetric_per_tensor(tensor:torch.Tensor, n_bits:int):\n t_max = torch.max(torch.abs(tensor))\n q_max = 2**(n_bits - 1) - 1\n scale = t_max.clamp(EPS).div(q_max)\n q_tensor = tensor.div(scale).round()\n return q_tensor, scale, torch.tensor(.0)\n\n@torch.no_grad()\ndef quantize_asymmetric_per_tensor(tensor:torch.Tensor, n_bits:int):\n t_max = torch.max(tensor)\n t_min = torch.min(tensor)\n scale = (t_max - t_min) / (2**n_bits - 1)\n zero_point = torch.round((t_max + t_min)/ 2 / scale)\n q_tensor = torch.round(tensor / scale) - zero_point\n return q_tensor, scale, zero_point\n\n \nclass FakeQuantLinear(nn.Module):\n def __init__(self, weight:torch.Tensor, bias:torch.Tensor, n_bits:int, 
quantize=quantize_symmetric_per_tensor) -> None:\n        super().__init__()\n\n        self.quantize = partial(quantize, n_bits=n_bits)\n        q_w, s_w, z_w = self.quantize(weight)\n        self.register_buffer('weight', q_w.squeeze(0))\n        self.register_buffer('s_w', s_w)\n        self.register_buffer('z_w', z_w)\n        self.register_buffer(\"bias\", bias)\n    \n    def forward(self, x):\n        q_x, s_x, z_x = self.quantize(x)\n        scale = s_x * self.s_w\n        bias = self.bias if self.bias is None else self.bias.div(scale).round()\n        # dequantize with q + z: both quantize helpers store q = round(t / s) - z, so t ~ (q + z) * s\n        out = F.linear(q_x.add(z_x), self.weight.add(self.z_w), bias)\n        return out.mul_(scale)\n    \n\n# ===================================================\n# ===================== TEST ========================\n# ===================================================\n\ndef matmul_with_quantize(t1, t2, quantize):\n    q1, s1, z1 = quantize(t1, 8)\n    q2, s2, z2 = quantize(t2, 8)\n    return torch.matmul(q1+z1, (q2+z2).T) * s1 * s2\n\ndef test_fakequant(t1, t2):\n    result = torch.matmul(t1, t2.T)\n    print('[symmetric] MSE loss', F.mse_loss(result, matmul_with_quantize(t1, t2, quantize_symmetric_per_tensor)))\n    print('[asymmetric] MSE loss', F.mse_loss(result, matmul_with_quantize(t1, t2, quantize_asymmetric_per_tensor)))\n\ndef test_fake_quant_linear(t):\n    linear = nn.Linear(t.size(1), 512)\n    result = linear(t)\n    quant_linear = FakeQuantLinear(linear.weight.detach(), linear.bias.detach(), 8)\n    print('[quantized linear] MSE loss', F.mse_loss(result, quant_linear(t)).item())\n\ndef main():\n    torch.manual_seed(42)\n    # evenly distributed tensor\n    t1 = torch.rand(128, 768) * 2 - 1\n    t2 = torch.rand(128, 768) * 2 - 1\n    print('=== test fake quantization')\n    test_fakequant(t1, t2)\n    \n    print('=== test quantization of biased tensor')\n    test_fakequant(t1 + 10, t2 - 10)\n\n    print('=== test fakequant linear')\n    test_fake_quant_linear(t1)\n    \nif __name__ == '__main__':\n    main()","repo_name":"yeti-s/nlp_quantization","sub_path":"smoothquant/fake_quant.py","file_name":"fake_quant.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"22254290919","text":"import tensorflow as tf\nfrom ops import *\n\ndef teacher(input_image):\n    input = adjust_contrast(input_image)\n    with tf.variable_scope(\"generator_t\"):\n        W0 = weight_variable([1, 7, 3, 16], name=\"W0\") # shape=[filter_height, filter_width, in_channel, out_channel], name\n        b0 = bias_variable([16], name=\"b0\")\n        c0 = lrelu(conv2d(input, W0) + b0)\n\n        W1 = weight_variable([7, 1, 16, 16], name=\"W1\")\n        b1 = bias_variable([16], name=\"b1\")\n        c1 = lrelu(conv2d(c0, W1) + b1)\n\n        out_c1 = tf.reduce_mean(c1, axis=-1, keepdims=True)\n\n        # residual 1\n        W2 = weight_variable([3, 3, 16, 32], name=\"W2\")\n        b2 = bias_variable([32], name=\"b2\")\n        c2 = lrelu(_instance_norm(conv2d(c1, W2) + b2))\n\n        W3 = weight_variable([3, 3, 32, 16], name=\"W3\")\n        b3 = bias_variable([16], name=\"b3\")\n        c3 = _instance_norm(conv2d(c2, W3) + b3) + c1 # 16\n\n        # residual 2 dilated residual block\n        W4 = weight_variable([3, 3, 16, 32], name=\"W4\")\n        b4 = bias_variable([32], name=\"b4\")\n        c4 = lrelu(_instance_norm(atrous_conv2d(c3, W4, 2) + b4))\n\n        W5 = weight_variable([3, 3, 32, 16], name=\"W5\")\n        b5 = bias_variable([16], name=\"b5\")\n        c5 = _instance_norm(atrous_conv2d(c4, W5, 2) + b5) + c3 # 16\n\n        gather = tf.concat([c3, c5], axis=-1) ## 16*2=32\n        W6 = weight_variable([1, 1, 32, 16], name=\"W6\")\n        b6 = bias_variable([16], name=\"b6\")\n        c6 = lrelu(conv2d(gather, W6) + b6)\n\n        out_c6 = tf.reduce_mean(c6, axis=-1, 
keepdims=True)\n\n # Final\n W7 = weight_variable([3, 3, 16, 3], name=\"W7\")\n b7 = bias_variable([3], name=\"b7\")\n enhanced = tf.nn.tanh(conv2d(c6, W7) + b7) * 0.58 + 0.5\n\n return enhanced, out_c1, out_c6\n\ndef student(input_image):\n input = adjust_contrast(input_image)\n with tf.variable_scope(\"generator_s\"):\n W0 = weight_variable([1, 7, 3, 8], name=\"W0\")\n b0 = bias_variable([8], name=\"b0\")\n c0 = lrelu(conv2d(input, W0) + b0)\n\n W1 = weight_variable([7, 1, 8, 8], name=\"W1\")\n b1 = bias_variable([8], name=\"b1\")\n c1 = lrelu(conv2d(c0, W1) + b1)\n\n out_c1 = tf.reduce_mean(c1, axis=-1, keepdims=True)\n\n W2 = weight_variable([1, 7, 8, 16], name=\"W2\")\n b2 = bias_variable([16], name=\"b2\")\n c2 = lrelu(_instance_norm(atrous_conv2d(c1, W2, 2) + b2))\n\n W3 = weight_variable([7, 1, 16, 8], name=\"W3\")\n b3 = bias_variable([8], name=\"b3\")\n c3 = lrelu(_instance_norm(atrous_conv2d(c2, W3, 2) + b3)) + c1\n\n out_c3 = tf.reduce_mean(c3, axis=-1, keepdims=True)\n\n # Final\n W4 = weight_variable([3, 3, 8, 3], name=\"W4\")\n b4 = bias_variable([3], name=\"b4\")\n enhanced = tf.nn.tanh(conv2d(c3, W4) + b4) * 0.58 + 0.5\n\n return enhanced, out_c1, out_c3\n\ndef adversarial(image_):\n with tf.variable_scope(\"discriminator\"):\n conv1 = _conv_layer(image_, 64, 9, 4, batch_nn=False)\n conv2 = _conv_layer(conv1, 128, 5, 2)\n conv3 = _conv_layer(conv2, 192, 3, 1)\n conv4 = _conv_layer(conv3, 192, 3, 1)\n conv5 = _conv_layer(conv4, 256, 3, 2)\n conv6 = _conv_layer(conv5, 256, 3, 1)\n\n flat_size = 256 * 7 * 7\n conv6_flat = tf.reshape(conv6, [-1, flat_size])\n\n W_fc = tf.Variable(tf.truncated_normal([flat_size, 1024], stddev=0.01))\n bias_fc = tf.Variable(tf.constant(0.01, shape=[1024]))\n\n fc = leaky_relu(tf.matmul(conv6_flat, W_fc) + bias_fc)\n\n W_out = tf.Variable(tf.truncated_normal([1024, 2], stddev=0.01))\n bias_out = tf.Variable(tf.constant(0.01, shape=[2]))\n\n adv_out = tf.nn.softmax(tf.matmul(fc, W_out) + bias_out)\n\n return adv_out\n\ndef weight_variable(shape, name):\n\n initial = tf.truncated_normal(shape, stddev=0.01) # outputs random values from a truncated normal distribution\n return tf.Variable(initial, name=name)\n\ndef bias_variable(shape, name):\n\n initial = tf.constant(0.01, shape=shape)\n return tf.Variable(initial, name=name)\n\ndef conv2d(x, W): # tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, data_format='NHWC', dilations=[1, 1, 1, 1], name=None)\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') # an input tensor of shape [batch, in_height, in_width, in_channels],\n# a filter/kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]\n\ndef atrous_conv2d(x, W, rate):\n return tf.nn.atrous_conv2d(x, W, rate=rate, padding='SAME')\n\ndef depthwise_conv2d(x, W, rate=None):\n return tf.nn.depthwise_conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', rate=rate)\n\ndef leaky_relu(x, alpha = 0.2): # for discriminator\n return tf.maximum(alpha * x, x)\n\ndef lrelu(x, alpha = 0.05): # for generator\n return tf.maximum(alpha * x, x)\n\ndef _conv_layer(net, num_filters, filter_size, strides, batch_nn=True):\n \n weights_init = _conv_init_vars(net, num_filters, filter_size)\n strides_shape = [1, strides, strides, 1]\n bias = tf.Variable(tf.constant(0.01, shape=[num_filters]))\n\n net = tf.nn.conv2d(net, weights_init, strides_shape, padding='SAME') + bias \n net = leaky_relu(net)\n\n if batch_nn:\n net = _instance_norm(net)\n\n return net\n\ndef _instance_norm(net):\n\n batch, rows, cols, channels = 
[i.value for i in net.get_shape()] # [N, H, W, C]\n    var_shape = [channels]\n\n    mu, sigma_sq = tf.nn.moments(net, [1, 2], keep_dims=True) # calculate the mean and variance of x\n    # for so-called \"global normalization\", used with convolutional filters with shape [batch, height, width, depth],\n    # pass axes=[0, 1, 2]\n    # for simple batch normalization pass axes=[0] (batch only)\n\n    shift = tf.Variable(tf.zeros(var_shape))\n    scale = tf.Variable(tf.ones(var_shape))\n\n    epsilon = 1e-3\n    normalized = (net-mu)/(sigma_sq + epsilon)**(.5)\n\n    return scale * normalized + shift\n\ndef _conv_init_vars(net, out_channels, filter_size, transpose=False):\n\n    _, rows, cols, in_channels = [i.value for i in net.get_shape()] # the shape of the input tensor\n\n    # [filter_height, filter_width, in_channels, out_channels]\n    if not transpose:\n        weights_shape = [filter_size, filter_size, in_channels, out_channels]\n    else:\n        weights_shape = [filter_size, filter_size, out_channels, in_channels]\n\n    weights_init = tf.Variable(tf.truncated_normal(weights_shape, stddev=0.01, seed=1), dtype=tf.float32)\n    return weights_init\n\ndef adjust_contrast(x):\n    return tf.image.adjust_contrast(x, 1.2)","repo_name":"Zheng222/PPCN","sub_path":"train/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"67"}
{"seq_id":"422197190","text":"import re\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\ndef read_pgm(filename, byteorder='>'):\n    with open(filename, 'rb') as f:\n        buffer = f.read()\n    try:\n        header, width, height, maxval = re.search(\n            b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n            b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n            b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n            b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n    except AttributeError:\n        raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n    return np.frombuffer(buffer,\n                            dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n                            count=int(width)*int(height),\n                            offset=len(header)\n                            ).reshape((int(height), int(width)))\n\ndef create_rxx(coef, array_size):\n    # build the autocorrelation (Toeplitz) matrix: Rxx[i, j] = coef[|i - j|]\n    rxx = np.zeros((array_size, array_size))\n    for n in range(array_size):\n        diagonal = np.full(array_size - n, coef[n])\n        rxx += np.diag(diagonal, k=n)\n        if n > 0:\n            rxx += np.diag(diagonal, k=-n)\n    return rxx\n\n\nif __name__ == \"__main__\":\n    image = read_pgm(\"lena256.pgm\", byteorder='<')\n    image = np.array(image)\n    plt.figure(1)\n    plt.imshow(image, plt.cm.gray)\n    plt.show()\n\n    new_image = image.flatten('F') - np.mean(image.flatten('F'))\n\n    output_data = plt.acorr(new_image, maxlags=30, normed=True)\n    auto_coef = output_data[1][30:] # keep the non-negative lags 0..30\n    Rxx = create_rxx(auto_coef, 30)","repo_name":"margolek/Study","sub_path":"Semester_5/DFT/Laboratorium/testfile.py","file_name":"testfile.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"35650278409","text":"from Recognizer import *\nimport os\n\n\n# Used to collect captcha character templates.\n# Usage:\n# After running the program, click the segmented captcha character window and press any key (e.g. ESC).\n# The console then shows \"This character is: \"; type the character's correct value (e.g. type Y if the image shows Y) and press Enter.\n# Click the next segmented captcha character image and repeat the steps.\n\nwhile True:\n    OriginalImage = GetNewCaptcha()\n    Img = RemoveNoise(OriginalImage) # remove captcha noise\n\n    OriginalImage = cv2.imread(OriginalImage)\n\n    OpeningKernel = np.ones((3, 3), np.uint8) # build a custom opening kernel\n    Openning = cv2.morphologyEx(Img, cv2.MORPH_OPEN, OpeningKernel) # apply the opening operation\n\n    
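# (added note, not in the original source) with cv2.THRESH_OTSU the fixed threshold argument (0) below is ignored: OpenCV derives the threshold from the image histogram (Otsu's method). A plain fixed-threshold call would look like, e.g.:\n    # Thresh = cv2.threshold(Openning, 127, 255, cv2.THRESH_BINARY)[1] # 127 is a hypothetical value, for comparison only\n    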
Thresh = cv2.threshold(Openning, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] # threshold the image\n    Thresh = cv2.threshold(Thresh, 0, 255, cv2.THRESH_BINARY_INV)[1] # invert the colors\n\n    ThreshCnts, Hierarchy = cv2.findContours(Thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # find contours\n\n    Cnts = ThreshCnts\n\n    TempImg = Img.copy()\n    cv2.drawContours(TempImg, Cnts, -1, (0, 0, 255), 3) # draw the contours\n\n    ResultCaptcha = {}\n\n    for (i, c) in enumerate(Cnts): # filter the detected contours\n\n        x, y, w, h = cv2.boundingRect(c)\n\n        if w > 10 and h > 10: # drop very small contours, they cannot be captcha characters\n\n            cv2.rectangle(OriginalImage, (x, y), (x + w, y + h), (0, 255, 0), 2) # outline the four detected characters for display later\n\n            SingleCharacterImg = Thresh[y:y + h, x:x + w] # crop out a single character\n\n            Contours, Hierarchy = cv2.findContours(SingleCharacterImg.copy(), cv2.RETR_EXTERNAL,\n                                                   cv2.CHAIN_APPROX_SIMPLE) # find contours\n            SingleCharacterCnts = Contours\n\n            SingleCharacterImgCopy = SingleCharacterImg.copy()\n            SingleCharacterImgCopy = cv2.morphologyEx(SingleCharacterImgCopy, cv2.MORPH_OPEN, OpeningKernel)\n\n            cv2.imshow('', SingleCharacterImgCopy)\n            cv2.waitKey(0)\n            character = str(input(\"This character is: \"))\n            cv2.destroyAllWindows()\n            path = './Template/' + character + '/'\n            if not os.path.exists(path):\n                os.makedirs(path)\n            salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))\n            cv2.imwrite(path + salt + '.png', SingleCharacterImgCopy.copy())\n            print(\"Captcha template character \\\"\" + character + \"\\\" saved to: \" + path + salt + '.png')","repo_name":"SwaggyMacro/SxbCaptchaRecognizer","sub_path":"GetTemplate.py","file_name":"GetTemplate.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
{"seq_id":"36056788516","text":"\"\"\"\nGiven a dictionary and a list of letters (or, equivalently, a string), find the longest word that only uses letters from\nthe string.\n\"\"\"\n\nfrom typing import List\nimport math\n\n\ndef longest_word_possible(letters: str, dictionary: List[str]):\n    # result = []\n    max_length_word = ''\n    word_to_length = {}\n    max_length_words = -math.inf\n    for word in dictionary:\n        if word not in word_to_length:\n            word_to_length[word] = 0\n        word_to_length[word] = len(word)\n\n    letter_set = set(letters)\n\n    for word in word_to_length:\n        # a word qualifies if it only uses letters from the given string\n        if set(word) <= letter_set:\n            if len(word) > max_length_words:\n                max_length_words = len(word)\n                max_length_word = word # remember the word itself, not just its length\n    return [max_length_word]\n\n\nif __name__ == '__main__':\n    letters = 'ote'\n    dictionary = ['toe', 'banana', 'dogs', 'bark', 'eat', 'toes', 'eot']\n    print(longest_word_possible(letters, dictionary))","repo_name":"smartinsert/CodingProblem","sub_path":"goldman_sachs/find_longest_word_possible_from_letters.py","file_name":"find_longest_word_possible_from_letters.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"12730103958","text":"''' This is the main module '''\n\nfrom Game import hero # Import all the modules needed for this window\nfrom Game import stage\nfrom Game import debug\nfrom Game import music\nfrom Game import graphics\nfrom Game import window\nfrom Game import controls\nimport time\n\n\ntimeInWinArea = None\n\ndef get_can():\n    return window.root.canvas\n\ndef get_win():\n    return window.root.window\n\ndef main_loop(): # Main loop, executed every frame\n    t0 = time.time()\n\n    global timeInWinArea\n    \n    if not debug.debugger.stop:\n        get_can().update() # Update 
the display\n\n        for obj in stage.stage0.objs[\"Platform\"]:\n            stage.stage0.objs[\"Platform\"][obj].move()\n        \n        if hero.hero1.y < hero.hero2.y: # Because the lower square influences the upper square more than the other way around\n            hero.hero2.move() \n            hero.hero1.move()\n        else:\n            hero.hero1.move() # The two characters' methods that let them move\n            hero.hero2.move()\n\n        for col in stage.stage0.chunks:\n            for ch in col:\n                ch.calculate()\n        \n        count1 = 0\n        for key in stage.stage0.objs[\"Win\"]:\n            count1 += 1\n            if stage.stage0.objs[\"Win\"][key].test():\n                count1 -= 1\n        if count1 == 0:\n            timeInWinArea += 1\n            if timeInWinArea == 80:\n                debug.debugger.t0Win = stage.stage_end()\n        else:\n            timeInWinArea = 0\n\n        graphics.set_graphics()\n        debug.debugger.fps_add()\n    \n    t1 = time.time()\n    if stage.hasWon and t1 - debug.debugger.t0Win > 3:\n        stage.hasWon = False\n        stage.change_level(stage.stage0.level+1)\n    afterTime = int((wantedTimeGap - t1 + t0) * 1000 + 0.4) # +0.4 → int(x + 0.5) rounds to the nearest integer, minus 0.1 for the small time spent on these timing computations\n    get_can().after(afterTime, main_loop) # Calls the function again [afterTime] (in ms) later\n\n\n# INITIALIZATION of the Tk root and Canvas (Tkinter)\nwindow.init()\n\n# Display the canvas\nget_can().pack()\n\ndebug.init(stage)\n\ntry:\n    music.init()\n    music.play_music()\nexcept:\n    print(\"Error: No music available\")\n\n# Create the main elements\nhero.init() # The characters\nstage.init(hero.hero1, hero.hero2, 8) # 8 is the number of levels\n\nstage.change_level(1) # Level 1\n\ncontrols.init(get_can(), hero.hero1, hero.hero2) # The controls\n\nframerate = 80\nwantedTimeGap = 1 / framerate # 1 second divided by the desired number of fps\n\n#───────────────────────────┐\nmain_loop() # The main loop is called here ↑↑↑\n#───────────────────────────┘\n\nget_win().mainloop()\n","repo_name":"ClementDrn/ISN-Projet_Clement-Antoine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"17857690428","text":"'''\nCreated on Mar 6, 2017\n\n@author: Stanislav Petrov\n'''\n\nimport sys, io\n\ndef inplace_sort(input_string):\n    number_index, string_index, output_string, number_list, string_list = 0, 0,\"\",[],[]\n    for item in input_string.split(' '):\n        if item.isdigit():\n            number_list.append(int(item))\n        else:\n            string_list.append(item)\n    \n    number_list = sorted(number_list)\n    string_list = sorted(string_list)\n    \n    for item in input_string.split():\n        if item.isdigit():\n            output_string += str(number_list[number_index]) + \" \"\n            number_index += 1\n        else:\n            output_string += string_list[string_index] + \" \"\n            string_index += 1\n\n    return \"\\nOutput:\\n\"+output_string\n\nsys.stdin = io.StringIO(input(\"Input:\"))\ntest_string = inplace_sort(input())\nsys.stdout.write(test_string)\n","repo_name":"stanislav0001/Python-Test","sub_path":"src/testmodule.py","file_name":"testmodule.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"26065402221","text":"import unittest\n\nfrom udoco import feeds\nfrom udoco.tests import factory as _factory\n\n\nclass TestAllGameFeed(unittest.TestCase):\n\n    def test_all_games(self):\n        for i in range(0, 10):\n            _factory.GameFactory()\n        request = unittest.mock.Mock(method='GET')\n\n        view = feeds.AllGameFeed()\n        response = view(request)\n\n        self.assertIn(\n            
'X-WR-CALNAME:UDO Colorado Upcoming Events',\n response.content.decode('utf-8'))\n self.assertIn(\n 'X-WR-TIMEZONE:America/Denver',\n response.content.decode('utf-8'))\n self.assertEqual(\n 10,\n len([\n line for line in response.content.decode('utf-8').split('\\r\\n')\n if line == 'BEGIN:VEVENT']))\n\n\nclass TestLeagueGameFeed(unittest.TestCase):\n\n def test_league_games(self):\n league = _factory.LeagueFactory()\n for i in range(0, 10):\n _factory.GameFactory(league=league)\n for i in range(0, 10):\n _factory.GameFactory()\n request = unittest.mock.Mock(method='GET')\n\n view = feeds.LeagueGameFeed()\n response = view(request, league_id=league.id)\n\n self.assertIn(\n 'X-WR-CALNAME:{} Upcoming events'.format(league.name),\n response.content.decode('utf-8'))\n self.assertEqual(\n 10,\n len([\n line for line in response.content.decode('utf-8').split('\\r\\n')\n if line == 'BEGIN:VEVENT']))\n","repo_name":"AmeliaLLC/udoco","sub_path":"udoco/tests/test_feeds.py","file_name":"test_feeds.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41845200533","text":"from __future__ import division\n\n# Start logging\nimport logging\nmodule_logger = logging.getLogger(__name__)\n\nimport numpy as np\n\n# local imports\nfrom dtocean_tidal.utils.misc import transec_surf\n\n\ndef blockage_ratio(hydro, array, debug=False):\n \"\"\"\n Computes blockage ratio relative to the lease area\n\n Args:\n hydro (dtocean_tidal.main.Hydro): dtocean_tidal's Hydro object\n array (dtocean_tidal.main.Array): dtocean_tidal's Array object\n \n Kwargs:\n debug (bool): debug flag\n \n Returns:\n rbr (float): relative blockage ration (i.e. rotor's surface / lease's transect surface)\n \n \"\"\"\n \n if debug: module_logger.info(\"Computing relative blockage ratio RBR...\")\n \n transect, first_row, speed = transec_surf(hydro, array, debug=debug)\n rotor_surf = 0.0\n \n n_digits = len(str(array.turbine_count))\n \n for i in first_row:\n turb_name = 'turbine{:0{width}d}'.format(i, width=n_digits)\n diam = array.features[turb_name]['Diam']\n ry = array.features[turb_name]['RY'] # relative yawing angle\n surf = np.pi * ((diam/2.0)**2.0) * abs(np.cos(np.radians(ry))) # ellipse area\n rotor_surf += surf\n \n # final check\n rbr = rotor_surf/transect\n if rbr > 1.0:\n rbr = 1.0\n if debug:\n module_logger.info(\"...transect surface = \" + str(transect) + \" m2...\")\n module_logger.info(\"...rotors surface = \" + str(rotor_surf) + \" m2...\")\n module_logger.info(\"...RBR = \" + str(rbr))\n \n return rbr\n","repo_name":"DTOcean/dtocean-hydrodynamics","sub_path":"dtocean_tidal/modules/blockage_ratio.py","file_name":"blockage_ratio.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"18535688721","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport csv\nimport io\nimport json\nimport os\nimport re\nimport subprocess\nimport tempfile\nfrom datetime import datetime\nfrom logging import INFO, basicConfig, getLogger\n\nimport hglib\nimport joblib\nimport numpy as np\nfrom dateutil.relativedelta import relativedelta\nfrom libmozdata import vcs_map\nfrom libmozdata.phabricator import PhabricatorAPI\nfrom scipy.stats import spearmanr\n\nfrom bugbug import db, repository\nfrom bugbug.models.regressor import RegressorModel\nfrom bugbug.utils import (\n download_check_etag,\n get_secret,\n retry,\n to_array,\n 
zstd_decompress,\n)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nURL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_regressor.latest/artifacts/public/{}\"\n\n\n# ------------------------------------------------------------------------------\n# Copied from https://github.com/mozilla-conduit/lando-api/blob/4b583f9d773dfc8c3e8c39e3d3b7385568d744df/landoapi/commit_message.py\n\nSPECIFIER = r\"(?:r|a|sr|rs|ui-r)[=?]\"\nR_SPECIFIER = r\"\\br[=?]\"\nR_SPECIFIER_RE = re.compile(R_SPECIFIER)\n\nLIST = r\"[;,\\/\\\\]\\s*\"\n\n# Note that we only allows a subset of legal IRC-nick characters.\n# Specifically, we do not allow [ \\ ] ^ ` { | }\nIRC_NICK = r\"[a-zA-Z0-9\\-\\_]+\"\n\n# fmt: off\nREVIEWERS_RE = re.compile( # noqa: E131\n r\"([\\s\\(\\.\\[;,])\" # before \"r\" delimiter\n + r\"(\" + SPECIFIER + r\")\" # flag\n + r\"(\" # capture all reviewers\n + r\"#?\" # Optional \"#\" group reviewer prefix\n + IRC_NICK # reviewer\n + r\"!?\" # Optional \"!\" blocking indicator\n + r\"(?:\" # additional reviewers\n + LIST # delimiter\n + r\"(?![a-z0-9\\.\\-]+[=?])\" # don\"t extend match into next flag\n + r\"#?\" # Optional \"#\" group reviewer prefix\n + IRC_NICK # reviewer\n + r\"!?\" # Optional \"!\" blocking indicator\n + r\")*\"\n + r\")?\"\n)\n# fmt: on\n\n\ndef replace_reviewers(commit_description, reviewers):\n if not reviewers:\n reviewers_str = \"\"\n else:\n reviewers_str = \"r=\" + \",\".join(reviewers)\n\n if commit_description == \"\":\n return reviewers_str\n\n commit_description = commit_description.splitlines()\n commit_summary = commit_description.pop(0)\n commit_description = \"\\n\".join(commit_description)\n\n if not R_SPECIFIER_RE.search(commit_summary):\n commit_summary += \" \" + reviewers_str\n else:\n # replace the first r? with the reviewer list, and all subsequent\n # occurrences with a marker to mark the blocks we need to remove\n # later\n d = {\"first\": True}\n\n def replace_first_reviewer(matchobj):\n if R_SPECIFIER_RE.match(matchobj.group(2)):\n if d[\"first\"]:\n d[\"first\"] = False\n return matchobj.group(1) + reviewers_str\n else:\n return \"\\0\"\n else:\n return matchobj.group(0)\n\n commit_summary = re.sub(REVIEWERS_RE, replace_first_reviewer, commit_summary)\n\n # remove marker values as well as leading separators. 
this allows us\n # to remove runs of multiple reviewers and retain the trailing\n # separator.\n commit_summary = re.sub(LIST + \"\\0\", \"\", commit_summary)\n commit_summary = re.sub(\"\\0\", \"\", commit_summary)\n\n if commit_description == \"\":\n return commit_summary.strip()\n else:\n return commit_summary.strip() + \"\\n\" + commit_description\n\n\n# ------------------------------------------------------------------------------\n\n\nclass CommitClassifier(object):\n def __init__(self, cache_root, git_repo_dir, method_defect_predictor_dir):\n self.cache_root = cache_root\n\n assert os.path.isdir(cache_root), f\"Cache root {cache_root} is not a dir.\"\n self.repo_dir = os.path.join(cache_root, \"mozilla-central\")\n\n regressormodel_path = \"regressormodel\"\n if not os.path.exists(regressormodel_path):\n download_check_etag(\n URL.format(f\"{regressormodel_path}.zst\"), f\"{regressormodel_path}.zst\"\n )\n zstd_decompress(regressormodel_path)\n assert os.path.exists(regressormodel_path), \"Decompressed model exists\"\n\n regressormodel_data_X_path = \"regressormodel_data_X\"\n if not os.path.exists(regressormodel_data_X_path):\n download_check_etag(\n URL.format(f\"{regressormodel_data_X_path}.zst\"),\n f\"{regressormodel_data_X_path}.zst\",\n )\n zstd_decompress(regressormodel_data_X_path)\n assert os.path.exists(\n regressormodel_data_X_path\n ), \"Decompressed X dataset exists\"\n\n regressormodel_data_y_path = \"regressormodel_data_y\"\n if not os.path.exists(regressormodel_data_y_path):\n download_check_etag(\n URL.format(f\"{regressormodel_data_y_path}.zst\"),\n f\"{regressormodel_data_y_path}.zst\",\n )\n zstd_decompress(regressormodel_data_y_path)\n assert os.path.exists(\n regressormodel_data_y_path\n ), \"Decompressed y dataset exists\"\n\n self.model = RegressorModel.load(regressormodel_path)\n self.X = to_array(joblib.load(regressormodel_data_X_path))\n self.y = to_array(joblib.load(regressormodel_data_y_path))\n\n self.method_defect_predictor_dir = method_defect_predictor_dir\n self.clone_git_repo(\n \"https://github.com/lucapascarella/MethodDefectPredictor\",\n method_defect_predictor_dir,\n \"fa5269b959d8ddf7e97d1e92523bb64c17f9bbcd\",\n )\n self.git_repo_dir = git_repo_dir\n self.clone_git_repo(\"https://github.com/mozilla/gecko-dev\", git_repo_dir)\n\n def clone_git_repo(self, repo_url, repo_dir, rev=\"master\"):\n logger.info(f\"Cloning {repo_url}...\")\n\n if not os.path.exists(repo_dir):\n retry(\n lambda: subprocess.run([\"git\", \"clone\", repo_url, repo_dir], check=True)\n )\n\n retry(\n lambda: subprocess.run(\n [\"git\", \"pull\", repo_url, \"master\"],\n cwd=repo_dir,\n capture_output=True,\n check=True,\n )\n )\n\n retry(\n lambda: subprocess.run(\n [\"git\", \"checkout\", rev], cwd=repo_dir, capture_output=True, check=True\n )\n )\n\n def update_commit_db(self):\n repository.clone(self.repo_dir)\n\n if db.is_old_version(repository.COMMITS_DB) or not db.exists(\n repository.COMMITS_DB\n ):\n db.download(repository.COMMITS_DB, force=True, support_files_too=True)\n\n for commit in repository.get_commits():\n pass\n\n rev_start = \"children({})\".format(commit[\"node\"])\n\n repository.download_commits(self.repo_dir, rev_start)\n\n def apply_phab(self, hg, diff_id):\n def has_revision(revision):\n if not revision:\n return False\n try:\n hg.identify(revision)\n return True\n except hglib.error.CommandError:\n return False\n\n phabricator_api = PhabricatorAPI(\n api_key=get_secret(\"PHABRICATOR_TOKEN\"), url=get_secret(\"PHABRICATOR_URL\")\n )\n\n # Get the 
stack of patches\n stack = phabricator_api.load_patches_stack(diff_id)\n assert len(stack) > 0, \"No patches to apply\"\n\n # Find the first unknown base revision\n needed_stack = []\n revisions = {}\n for patch in reversed(stack):\n needed_stack.insert(0, patch)\n\n # Stop as soon as a base revision is available\n if has_revision(patch.base_revision):\n logger.info(\n f\"Stopping at diff {patch.id} and revision {patch.base_revision}\"\n )\n break\n\n if not needed_stack:\n logger.info(\"All the patches are already applied\")\n return\n\n # Load all the diff revisions\n diffs = phabricator_api.search_diffs(diff_phid=[p.phid for p in stack])\n revisions = {\n diff[\"phid\"]: phabricator_api.load_revision(\n rev_phid=diff[\"revisionPHID\"], attachments={\"reviewers\": True}\n )\n for diff in diffs\n }\n\n # Update repo to base revision\n hg_base = needed_stack[0].base_revision\n if not has_revision(hg_base):\n logger.warning(\"Missing base revision {} from Phabricator\".format(hg_base))\n hg_base = \"tip\"\n\n if hg_base:\n hg.update(rev=hg_base, clean=True)\n logger.info(f\"Updated repo to {hg_base}\")\n\n try:\n self.git_base = vcs_map.mercurial_to_git(hg_base)\n subprocess.run(\n [\"git\", \"checkout\", \"-b\", \"analysis_branch\", self.git_base],\n check=True,\n cwd=self.git_repo_dir,\n )\n logger.info(f\"Updated git repo to {self.git_base}\")\n except Exception as e:\n logger.info(f\"Updating git repo to Mercurial {hg_base} failed: {e}\")\n\n def load_user(phid):\n if phid.startswith(\"PHID-USER\"):\n return phabricator_api.load_user(user_phid=phid)\n elif phid.startswith(\"PHID-PROJ\"):\n # TODO: Support group reviewers somehow.\n logger.info(f\"Skipping group reviewer {phid}\")\n else:\n raise Exception(f\"Unsupported reviewer {phid}\")\n\n for patch in needed_stack:\n revision = revisions[patch.phid]\n\n message = \"{}\\n\\n{}\".format(\n revision[\"fields\"][\"title\"], revision[\"fields\"][\"summary\"]\n )\n\n author_name = None\n author_email = None\n\n if patch.commits:\n author_name = patch.commits[0][\"author\"][\"name\"]\n author_email = patch.commits[0][\"author\"][\"email\"]\n\n if author_name is None:\n author = load_user(revision[\"fields\"][\"authorPHID\"])\n author_name = author[\"fields\"][\"realName\"]\n # XXX: Figure out a way to know the email address of the author.\n author_email = author[\"fields\"][\"username\"]\n\n reviewers = list(\n filter(\n None,\n (\n load_user(reviewer[\"reviewerPHID\"])\n for reviewer in revision[\"attachments\"][\"reviewers\"][\n \"reviewers\"\n ]\n ),\n )\n )\n reviewers = set(reviewer[\"fields\"][\"username\"] for reviewer in reviewers)\n\n if len(reviewers):\n message = replace_reviewers(message, reviewers)\n\n logger.info(\n f\"Applying {patch.phid} from revision {revision['id']}: {message}\"\n )\n\n hg.import_(\n patches=io.BytesIO(patch.patch.encode(\"utf-8\")),\n message=message.encode(\"utf-8\"),\n user=f\"{author_name} <{author_email}>\".encode(\"utf-8\"),\n )\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n temp_file = os.path.join(tmpdirname, \"temp.patch\")\n with open(temp_file, \"w\") as f:\n f.write(patch.patch)\n\n subprocess.run(\n [\"git\", \"apply\", \"--3way\", temp_file],\n check=True,\n cwd=self.git_repo_dir,\n )\n subprocess.run(\n [\n \"git\",\n \"-c\",\n f\"user.name={author_name}\",\n \"-c\",\n f\"user.email={author_email}\",\n \"commit\",\n \"-am\",\n message,\n ],\n check=True,\n cwd=self.git_repo_dir,\n )\n\n def classify(self, diff_id):\n self.update_commit_db()\n\n with hglib.open(self.repo_dir) as 
hg:\n self.apply_phab(hg, diff_id)\n\n patch_rev = hg.log(revrange=\"not public()\")[0].node\n\n # Analyze patch.\n commits = repository.download_commits(\n self.repo_dir, rev_start=patch_rev.decode(\"utf-8\"), save=False\n )\n\n # We use \"clean\" commits as the background dataset for feature importance.\n # This way, we can see the features which are most important in differentiating\n # the current commit from the \"clean\" commits.\n background_dataset = self.X[self.y == 0]\n\n probs, importance = self.model.classify(\n commits[-1],\n probabilities=True,\n importances=True,\n background_dataset=background_dataset,\n importance_cutoff=0.1,\n )\n\n features = []\n for i, (val, feature_index, is_positive) in enumerate(\n importance[\"importances\"][\"classes\"][1][0]\n ):\n value = importance[\"importances\"][\"values\"][0, int(feature_index)]\n\n X = self.X[:, int(feature_index)]\n spearman = spearmanr(X, self.y)\n\n buggy_X = X[self.y == 1]\n clean_X = X[self.y == 0]\n median = np.median(X)\n median_clean = np.median(clean_X)\n median_buggy = np.median(buggy_X)\n\n perc_buggy_values_higher_than_median = (\n buggy_X > median\n ).sum() / buggy_X.shape[0]\n perc_buggy_values_lower_than_median = (\n buggy_X < median\n ).sum() / buggy_X.shape[0]\n perc_clean_values_higher_than_median = (\n clean_X > median\n ).sum() / clean_X.shape[0]\n perc_clean_values_lower_than_median = (\n clean_X < median\n ).sum() / clean_X.shape[0]\n\n logger.info(\"Feature: {}\".format(importance[\"feature_legend\"][str(i + 1)]))\n logger.info(\"Shap value: {}{}\".format(\"+\" if (is_positive) else \"-\", val))\n logger.info(f\"spearman: {spearman}\")\n logger.info(f\"value: {value}\")\n logger.info(f\"overall mean: {np.mean(X)}\")\n logger.info(f\"overall median: {np.median(X)}\")\n logger.info(f\"mean for y == 0: {np.mean(clean_X)}\")\n logger.info(f\"mean for y == 1: {np.mean(buggy_X)}\")\n logger.info(f\"median for y == 0: {np.median(clean_X)}\")\n logger.info(f\"median for y == 1: {np.median(buggy_X)}\")\n logger.info(\n f\"perc_buggy_values_higher_than_median: {perc_buggy_values_higher_than_median}\"\n )\n logger.info(\n f\"perc_buggy_values_lower_than_median: {perc_buggy_values_lower_than_median}\"\n )\n logger.info(\n f\"perc_clean_values_higher_than_median: {perc_clean_values_higher_than_median}\"\n )\n logger.info(\n f\"perc_clean_values_lower_than_median: {perc_clean_values_lower_than_median}\"\n )\n\n features.append(\n {\n \"index\": i + 1,\n \"name\": importance[\"feature_legend\"][str(i + 1)],\n \"shap\": float(f'{\"+\" if (is_positive) else \"-\"}{val}'),\n \"value\": importance[\"importances\"][\"values\"][0, int(feature_index)],\n \"spearman\": spearman,\n \"median\": median,\n \"median_bug_introducing\": median_buggy,\n \"median_clean\": median_clean,\n \"perc_buggy_values_higher_than_median\": perc_buggy_values_higher_than_median,\n \"perc_buggy_values_lower_than_median\": perc_buggy_values_lower_than_median,\n \"perc_clean_values_higher_than_median\": perc_clean_values_higher_than_median,\n \"perc_clean_values_lower_than_median\": perc_clean_values_lower_than_median,\n }\n )\n\n with open(\"probs.json\", \"w\") as f:\n json.dump(probs[0].tolist(), f)\n\n with open(\"importances.json\", \"w\") as f:\n json.dump(features, f)\n\n # Get commit hash from 4 months before the analysis time.\n # The method-level analyzer needs 4 months of history.\n four_months_ago = datetime.utcnow() - relativedelta(months=4)\n p = subprocess.run(\n [\n \"git\",\n \"rev-list\",\n \"-n\",\n \"1\",\n 
\"--until={}\".format(four_months_ago.strftime(\"%Y-%m-%d\")),\n \"HEAD\",\n ],\n check=True,\n capture_output=True,\n cwd=self.git_repo_dir,\n )\n\n stop_hash = p.stdout.decode().strip()\n\n # Run the method-level analyzer.\n subprocess.run(\n [\n \"python3\",\n \"tester.py\",\n \"--repo\",\n self.git_repo_dir,\n \"--start\",\n \"HEAD\",\n \"--stop\",\n stop_hash,\n \"--output\",\n os.path.abspath(\"method_level.csv\"),\n ],\n check=True,\n cwd=self.method_defect_predictor_dir,\n )\n\n method_level_results = []\n try:\n with open(\"method_level.csv\", \"r\") as f:\n reader = csv.DictReader(f)\n for item in reader:\n method_level_results.append(item)\n except FileNotFoundError:\n # No methods were classified.\n pass\n\n with open(\"method_level.json\", \"w\") as f:\n json.dump(method_level_results, f)\n\n\ndef main():\n description = \"Classify a commit\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"cache_root\", help=\"Cache for repository clones.\")\n parser.add_argument(\"diff_id\", help=\"diff ID to analyze.\", type=int)\n parser.add_argument(\n \"git_repo_dir\", help=\"Path where the git repository will be cloned.\"\n )\n parser.add_argument(\n \"method_defect_predictor_dir\",\n help=\"Path where the git repository will be cloned.\",\n )\n\n args = parser.parse_args()\n\n classifier = CommitClassifier(\n args.cache_root, args.git_repo_dir, args.method_defect_predictor_dir\n )\n classifier.classify(args.diff_id)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rdrahul/bugbug","sub_path":"scripts/commit_classifier.py","file_name":"commit_classifier.py","file_ext":"py","file_size_in_byte":18756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"5912696937","text":"import json\nfrom typing import List\n\nimport dateutil.parser\nimport pendulum\nimport requests\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom prodict import Prodict\n\nfrom application.models import Product\nfrom my_secrets import secrets\nfrom users.models import User\nfrom webapp.tasks import init_close_user\n\n\nclass Email(Prodict):\n email: str\n type: str\n\n\nclass Contact(Prodict):\n id: str\n lead_id: str\n display_name: str\n emails: List[Email]\n\n\n@csrf_exempt\ndef create_or_update_lead_webhook(request):\n def get_contact_data(contact_ids):\n auth_token = \"YXBpXzNQM1ZIbnVua0preHVSdGV5UmMxN2suM2xySHg1SmJIaHhhSTNVekpWM09JNDo6\"\n headers = {\n \"Authorization\": f\"Basic {auth_token}\",\n \"Content-Type\": \"application/json\",\n }\n url = \"https://api.close.com/api/v1/contact/\"\n data = []\n for id in contact_ids:\n response = requests.get(url + id, headers=headers)\n data.append(Contact.from_dict(response.json()))\n return data\n\n def parse_duration(string: str):\n parts = string.split(\"-\")\n end = dateutil.parser.parse(parts[1], dayfirst=True)\n start_string = parts[0] + (str(end.year) if not parts[0].split(\".\")[-1] else \"\")\n start = dateutil.parser.parse(start_string, dayfirst=True)\n if start > end:\n start = pendulum.instance(start, tz=\"local\").subtract(years=1)\n return start, end\n\n def get_users(lead_id: str, contact_ids: list) -> List[User]:\n users = []\n\n qs = User.data.filter(lead_id=event.lead_id)\n if qs.count() == 2:\n return qs\n else:\n # contacts not present > create user objects\n contacts = get_contact_data(contact_ids)\n\n for contact in contacts:\n if not contact.emails:\n continue\n\n qs = 
\n    def get_users(lead_id: str, contact_ids: list) -> List[User]:\n        users = []\n\n        qs = User.data.filter(lead_id=event.lead_id)\n        if qs.count() == 2:\n            return qs\n        else:\n            # contacts not present > create user objects\n            contacts = get_contact_data(contact_ids)\n\n            for contact in contacts:\n                if not contact.emails:\n                    continue\n\n                qs = User.data.filter(contact_id=contact.id)\n\n                # try to find user via contact id\n                if qs:\n                    user = qs.get()\n                else:\n                    # try to find an existing contact via email\n                    names = contact.display_name.split(\" \")\n                    email = contact.emails[0].email\n\n                    qs = User.data.filter(email__iexact=email)\n                    if qs:\n                        user = qs.get()\n                    else:\n                        # otherwise create a new user\n                        user = User.data.create(\n                            email=email,\n                            first_name=\" \".join(names[:-1]),\n                            last_name=names[-1],\n                            username=email,\n                        )\n                        # new users get a default password\n                        user.set_password(secrets.DEFAULT_USER_PASSWORD)\n\n                    user.contact_id = contact.id\n                    user.lead_id = contact.lead_id\n                    user.save()\n\n                    init_close_user(user)\n\n                users.append(user)\n\n        return users\n\n    if request.content_type == \"application/json\":\n        data = json.loads(request.body)\n\n        event = Prodict.from_dict(data).event\n        ic(event)  # noqa\n        duration_field = \"custom.cf_8zV6c7eijjmYfIbl1w1vfLzZunknUoLs4sb13uoOubp\"\n        purchase_options = \"custom.cf_0eKueP25HDy5wnHDeZXB7ySzrlx3JhiQXjaczfIx2a1\"  # the \"Kaufaktionen\" (purchase actions) field\n        zoom_link = \"custom.cf_fsbXp5btDzxOJqP9QKmCNa62vc4MnHevvpXnkMkWMB8\"\n\n        # only listen for Kaufaktionen being added or removed\n        if (purchase_options in event.data) or (\n            purchase_options not in event.data and \"previous_data\" in event and purchase_options in event.previous_data\n        ):\n            options = event.data[purchase_options] if purchase_options in event.data else []\n            users = get_users(event.lead_id, event.data.contact_ids)\n\n            to_remove = []\n            to_add = []\n\n            # for all listed products -> add access\n            for product in Product.data.all():\n                # add access for all listed products\n                if product.name in options or product.free:\n                    to_add.append(product)\n                else:\n                    to_remove.append(product)\n\n            for user in users:\n                if zoom_link in event.data:\n                    user.zoom_link = event.data[zoom_link]\n                    user.save()\n\n                # now let's set or update the duration\n                if duration_field in event.data:\n                    start, end = parse_duration(event.data[duration_field])\n\n                    user.start_date = start\n                    user.end_date = end\n                    user.save()\n\n                # remove access for all products not listed\n                for product in to_remove:\n                    ic(f\"removing {product.name}\")  # noqa\n                    for course in product.courses.all():\n                        user.access_set.filter(training=course).delete()\n\n                # add products afterward to avoid removing access granted by another product\n                for product in to_add:\n                    ic(f\"adding {product.name}\")  # noqa\n                    for course in product.courses.all():\n                        access, new = user.access_set.get_or_create(training=course)\n\n                # set permissions\n                product_ids = [p.id for p in to_add]\n                products = Product.data.filter(id__in=product_ids)\n                user.can_view_zoom_link = products.filter(can_view_zoom_link=True).exists()\n                user.can_view_appointments = products.filter(can_view_appointments=True).exists()\n                user.can_view_forum = products.filter(can_view_forum=True).exists()\n\n                # set membership state\n                user.bought_teaser = products.filter(teaser=True).exists()\n                user.bought_membership = products.filter(membership=True).exists()\n\n                user.save()\n\n        return HttpResponse(status=202)\n    else:\n        return HttpResponse(status=400, content=\"Content-Type must be application/json\")\n","repo_name":"kakulukia/anna","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"28638301137","text":"import json\nfrom datetime import datetime, timezone\n\nfrom core import HTTPEvent, JSONResponse\nfrom core.db.results import QueryResult\nfrom core.exceptions.forbidden import ForbiddenException\nfrom core.exceptions.invalid import InvalidException\nfrom core.router.router import Router\nfrom core.services.logs import LogsService, LogTag\nfrom core.services.rewards import RewardsFactory, RewardReason\nfrom core.services.tasks import TasksService\nfrom core.utils.key import split_key, join_key\nfrom schema import Schema, Optional\n\nUSER_VALID_TAGS = [LogTag.PROGRESS]\n\n\ndef query_logs(event: HTTPEvent):\n    user_sub = event.params['sub']\n    tag = LogTag.normalize(split_key(event.params['tag'])).upper() if event.params.get('tag') else None\n\n    limit = event.queryParams.get('limit', 25)\n    if not isinstance(limit, int) or limit > 100:\n        raise InvalidException(\"Limit must be an integer less than or equal to 100\")\n\n    logs = LogsService.query(user_sub, tag, limit=limit)\n    return JSONResponse(body=QueryResult.from_list([log.to_api_map() for log in logs]).as_dict())\n\n\ndef create_log(event: HTTPEvent):\n    user_sub: str = event.params['sub']\n    tag: str = event.params['tag'].upper()\n\n    if event.authorizer.sub != user_sub:\n        raise ForbiddenException(\"Only the same user can create logs\")\n    parent_tag = LogTag.from_short(split_key(tag)[0])\n    if parent_tag not in USER_VALID_TAGS:\n        raise ForbiddenException(f\"A user can only create logs with the following tags: {USER_VALID_TAGS}\")\n\n    body = event.json\n\n    log = body['log']\n    data = body.get('data')\n\n    if len(log) > 1024:\n        raise InvalidException(\"A log can't have more than 1024 characters\")\n\n    if data is not None:\n        if len(json.dumps(data)) > 2048:\n            raise InvalidException(\"Log data is too big\")\n\n    response_body = {}\n\n    if parent_tag == LogTag.PROGRESS:\n        if tag != parent_tag.short:\n            raise InvalidException(\"A progress log tag can't be compound\")\n        if body.get('token') is None:\n            raise InvalidException(\"To post a PROGRESS log you must provide the task token\")\n\n        objective = TasksService.get_task_token_objective(body['token'], authorizer=event.authorizer)\n        tag = join_key(LogTag.PROGRESS.value, objective).upper()\n\n        now = int(datetime.now(timezone.utc).timestamp() * 1000)\n        last_progress_log = LogsService.get_last_log_with_tag(event.authorizer.sub, tag.upper())\n\n        if last_progress_log is None or now - last_progress_log.timestamp > 24 * 60 * 60 * 1000:\n            response_body['token'] = RewardsFactory.get_reward_token_by_reason(authorizer=event.authorizer,\n                                                                               area=split_key(objective)[1],\n                                                                               reason=RewardReason.PROGRESS_LOG)\n\n    log = LogsService.create(user_sub, tag, log_text=log, data=body.get('data'), append_timestamp_to_tag=True)\n    response_body['item'] = log.to_api_map()\n\n    return JSONResponse(body=response_body)\n
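\n\n# Editor's illustration (hypothetical timestamps): the PROGRESS reward window\n# above is measured in epoch milliseconds, where 24 * 60 * 60 * 1000 = 86,400,000:\n#   now  = 1_600_086_500_000\n#   last = 1_600_000_000_000  (timestamp of the previous PROGRESS log)\n#   now - last = 86_500_000 > 86_400_000, so a new reward token is issued\n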
\n\nrouter = Router()\n\nrouter.get(\"/api/users/{sub}/logs/\", query_logs, authorized=False)\nrouter.get(\"/api/users/{sub}/logs/{tag}/\", query_logs, authorized=False)\nrouter.post(\"/api/users/{sub}/logs/{tag}/\", create_log, schema=Schema({\n    'log': str,\n    Optional('data'): dict,\n    Optional('token'): str\n}))\n\n\ndef handler(event: dict, _) -> dict:\n    event = HTTPEvent(event)\n    response = router.route(event)\n    return response.as_dict()\n","repo_name":"pankandev/scout-progression-system-sam","sub_path":"pps/logs/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16928989521","text":"import win32gui\n\nwdname = \"TOPRice_2.4.2_CN\"\nhwnd = win32gui.FindWindow(None, wdname)\nprint(hwnd)\ntext = win32gui.GetWindowText(hwnd)  # get the window title\nprint('Window title:', text)\nclsname = win32gui.GetClassName(hwnd)  # get the window class name\nprint('Window class name:', clsname)\nleft, top, right, bottom = win32gui.GetWindowRect(hwnd)  # get the window position\n# left, top is the top-left corner; right, bottom is the bottom-right corner\nprint('Window position', left, top, right, bottom)\n\nmenuHandle = win32gui.GetMenu(hwnd)  # get the menu handle of the window\nprint('Menu handle:', menuHandle)\n\n# close the window\nwin32gui.CloseWindow(hwnd)\n# # get the handle of the first child UI element\n# w2hd=win32gui.FindWindowEx(hwnd,None,None,\"File\")\n# print(w2hd)\n\n\n# call win32gui.EnumWindows() to enumerate all window handles\n# hWndList = []\n# win32gui.EnumWindows(lambda hWnd, param: param.append(hWnd), hWndList)\n# for hwnd in hWndList:\n#     title = win32gui.GetWindowText(hwnd)\n#     print(title)\n","repo_name":"libaojie/python_auto_nongke","sub_path":"project/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"40224260333","text":"import shutil\nimport os\nimport sys\nfrom pathlib import Path\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.abspath(ROOT))  # relative\nWORK_DIR = os.path.dirname(ROOT)\nsys.path.insert(0, WORK_DIR)\n\n\ndef main():\n    folder_datasets = '/root/card-transformation/uniarts/datasets/images'\n    for root, folders, files in os.walk(folder_datasets, topdown=True):\n        for file in files:\n            if os.path.isfile(os.path.join(root, file)):\n                # os.rename(os.path.join(root, file), os.path.join(root, root.split('/')[-1] + '_' + file))\n                # if os.path.exists(os.path.join(folder_datasets, root.split('/')[-1] + '_' + file)):\n                #     continue\n                shutil.move(os.path.join(root, file), folder_datasets)\n        # shutil.rmtree(root)\n    for item in os.listdir(folder_datasets):\n        if os.path.isdir(os.path.join(folder_datasets, item)):\n            os.system('rm -rf %s' % os.path.join(folder_datasets, item))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Syun1208/eKYC-ID-Card-Detection","sub_path":"data/move_data_from_folder.py","file_name":"move_data_from_folder.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"73126415894","text":"\"\"\" /*\n * Challenge #2\n * THE FIBONACCI SEQUENCE\n * Statement published: 10/01/22\n * Solution published: 17/01/22\n * Difficulty: HARD\n *\n * Statement: Write a program that prints the first 50 numbers of the Fibonacci sequence, starting at 0.\n * The Fibonacci series is a sequence of numbers in which the next one is always the sum of the two previous ones.\n * 0, 1, 1, 2, 3, 5, 8, 13...\n */ \"\"\"\nimport math\n\ndef fibonacci(num):\n    if num == 0 or num == 1:\n        return num\n    else:\n        return fibonacci(num-1) + fibonacci(num-2)\n
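\n# Editor's note: the plain recursion above is exponential and would never\n# finish for n = 50, which is why the loop below is used for printing. A\n# memoized sketch (not part of the original solution) achieves O(n) as well:\nfrom functools import lru_cache\n\n@lru_cache(maxsize=None)\ndef fibonacci_cached(num):\n    return num if num < 2 else fibonacci_cached(num - 1) + fibonacci_cached(num - 2)\n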
\nvar1 = 0\nvar2 = 1\n\nfor x in range(1,51):\n    #print(\"Fibo \", x, \"is\", fibonacci(x))\n    print(\"Fibo \", x, \"is\", var1)\n\n    \"\"\" fib = var1\n    var1 = var2 + fib\n    var2 = fib \"\"\"\n\n    fib = var1 + var2\n    var1 = var2\n    var2 = fib\n\n    ","repo_name":"JulsBetan/Retos_2022","sub_path":"Retos 2022 R2 FIBONACCI.py","file_name":"Retos 2022 R2 FIBONACCI.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70098900053","text":"import numpy as np\nimport pandas as pd\nfrom pydub import AudioSegment\nfrom modules.frame import Frame\n\n\nclass App:\n    # chart styling\n    
_XAXIS_PARAMS = {\n \"tickmode\": \"array\",\n \"linecolor\": \"black\",\n \"gridcolor\": \"#c4cfc9\",\n \"showline\": True,\n \"mirror\": True,\n \"ticks\": \"outside\",\n \"title\": \"Time [s]\",\n }\n _YAXIS_PARAMS = {\n \"linecolor\": \"black\",\n \"gridcolor\": \"#c4cfc9\",\n \"showline\": False,\n \"ticks\": \"outside\",\n }\n _DEFAULT_PARAMS = {\n \"width\": 900,\n \"height\": 400,\n \"margin\": dict(l=0, r=0, t=50, b=0),\n \"hovermode\": False,\n \"template\": \"plotly_white\",\n \"showlegend\": False,\n }\n\n def __init__(\n self, filepath_or_bytes, frame_duration_miliseconds: int = 5, normalize=True\n ):\n self.read_wav(filepath_or_bytes, normalize=normalize)\n self.frame_duration_miliseconds = frame_duration_miliseconds\n self.frames = [\n frame for frame in self.frame_generator(frame_duration_miliseconds)\n ]\n self.filepath_or_bytes = filepath_or_bytes\n\n def read_wav(self, filepath_or_bytes, normalize=True):\n self.audio_segment = AudioSegment.from_wav(filepath_or_bytes)\n self.frame_rate = self.audio_segment.frame_rate\n self.samples = np.asarray(\n self.audio_segment.get_array_of_samples(), dtype=float\n )\n if normalize:\n self.samples = self.samples / np.abs(self.samples).max()\n\n def frame_generator(self, frame_duration_miliseconds=10):\n n = int(self.frame_rate * frame_duration_miliseconds / 1000)\n offset = 0\n timestamp = 0.0\n duration = n / self.frame_rate\n for offset in range(0, len(self.samples), n):\n yield Frame(self.samples[offset : offset + n], timestamp, duration)\n timestamp += duration\n offset += n\n","repo_name":"zakrzewow/aipd1","sub_path":"modules/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39501570377","text":"#!/usr/bin/env python\n#!/usr/bin/python\n# -*- coding: utf- -*-\nimport os\nimport os.path\nimport argparse\nimport codecs\nimport logging\nimport shutil\nimport subprocess\nimport traceback\nimport datetime\nimport app\nimport gitlab\nimport gitbackup\nimport pgsql\nimport rhc\nimport re\nimport fnmatch\n\n#======= Global constants =======\nm_desc = \"Simple backup automation utility for Linux distributive.\"\nm_version = \"SaveData version: 0.04~beta\"\n\n#======= Global vars =======\nenv_mode = \"production\"\nenv = {\n \"gconf\" : \"/etc/savedata/global.conf\"\n}\n\ngconf = {\n \"work_path\" : \"/var/lib/savedata\",\n \"logging\" : {\n \"mode\" : \"on\",\n \"path\" : \"/var/log/savedata\"\n }\n}\n\nsession = {\n \"name\" : \"{session_name}\", \n \"cache\" : \"gconf['work_path']/.cache\",\n \"spath\" : \"session['cache']/{session_name}\",\n \"clean\" : True,\n \"init\" : False,\n}\n#======= Main function =======\n\ndef parseConfigs(backupsfName, serversFName):\n\n source = app.parseYamlFile(backupsfName)\n dest = app.parseYamlFile(serversFName)\n conf = {}\n conf[\"source\"] = source \n conf[\"dest\"] = dest\n\n return conf\n\n\n\ndef createSession(gconf):\n session_name = (\"%s\" % datetime.datetime.now()).replace(\" \", \"\")\n work_path = gconf[\"work_path\"]\n cache_path = \"%s/.cache\" % work_path\n session_path = \"%s/%s\" % (cache_path, session_name)\n loggingFileName = \"savedata-backup-%s.log\" % session_name\n session = {\n \"init\" : False,\n \"name\" : session_name, \n \"cache\" : cache_path,\n \"spath\" : session_path,\n \"clean\" : True,\n \"log\" : loggingFileName\n }\n\n # make session path\n if not os.path.isdir(session_path):\n try:\n os.makedirs(session_path)\n except OSError:\n msg = 
\"Can not create sesssion path. Please, check configuration file: %s\" % env[\"gconf\"] \n raise Exception(msg) \n # prepare logging session\n logging_mode = gconf[\"logging\"][\"mode\"]\n logging_path = gconf[\"logging\"][\"path\"]\n app.logFile(logging_mode,logging_path,session[\"log\"], env[\"gconf\"])\n\n session[\"init\"] = True\n return session \n\n\ndef deleteSession(session):\n if session[\"init\"] is False:\n return\n try:\n logging_path = gconf[\"logging\"][\"path\"]\n fullLogFileName = \"%s/%s\" % (logging_path, session[\"log\"])\n os.remove(fullLogFileName)\n except OSError as e:\n pass\n\n try:\n if session[\"clean\"] is True:\n shutil.rmtree(session[\"spath\"], ignore_errors=True)\n except OSError:\n pass \n session[\"init\"] = False\n os.system(\"unset PASSPHRASE\")\n\ndef sendLogToEmail(session, status):\n if status!=\"success\" and status!=\"failed\":\n raise Exception(\"Email status can be only 'success' or 'failed' \")\n if not \"email\" in gconf:\n return\n if not \"smtp_settings\" in gconf[\"email\"]:\n return\n\n try:\n if gconf[\"email\"][\"send_succes\"] is False and status == \"success\":\n return\n if gconf[\"email\"][\"send_failed\"] is False and status == \"failed\":\n return\n smtp_settings = gconf[\"email\"][\"smtp_settings\"]\n sender = gconf[\"email\"][\"from\"]\n destination =gconf[\"email\"][\"to\"]\n logging_path = gconf[\"logging\"][\"path\"]\n fullLogFileName = \"%s/%s\" % (logging_path, session[\"log\"])\n logarr = ''\n with open(fullLogFileName) as f:\n logarr = f.readlines()\n loginfo = \"\"\n for line in logarr:\n loginfo += line\n loginfo = loginfo.decode('string_escape')\n content = \"Log information from last backupping:\\n%s\" % loginfo\n subject = \"SaveData-Backup : %s\" % status\n app.sendEmail(smtp_settings, sender, destination, content, subject)\n except OSError:\n msg = \"Can not send email. Please, check your global configuration file %s\" % env[\"gconf\"]\n raise Exception(msg) \n\n\ndef dump(conf):\n pgsql.dump(conf)\n gitlab.dump(conf)\n gitbackup.dump(conf)\n rhc.dump(conf)\n\n\ndef rewriteBackup(server, rewrite):\n if rewrite is False:\n return\n if server[\"type\"] == \"local\":\n try:\n shutil.rmtree(server[\"remote_path\"], ignore_errors=True)\n except OSError:\n pass\n\ndef getFilterOpts(backup):\n opts = \"\"\n if not \"filter\" in backup:\n return opts\n filter = backup[\"filter\"]\n for f in filter:\n type = f[\"type\"]\n patterns = f[\"pattern\"]\n for p in patterns:\n opts += ' --%s \"%s\"' % (type, p)\n\n return opts\n\n#duplicity --full-if-older-than 1M /etc ftp://ftpuser@other.host/etc\n#duplicity remove-older-than 6M --force ftp://ftpuser@other.host/etc\n#\n#\ndef remove_old_backups(backup, server_url, srcKey):\n if not backup[\"period\"]:\n return\n env_pass = \"export PASSPHRASE=\" + backup[\"passphrase\"] + \"; \"\n duplicity_opts = \"remove-older-than %s\" % backup[\"period\"]\n cmd = \"duplicity --ssl-no-check-certificate %s %s/%s\" % (duplicity_opts, server_url, srcKey)\n output, errors = subprocess.Popen(env_pass + cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()\n if output:\n logging.info(output)\n if errors:\n logging.error(errors)\n raise Exception('Can not remove old backups. 
\n#duplicity --full-if-older-than 1M /etc ftp://ftpuser@other.host/etc\n#duplicity remove-older-than 6M --force ftp://ftpuser@other.host/etc\ndef remove_old_backups(backup, server_url, srcKey):\n    if not backup[\"period\"]:\n        return\n    env_pass = \"export PASSPHRASE=\" + backup[\"passphrase\"] + \"; \"\n    duplicity_opts = \"remove-older-than %s\" % backup[\"period\"]\n    cmd = \"duplicity --ssl-no-check-certificate %s %s/%s\" % (duplicity_opts, server_url, srcKey)\n    output, errors = subprocess.Popen(env_pass + cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()\n    if output:\n        logging.info(output)\n    if errors:\n        logging.error(errors)\n        raise Exception('Can not remove old backups. Please, check configuration file and try again.')\n    os.system(\"unset PASSPHRASE\")\n\ndef make_backup(conf, server_url, server_key):\n    source = conf[\"source\"]\n    dest = conf[\"dest\"]\n    backups = source[\"backups\"]\n    session = conf[\"session\"]\n    base_opts = \" --ssl-no-check-certificate\"\n    for srcKey in backups:\n        backup = backups[srcKey]\n        filter_opts = getFilterOpts(backup)\n        key_opts = \"\"\n        if \"full\" in backup:\n            key_opts += \"--full-if-older-than %s \" % backup[\"full\"]\n\n        if backup[\"type\"] == \"gitlab\":\n            src_path = \"%s/%s\" % (session[\"spath\"], srcKey)\n            key_opts += \" --allow-source-mismatch\"\n        elif backup[\"type\"] == \"pgsql\":\n            src_path = \"%s/%s\" % (session[\"spath\"], srcKey)\n            key_opts += \" --allow-source-mismatch\"\n        elif backup[\"type\"] == \"git\":\n            src_path = \"%s/%s\" % (session[\"spath\"], srcKey)\n            key_opts += \" --allow-source-mismatch\"\n        elif backup[\"type\"] == \"rhc\":\n            src_path = \"%s/%s\" % (session[\"spath\"], srcKey)\n            key_opts += \" --allow-source-mismatch\"\n        elif backup[\"type\"] == \"dir\":\n            src_path = backup[\"path\"]\n        else:\n            continue\n        # make an env variable with the passphrase\n        passphrase = backup[\"passphrase\"]\n        env_pass = \"export PASSPHRASE=\" + passphrase + \"; \"\n\n        logging.info(\"backing up src[%s:%s] -> dest[%s]\", srcKey, src_path, server_key)\n        duplicity_opts = \" %s %s %s\" % (base_opts, filter_opts, key_opts)\n        logging.debug(\"duplicity opts: %s\", duplicity_opts)\n        cmd = \"duplicity %s %s %s/%s\" % (duplicity_opts, src_path, server_url, srcKey)\n        output, errors = subprocess.Popen(env_pass + cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()\n        if output:\n            logging.info(output)\n        if errors:\n            logging.error(errors)\n            raise Exception('Can not create backup. Please, check configuration file and try again.')\n        remove_old_backups(backup,server_url,srcKey)\n    os.system(\"unset PASSPHRASE\")\n
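\n# Editor's illustration (hypothetical paths): for a \"dir\" backup named \"etc\"\n# with full=\"1M\" and no filters, make_backup() assembles roughly\n#   export PASSPHRASE=...; duplicity  --ssl-no-check-certificate\n#       --full-if-older-than 1M  /etc file:///var/backups/etc\n# and remove_old_backups() later runs duplicity remove-older-than PERIOD.\n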
\n\ndef backup(conf,rewrite):\n    servers = conf[\"dest\"][\"servers\"]\n    for destKey in servers:\n        server = servers[destKey]\n        logging.info('making backup in dest: [%s]...', destKey)\n        if server[\"type\"] == \"local\":\n            rewriteBackup(servers[destKey], rewrite)\n        elif servers[destKey][\"type\"] == \"webdavs\":\n            pass\n        else:\n            continue\n        server_url = app.buildServerURL(server)\n        make_backup(conf, server_url, destKey)\n\n        # change permissions\n        if server[\"type\"] == \"local\":\n            if (\"chown\" in server) and len(server[\"chown\"]) > 0:\n                cmd = \"chown %s -R %s\" % (server[\"chown\"], server[\"remote_path\"])\n                output, errors = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()\n                if output:\n                    logging.info(output)\n                if errors:\n                    logging.error(errors)\n                    raise Exception('Can not change the file owner (chown) in the destination directory')\n\n\ndef filter_dictionary(filter_str, _dict):\n    arr = filter_str.split(',')\n    regexes = []\n    for e in arr:\n        if len(e) <= 1 or (e[0] != 'i' and e[0] != 'e'):\n            msg = \"Can not parse regex list. Please check input arguments.\"\n            raise Exception(msg)\n        obj = {\n            \"type\" : e[0],\n            \"name\" : e[1:]\n        }\n        regexes.append(obj)\n\n    # mark every key as undecided\n    strfilter = {}\n    for name in _dict:\n        strfilter[name] = -1\n\n    # the first matching rule wins\n    for e in regexes:\n        for name in strfilter:\n            if fnmatch.fnmatchcase(name, e[\"name\"])==True and strfilter[name] == -1:\n                if e[\"type\"] == 'i':\n                    strfilter[name] = True\n                else:\n                    strfilter[name] = False\n    for name in strfilter:\n        if strfilter[name] == -1:\n            strfilter[name] = False\n\n    filter_dict = {}\n\n    for name in _dict:\n        if strfilter[name] == True:\n            filter_dict[name] = _dict[name]\n\n    return filter_dict\n
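\n# Editor's illustration (hypothetical names): rules are comma-separated,\n# prefixed with 'i' (include) or 'e' (exclude), and applied first-match-wins:\n#   filter_dictionary(\"ehome,i*\", {\"etc\": 1, \"home\": 2, \"var\": 3})\n#   -> {\"etc\": 1, \"var\": 3}\n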
\")\n # send email\n sendLogToEmail(session, \"success\")\n except Exception as e:\n logging.error(e)\n logging.debug(traceback.format_exc())\n logging.warning(\"FAILED.\")\n # send email\n sendLogToEmail(session, \"failed\")\n \n # delete session\n deleteSession(session)\n\nstart_app()\n\n \n","repo_name":"rshafeev/sysutils.savedata","sub_path":"savedata-backup/savedata-backup.py","file_name":"savedata-backup.py","file_ext":"py","file_size_in_byte":11975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10214305528","text":"import json\nfrom flask import Flask, request, jsonify\nfrom api import speech_to_text\nfrom api import natural_language_understanding\nfrom api import recomendacao\n\nimport os\nimport csv\n\n\n@app.route('/api/dataset', methods=['GET'])\ndef dataset():\n path = 'C:/Users/RODRIGO/Documents/Profissional/IBM/train_dataset/'\n arquivos = os.listdir(path)\n dataset = []\n indice = 0\n with open('dataset.csv', 'w', encoding='utf-8') as dataset1:\n dataset1.write('indice,entity,sentiment,mention\\n')\n\n for arquivo in arquivos:\n path_arquivo = path + arquivo\n with open(path_arquivo, 'r', encoding='utf-8') as texto:\n texto_arquivo = texto.read()\n\n texto_analisado = natural_language_understanding.analise_sentimento(texto_arquivo)\n\n for entidade in texto_analisado['entities']:\n sentiment = entidade['sentiment']['score']\n mention = entidade['text']\n entity = entidade['type']\n with open('dataset.csv', 'a', newline='', encoding='utf-8') as dataset2:\n gravacao = csv.writer(dataset2)\n gravacao.writerow([indice, entity, sentiment, mention])\n\n indice += 1\n\n\n return jsonify(arquivos)\n\n","repo_name":"rodrigofuku/desafio8-FCA-mbtc2020","sub_path":"rating/rating.py","file_name":"rating.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40625648612","text":"\"\"\"\npinchy.py - download mixes from pinchyandfriends.com\n\"\"\"\n\nimport argparse\nimport logging\nimport os\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom supabase import Client, create_client\n\n# storage dir: keep one directory per mix\n# directory will have:\n# - mp3\n# - artwork\n# - tracklist (if available)\nLOCAL_DIR = os.path.expanduser(\"~/media/audio/pinchy/\")\n\nBASE_URL = \"http://pinchyandfriends.com\"\n\nMIX_TABLE = \"pinchy_mixes\"\n\nlog = logging.getLogger(\"pinchy\")\n\n\ndef get_supa_client() -> Client:\n return create_client(os.environ[\"SUPABASE_URL\"], os.environ[\"SUPABASE_SECRET\"])\n\n\n@dataclass\nclass PinchyMixMetadata:\n \"\"\"\n PinchyMixMetadata : class for storing pinchy mix metadata\n \"\"\"\n\n mix_name: str\n artist: str\n mix_landing_url: str\n mix_id: str\n\n @staticmethod\n def from_div(div):\n \"\"\"\n new : parse the div and return a new PinchyMixMetadata object\n \"\"\"\n mix_name = div[\"data-name2\"]\n artist = div[\"data-name1\"]\n rel = div[\"onclick\"].split(\"=\")[1].strip().replace(\"'\", \"\").replace(\";\", \"\")[1:]\n mix_id = rel.split(\"/\")[0]\n return PinchyMixMetadata(artist, mix_name, rel, mix_id)\n\n\ndef format_mix_info(mixes):\n \"\"\"\n Create a table based on the list of pinchy mixes\n Pads out the titles based on the widest name in the list\n \"\"\"\n artist_len = max([len(mix.artist) for mix in mixes])\n title_len = max([len(mix.mix_name) for mix in 
mixes])\n separator = f\"|{''.ljust(artist_len + title_len + 1, '=')}|\"\n header = [\n separator,\n f\"|{'artist'.ljust(artist_len)}|{'mix name'.ljust(title_len)}|\",\n separator,\n ]\n footer = [separator]\n return \"\\n\".join(\n header\n + [\n f\"|{mix.artist.ljust(artist_len)}|{mix.mix_name.ljust(title_len)}|\"\n for mix in mixes\n ]\n + footer\n )\n\n\ndef get_existing_mix_ids():\n \"\"\"\n get_existing_mix_ids — return a list of pinchy mix ids\n creates directory if not already present\n \"\"\"\n if not os.path.isdir(LOCAL_DIR):\n os.makedirs(LOCAL_DIR)\n return []\n\n mix_dir = lambda x: os.path.join(LOCAL_DIR, x)\n return {mix for mix in os.listdir(LOCAL_DIR) if os.path.isdir(mix_dir(mix))}\n\n\ndef get_available_pinchy_info(content):\n \"\"\"\n get_available_pinchy_info\n\n parse the html provided and return a list of dictionaries\n\n all the mixes will be under the div with the id 'grid'\n each div looks like this:\n
<div class=\"grid_item\" data-name1=\"Some Mix Name\" data-name2=\"Some Artist\"\n         onclick=\"window.location = '/1234/some-mix-slug/';\">\n        <img src=\"...\"/>\n    </div>
\n\n    data-name1 = mix name\n    data-name2 = artist name\n    onclick = \"window.location = '/<mix_id>/<slug>/'\"\n    \"\"\"\n    page = BeautifulSoup(content, features=\"html.parser\")\n    rel = page.find(id=\"grid_rel\")\n    return [\n        PinchyMixMetadata.from_div(child)\n        for child in rel.children\n        if child.name == \"div\"\n    ]\n\n\ndef download_file(local_name, url, overwrite=False):\n    \"\"\"\n    download_file\n    stream a file from the given url to the given local filename\n    if the local file already exists, do not overwrite it unless told\n    to do so\n    \"\"\"\n    if os.path.isfile(local_name) and not overwrite:\n        return\n    resp = requests.get(url, stream=True)\n    with open(local_name, \"wb\") as output:\n        for chunk in resp.iter_content(chunk_size=1024):\n            if chunk:\n                output.write(chunk)\n\n\ndef location(track: PinchyMixMetadata, filename: str) -> str:\n    \"\"\"\n    given a track, return a relative path to the mix\n    \"\"\"\n    return os.path.join(track.mix_id, filename)\n\n\ndef write_to_supa(\n    client: Client, track: PinchyMixMetadata, location: str, art_location: Optional[str]\n):\n    \"\"\"\n    write mix information to supabase\n    \"\"\"\n    data = {\n        \"pinchy_id\": int(track.mix_id),\n        \"name\": track.mix_name,\n        \"artist_name\": track.artist,\n        \"location\": location,\n        \"art_location\": art_location,\n    }\n    client.table(MIX_TABLE).insert(data).execute()\n\n\ndef upload_to_supa(client: Client, locations: dict[str, str]):\n    \"\"\"\n    take a dictionary of image/mix locations and upload file to supabase\n    \"\"\"\n    storage = client.storage()\n    file_storage = storage.StorageFileAPI(\"pinchy-files\")\n    for key, local_filename in locations.items():\n        # key is the path in the supabase bucket\n        # local_filename is the local path where the data is stored\n        file_storage.upload(os.path.join(\"pinchy-files\", key), local_filename)\n\n\ndef get_mix_page_details(mix: PinchyMixMetadata):\n    url = os.path.join(BASE_URL, mix.mix_landing_url)\n    resp = requests.get(url)\n    resp.raise_for_status()\n\n    soup = BeautifulSoup(resp.content)\n    dl_link = soup.find(id=\"download\").a[\"href\"]\n    grid = soup.find(id=\"grid\")\n    img_rel_link = grid.img[\"src\"][0:]\n    tracklist = grid.p.string\n    return {\n        \"download\": dl_link,\n        \"img\": img_rel_link,\n        \"tracklist\": tracklist,\n    }\n\n\ndef scrape_mix_page_and_download(mix: PinchyMixMetadata):\n    \"\"\"\n    scrape_mix_page_and_download\n\n    get next page\n    create directory for mix\n    download mix and photo and tracklist\n    \"\"\"\n    log.info(\"downloading {}\".format(mix.mix_name))\n    url = os.path.join(BASE_URL, mix.mix_landing_url)\n    resp = requests.get(url)\n    resp.raise_for_status()  # handle this later, too\n\n    soup = BeautifulSoup(resp.content)\n    dl_link = soup.find(id=\"download\").a[\"href\"]\n    mix_filename = os.path.split(dl_link)[1]\n    grid = soup.find(id=\"grid\")\n    img_rel_link = grid.img[\"src\"][0:]\n    art_filename = os.path.split(img_rel_link)[1]\n    tracklist = grid.p.string\n\n    local_mix_dir = os.path.join(LOCAL_DIR, mix.mix_id)\n    if not os.path.isdir(local_mix_dir):\n        os.makedirs(local_mix_dir)\n\n    # TODO: use this as the supabase path, mostly\n    # just, like, remove all the stuff that's only relevant for the local filesystem\n    mix_file_name = os.path.join(local_mix_dir, mix_filename)\n    download_file(mix_file_name, dl_link)\n\n    img_file_name = os.path.join(local_mix_dir, art_filename)\n    img_dl_link = os.path.join(BASE_URL, img_rel_link)\n    download_file(img_file_name, img_dl_link)\n\n    tracklist_file_name = os.path.join(local_mix_dir, \"tracklist.txt\")\n    with open(tracklist_file_name, \"w\") as tracklist_file:\n        
tracklist_file.write(tracklist)\n\n client = get_supa_client()\n mix_loc = location(mix, mix_filename)\n art_loc = location(mix, art_filename)\n write_to_supa(client, mix, mix_loc, art_loc)\n # write the files to storage\n\n return\n\n\ndef get_pinchy_homepage():\n \"\"\"\n ye olde http request\n \"\"\"\n resp = requests.get(BASE_URL)\n resp.raise_for_status() # handle this later\n return resp.content\n\n\ndef get_args():\n \"\"\"\n get_args:\n - parse command-line arguments\n list: show which mixes you have locally and also mixes that at one the site\n download: save the mixes locally\n publish: push the mixes somewhere (maybe add a value that indicates\n where to publish?? like gcp or aws or dropbox or whatever?)\n \"\"\"\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--list\", help=\"Print all local and remote mixes\", action=\"store_true\"\n )\n group.add_argument(\"--download\", help=\"Download mixes only\", action=\"store_true\")\n group.add_argument(\n \"--upload\", help=\"Upload mixes to google play\", action=\"store_true\"\n )\n group.add_argument(\n \"--threads\",\n default=1,\n help=\"number of threads to use. default is single-threaded\",\n )\n return parser.parse_args()\n\n\ndef main():\n \"\"\"\n main :\n - bootstrap (get a list of all downloaded mixes)\n - scrape pinchyandfriends.com and look for IDs that aren't found locally\n - if list: print downloaded mixes + mixes available on the site\n - if download: just download any remote mixes and exit\n - if upload: download any remote mixes not found locally/in google play\n - if present, save the tracklist\n \"\"\"\n args = get_args()\n mix_ids = get_existing_mix_ids()\n mixes = [\n mix\n for mix in get_available_pinchy_info(get_pinchy_homepage())\n if mix.mix_id not in mix_ids\n ]\n if args.list:\n print(format_mix_info(mixes))\n return\n if args.download:\n with ThreadPoolExecutor(max_workers=args.threads) as executor:\n for mix in mixes:\n executor.submit(scrape_mix_page_and_download, mix)\n # now that the mix is downloaded loally, we should also upload all\n # this to supabase\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SoryRawyer/pinchy","sub_path":"pinchy.py","file_name":"pinchy.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20885710455","text":"from encoders.OneHotLabelEncoder import OneHotLabelEncoder\nfrom featuretools.primitives import Count, Mean, Min\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nimport featuretools as ft\nimport json\nimport numpy as np\nimport pandas as pd\n\n\ndef make_agg_features(X):\n '''\n Creates aggregated features for every pitcher_id in the training set.\n\n Engineered aggregation features:\n COUNT(pitcher)\n MEAN(pitcher.inning)\n MIN(pitcher.inning)\n\n Target-encoded engineered aggregation features:\n MEAN(pitcher.pitch_type_Changeup)\n MEAN(pitcher.pitch_type_Curveball)\n MEAN(pitcher.pitch_type_Cutter)\n MEAN(pitcher.pitch_type_Fastball)\n MEAN(pitcher.pitch_type_Off-Speed\n MEAN(pitcher.pitch_type_Purpose_Pitch)\n MEAN(pitcher.pitch_type_Sinker)\n MEAN(pitcher.pitch_type_Slider)\n '''\n X = X.copy()\n X['all'] = -1 # aggregation for all data, -1 used as index for pitcher_id\n\n subset_cols = ['pitch_type', 'pitcher_id', 'all', 'inning']\n X = OneHotLabelEncoder(labels=['pitch_type']).fit_transform(X[subset_cols])\n\n es = ft.EntitySet(id='baseball_pitches')\n\n es = 
es.entity_from_dataframe(entity_id='pitcher', dataframe=X,\n make_index=True, index='index')\n\n es = es.normalize_entity(base_entity_id='pitcher',\n new_entity_id='pitcher_ids', index='pitcher_id')\n es = es.normalize_entity(base_entity_id='pitcher',\n new_entity_id='pitchers_all', index='all')\n\n p_pitcher, _ = ft.dfs(target_entity='pitcher_ids',\n entityset=es,\n agg_primitives=[Count, Mean, Min],\n drop_contains=['MIN(pitcher.pitch_type'],\n ignore_variables={'pitcher': ['all']})\n\n p_pitcher_all, _ = ft.dfs(target_entity='pitchers_all',\n entityset=es,\n agg_primitives=[Count, Mean, Min],\n drop_contains=['MIN(pitcher.pitch_type'])\n\n p_pitcher = p_pitcher[sorted(p_pitcher.columns)]\n p_pitcher_all = p_pitcher_all[sorted(p_pitcher_all.columns)]\n p_pitcher = pd.concat([p_pitcher, p_pitcher_all], axis=0)\n\n return p_pitcher\n\n\ndef make_pitcher_dict(p_pitcher):\n '''\n Renames columns from the DataFrame returned from make_agg_features.\n Also converts COUNT(pitcher) column to dictionary with pitcher_id mapping.\n\n Returns results as a dictionary.\n '''\n cols_p_pitcher_id = ['MEAN(pitcher.inning) | pitcher_id',\n 'p(pitch_type_Changeup | pitcher_id)',\n 'p(pitch_type_Curveball | pitcher_id)',\n 'p(pitch_type_Cutter | pitcher_id)',\n 'p(pitch_type_Fastball | pitcher_id)',\n 'p(pitch_type_Off-Speed | pitcher_id)',\n 'p(pitch_type_Purpose_Pitch | pitcher_id)',\n 'p(pitch_type_Sinker | pitcher_id)',\n 'p(pitch_type_Slider | pitcher_id)',\n 'MIN(pitcher.inning) | pitcher_id']\n p_pitcher_id = pd.DataFrame(p_pitcher.iloc[:, 1:].values,\n columns=cols_p_pitcher_id)\n p_pitcher_id = p_pitcher_id.set_index(p_pitcher.index)\n p_pitcher_id = p_pitcher_id.iloc[:, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8]]\n\n dict_pitch = {'pitcher_cnt_dict': p_pitcher['COUNT(pitcher)'].to_dict(),\n 'p_pitcher_id': p_pitcher_id}\n\n return dict_pitch\n\n\nclass FeatureEngineering(BaseEstimator, TransformerMixin):\n '''\n Class for adding engineered features to a DataFrame, Series, or JSON input.\n\n Engineered features:\n runs_diff\n lead\n on_base_1\n on_base_2\n on_base_3\n on_base_any\n batterHand_pitcherHand\n slg_2010\n park_factor_H\n\n Engineered near-realtime features:\n last_count_type\n last_pitch\n Changeup_L10\n Curveball_L10\n Cutter_L10\n Fastball_L10\n Off-Speed_L10\n Purpose_Pitch_L10\n Sinker_L10\n Slider_L10\n\n Engineered aggregation features:\n MEAN(pitcher.inning) | pitcher_id\n MIN(pitcher.inning) | pitcher_id\n\n Target-encoded engineered aggregation features:\n p(pitch_type_Changeup | pitcher_id)\n p(pitch_type_Curveball | pitcher_id)\n p(pitch_type_Cutter | pitcher_id)\n p(pitch_type_Fastball | pitcher_id)\n p(pitch_type_Off-Speed | pitcher_id)\n p(pitch_type_Purpose_Pitch | pitcher_id)\n p(pitch_type_Sinker | pitcher_id)\n p(pitch_type_Slider | pitcher_id)\n '''\n def __init__(self, dict_slg, dict_park_H, epp_df,\n min_pitches_from_pitcher=150):\n self.dict_slg = dict_slg # dict of 2010 SLG data with batter_id mapping\n self.dict_park_H = dict_park_H # dict of park factors with team_id mapping\n self.epp_df = epp_df # DataFrame with engineered near-realtime features\n self.min_pitches_from_pitcher = min_pitches_from_pitcher\n\n @staticmethod\n def _height(val):\n feet, inches = map(int, val.split(\"-\"))\n return feet*12 + inches\n\n @staticmethod\n def _lead(val):\n if val > 0:\n return 'Y'\n elif val == 0:\n return 'T'\n else:\n return 'N'\n\n def _features(self, X):\n X.loc[:, 'runs_diff'] = (X['home_team_runs'] - X['away_team_runs']) * (1 - 2*X['top'])\n X.loc[:, 'lead'] = 
X['runs_diff'].map(self._lead)\n\n X.loc[:, 'on_base_1'] = pd.notnull(X['on_1b'])\n X.loc[:, 'on_base_2'] = pd.notnull(X['on_2b'])\n X.loc[:, 'on_base_3'] = pd.notnull(X['on_3b'])\n X.loc[:, 'on_base_any'] = X['on_base_1'] | X['on_base_2'] | X['on_base_3']\n\n X.loc[:, 'batterHand_pitcherHand'] = X['stand'] + '_' + X['p_throws']\n\n X.loc[:, 'b_height'] = X['b_height'].map(lambda x: self._height(x))\n X.loc[:, 'slg_2010'] = X['batter_id'].map(self.dict_slg)\n\n h1 = X['team_id_p'].map(self.dict_park_H) # pitching team is at home when on top\n h2 = X['team_id_b'].map(self.dict_park_H) # batting team is at home when on bottom\n X.loc[:, 'park_factor_H'] = np.where(X['top'] == 1, h1, h2)\n\n def _input_cleanup(self, X):\n if X.__class__ == pd.DataFrame:\n return X.copy()\n elif X.__class__ == pd.Series:\n return pd.DataFrame(X).T.convert_objects(convert_numeric=True)\n elif X.__class__ == str or X.__class__ == unicode:\n row = pd.Series(json.loads(X), index=self.columns)\n return pd.DataFrame(row).T.convert_objects(convert_numeric=True)\n\n def _make_pitch_freq(self, X, y=None):\n if 'pitch_type' not in X.columns:\n X = pd.concat([X, pd.Series(y, name='pitch_type')], axis=1)\n\n p_pitcher = make_agg_features(X)\n dict_pfreq = make_pitcher_dict(p_pitcher)\n\n self.pfreq = dict_pfreq\n\n def fit(self, X, y=None):\n self.columns = X.columns\n self._make_pitch_freq(X, y)\n return self\n\n def transform(self, X):\n X = self._input_cleanup(X)\n\n self._features(X)\n X = X.join(self.epp_df, on='uid')\n\n # overall pitch distribution (key == -1) will be used for pitcher_id\n # with insufficient pitches thrown (< self.min_pitches_from_pitcher)\n # or unknown pitcher_id\n pitches_thrown = X['pitcher_id'].map(self.pfreq['pitcher_cnt_dict'])\n pid_recode = X['pitcher_id'].where(pitches_thrown >= self.min_pitches_from_pitcher, -1)\n\n freq_data1 = self.pfreq['p_pitcher_id'].loc[pid_recode]\n freq_data1 = freq_data1.reset_index(drop=True).set_index(X.index)\n\n drops = ['home_team_runs', 'away_team_runs', 'on_1b', 'on_2b', 'on_3b',\n 'stand', 'p_throws', 'batter_id', 'team_id_p', 'team_id_b',\n 'top', 'uid', 'game_pk', 'year', 'date', 'at_bat_num',\n 'start_tfs', 'start_tfs_zulu', 'pitch_id', 'pitcher_id']\n X = X.drop(drops, axis=1)\n\n output = pd.concat([X, freq_data1], axis=1)\n\n return output\n","repo_name":"alvinthai/MLB_pitch_type_predictor","sub_path":"functions/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70118719893","text":"# Link - https://leetcode.com/problems/online-election/\n\n# Space: O(n)\n# Time:\n # __init__: O(n)\n # q: O(log(n))\n\nclass TopVotedCandidate:\n\n def __init__(self, persons: List[int], times: List[int]):\n \n leader, votes, win = None, Counter(), {}\n\n for i, person in enumerate(persons):\n \n votes[person] += 1\n \n if leader == None or votes[person] >= votes[leader]:\n leader = person\n \n win[times[i]] = leader\n \n self.win, self.times = win, times\n \n def q(self, t: int) -> int:\n \n left, right = 0, len(self.times) - 1\n \n while left < right:\n \n mid = (left + right)//2\n \n if self.times[mid] <= t:\n left = mid + 1\n else:\n right = mid\n\n if self.times[left] > t:\n left -= 1\n \n return self.win[self.times[left]]","repo_name":"dryeab/competitive-programming","sub_path":"Leetcode/911. Online Election.py","file_name":"911. 
Online Election.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31992932891","text":"#学生管理系统1.0版本,数据存储在列表的字典里。\n\nstudents_info = []\n#开始程序后,主程序的欢迎界面。\ndef greet_welcome ():\n print (\"\\n欢迎进入学生管理系统。\")\n print (\"输入'1',添加学生信息。\")\n print (\"输入'2',修改学生信息。\")\n print (\"输入'3',删除学生信息。\")\n print (\"输入'4',查询学生信息。\")\n print (\"输入'5',查询所有学生信息。\")\n print (\"输入'6',退出此程序。\")\n\n#打印学生信息\ndef print_student_info ():\n print(\"%s\\t%s\\t%s\\t%s\\t%s\" % (\"ID\", \"姓名\", \"性别\", \"年龄\", \"专业\"))\n\n#添加学生信息,press 1。\ndef add_students ():\n '''学生信息包括ID、姓名、性别、专业。'''\n add_id = input (\"\\n请输入您要添加的学生ID:\")\n if students_info:\n for stu_info in students_info:\n '''检查新增加的ID,是否和现有的ID有冲突。'''\n if stu_info[\"ID\"] == add_id:\n print (\"您输入的学生ID,已存在,请换一个。\\n\")\n return\n '''如果上面检查没有冲突,添加到students字典里。'''\n add_name = input (\"请输入您要添加的学生姓名:\")\n add_sex = input (\"请输入您要添加的学生性别:\")\n add_age = input (\"请输入您要添加的学生年龄:\")\n add_subject = input (\"请输入你要添加的学生专业:\")\n students = {}\n students[\"ID\"] = add_id\n students[\"name\"] = add_name\n students[\"sex\"] = add_sex\n students[\"age\"] = add_age\n students[\"subject\"] = add_subject\n students_info.append(students)\n print (\"添加成功.\\n\")\n\n#修改学生信息,press 2\ndef modify_students ():\n '''只能修改学生的姓名、性别、专业,学生ID无法修改。'''\n print (\"\\n只能修改学生的姓名、性别、专业,学生ID无法修改。\")\n modify_id = input(\"请输入您要修改学生的ID:\")\n for mod_info in students_info:\n if mod_info[\"ID\"] == modify_id:\n '''如果上面检查没有冲突,添加到students字典里。'''\n mod_info[\"name\"] = input(\"请输入您要修改的学生姓名:\")\n mod_info[\"sex\"] = input(\"请输入您要修改的学生性别:\")\n mod_info[\"age\"] = input(\"请输入您要修改的学生年龄:\")\n mod_info[\"subject\"] = input(\"请输入你要修改的学生专业:\")\n print (\"您已成功完成修改。\\n\")\n return\n print (\"您输入的ID不存在,请检查。\\n\")\n\n#根据学生ID,删除相关信息,press 3\ndef del_student ():\n del_id = input(\"\\n请输入您要删除的学生ID:\")\n #检查到匹配的用户ID,直接把列表里的对应字典删除了.\n for del_stu in range(len(students_info)):\n #找到列表对应的字典索引,然后删除该索引.\n if students_info[del_stu][\"ID\"] == del_id:\n del students_info[del_stu]\n print (\"学生ID%s,删除成功.\\n\" %del_id)\n return\n print (\"您输入的ID不存在,请检查。\\n\")\n\n#根据学生ID查找学生信息,press 4\ndef search_student ():\n sear_id = input(\"\\n请输入您要查找的学生ID:\")\n for sear_info in students_info:\n if sear_info[\"ID\"] == sear_id:\n '''打印学生头部信息'''\n print_student_info()\n print (\"%s\\t%s\\t%s\\t%s\\t%s\\n\"\n %(sear_info[\"ID\"], sear_info[\"name\"],\n sear_info[\"sex\"], sear_info[\"age\"],\n sear_info[\"subject\"]))\n return\n print (\"您输入的ID不存在,请检查。\\n\")\n\n#查询所有的学生信息,press 5\ndef search_all_students ():\n print_student_info() #打印学生头部信息\n for sear_all in students_info:\n print (\"%s\\t%s\\t%s\\t%s\\t%s\"\n %(sear_all[\"ID\"], sear_all[\"name\"],\n sear_all[\"sex\"], sear_all[\"age\"],\n sear_all[\"subject\"]))\n print (\"--\" * 20)\n\n#程序主程序\ndef main_students ():\n try:\n while True:\n greet_welcome()\n press = input(\"\\n请输入您的选择:\")\n if press == \"1\":\n add_students()\n elif press == \"2\":\n modify_students()\n elif press == \"3\":\n del_student()\n elif press == \"4\":\n search_student()\n elif press == \"5\":\n search_all_students()\n else:\n print(\"您已选择退出操作.\")\n break\n except BaseException:\n print (\"\\n\\n您的操作不正确,操作失败.\\n\\n\")\n\nmain_students()","repo_name":"linqunbin/Lab","sub_path":"students1.0.py","file_name":"students1.0.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23493711181","text":"import pandas as pd\nimport 
","repo_name":"linqunbin/Lab","sub_path":"students1.0.py","file_name":"students1.0.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23493711181","text":"import pandas as pd\nimport yaml\nimport subprocess\n\n\ndef set_credentials_on_environment(credential_path: str) -> bool:\n    import os\n    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path\n    return True\n\n\ndef convert_data_to_df(data, columns) -> pd.DataFrame:\n    dic = {}\n    for i in range(len(data)):\n        dic[columns[i]] = [data[i]]\n    df = pd.DataFrame(dic)\n    return df\n\n\ndef load_config(property_name: str = None, config_file_name: str = 'conf.yml'):\n    with open(config_file_name, \"r\") as yml_file:\n        cfg = yaml.safe_load(yml_file)\n\n    conf_dic = cfg[\"table\"]\n    if property_name and property_name in conf_dic:\n        return conf_dic[property_name]\n    return conf_dic\n\n\n# print(load_config(property_name='default_values'))\n\ndef get_binary_from_gs_bucket(gs_file_path: str = 'boston_housing_pred/output',\n                              binary_file_name: str = 'boston_model_binary'):\n    cmd = f'gsutil cp gs://{gs_file_path}/{binary_file_name} .'\n    print(subprocess.getoutput(cmd))\n    return True\n\n\nset_credentials_on_environment(\"/Users/av/PycharmProjects/Boston-Home-Prediction/key.json\")\n#print(get_binary_from_gs_bucket())\n","repo_name":"aravindmyd/Boston-Housing-","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23806302559","text":"s = list(input().split())\n\nl = []\nfor k, i in enumerate(s):\n    for j in range(int(i)):\n        l.append(str(k))\n\no = []\n\ncnt = 0\nfor i in l:\n    if i == '0':\n        cnt += 1\n        continue\n    elif len(o) == 0:\n        o.append(i)\n        while cnt > 0:\n            o.append('0')\n            cnt -= 1\n    else:\n        o.append(i)\n\nprint(''.join(o))\n","repo_name":"liuchuo/PAT","sub_path":"BasicLevel_Python/1023 组个最小数 (20 分).py","file_name":"1023 组个最小数 (20 分).py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":3185,"dataset":"github-code","pt":"67"}
+{"seq_id":"6105758236","text":"class Solution70:\n    def climbStairs(self, n: int) -> int:\n\n        if n == 1:\n            return 1\n        if n == 2:\n            return 2\n\n        a, b, res = 1, 2, 0\n\n        for _ in range(3, n+1):\n            res = a + b\n            a = b\n            b = res\n\n        return res\n\na = Solution70()\nprint(a.climbStairs(28))
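\n# Editor's note: climbStairs(n) follows the Fibonacci recurrence (each count\n# is the sum of the previous two, seeded with 1 and 2), so climbStairs(28)\n# evaluates to 514229 and that is what the line above prints.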
","repo_name":"xiaoqi25478/Job","sub_path":"算法与数据结构/LeetCode/动态规划(DP)/b_70爬楼梯.py","file_name":"b_70爬楼梯.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"22170895613","text":"# The Hamming distance between two integers is the number of positions at which the corresponding bits are different.\n# We store the bit representations of the two integers in lists and compare them element by element.\nclass Solution(object):\n    def hammingDistance(self, x, y):\n        a = list(bin(x)[2:])  # bin() converts an integer into its binary representation\n        b = list(bin(y)[2:])  # bin(5) = '0b101'; slicing from the third character drops the '0b' prefix and keeps the digits\n\n        # These next if statements pad the shorter list with '0' so both have the\n        # same length: 12 is 1100 while 5 is 101, so the bit counts can differ.\n\n        if len(a) > len(b):\n            b = ['0']*(len(a)-len(b)) + b\n        elif len(a) < len(b):\n            a = ['0']*(len(b)-len(a)) + a\n\n        # From here it is easy: compare element by element and count the\n        # differing bits in d.\n\n        d = 0\n        for i in range(len(a)):\n            if a[i] != b[i]:\n                d += 1\n\n        return d\n","repo_name":"SHY-Corp/LeetCode-Solutions","sub_path":"Python/461.HammingDistance.py","file_name":"461.HammingDistance.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"67"}
+{"seq_id":"73387237654","text":"\"\"\"Synthetic data generator.\"\"\"\nimport math\nimport pickle\nimport random\nfrom numpy.random import choice as npchoice\n\nVERBS = pickle.load(open('verbs.p', 'rb'))\nCOMMON_INSERTS = set(pickle.load(open('common_inserts.p', 'rb')))\nCOMMON_REPLACES = pickle.load(open('common_replaces.p', 'rb'))\nCOMMON_DELETES = pickle.load(open('common_deletes.p', 'rb'))\n\nclass Errorifier:\n    \"\"\"Generate errors in good sentences!\"\"\"\n\n    def __init__(self, sentence: str):\n        self.original_sentence = sentence.rstrip()\n        self.sentence = self.original_sentence\n        self.tokenized = None\n        self.tokenize()\n\n    def tokenize(self):\n        self.tokenized = self.sentence.split()\n\n    def correct(self):\n        return self.original_sentence\n\n    def no_error(self):\n        return ' '.join(self.tokenized)\n\n    def delete_error(self):\n        if len(self.tokenized) > 0:\n            insertable = list(range(len(self.tokenized)))\n            index = random.choice(insertable)\n\n            plist = list(COMMON_DELETES.values())\n            plistsum = sum(plist)\n            plist = [x / plistsum for x in plist]\n\n            # Choose a bad word\n            ins_word = npchoice(list(COMMON_DELETES.keys()), p=plist)\n            self.tokenized.insert(index, ins_word)\n\n        return ' '.join(self.tokenized)\n\n\n    def verb_error(self, redir=True):\n        \"\"\"Introduce a verb error from morphs.txt.\"\"\"\n\n        if len(self.tokenized) > 0:\n            verbs = [i for i, w in enumerate(self.tokenized) if w in VERBS]\n            if not verbs:\n                if redir:\n                    return self.replace_error(redir=False)\n                return self.sentence\n\n            index = random.choice(verbs)\n            word = self.tokenized[index]\n            if not VERBS[word]:\n                return self.sentence\n            repl = random.choice(VERBS[word])\n            self.tokenized[index] = repl\n\n        return ' '.join(self.tokenized)\n\n    def insert_error(self):\n        \"\"\"Delete a commonly inserted word.\"\"\"\n        if len(self.tokenized) > 1:\n            deletable = [i for i, w in enumerate(self.tokenized) if w in COMMON_INSERTS]\n            if not deletable:\n                return self.sentence\n\n            index = random.choice(deletable)\n            del self.tokenized[index]\n        return ' '.join(self.tokenized)\n\n    def replace_error(self, redir=True):\n        \"\"\"Add a common replace error.\"\"\"\n        if len(self.tokenized) > 0:\n            deletable = [i for i, w in enumerate(self.tokenized) if w in COMMON_REPLACES]\n            if not deletable:\n                if redir:\n                    return self.verb_error(redir=False)\n                return self.sentence\n\n            index = random.choice(deletable)\n            word = self.tokenized[index]\n            if not COMMON_REPLACES[word]:\n                return self.sentence\n\n            # Normalize probabilities\n            plist = list(COMMON_REPLACES[word].values())\n            plistsum = sum(plist)\n            plist = [x / plistsum for x in plist]\n\n            # Choose a bad word\n            repl = npchoice(list(COMMON_REPLACES[word].keys()), p=plist)\n            self.tokenized[index] = repl\n\n        return ' '.join(self.tokenized)\n\n    def error(self):\n        \"\"\"Introduce a random error.\"\"\"\n\n        #count = 
math.floor(pow(random.randint(1, 11), 2) / 50) + 1\n count = npchoice([0,1,2,3,4],p=[0.05,0.07,0.25,0.35,0.28]) #original (a1)\n #count = npchoice([0,1,2,3,4],p=[0.1,0.1,0.2,0.3,0.3]) # (a2)\n #count = npchoice([0,1,2,3,4,5],p=[0.1,0.1,0.2,0.2,0.2,0.2]) # (a3)\n #count = npchoice([0,1,2,3,4,5],p=[0.1,0.1,0.2,0.2,0.2,0.2]) # (a4)\n #count = npchoice([0,1,2,3,4,5],p=[0.0,0.0,0.25,0.25,0.25,0.25]) # (a5)\n\n for x in range(count):\n # Note: verb_error redirects to replace_error and vice versa if nothing happened\n error_probs = [.30,.25,.25,.20] #original (a1)\n #error_probs = [.25,.30,.30,.15] # (a2)\n #error_probs = [.40,.25,.25,.10] #(a3)\n #error_probs = [.30,.30,.30,.10] #(a4)\n #error_probs = [.35,.25,.25,.15] #(a5)\n\n error_fun = npchoice([self.insert_error, self.verb_error, self.replace_error, self.delete_error],p=error_probs)\n self.sentence = error_fun()\n self.tokenize()\n\n return self.sentence\n","repo_name":"awasthiabhijeet/PIE","sub_path":"errorify/errorifier.py","file_name":"errorifier.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"67"} +{"seq_id":"26534632367","text":"import numpy as np\nimport torch.nn as nn\nimport random\nfrom collections import namedtuple, deque\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport json\n\n\ndef parse_params(params_dir):\n with open(params_dir) as fp:\n params = json.load(fp)\n return params\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed, device):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\",\n field_names=[\"state\", \"action\", \"reward\",\n \"next_state\", \"done\"])\n random.seed(seed)\n self.device = device\n\n def add(self, state, action, reward, next_state, done, num_agents):\n \"\"\"Add a new experience to memory.\"\"\"\n for i in range(num_agents):\n e = self.experience(state[i], action[i], reward[i], next_state[i],\n done[i])\n self.memory.append(e)\n\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(self.device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(self.device)\n\n return (states, actions, rewards, next_states, dones)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n\nclass Actor_critic_model(nn.Module):\n def __init__(self, params_dir, input_dim, act_size):\n super().__init__()\n self.input_dim = input_dim\n self.act_size = act_size\n self.params = parse_params(params_dir)\n self.actor = self.create_actor()\n self.critic = 
self.create_critic()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n if m.out_features <= 2:\n m.weight.data.uniform_(-3e-3, 3e-3)\n else:\n nn.init.kaiming_normal_(m.weight, mode='fan_in')\n\n def create_actor(self):\n torch.manual_seed(self.params['seed'])\n module_list = nn.ModuleList()\n layer = nn.Sequential()\n fc = nn.Linear(self.input_dim, self.params['hidden_dim'])\n layer.add_module(f\"fc_layer_1\", fc)\n layer.add_module(f\"RELU_layer_1\", nn.ReLU())\n module_list.append(layer)\n self.add_hidden_layer(module_list, self.params['actor_h_num'],\n self.params['hidden_dim'],\n int(self.params['hidden_dim']-100))\n out_put_layer = nn.Sequential()\n out_layer = nn.Sequential(nn.Linear(int(self.params['hidden_dim']-100),\n self.act_size))\n out_put_layer.add_module(f\"out_put_layer\", out_layer)\n out_put_layer.add_module(f\"Tanh_out\", nn.Tanh())\n module_list.append(out_put_layer)\n module_list.apply(self._init_weights)\n return module_list\n\n def create_critic(self):\n torch.manual_seed(self.params['seed'])\n module_list = nn.ModuleList()\n layer = nn.Sequential()\n fc = nn.Linear(self.input_dim, self.params['hidden_dim'])\n layer.add_module(f\"fc_layer_1\", fc)\n # layer.add_module(f\"bn_layer_1\",\n # nn.BatchNorm1d(self.params['hidden_dim']))\n # layer.add_module(f\"RELU_layer_1\", nn.LeakyReLU())\n layer.add_module(f\"RELU_layer_1\", nn.ReLU())\n module_list.append(layer)\n self.add_hidden_layer(module_list, self.params['critic_h_num'],\n self.params['hidden_dim']+self.act_size,\n int(self.params['hidden_dim'])-100)\n out_put_layer = nn.Sequential()\n out_layer = nn.Sequential(nn.Linear(int(self.params['hidden_dim'])-100,\n 1))\n out_put_layer.add_module(f\"out_put_layer\", out_layer)\n module_list.append(out_put_layer)\n module_list.apply(self._init_weights)\n return module_list\n\n def add_hidden_layer(self, module_list, num_hidden_layer,\n input_dim, output_dim):\n if num_hidden_layer == 0:\n return\n for i in range(1, num_hidden_layer+1):\n layer = nn.Sequential()\n fc = nn.Linear(input_dim, output_dim)\n layer.add_module(f\"fc_layer_{i}\", fc)\n # layer.add_module(f\"bn_layer_{i}\",\n # nn.BatchNorm1d(output_dim))\n # layer.add_module(f\"RELU_layer_{i}\", nn.LeakyReLU())\n layer.add_module(f\"RELU_layer_{i}\", nn.ReLU())\n module_list.append(layer)\n input_dim = output_dim\n\n def forward(self, states, action=None, actor=True, train=True):\n '''\n If actor is True, output actions\n If Critic (actor = False), output state value\n '''\n x_ = states\n if actor:\n for m in self.actor:\n x_ = m(x_)\n return x_\n # forward in value path\n for idx, v in enumerate(self.critic):\n if idx == 1:\n x_ = torch.cat((x_, action), dim=1)\n x_ = v(x_)\n return x_\n\n\nclass Agent:\n def __init__(self, params_dir, state_size, action_size, num_agents,\n device):\n self.params = parse_params(params_dir)\n random.seed(self.params['seed'])\n self.num_agents = num_agents\n self.device = device\n self.epsilon = 1\n # Local Model\n self.model = Actor_critic_model(params_dir, state_size,\n action_size).to(device)\n self.actor_optimizer = optim.Adam(self.model.actor.parameters(),\n lr=self.params[\"actor_lr\"])\n # Target Model\n self.target = Actor_critic_model(params_dir, state_size,\n action_size).to(device)\n self.critic_optimizer = optim.Adam(self.model.critic.parameters(),\n lr=self.params[\"critic_lr\"],\n weight_decay=0.0)\n # Replay Buffer\n self.memory = ReplayBuffer(action_size, 
self.params[\"buffer_size\"],\n self.params[\"batch_size\"],\n self.params[\"seed\"], device)\n\n # Make local and target models are identical during initialization\n self.hard_update()\n self.t_step = 0\n\n def step(self, state, action, reward, next_state, done):\n # Save experience / reward\n self.memory.add(state, action, reward, next_state, done, self.num_agents)\n\n if len(self.memory):\n self.t_step = (self.t_step + 1) % self.params[\"update_freq\"]\n if self.t_step == 0:\n # Only start learning when there are enough samples in the memory\n if len(self.memory) > self.params[\"batch_size\"]:\n for _ in range(self.params[\"update_freq\"]):\n experiences = self.memory.sample()\n self.learn(experiences)\n\n def act(self, state):\n state = torch.from_numpy(state).float().to(self.device)\n self.model.eval()\n with torch.no_grad():\n action = self.model(state).cpu().data.numpy()\n self.model.train()\n return action\n\n def learn(self, experiences):\n states, actions, rewards, next_states, dones = experiences\n\n # ---------------------------- update critic ---------------------------- #\n # Get predicted next-state actions and Q values from target models\n actions_next = self.target(next_states)\n Q_targets_next = self.target(next_states, actions_next,\n actor=False)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (self.params['gamma'] * Q_targets_next * (1 - dones))\n # Compute critic loss\n Q_expected = self.model(states, actions, actor=False)\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n # Compute actor loss\n actions_pred = self.model(states)\n\n # we want to maximizde the state value using predicted actions,\n # However, since Pytorch is designed to find the minumum, we add\n # neagative in front of the actor loss\n actor_loss = -self.model(states, actions_pred, actor=False).mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # ----------------------- update target networks ----------------------- #\n self.soft_update() \n self.epsilon = max(self.epsilon - 0.00001, 0.1)\n\n def soft_update(self):\n for tp, lp in zip(self.target.parameters(),\n self.model.parameters()):\n tp.data.copy_(self.params['TAU']*lp.data +\n (1.0-self.params['TAU'])*tp.data)\n\n def hard_update(self):\n for tp, lp in zip(self.target.parameters(),\n self.model.parameters()):\n tp.data.copy_(lp.data)\n\n","repo_name":"jayden199012/Multi-Agent","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"86608268871","text":"def clean_inp(inp):\n return inp.upper().replace(\" \", \"\") # Convert to upper case, remove whitespace\n\n\ndef encrypt(message, key):\n m = clean_inp(message)\n k = clean_inp(key)\n ciphertext = \"\"\n for i in range(len(m)):\n letter = (ord(m[i]) - 65 + ord(k[i]) - 65) % 26 + 65\n ciphertext += chr(letter)\n return ciphertext\n\n\ndef decrypt(message, key):\n m = clean_inp(message)\n k = clean_inp(key)\n plaintext = \"\"\n for i in range(len(m)):\n letter = (ord(m[i]) - ord(k[i])) % 26 + 65\n plaintext += chr(letter)\n return 
plaintext\n","repo_name":"geetesh-gupta/cryptography_demystified","sub_path":"vernam.py","file_name":"vernam.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13549566733","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nfrom urllib import request\nimport json\nimport time\nimport subprocess\nimport os\nimport sys\nimport getopt\nimport logging.config\nimport datetime\nfrom manager import LivyServerManager, YarnServerManager\n\n\ndef parseCommand(argv):\n if len(argv) == 1:\n print('命令行参数错误,请输入 -h 获取更多信息')\n sys.exit()\n try:\n options, args = getopt.getopt(argv[1:], \"he:\", [\"help\", \"env=\"])\n except getopt.GetoptError:\n print('命令行参数错误,请输入 -h 获取更多信息')\n sys.exit()\n ret = ''\n\n for option, value in options:\n if option in (\"-h\", \"--help\"):\n print(\"-e 指定服务环境\")\n print(\"-e dohko 测试线\")\n print(\"-e product 生产线\")\n sys.exit()\n if option in (\"-e\", \"--env\"):\n ret = value.upper()\n\n if ret == 'DOHKO' or ret == 'PRODUCT':\n return ret\n else:\n print('命令行参数错误,请输入 -h 获取更多信息')\n sys.exit()\n\n\ndef setupLogging(default_path=\"logging.json\", default_level=logging.INFO, env_key=\"LOG_CFG\"):\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, \"r\") as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n\ndef main():\n print(\"run main\")\n env = parseCommand(sys.argv)\n\n setupLogging('config/logconf.json')\n logger = logging.getLogger('scheduler')\n\n yarnServerManager = YarnServerManager(env)\n livyServerManager = LivyServerManager(env, yarnServerManager)\n\n waitTimeForCloseAllLivyProcessInMinute = 5\n\n # while True:\n try:\n dt = datetime.datetime.now()\n hour = dt.__getattribute__(\"hour\")\n minute = dt.__getattribute__(\"minute\")\n if hour == 0 and minute < waitTimeForCloseAllLivyProcessInMinute:\n livyServerManager.restartLivyServer(waitTimeForCloseAllLivyProcessInMinute)\n\n # 获取livy所有session\n livyServerManager.getAllSessions()\n livyServerManager.classifySessions()\n livyServerManager.clearUselessSessions()\n (readingSessionList, writingSessionList) = livyServerManager.getYQSSessoins()\n (readingAppList, writingAppList) = yarnServerManager.getRunningYQSApps()\n\n # 提取 livy sessions 中的 appId\n readingSessionAppIds = []\n writingSessionAppIds = []\n for i in range(len(readingSessionList)):\n readingSessionAppIds.append(readingSessionList[i]['appId'])\n for i in range(len(writingSessionList)):\n writingSessionAppIds.append(writingSessionList[i]['appId'])\n\n # yarn 中 appliaction 还在运行, livy server 中 对应的session 已经不存在\n # 需要将这些 appliaction kill 释放资源\n for i in range(len(readingAppList)):\n readingApp = readingAppList[i]\n if readingApp['id'] not in readingSessionAppIds:\n logger.warning('killing reading apps: yarn app -kill %s' % (readingApp['id']))\n cmd = 'yarn app -kill ' + readingApp['id']\n os.system(cmd)\n\n for i in range(len(writingAppList)):\n writingApp = writingAppList[i]\n if writingApp['id'] not in writingSessionAppIds:\n logger.warning('killing writing apps: yarn app -kill %s' % (writingApp['id']))\n cmd = 'yarn app -kill ' + writingApp['id']\n os.system(cmd)\n\n YQS_READ_SESSION = 0\n YQS_WRITE_SESSION = 1\n if len(readingSessionList) == 0:\n livyServerManager.createSession(YQS_READ_SESSION)\n if len(writingSessionList) == 0:\n livyServerManager.createSession(YQS_WRITE_SESSION)\n # time.sleep(60)\n 
except Exception as e:\n logger.error('error occurred: %s' % (repr(e)))\n # time.sleep(60)\n\n\nif __name__ == '__main__':\n main()\n # livyServerManager = LivyServerManager(serverConfig['dohko']['livyServerUri'])\n # livyServerManager.uselessSessions =[{'id': 23593}, {'id': 23594}]\n # livyServerManager.clearUselessSessions()\n # yarnServerManager = YarnServerManager(serverConfig['dohko']['yarnServerUri'])\n # livyServerManager.createSession(0)\n","repo_name":"fjl121029xx/yarn-api-python","sub_path":"oho/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"69898781014","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.models import Group\nfrom django.db import migrations\n\nfrom referral_project.users.enums import GroupName\n\n\ndef create_groups(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n Group.objects.using(db_alias).bulk_create([\n Group(name=name) for name in GroupName\n ])\n\n\ndef delete_groups(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n Group.objects.using(db_alias).filter(name__in=GroupName).delete()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('users', '0004_user_status'),\n ]\n\n operations = [\n migrations.RunPython(create_groups, reverse_code=delete_groups),\n ]\n","repo_name":"alperendev98/referral","sub_path":"referral_project/users/migrations/0005_groups.py","file_name":"0005_groups.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"}
+{"seq_id":"36891611258","text":"import pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport nltk\nimport stocks_preprocessing\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import CountVectorizer \nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom scipy.stats import randint\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.neighbors import KNeighborsClassifier\nimport keras\nfrom keras import layers\nfrom keras.models import Sequential\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom keras.callbacks import EarlyStopping\nimport matplotlib.pyplot as plt\nfrom keras import datasets\nfrom keras import backend as K\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport preprocess_nyt_data\n\nplt.style.use('ggplot')\n\ndef plot_history(history):\n acc = history.history['acc']\n val_acc = 
history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n x = range(1, len(acc) + 1)\n\n plt.figure(figsize=(12, 5))\n plt.subplot(1, 2, 1)\n plt.plot(x, acc, 'b', label='Training acc')\n plt.plot(x, val_acc, 'r', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.subplot(1, 2, 2)\n plt.plot(x, loss, 'b', label='Training loss')\n plt.plot(x, val_loss, 'r', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()\n\ndef create_embedding_matrix(filepath, word_index, embedding_dim):\n vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index\n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n\n with open(filepath, encoding=\"utf8\") as f:\n for line in f:\n word, *vector = line.split()\n if word in word_index:\n idx = word_index[word] \n embedding_matrix[idx] = np.array(\n vector, dtype=np.float32)[:embedding_dim]\n\n return embedding_matrix\n\n\ndef nn(full_df):\n\n\n y = full_df['went_up'].values\n text = full_df['all_text_processed'].values\n\n sentences_train, sentences_test, y_train, y_test = train_test_split(\n text, y, test_size=0.2, random_state=22)\n\n tokenizer = Tokenizer(num_words=5000)\n tokenizer.fit_on_texts(sentences_train)\n\n X_train = tokenizer.texts_to_sequences(sentences_train)\n X_test = tokenizer.texts_to_sequences(sentences_test)\n\n vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index\n lstm_output_size = 70\n maxlen = 100\n embedding_dim = 50\n pool_size = 4\n embedding_matrix = create_embedding_matrix(\n '../datasets/large_data/glove_word_embeddings/glove.6B.50d.txt',\n tokenizer.word_index, embedding_dim)\n\n \n X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)\n X_test = pad_sequences(X_test, padding='post', maxlen=maxlen) \n\n\n input_dim = X_train.shape[1] # Number of features\n\n model = Sequential()\n model.add(layers.Embedding(vocab_size, embedding_dim, \n weights=[embedding_matrix], \n input_length=maxlen, \n trainable=True))\n model.add(layers.Dropout(0.25))\n model.add(layers.Conv1D(128, 5, activation='relu')) \n model.add(layers.MaxPooling1D(pool_size=pool_size))\n model.add(layers.LSTM(lstm_output_size))\n model.add(layers.Dense(20, activation='relu'))\n model.add(layers.Dropout(0.25))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n model.summary()\n history = model.fit(X_train, y_train,\n epochs=5,\n verbose=True,\n validation_data=(X_test, y_test),\n batch_size=30)\n loss, accuracy = model.evaluate(X_train, y_train, verbose=False)\n print(\"Training Accuracy: {:.4f}\".format(accuracy))\n loss, accuracy = model.evaluate(X_test, y_test, verbose=False)\n print(\"Testing Accuracy: {:.4f}\".format(accuracy))\n plot_history(history)\n\n\ndef build_fit_model(full_df):\n\n #X = full_df['headline_processed'].values\n y = full_df['went_up'].values\n text = full_df['all_text_processed'].values\n\n print('Vectorize')\n vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'), ngram_range=(1, 2)) \n X = vectorizer.fit_transform(text).toarray() \n\n print('TfidfTransform')\n tfidfconverter = TfidfTransformer() \n X = tfidfconverter.fit_transform(X).toarray() \n\n print('train_test_split')\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12) \n print('Train Shape: ' + str(X_train.shape))\n 
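# (added, illustrative comment; not part of the original script) what the two steps above do,
# on a toy corpus: CountVectorizer builds a vocabulary and counts term occurrences per
# document, then TfidfTransformer rescales the counts so terms that are frequent in one
# document but rare across the corpus get more weight. For example, with vocabulary
# ['down', 'up']:
#   CountVectorizer().fit_transform(["up up down", "down"]).toarray()  ->  [[1, 2], [1, 0]]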
print('Test Shape: ' + str(X_test.shape))\n\n classifier = LogisticRegression()\n #classifier = RandomForestClassifier(n_estimators=1000, random_state=0) \n\n #classifier = SVC(probability=True)\n #classifier = KNeighborsClassifier(n_neighbors = 10)\n\n print('fit')\n history = classifier.fit(X_train, y_train) \n\n print('predict')\n y_pred = classifier.predict(X_test) \n\n #y_pred2 = model.predict(X_test)\n\n print(confusion_matrix(y_test, y_pred)) \n print(classification_report(y_test, y_pred)) \n print(accuracy_score(y_test, y_pred)) \n\n y_pred_prob = classifier.predict_proba(X_test)[:,1]\n # print(y_pred_prob)\n # Generate ROC curve values: fpr, tpr, thresholds\n fpr, tpr, tresholds = roc_curve(y_test, y_pred_prob)\n\n # # Plot ROC curve\n # plt.plot([0, 1], [0, 1], 'k--')\n # plt.plot(fpr, tpr)\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('ROC Curve')\n # plt.show()\n\n advwords = vectorizer.get_feature_names()\n advcoeffs = classifier.coef_.tolist()[0]\n advcoeffdf = pd.DataFrame({'Words' : advwords, \n 'Coefficient' : advcoeffs})\n advcoeffdf = advcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])\n print(advcoeffdf.head(10))\n\n # loss, accuracy = classifier.evaluate(X_train, y_train, verbose=False)\n # print(\"Training Accuracy: {:.4f}\".format(accuracy))\n # loss, accuracy = classifier.evaluate(X_test, y_test, verbose=False)\n # print(\"Testing Accuracy: {:.4f}\".format(accuracy))\n #plot_history(history)\n # # Compute and print AUC score\n # print(\"AUC: {}\".format(roc_auc_score(y_test, y_pred_prob)))\n\n # # Compute cross-validated AUC scores: cv_auc\n # cv_auc = cross_val_score(classifier, X, y, cv=5, scoring='roc_auc')\n\n # # Print list of AUC scores\n # print(\"AUC scores computed using 5-fold cross-validation: {}\".format(cv_auc))\n\n\n\nstocks_path = '../datasets/stock_data/MSFT_2000_1_2019_3.csv'\nnyt_path = '../datasets/large_data/nyt_archive_2000_1_2019_1.csv'\nnyt_preprod_path = '../datasets/large_data/nyt_preprocessed_2000_1_2019_1.csv'\n#nyt_data = pd.read_csv(nyt_path, parse_dates=True)\n#df = preprocess_nyt_data.preprocess(nyt_data, save_preprocessed=True, base_path='../datasets/large_data/', filename='nyt_preprocessed_2000_1_2019_1.csv')\nnyt_data = pd.read_csv(nyt_preprod_path, parse_dates=True)\n#stocks_preprocessing.train_vw(nyt_data)\n\nstocks_preprocessing.load_vw()\n# df = stocks_precprocessing.preprocess(stocks_path, nyt_path, save_preprocessed=True)\n# df = pd.read_csv('../datasets/stock_data/preprocessed_nyt_stock_MSFT.csv', parse_dates=True)\n# build_fit_model(df)\n# nn(df)","repo_name":"f4tca7/data_science","sub_path":"stocks/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18950938642","text":"file = open('anagram.txt')\n\ndef input():\n return file.readline()\n\n\nN = int(input().strip())\n\nfrom collections import Counter\n\nfor i in range(N):\n s12 = input().strip()\n\n L = len(s12)\n if L % 2 == 1:\n print(-1)\n continue\n\n s1 = s12[:L // 2]\n s2 = s12[L // 2:]\n\n s1c = Counter(s1)\n s2c = Counter(s2)\n\n s1c -= s2c\n\n print(sum([x for x in s1c.values()]))\n","repo_name":"chvjak/hr-practice","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19275793799","text":"import random\nfrom calc import eval\n\n# 
Generate the expression\nx = random.randint(0, 10)\ny = random.randint(0, 10)\nerror = random.choice([-2, -1, 0, 0, 0, 1, 2, 3])\nop = random.choice([\"+\", \"-\", \"*\", \"/\"])\nkq = eval(x, y, op) + error\nprint(x, op, y, \"=\", kq)\nuser_input = input(\"Your answer (Y/N)\").upper()\nresult = \"\"\nif error == 0:\n if user_input == \"Y\":\n result = \"Yay\"\n elif user_input == \"N\":\n result = \"Nay\"\nelse:\n if user_input == \"Y\":\n result = \"Wrong\"\n elif user_input == \"N\":\n result = \"Yay\"\nprint(result)","repo_name":"duongnt52/ngotungduong-fundamentals-c4e25","sub_path":"Labs/Lab3/f_math2.py","file_name":"f_math2.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20434169667","text":"from pynput import mouse, keyboard\nfrom pprint import pprint\nfrom time import perf_counter, time, sleep\nimport os\nimport json\nimport threading\nimport pyautogui\n\n\nclass MouseAndKeyTracking:\n\n #CONSTANTS\n OUTPUT_FILENAME = 'actions_test_01'\n __FILE__ = './data'\n\n #PROPERTIES\n #Declare mouse_listener globally so we can stop thread with keyboard\n mouse_listener = None\n #Declare start time so callbacks can reference it\n start_time = perf_counter()\n #Store unreleased keyboard key press\n unreleased_keys = []\n #Store all input events\n input_events = []\n\n class EventType():\n KEYDOWN = 'keyDown'\n KEYUP = 'keyUp'\n CLICK = 'click'\n POS = 'pos'\n\n def main(self):\n self.countdownTimer()\n trackThread = threading.Thread(target=tracker.mousePosTicker)\n trackThread.start()\n self.runListeners()\n \n print(\"Recording Duration: {} seconds\".format(self.elapsed_time()))\n global input_events\n #pprint(json.dumps(input_events))\n\n script_dir = os.path.dirname(self.__FILE__)\n filepath = os.path.join(script_dir, 'data', '{}.json'.format(self.OUTPUT_FILENAME))\n with open(filepath, 'w') as outfile:\n json.dump(self.input_events, outfile, indent=4)\n\n def elapsed_time(self):\n return perf_counter() - self.start_time\n\n def countdownTimer(self):\n # Countdown timer\n print(\"Starting\", end=\"\", flush=True)\n for i in range(0, 5):\n print(\".\", end=\"\", flush=True)\n sleep(1)\n print(\"Go\")\n\n def mousePosTicker(self):\n button = None\n counter = 200\n\n while True:\n sleep(.3)\n pos = pyautogui.position()\n self.record_event(self.EventType.POS, self.elapsed_time(), button, pos)\n print(\"Timestamp TIME: {} , POSITION: {}\".format(self.elapsed_time(), pos))\n counter -= 1\n if counter == 0:\n break\n\n\n\n def record_event(self, event_type, event_time, button=\"none\", pos=None):\n self.input_events.append({\n 'time': event_time,\n 'type': event_type,\n 'button': str(button),\n 'pos': pos\n })\n\n if event_type == self.EventType.CLICK:\n print('{} on {} pos {} at {}'.format(event_type, button, pos, event_time))\n\n def on_press(self, key):\n #we only want to record the first keypress event until key has been released. 
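# (added note, not in the original source) OS key auto-repeat makes pynput fire on_press
# repeatedly while a key is held down, so without the guard below a single physical press
# would be logged as many keyDown events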
\n if key in self.unreleased_keys:\n return\n else:\n self.unreleased_keys.append(key)\n try:\n self.record_event(self.EventType.KEYDOWN, self.elapsed_time(), key.char)\n except AttributeError:\n self.record_event(self.EventType.KEYDOWN, self.elapsed_time(), key)\n\n def on_release(self, key):\n #mark key as no longer pressed\n print(self.unreleased_keys)\n try:\n self.unreleased_keys.remove(key)\n except ValueError:\n print('{} was not tracked as pressed'.format(key))\n\n print('{} released at {}'.format(key, self.elapsed_time()))\n\n try:\n self.record_event(self.EventType.KEYUP, self.elapsed_time(), key.char)\n except AttributeError:\n self.record_event(self.EventType.KEYUP, self.elapsed_time(), key)\n\n if key == keyboard.Key.esc:\n #Stop mouse listener\n self.mouse_listener.stop()\n #Stop Keyboard listener\n raise keyboard.Listener.StopException()\n\n \n\n def on_click(self, x, y, button, pressed):\n global unreleased_press\n if not pressed:\n self.record_event(self.EventType.CLICK, self.elapsed_time(), button, (x, y))\n print('Clicked {} at {} time {}'.format(button, (x, y), self.elapsed_time()))\n\n\n def runListeners(self):\n #collect mouse input events\n self.mouse_listener = mouse.Listener(on_click=self.on_click)\n self.mouse_listener.start()\n self.mouse_listener.wait()\n print(\"LISTENER RUNNING AT {}\".format(time()))\n\n #Collect keyboard inputs until released\n with keyboard.Listener(\n on_press=self.on_press,\n on_release=self.on_release) as listener:\n\n #Start time globally before thread start\n global start_time\n start_time = time()\n listener.join()\n\n\n\ntracker = MouseAndKeyTracking()\ntracker.main()\n\n\n","repo_name":"Jared-P-111/Project_Alpha","sub_path":"m_and_k_tracking.py","file_name":"m_and_k_tracking.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71323815575","text":"#Write a Python program to write a list to a file.\n\nimport os\n\npath = r'/Users/mac/Documents/GitHub/pp2-22B030588/'\n\nos.chdir(path)\ntxt = input()\n\noutput = open(txt, 'w') #creating new file\noutput.write(str(list(map(int, input().split())))) #reading user's input and writing it to the new file\noutput.close()","repo_name":"Aluakx/pp2-22B031106","sub_path":"tsis6/directories/5df.py","file_name":"5df.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"17623122428","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import status\nfrom .models import Product\nfrom .serializers import ProductSerializer\nfrom rest_framework.permissions import IsAuthenticated\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef index(request):\n products = Product.objects.filter(owner_id=request.user.id)\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef show(request, id):\n try:\n product = Product.objects.get(owner_id=request.user.id, id=id)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ProductSerializer(product)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef add(request):\n request.data['owner'] = request.user.id\n serializer = ProductSerializer(data=request.data)\n if serializer.is_valid():\n 
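# (added note, not in the original source) since no instance was passed to ProductSerializer
# above, save() dispatches to the serializer's create() and inserts a new Product row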
serializer.save()\n else:\n return Response(serializer.errors)\n return Response(serializer.data)\n\n\n@api_view(['PUT', 'PATCH'])\n@permission_classes([IsAuthenticated])\ndef update(request, id):\n try:\n product = Product.objects.get(owner_id=request.user.id, id=id)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ProductSerializer(product, data=request.data)\n if serializer.is_valid():\n serializer.save()\n else:\n return Response(serializer.errors)\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete(request, id):\n try:\n product = Product.objects.get(owner_id=request.user.id, id=id)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n product.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"kvyaceslav/simple-python-api","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"32562594265","text":"# encoding:utf-8\nimport tensorflow as tf\n\n# Define an LSTM structure. In TensorFlow a complete LSTM structure can be created with one simple command.\n# The variables used inside the LSTM are also declared automatically by this function\nlstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_hidden_size)\n\n# Initialize the LSTM state to an all-zero array. As with other neural networks, optimizing a\n# recurrent neural network also uses one batch of training samples at a time. In the code below,\n# batch_size gives the size of one batch.\n# The BasicLSTMCell class provides the zero_state function to generate the all-zero initial state\nstate = lstm.zero_state(batch_size,tf.float32)\n\n# Define the loss function\nloss = 0.0\n# As introduced in Section 8.1, although in theory a recurrent neural network can process\n# sequences of arbitrary length, a maximum sequence length is imposed during training to avoid\n# the vanishing gradient problem. In the code below, num_steps denotes this length\nfor i in range(num_steps):\n # Declare the variables used by the LSTM structure at the first time step; all later time steps reuse the previously defined variables\n if i > 0: tf.get_variable_scope().reuse_variables()\n\n # Each step processes one moment of the time series. Feeding the current input (current_input)\n # and the previous state (state) into the defined LSTM structure yields the current LSTM\n # output lstm_output and the updated state\n lstm_output, state = lstm(current_input, state)\n # Pass the LSTM output at the current time step through a fully connected layer to get the final output\n final_output = fully_connected(lstm_output)\n # Compute the loss of the output at the current time step\n loss += calc_loss(final_output, expected_output)","repo_name":"ShenXiaoJun/tf-work","sub_path":"p207.py","file_name":"p207.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31348134653","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\nfrom odoo.http import request, route\n\n\nclass CarRequestCustomer(http.Controller):\n # @http.route('/car_request_customer/car_request_customer', auth='public')\n # def index(self, **kw):\n # return \"Hello, world\n\n @route('/request/list/objects', auth='public', website=True, type='http')\n def list(self, **kw):\n\n return request.render('car_request_customer.listing', {\n 'root': '/request/detail',\n 'objects': request.env['car.request'].search([]),\n })\n\n @route('/request/detail/objects/', auth='public', website=True)\n def object(self, obj, **kw):\n return request.render('car_request_customer.object', {\n 'object': obj\n })\n\n @route('/api/list/request', type='json', auth='public', methods=['POST'])\n def car_request_api_list(self, **kw):\n domain = []\n if 'say' in kw:\n print(kw)\n # domain.append()\n car_requests = request.env['car.request'].sudo().search_read(domain, fields=['name', 'destination', 'start_date', 'end_date'], order='name DESC')\n return {'message': \"Success!\", 'data': 
car_requests}","repo_name":"mohamedmagdy/iti2023_training","sub_path":"car_request_customer/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"35039871820","text":"import pytest\n\nfrom zoo.models.recommendation.utils import *\nfrom zoo.common.nncontext import *\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\n\nnp.random.seed(1337) # for reproducibility\n\n\nclass TestRecommenderUtils:\n\n def test_get_boundaries(self):\n index = get_boundaries(42, [18, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n assert index == 5\n\n def test_categorical_from_vocab_list(self):\n MARITAL_STATUS_VOCAB = [\"Married-civ-spouse\", \"Divorced\", \"Married-spouse-absent\",\n \"Never-married\", \"Separated\", \"Married-AF-spouse\", \"Widowed\"]\n index = categorical_from_vocab_list(\"Never-married\", MARITAL_STATUS_VOCAB)\n assert index == 3\n\n def test_hash_bucket(self):\n np.random.seed(1337)\n res = hash_bucket(\"Prof-specialty\", 1000)\n assert res < 1000\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n","repo_name":"intel-analytics/analytics-zoo","sub_path":"pyzoo/test/zoo/models/recommendation/test_recommender_utils.py","file_name":"test_recommender_utils.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":2592,"dataset":"github-code","pt":"67"} +{"seq_id":"21834263635","text":"import time\nfrom mcpi_tetris.hardware.hardware import Hardware\n\n\ndef pause():\n input('Press enter to continue ...')\n\n\ndef test_led():\n print('Test LED ...')\n\n from mcpi_tetris.hardware.led import LED\n led = LED()\n\n print('\\tled.on(0xFFFFFF) // white')\n led.on(0xFFFFFF)\n pause()\n\n print('\\tled.on(0xFF0000) // red')\n led.on(0xFF0000)\n pause()\n\n print('\\tled.on(0x00FF00) // green')\n led.on(0x00FF00)\n pause()\n\n print('\\tled.on(0x0000FF) // blue')\n led.on(0x0000FF)\n pause()\n\n print('\\tled.on(0xFF00FB) // pink')\n led.on(0xFF00FB)\n pause()\n\n print('\\tled.off()')\n led.off()\n pause()\n\n led.close()\n\n\ndef test_buzzer():\n print('Test Buzzer ...')\n\n from mcpi_tetris.hardware.buzzer import Buzzer\n buzzer = Buzzer()\n\n print('\\tbuzzer.play_tetris_bgm()')\n print('\\tPress Ctrl+C to continue ...')\n\n try:\n buzzer.play_tetris_bgm_loop()\n except KeyboardInterrupt:\n pass\n\n buzzer.close()\n\n\ndef test_lcd():\n print('Test LCD ...')\n\n from mcpi_tetris.hardware.lcd import LCD\n lcd = LCD()\n\n print('\\tlcd.send(1, \"Hello World!\")')\n lcd.send(1, 'Hello World!')\n pause()\n\n print('\\tlcd.send(2, \"Test 123456\")')\n lcd.send(2, 'Test 123456')\n pause()\n\n lcd.close()\n\n\ndef test_joystick():\n print('Test Joystick')\n\n from mcpi_tetris.hardware.controller import RPiGPIOJoystickController\n controller = RPiGPIOJoystickController(1)\n controller.preinitialize()\n\n print('\\tPress Ctrl+C to continue ...')\n\n try:\n while True:\n key = controller.pop()\n if key is None:\n time.sleep(0.1)\n continue\n\n print('Key pressed:', key)\n\n except KeyboardInterrupt:\n pass\n\n controller.close()\n\n\ndef before_test():\n from dotenv import load_dotenv\n load_dotenv()\n\n Hardware.enable_hardwares()\n\n\ndef after_test():\n Hardware.cleanup_hardwares()\n\n\ndef test():\n before_test()\n\n try:\n # Add your unit test\n test_led()\n test_lcd()\n test_buzzer()\n test_joystick()\n pause()\n except KeyboardInterrupt:\n pass\n finally:\n after_test()\n\n\nif __name__ == 
'__main__':\n test()","repo_name":"deu-coders/mcpi-tetris","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72254119892","text":"import pygame\r\nimport sys\r\nimport GameDeom.Button as Button\r\nimport GameDeom.SelectPlayer as SelectPlayer\r\nimport GameDeom.SelectCheckpoint as SelectCheckpoint\r\nimport GameDeom.Info as Info\r\nbackground_filename = 'Image/strat_background.jpg'\r\ngs_button_up = 'Image/strat_button_before.png'\r\ngs_button_down = 'Image/strat_button_after.png'\r\ngq_button_up = 'Image/return_button_before.png'\r\ngq_button_down = 'Image/return_button_after.png'\r\nselect_button = 'Image/button_select.png'\r\ninfo_button_up = 'Image/button_info_before.png'\r\ninfo_button_down = 'Image/button_info_after.png'\r\nplayer_status = list()\r\nfor i in range(0, 4):\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}.png'), (100, 100)))\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}_left.png'), (100, 100)))\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}_right.png'), (100, 100)))\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}_invincible.png'), (100, 100)))\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}_left_invincible.png'), (100, 100)))\r\n player_status.append(pygame.transform.scale(pygame.image.load(f'Image/player{i}_right_invincible.png'), (100, 100)))\r\nstart_sound_file = 'Sound/StartGame.wav'\r\n\r\n\r\ndef start_screen(screen):\r\n game_start_button = Button.Button(gs_button_up, gs_button_down, (520, 200), 150, 70)\r\n game_quit_button = Button.Button(gq_button_up, gq_button_down, (520, 400), 150, 70)\r\n select_checkpoint_button = Button.Button(select_button, select_button, (80, 200), 150, 70)\r\n info_button = Button.Button(info_button_up, info_button_down, (80, 400), 150, 70)\r\n background = pygame.image.load(background_filename)\r\n background = pygame.transform.scale(background, (600, 600))\r\n clock = pygame.time.Clock()\r\n start_sound = pygame.mixer.Sound(start_sound_file)\r\n start_sound.play(loops=-1)\r\n while True:\r\n clock.tick(60)\r\n screen.blit(background, (0, 0))\r\n game_start_button.change(screen)\r\n game_quit_button.change(screen)\r\n select_checkpoint_button.change(screen)\r\n info_button.change(screen)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if game_start_button.is_over():\r\n start_sound.stop()\r\n SelectPlayer.SelectPlayer(screen, player_status, 1)\r\n if game_quit_button.is_over():\r\n sys.exit()\r\n if select_checkpoint_button.is_over():\r\n start_sound.stop()\r\n SelectCheckpoint.SelectCheckpoint(screen)\r\n if info_button.is_over():\r\n start_sound.stop()\r\n Info.Info(screen)\r\n pygame.display.flip()\r\n","repo_name":"FreeToBelieve/Pygame-Demo","sub_path":"StartScreen.py","file_name":"StartScreen.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28610196158","text":"import os\nimport sys\nimport time\n\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddle_fl.mpc as pfl_mpc\nfrom paddle_fl.mpc.data_utils.data_utils import get_datautils\n\nsys.path.append('..')\nimport network\nimport 
process_data\n\n\nmpc_protocol_name = 'aby3'\nmpc_du = get_datautils(mpc_protocol_name)\n\ndef load_uci_update(role, ip, server, port, mpc_model_dir, mpc_model_filename, updated_model_dir):\n \"\"\"\n Load, update and save uci MPC model.\n\n \"\"\"\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n # Step 1. initialize MPC environment and load MPC model into default_main_program to update.\n pfl_mpc.init(mpc_protocol_name, role, ip, server, port)\n mpc_du.load_mpc_model(exe=exe,\n mpc_model_dir=mpc_model_dir,\n mpc_model_filename=mpc_model_filename)\n\n # Step 2. MPC update\n epoch_num = network.MPC_UPDATE_EPOCH\n batch_size = network.BATCH_SIZE\n mpc_data_dir = \"../mpc_data/\"\n feature_file = mpc_data_dir + \"house_feature\"\n feature_shape = (13,)\n label_file = mpc_data_dir + \"house_label\"\n label_shape = (1,)\n loss_file = \"./tmp/uci_mpc_loss.part{}\".format(role)\n if os.path.exists(loss_file):\n os.remove(loss_file)\n updated_model_name = 'mpc_updated_model'\n feature_name = 'x'\n label_name = 'y'\n # fetch loss if needed\n loss = fluid.default_main_program().global_block().var('mean_0.tmp_0')\n loader = process_data.get_mpc_dataloader(feature_file, label_file, feature_shape, label_shape,\n feature_name, label_name, role, batch_size)\n start_time = time.time()\n for epoch_id in range(epoch_num):\n step = 0\n for sample in loader():\n mpc_loss = exe.run(feed=sample, fetch_list=[loss.name])\n if step % 50 == 0:\n print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, mpc_loss))\n with open(loss_file, 'ab') as f:\n f.write(np.array(mpc_loss).tostring())\n step += 1\n end_time = time.time()\n print('Mpc Updating of Epoch={} Batch_size={}, cost time in seconds:{}'\n .format(epoch_num, batch_size, (end_time - start_time)))\n\n # Step 3. 
save updated MPC model as a trainable model.\n mpc_du.save_trainable_model(exe=exe,\n model_dir=updated_model_dir,\n model_filename=updated_model_name)\n print('Successfully save mpc updated model into:{}'.format(updated_model_dir))\n\n\nif __name__ == '__main__':\n role, server, port = int(sys.argv[1]), sys.argv[2], int(sys.argv[3])\n mpc_model_dir = './tmp/mpc_models_to_update/model_share_{}'.format(role)\n mpc_model_filename = 'model_to_update'\n updated_model_dir = './tmp/mpc_models_updated/updated_model_share_{}'.format(role)\n load_uci_update(role=role,\n ip='localhost',\n server=server,\n port=port,\n mpc_model_dir=mpc_model_dir,\n mpc_model_filename=mpc_model_filename,\n updated_model_dir=updated_model_dir)\n","repo_name":"PaddlePaddle/PaddleFL","sub_path":"python/paddle_fl/mpc/examples/model_encryption/update/update_mpc_model.py","file_name":"update_mpc_model.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"67"} +{"seq_id":"20714018581","text":"import pandas as pd\r\nimport glob\r\nimport os\r\nfrom moviepy.editor import VideoFileClip\r\n\r\ndef extract_time_frames_from_evaluation_files(path, output_dir):\r\n txt_files = glob.glob(os.path.join(path, \"*.txt\"))\r\n for file_name in txt_files:\r\n f = open(file_name, \"r\")\r\n lines = f.readlines()\r\n time_frames = []\r\n for i,line in enumerate(lines):\r\n if line.startswith('['):\r\n row = line.strip().split('\\t')\r\n #print(row)\r\n turn = row[1]\r\n time = row[0][1:-1].split('-')\r\n time_frames.append([turn, time[0], time[1]])\r\n #print(time_frames)\r\n else:\r\n continue\r\n time_frames_df = pd.DataFrame(time_frames)\r\n time_frame_filename = output_dir + file_name.split('\\\\')[-1]\r\n time_frames_df.to_csv(time_frame_filename, header=None, index=None, sep=',')\r\n '''print(time_frame_filename)\r\n print(len(time_frames_df))'''\r\n return \r\n\r\n\r\ndef split_avi(txt_file, video_file, output_dir):\r\n \r\n with open(txt_file) as f:\r\n times = f.readlines()\r\n times = [x.strip() for x in times] \r\n for i, time in enumerate(times):\r\n turnname = time.split(\",\")[0]\r\n starttime = float(time.split(\",\")[1])\r\n endtime = float(time.split(\",\")[2])\r\n \r\n target_file = output_dir + turnname + '.mp4'\r\n extract_subclip(video_file, starttime, endtime, targetname=target_file) \r\n\r\n\r\n\r\ndef extract_subclip(filename, t1, t2, targetname):\r\n videoclip = VideoFileClip(filename)\r\n clip = videoclip.subclip(t1, t2)\r\n no_audio = clip.without_audio()\r\n no_audio.write_videofile(targetname, codec='libx264') \r\n\r\n\r\n","repo_name":"minooshayan97/Emotion_Recognition","sub_path":"video/IEMOCAP_video_preparation.py","file_name":"IEMOCAP_video_preparation.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70639669335","text":"from ctypes import *\r\nimport math\r\nimport random\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nfrom . 
import darkdll\r\nfrom threading import Thread\r\nnetMain = None\r\nmetaMain = None\r\naltNames = None\r\n\r\ndef YOLO():\r\n global metaMain, netMain, altNames\r\n configPath ='./New/yolov4-tiny.cfg'\r\n weightPath = './New/yolov4-tiny_best.weights'\r\n metaPath = './New/yolo.data'\r\n if not os.path.exists(configPath):\r\n raise ValueError(\"Invalid config path `\" +\r\n os.path.abspath(configPath) + \"`\")\r\n if not os.path.exists(weightPath):\r\n raise ValueError(\"Invalid weight path `\" +\r\n os.path.abspath(weightPath) + \"`\")\r\n if not os.path.exists(metaPath):\r\n raise ValueError(\"Invalid data file path `\" +\r\n os.path.abspath(metaPath) + \"`\")\r\n if netMain is None:\r\n netMain = darkdll.load_net_custom(configPath.encode(\"ascii\"), weightPath.encode(\"ascii\"), 0, 1) # batch size = 1\r\n if metaMain is None:\r\n metaMain = darkdll.load_meta(metaPath.encode(\"ascii\"))\r\n if altNames is None:\r\n try:\r\n with open(metaPath) as metaFH:\r\n metaContents = metaFH.read()\r\n import re\r\n match = re.search(\"names *= *(.*)$\", metaContents,\r\n re.IGNORECASE | re.MULTILINE)\r\n if match:\r\n result = match.group(1)\r\n else:\r\n result = None\r\n try:\r\n if os.path.exists(result):\r\n with open(result) as namesFH:\r\n namesList = namesFH.read().strip().split(\"\\n\")\r\n altNames = [x.strip() for x in namesList]\r\n except TypeError:\r\n pass\r\n except Exception:\r\n pass\r\n # cap = cv2.VideoCapture(0)\r\n # video_path = 'D:/DATA_SET/Cap_roi/videos/VID_20200205_160720.mp4'\r\n # is_camera = 'rtsp://' in video_path\r\n # cap = cv2.VideoCapture(0)\r\n cap = cv2.VideoCapture(r\"D:\\Downloads\\CDS\\LHU2.avi\")\r\n cap.set(3, 1280)\r\n cap.set(4, 720)\r\n # out = cv2.VideoWriter(\r\n # \"output.avi\", cv2.VideoWriter_fourcc(*\"MJPG\"), 10.0,\r\n # (darknet.network_width(netMain), darknet.network_height(netMain)))\r\n # print(\"Starting the YOLO loop...\")\r\n\r\n # Create an image we reuse for each detect\r\n darknet_image = darkdll.make_image(darkdll.network_width(netMain),\r\n darkdll.network_height(netMain), 3)\r\n\r\n fps = cap.get(cv2.CAP_PROP_FPS)\r\n pos_slider_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n while cap.isOpened():\r\n ret, frame = cap.read()\r\n if ret == True:\r\n prev_time = time.time()\r\n frame_ = frame.copy()\r\n frame_rgb = np.rot90(frame_, 4)\r\n frame_rgb = cv2.cvtColor(frame_rgb, cv2.COLOR_BGR2RGB)\r\n frame_resized = cv2.resize(frame_rgb,\r\n (darkdll.network_width(netMain),\r\n darkdll.network_height(netMain)),\r\n interpolation = cv2.INTER_LINEAR)\r\n\r\n darkdll.copy_image_from_bytes(darknet_image, frame_resized.tobytes())\r\n detections = darkdll.detect_image(netMain, metaMain, darknet_image, thresh = 0.25)\r\n\r\n shape_img = frame.shape\r\n data, abc = darkdll.cvDrawBoxesssss(detections, frame_rgb, shape_img, darkdll.network_height(netMain))\r\n image = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)\r\n cv2.namedWindow(\"Demo\", cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow('Demo', image)\r\n cv2.namedWindow(\"Demo1\", cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow('Demo1', frame_)\r\n # print(1/(time.time()-prev_time))\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n cap.release()\r\n # out.release()\r\n cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n YOLO()\r\n","repo_name":"BuiKhoi/DigitalRace2019","sub_path":"online_codes/team504/scripts/ob_detect.py","file_name":"ob_detect.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} 
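A minimal, self-contained sketch (added for illustration; it is not part of the record above, and the repo's own darkdll.cvDrawBoxesssss helper additionally rescales coordinates from the network input size back to the frame, which is not reproduced here) of how center-format detections like those returned by a darknet-style detect_image, tuples of (label, confidence, (cx, cy, w, h)), can be drawn with plain OpenCV:

import cv2

def draw_detections(frame, detections):
    # detections: iterable of (label, confidence, (cx, cy, w, h)) with box centers
    # and sizes assumed to already be in pixel coordinates of `frame`
    for label, confidence, (cx, cy, w, h) in detections:
        # convert the center-format box to top-left / bottom-right corners
        x1, y1 = int(cx - w / 2), int(cy - h / 2)
        x2, y2 = int(cx + w / 2), int(cy + h / 2)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, '%s [%.2f]' % (label, confidence), (x1, max(y1 - 5, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return frame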
+{"seq_id":"1495975179","text":"from typing import List\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema_field, extend_schema_serializer\nfrom drf_spectacular.types import OpenApiTypes\nfrom rest_framework.reverse import reverse\nfrom rest_framework import serializers, relations\n\nfrom pulp_ansible.app import models, serializers as ansible_serializers\nfrom pulpcore.plugin.models import RepositoryVersion\nfrom pulpcore.plugin import serializers as core_serializers\n\n\ndef _get_distro_context(context):\n distro_context = {}\n if \"path\" in context:\n distro_context[\"path\"] = context[\"path\"]\n if \"distro_base_path\" in context:\n distro_context[\"distro_base_path\"] = context[\"distro_base_path\"]\n return distro_context\n\n\nclass CollectionSerializer(serializers.ModelSerializer):\n \"\"\"A serializer for a Collection.\"\"\"\n\n deprecated = serializers.BooleanField()\n created_at = serializers.SerializerMethodField()\n updated_at = serializers.SerializerMethodField()\n href = serializers.SerializerMethodField()\n\n versions_url = serializers.SerializerMethodField()\n highest_version = serializers.SerializerMethodField()\n download_count = serializers.SerializerMethodField()\n\n class Meta:\n fields = (\n \"href\",\n \"namespace\",\n \"name\",\n \"deprecated\",\n \"versions_url\",\n \"highest_version\",\n \"created_at\",\n \"updated_at\",\n \"download_count\",\n )\n model = models.Collection\n\n def get_href(self, obj) -> str:\n \"\"\"Get href.\"\"\"\n ctx = _get_distro_context(self.context)\n return reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collections-detail\",\n kwargs={**ctx, \"namespace\": obj.namespace, \"name\": obj.name},\n )\n\n def get_versions_url(self, obj) -> str:\n \"\"\"Get a link to a collection versions list.\"\"\"\n ctx = _get_distro_context(self.context)\n return reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collection-versions-list\",\n kwargs={**ctx, \"namespace\": obj.namespace, \"name\": obj.name},\n )\n\n @extend_schema_field(OpenApiTypes.DATETIME)\n def get_created_at(self, obj):\n \"\"\"Get the timestamp of the lowest version CollectionVersion's created timestamp.\"\"\"\n return obj.pulp_created\n\n @extend_schema_field(OpenApiTypes.DATETIME)\n def get_updated_at(self, obj):\n \"\"\"Get the timestamp of the latest version CollectionVersion's created timestamp.\"\"\"\n # NOTE: this should reflect the last time the collection was updated, not the last\n # time that the latest version was updated. 
See https://pulp.plan.io/issues/7775\n return obj.latest_version_modified\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_highest_version(self, obj):\n \"\"\"Get a highest version and its link.\"\"\"\n ctx = _get_distro_context(self.context)\n href = reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collection-versions-detail\",\n kwargs={\n **ctx,\n \"namespace\": obj.namespace,\n \"name\": obj.name,\n \"version\": obj.highest_version,\n },\n )\n return {\"href\": href, \"version\": obj.highest_version}\n\n def get_download_count(self, obj):\n \"\"\"Get the download count of the collection\"\"\"\n\n return obj.download_count or 0\n\n\nclass CollectionVersionListSerializer(serializers.ModelSerializer):\n \"\"\"A serializer for a CollectionVersion list item.\"\"\"\n\n href = serializers.SerializerMethodField()\n created_at = serializers.DateTimeField(source=\"pulp_created\")\n updated_at = serializers.DateTimeField(source=\"pulp_last_updated\")\n marks = serializers.SerializerMethodField()\n\n class Meta:\n fields = (\n \"version\",\n \"href\",\n \"created_at\",\n \"updated_at\",\n \"requires_ansible\",\n \"marks\",\n )\n model = models.CollectionVersion\n\n def get_marks(self, obj) -> List[str]:\n \"\"\"Get a list of mark values filtering only those in the current repo.\"\"\"\n return [x.value for x in obj.marks.all()]\n\n def get_href(self, obj) -> str:\n \"\"\"\n Get href.\n \"\"\"\n ctx = _get_distro_context(self.context)\n\n return reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collection-versions-detail\",\n kwargs={\n **ctx,\n \"namespace\": obj.namespace,\n \"name\": obj.name,\n \"version\": obj.version,\n },\n )\n\n\nclass ArtifactRefSerializer(serializers.Serializer):\n \"\"\"A serializer for an Artifact reference.\"\"\"\n\n filename = serializers.CharField(source=\"relative_path\")\n sha256 = serializers.CharField(source=\"artifact.sha256\")\n size = serializers.IntegerField(source=\"artifact.size\")\n\n\nclass CollectionRefSerializer(serializers.Serializer):\n \"\"\"\n A serializer for a Collection reference.\n \"\"\"\n\n id = serializers.CharField(source=\"pk\")\n name = serializers.CharField()\n href = serializers.SerializerMethodField()\n\n def get_href(self, obj) -> str:\n \"\"\"Returns link to a collection.\"\"\"\n ctx = _get_distro_context(self.context)\n return reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collections-detail\",\n kwargs={**ctx, \"namespace\": obj.namespace, \"name\": obj.name},\n )\n\n\nclass CollectionMetadataSerializer(serializers.ModelSerializer):\n \"\"\"\n A serializer for a CollectionVersion metadata.\n \"\"\"\n\n tags = relations.ManyRelatedField(relations.StringRelatedField())\n\n class Meta:\n model = models.CollectionVersion\n fields = (\n \"authors\",\n \"contents\",\n \"dependencies\",\n \"description\",\n \"documentation\",\n \"homepage\",\n \"issues\",\n \"license\",\n \"repository\",\n \"tags\",\n )\n\n\nclass CollectionNamespaceSerializer(serializers.Serializer):\n \"\"\"\n A serializer for a Collection Version namespace field.\n \"\"\"\n\n name = serializers.CharField(source=\"namespace\")\n metadata_sha256 = serializers.CharField(source=\"namespace_sha256\", allow_null=True)\n\n\nclass CollectionVersionSignatureSerializer(serializers.ModelSerializer):\n \"\"\"\n A serializer for the signatures on a Collection Version.\n \"\"\"\n\n signature = serializers.SerializerMethodField()\n signing_service = serializers.SlugRelatedField(\n slug_field=\"name\",\n allow_null=True,\n read_only=True,\n )\n\n def get_signature(self, obj):\n \"\"\"\n 
Get the signature data.\n \"\"\"\n return obj.data\n\n class Meta:\n model = models.CollectionVersionSignature\n fields = (\"signature\", \"pubkey_fingerprint\", \"signing_service\", \"pulp_created\")\n\n\nclass UnpaginatedCollectionVersionSerializer(CollectionVersionListSerializer):\n \"\"\"\n A serializer for unpaginated CollectionVersion.\n \"\"\"\n\n collection = CollectionRefSerializer(read_only=True)\n artifact = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n git_url = serializers.SerializerMethodField()\n git_commit_sha = serializers.SerializerMethodField()\n\n metadata = CollectionMetadataSerializer(source=\"*\", read_only=True)\n namespace = CollectionNamespaceSerializer(source=\"*\", read_only=True)\n signatures = CollectionVersionSignatureSerializer(many=True)\n\n class Meta:\n model = models.CollectionVersion\n fields = CollectionVersionListSerializer.Meta.fields + (\n \"artifact\",\n \"collection\",\n \"download_url\",\n \"name\",\n \"namespace\",\n \"signatures\",\n \"metadata\",\n \"git_url\",\n \"git_commit_sha\",\n )\n\n @extend_schema_field(ArtifactRefSerializer)\n def get_artifact(self, obj):\n \"\"\"\n Get artifact summary.\n \"\"\"\n ca = obj.artifacts[0]\n if ca.artifact:\n return ArtifactRefSerializer(ca).data\n\n def get_download_url(self, obj) -> str:\n \"\"\"\n Get artifact download URL.\n \"\"\"\n if obj.artifacts[0].artifact:\n distro_base_path = self.context.get(\"path\", self.context[\"distro_base_path\"])\n filename_path = obj.relative_path.lstrip(\"/\")\n\n # Note: We're using ANSIBLE_API_HOSTNAME here instead of calling reverse with request=\n # because using the request context to generate the full URL causes the download URL\n # to be inaccessible when pulp is running behind a reverse proxy.\n host = settings.ANSIBLE_API_HOSTNAME.strip(\"/\")\n path = reverse(\n settings.ANSIBLE_URL_NAMESPACE + \"collection-artifact-download\",\n kwargs={\"distro_base_path\": distro_base_path, \"filename\": filename_path},\n ).strip(\"/\")\n\n return f\"{host}/{path}\"\n\n def get_git_url(self, obj) -> str:\n \"\"\"\n Get the git URL.\n \"\"\"\n ca = obj.artifacts[0]\n if not ca.artifact:\n return ca.remoteartifact_set.all()[0].url[:-47]\n\n def get_git_commit_sha(self, obj) -> str:\n \"\"\"\n Get the git commit sha.\n \"\"\"\n ca = obj.artifacts[0]\n if not ca.artifact:\n return ca.remoteartifact_set.all()[0].url[-40:]\n\n\nclass CollectionVersionSerializer(UnpaginatedCollectionVersionSerializer):\n \"\"\"\n A serializer for a CollectionVersion.\n \"\"\"\n\n manifest = serializers.JSONField(\n help_text=\"A JSON field holding MANIFEST.json data.\", read_only=True\n )\n files = serializers.JSONField(help_text=\"A JSON field holding FILES.json data.\", read_only=True)\n\n class Meta:\n model = models.CollectionVersion\n fields = UnpaginatedCollectionVersionSerializer.Meta.fields + (\n \"manifest\",\n \"files\",\n )\n\n\nclass CollectionVersionDocsSerializer(serializers.ModelSerializer):\n \"\"\"A serializer to display the docs_blob of a CollectionVersion.\"\"\"\n\n docs_blob = serializers.JSONField()\n\n class Meta:\n fields = (\"docs_blob\",)\n model = models.CollectionVersion\n\n\nclass RepoMetadataSerializer(serializers.ModelSerializer):\n \"\"\"A serializer to display RepositoryVersion metadata.\"\"\"\n\n published = serializers.DateTimeField(source=\"pulp_created\")\n\n class Meta:\n fields = (\"published\",)\n model = RepositoryVersion\n\n\nclass ClientConfigurationSerializer(serializers.Serializer):\n \"\"\"Configuration 
settings for the ansible-galaxy client.\"\"\"\n\n default_distribution_path = serializers.CharField(allow_null=True)\n\n\n@extend_schema_serializer(component_name=\"CollectionSummary\")\nclass CollectionSummarySerializer(ansible_serializers.CollectionVersionSerializer):\n \"\"\"Collection Version serializer without docs blob.\"\"\"\n\n class Meta:\n model = models.CollectionVersion\n fields = (\n \"pulp_href\",\n \"namespace\",\n \"name\",\n \"version\",\n \"requires_ansible\",\n \"pulp_created\",\n \"contents\",\n \"dependencies\",\n \"description\",\n \"tags\",\n )\n\n\nclass NamespaceSummarySerializer(ansible_serializers.AnsibleNamespaceMetadataSerializer):\n \"\"\"Namespace serializer without resources.\"\"\"\n\n class Meta:\n model = models.AnsibleNamespaceMetadata\n fields = (\n \"pulp_href\",\n \"name\",\n \"company\",\n \"description\",\n \"avatar\",\n \"avatar_url\",\n )\n\n\nclass CollectionVersionSearchListSerializer(CollectionVersionListSerializer):\n \"\"\"Cross-repo search results.\"\"\"\n\n # All of these fields have to operate differently from the parent class\n repository = core_serializers.RepositorySerializer()\n collection_version = CollectionSummarySerializer()\n repository_version = serializers.SerializerMethodField()\n namespace_metadata = NamespaceSummarySerializer(allow_null=True)\n\n is_highest = serializers.BooleanField()\n is_signed = serializers.BooleanField()\n is_deprecated = serializers.BooleanField()\n\n class Meta:\n model = models.CrossRepositoryCollectionVersionIndex\n\n fields = (\n \"repository\",\n \"collection_version\",\n \"repository_version\",\n \"namespace_metadata\",\n \"is_highest\",\n \"is_deprecated\",\n \"is_signed\",\n )\n\n def get_repository_version(self, obj):\n if obj.repository_version:\n return obj.repository_version.number\n else:\n return \"latest\"\n","repo_name":"pulp/pulp_ansible","sub_path":"pulp_ansible/app/galaxy/v3/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12679,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"67"} +{"seq_id":"37807209323","text":"from bs4 import BeautifulSoup\nfrom urllib2 import urlopen\nimport numpy as np\n\ndef owlmag_album_scrape(num_pages = 1, section_url = 'category/album-reviews/page'):\n\n BASE_URL = \"http://www.theowlmag.com/\"\n\n albumartistslist = []\n linkslist = []\n\n for num in range(1,num_pages+1):\n\n html = urlopen(BASE_URL + section_url+str(num)).read()\n soup = BeautifulSoup(html, \"lxml\")\n\n links = soup.find(\"div\", {\"id\": \"main\"})\n\n albums = links.find_all('h2')\n album = [meta.contents[0] for meta in albums]\n metadata = [s.contents[0] for s in album]\n\n link = [s['href'] for s in album]\n\n for each in metadata:\n each = each.replace(u\"\\u2018\", \"\").replace(u\"\\u2033\", \"\").replace(u\"\\u201c\",\"\").replace(u\"\\u201d\", \"\")\n each = each.split(' by ')\n album, artist = each[0], each[1:]\n albumartistslist.append((artist, album))\n if set(links)<= linkslist:\n return albumartistslist,linkslist\n\n linkslist.extend(link)\n\n return albumartistslist, linkslist\n\ndef owlmag_text_scrape(url):\n try:\n link = 'http://www.theowlmag.com/'\n html = urlopen(url).read()\n soup = BeautifulSoup(html, \"lxml\")\n text = soup.findAll('p')\n final = [t.findAll(text=True) for t in text]\n title = soup.find('h1', {'class':'entry-title'})\n album, artist = title.text.replace(u\"\\u2018\", \"\").replace(u\"\\u2033\", \"\").replace(u\"\\u201c\",\"\").replace(u\"\\u201d\", \"\").split(' by ')\n s = 
\"\"\n for each in np.array(final[2:-1]).flatten():\n s += ''.join(each).encode('utf-8')\n except:\n return\n return artist, album, ''.join([i for i in s if ord(i)<128]), url\n\n#temp = owlmag_review_text('http://www.theowlmag.com/album-reviews/currents-by-tame-impala/')\n","repo_name":"rlkelly/CAPSTONE","sub_path":"crawlers/owl_scrape.py","file_name":"owl_scrape.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20884109905","text":"from load_data import load_binary_diabetes_uci, load_heart_uci, load_breast_cancer,\\\n load_adult, load_adult_race, load_adult_race_white_vs_black, laod_propublica_fairml, laod_propublica_fairml_race,\\\n laod_propublica_fairml_hotencoded, load_default, load_hepatitis, load_arrhythmia\nfrom load_data import load_experiments\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\nfrom measures import fair_tpr_from_precomputed, subgrups_sensible_feature_data\nfrom uncorrelation import UncorrelationMethod\nfrom collections import namedtuple\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\nfrom validation_method import two_step_validation_with_DEO\nfrom collections import namedtuple\nfrom toy_problem_lasso import toy_test_generator\nfrom uncorrelation_nonlinear import Fair_SVM\n\nnp.random.seed(15)\nparam_grid = {'C': np.logspace(-6, 6, 40), 'gamma': np.logspace(-6, 6, 40)}\nparam_grid = {'C': np.logspace(-4, 4, 20), 'gamma': np.logspace(-4, 4, 20)}\nparam_grid = {'gamma': np.logspace(-4, 0, 20), 'C': np.logspace(-1, 4, 20)}\n\nhyperlist = [(c, g) for c in param_grid['C'] for g in param_grid['gamma']]\nprint('Hyperlist:', hyperlist)\n\ntoytest = False\nevaluate_approx_on_train = False\n\nif toytest:\n # Dataset\n n_samples = 100 * 10\n n_samples_low = 20 * 10\n lasso_dataset = False\n number_of_random_features = 2000\n varA = 0.8\n aveApos = [-1.0, -1.0]\n aveAneg = [1.0, 1.0]\n varB = 0.5\n aveBpos = [0.5, -0.5]\n aveBneg = [0.5, 0.5]\n X, y, X_test, y_test, idx_A, idx_B, _, sensible_feature_id =\\\n toy_test_generator(n_samples, n_samples_low, varA, aveApos, aveAneg, varB, aveBpos, aveBneg,\n lasso_dataset, number_of_random_features)\n dataset_train = namedtuple('_', 'data, target')(X, y)\n dataset_test = namedtuple('_', 'data, target')(X_test, y_test)\nelse:\n # 12, 8, 2, 13, 14\n experiment_number = 13\n iteration = 0\n verbose = 3\n smaller_option = True\n dataset_train, dataset_test, sensible_feature_id = load_experiments(experiment_number,\n smaller_option,\n verbose)\n\nnot_fair_stats = {'error': [], 'deo': [], 'deo_approx': []}\nfair_stats = {'error': [], 'deo': [], 'deo_approx': [], 'delta0': [], 'delta1': []}\n\n# Not fair err\\deo values:\nfor C, gamma in hyperlist:\n estimator = svm.SVC(C=C, kernel='rbf', gamma=gamma)\n estimator.fit(dataset_train.data, dataset_train.target)\n prediction = estimator.predict(dataset_test.data)\n error = 1.0 - accuracy_score(dataset_test.target, prediction)\n subgropus_idxs = subgrups_sensible_feature_data(dataset_test.data, sensible_feature_id)\n deo = fair_tpr_from_precomputed(dataset_test.target, prediction, subgropus_idxs)\n val0 = np.min(list(deo.keys()))\n val1 = np.max(list(deo.keys()))\n not_fair_stats['error'].append(error)\n not_fair_stats['deo'].append(np.abs(deo[val0] - deo[val1]))\n\n if evaluate_approx_on_train:\n adeo0 = np.mean([estimator.decision_function([ex]) for idx, ex in 
enumerate(dataset_train.data)\n if dataset_train.target[idx] == 1 and dataset_train.data[idx][sensible_feature_id] == val0])\n adeo1 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_train.data)\n if dataset_train.target[idx] == 1 and dataset_train.data[idx][sensible_feature_id] == val1])\n else:\n adeo0 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val0])\n adeo1 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val1])\n not_fair_stats['deo_approx'].append(np.abs(adeo0 - adeo1))\n # not_fair_stats['EO_prod'].append(deo[val0] * deo[val1])\n print('SVM - C, gamma:', C, gamma, '- error:', error, '- EO:', deo, '- DEO:', np.abs(deo[val0] - deo[val1]), '- AppDEO:', np.abs(adeo0 - adeo1))\n\n# Fair err\\deo values:\nfor C, gamma in hyperlist:\n estimator = Fair_SVM(C=C, kernel='rbf', gamma=gamma, sensible_feature=sensible_feature_id)\n estimator.fit(dataset_train.data, dataset_train.target)\n prediction = estimator.predict(dataset_test.data)\n error = 1.0 - accuracy_score(dataset_test.target, prediction)\n subgropus_idxs = subgrups_sensible_feature_data(dataset_test.data, sensible_feature_id)\n deo = fair_tpr_from_precomputed(dataset_test.target, prediction, subgropus_idxs)\n val0 = np.min(list(deo.keys()))\n val1 = np.max(list(deo.keys()))\n fair_stats['error'].append(error)\n fair_stats['deo'].append(np.abs(deo[val0] - deo[val1]))\n\n if evaluate_approx_on_train:\n adeo0 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_train.data)\n if dataset_train.target[idx] == 1 and dataset_train.data[idx][sensible_feature_id] == val0])\n adeo1 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_train.data)\n if dataset_train.target[idx] == 1 and dataset_train.data[idx][sensible_feature_id] == val1])\n else:\n adeo0 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val0])\n adeo1 = np.mean([estimator.decision_function([ex]) for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val1])\n\n adeo0lim = np.mean([np.max([-1, np.min([1, estimator.decision_function([ex])])])\n for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val0])\n adeo1lim = np.mean([np.max([-1, np.min([1, estimator.decision_function([ex])])])\n for idx, ex in enumerate(dataset_test.data)\n if dataset_test.target[idx] == 1 and dataset_test.data[idx][sensible_feature_id] == val1])\n\n delta0 = np.abs(deo[val0] - 0.5 - adeo0lim)\n delta1 = np.abs(deo[val1] - 0.5 - adeo1lim)\n fair_stats['deo_approx'].append(np.abs(adeo0 - adeo1))\n fair_stats['delta0'].append(delta0)\n fair_stats['delta1'].append(delta1)\n # fair_stats['EO_prod'].append(deo[val0] * deo[val1])\n print('Fair-SVM - C, gamma:', C, gamma, '- error:', error, '- EO:', deo, '- DEO:', np.abs(deo[val0] - deo[val1]), '- AppDEO:', np.abs(adeo0 - adeo1),\n '\\nDelta0:', delta0, 'Delta1:', delta1)\n\n\nprint('Not-fair STATS:', not_fair_stats)\nprint('Not-fair smallest error:', np.min(not_fair_stats['error']))\nprint('Not-fair smallest deo:', np.min(not_fair_stats['deo']))\nprint('Fair 
STATS:', fair_stats)\nprint('Fair smallest error:', np.min(fair_stats['error']))\nprint('Fair smallest deo:', np.min(fair_stats['deo']))\n\n# besterr = np.array(fair_stats['error']).argsort()[0]\nbesterr = np.min(fair_stats['error'])\nnearminidx = np.array([idx for idx, v in enumerate(fair_stats['error']) if v <= besterr * 1.05])\n# bestallidx = nearminidx[np.argmin(fair_stats['deo'][nearminidx])]\nbestallidx = nearminidx[np.array(fair_stats['deo'])[nearminidx].argsort()[:5]]\nprint('Best with err:', np.array(fair_stats['error'])[bestallidx])\nprint('Best with deo:', np.array(fair_stats['deo'])[bestallidx])\nbestdelta0 = np.array(fair_stats['delta0'])[bestallidx]\nbestdelta1 = np.array(fair_stats['delta1'])[bestallidx]\nprint('Delta0 (over the best 5 errors):', np.mean(bestdelta0), '+-', np.std(bestdelta0))\nprint('Delta1 (over the best 5 errors):', np.mean(bestdelta1), '+-', np.std(bestdelta1))\n\nSMALL_SIZE = 25\nMEDIUM_SIZE = 25\nBIGGER_SIZE = 28\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize\nplt.rc('figure', titlesize=MEDIUM_SIZE) # fontsize of the figure title\n\n\nfig = plt.figure(1, figsize=(9, 8), dpi=80)\nplt.plot(fair_stats['error'], fair_stats['deo'], 'o', markersize=15, label='Our method')\nplt.plot(not_fair_stats['error'], not_fair_stats['deo'], '*', markersize=15, label='SVM')\nplt.xlabel('Error')\nplt.ylabel('DEO')\nif toytest:\n if not lasso_dataset:\n strtitle = 'Toytest - Non-linear'\n else:\n strtitle = 'Lasso_Toytest - Non-linear'\n plt.title(strtitle)\n plt.savefig(strtitle)\nelse:\n strtitle = 'Experiment_%d - Non-linear' % experiment_number\n plt.title(strtitle)\n plt.savefig(strtitle)\n\n\nhyperlist = np.array(hyperlist)\nhypershape = (len(param_grid['C']), len(param_grid['gamma']))\nfor k in fair_stats:\n fair_stats[k] = np.array(fair_stats[k])\n fair_stats[k].shape = hypershape\nfor k in not_fair_stats:\n not_fair_stats[k] = np.array(not_fair_stats[k])\n not_fair_stats[k].shape = hypershape\n\n\ncmap = 'binary'\n\nfig = plt.figure(2, figsize=(9, 8), dpi=80)\nplt.imshow(fair_stats['deo'], cmap=cmap, label='DEO')\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\n#plt.xticks(param_grid['C'])\n#plt.yticks(param_grid['gamma'])\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n if not lasso_dataset:\n strtitle = 'Toytest - Our method - DEO'\n else:\n strtitle = 'Lasso_Toytest - Our method - DEO'\n plt.title(strtitle)\n plt.savefig(strtitle)\nelse:\n # strtitle = 'Experiment_%d - Our method - DEO' % experiment_number\n strtitle = 'Our method - DEO'\n plt.title(strtitle)\n plt.savefig(strtitle)\n\n\nfig = plt.figure(3, figsize=(9, 8), dpi=80)\nplt.imshow(fair_stats['deo_approx'], cmap=cmap, label='Approx DEO')\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n if not lasso_dataset:\n strtitle = 'Toytest - Our method - DEO Approx'\n else:\n strtitle = 'Lasso_Toytest - Our method - DEO Approx'\n plt.title(strtitle)\n plt.savefig(strtitle)\nelse:\n # strtitle = 'Experiment_%d - Our method - DEO Approx' % experiment_number\n strtitle = 
'Our method - DEO Approx'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\n\nfig = plt.figure(4, figsize=(9, 8), dpi=80)\nplt.imshow(fair_stats['error'], cmap=cmap, label='Error')\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n    if not lasso_dataset:\n        strtitle = 'Toytest Error - Our method'\n    else:\n        strtitle = 'Lasso_Toytest Error - Our method'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\nelse:\n    # strtitle = 'Experiment_%d Error - Our method' % experiment_number\n    strtitle = 'Our method - Error'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\n\nfig = plt.figure(5, figsize=(9, 8), dpi=80)\nplt.imshow(not_fair_stats['deo'], cmap=cmap, label='DEO')\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n    if not lasso_dataset:\n        strtitle = 'Toytest - SVM - DEO'\n    else:\n        strtitle = 'Lasso_Toytest - SVM - DEO'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\nelse:\n    # strtitle = 'Experiment_%d - SVM - DEO' % experiment_number\n    strtitle = 'SVM - DEO'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\n\nfig = plt.figure(6, figsize=(9, 8), dpi=80)\n#plt.imshow(not_fair_stats['deo_approx'], interpolation='bilinear', cmap=cmap, label='Approx DEO')\nplt.imshow(not_fair_stats['deo_approx'], cmap=cmap, label='Approx DEO')\n\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n    if not lasso_dataset:\n        strtitle = 'Toytest - SVM - DEO Approx'\n    else:\n        strtitle = 'Lasso_Toytest - SVM - DEO Approx'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\nelse:\n    # strtitle = 'Experiment_%d - SVM - DEO Approx' % experiment_number\n    strtitle = 'SVM - DEO Approx'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\n\nfig = plt.figure(7, figsize=(9, 8), dpi=80)\n#plt.imshow(not_fair_stats['error'], interpolation='bilinear', cmap=cmap, label='Error')\nplt.imshow(not_fair_stats['error'], cmap=cmap, label='Error')\nplt.xlabel('log(C)')\nplt.ylabel('log($\\gamma$)')\nplt.colorbar()\n#plt.legend()\nplt.axes().get_xaxis().set_ticks([])\nplt.axes().get_yaxis().set_ticks([])\nif toytest:\n    if not lasso_dataset:\n        strtitle = 'Toytest Error - SVM'\n    else:\n        strtitle = 'Lasso_Toytest Error - SVM'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\nelse:\n    # strtitle = 'Experiment_%d Error - SVM' % experiment_number\n    strtitle = 'SVM - Error'\n    plt.title(strtitle)\n    plt.savefig(strtitle)\n\n\nplt.show()\n\n","repo_name":"jmikko/fairnessML","sub_path":"hyperparameter_curve_kernel.py","file_name":"hyperparameter_curve_kernel.py","file_ext":"py","file_size_in_byte":13422,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"41846758042","text":"# coding=utf-8\n# Install beautifulsoup4 first: pip install beautifulsoup4\nfrom urllib.request import *\nfrom bs4 import BeautifulSoup\nimport re\n\nurl = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1492334463206_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E6%90%9E%E7%AC%91+gif'\nhtml = urlopen(url)\nobj = BeautifulSoup(html,'html.parser')\nindex = 0\nurls = re.findall(r'\"objURL\":\"(.*?)\"',str(obj))\nfor url in urls:\n    if index < 100:\n        try:\n            urlretrieve(url,'pic'+str(index)+'.png')\n            index +=1\n        except Exception:\n            print('Download failed: image #%d' % index)\n    else:\n        print('Download complete')\n        break\n","repo_name":"j717273419/python3_test","sub_path":"BaiduImage/BaiduImage.py","file_name":"BaiduImage.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73721887254","text":"txt = open(\"test.txt\", \"r\")\nmapkaZPliku = txt.readlines()\niloscKolumn = int(mapkaZPliku[0].split()[0])\niloscWierszy = int(mapkaZPliku[0].split()[1])\ntablicaPol = []\ntablicaTablicObiektow = []\nmapaPol = []\nmapaNagrod = []\ngamma = 0.5\ntablicaPotencjalow = []\ntablicaMozliwychAkcji = []\ntablicaRuchu = []\ntempTablicaAkcji = []\nstaraTablicaPotencjalow = []\n#============================================================================================#============================================================================================\n#============================================================================================#============================================================================================\nclass Pole(object):\n    wiersz = 0\n    kolumna = 0\n    typ = 0\n    potencjal = 0\n    nagroda = 0\n\n    def __init__(self, wierszArg, kolumnaArg, typArg, potencjalArg, nagrodaArg):\n        self.wiersz = wierszArg\n        self.kolumna = kolumnaArg\n        self.typ = typArg\n        self.potencjal = potencjalArg\n        self.nagroda = nagrodaArg\n\nclass Akcja(object):\n    wGore = 0\n    wPrawo = 0\n    wDol = 0\n    wLewo = 0\n    zostaje = 0\n    wspolczynnik = 0\n\n    def __init__(self, wGoreArg, wPrawoArg, wDolArg, wLewoArg, zostajeArg, wspolczynnikArg):\n        self.wGore = wGoreArg\n        self.wPrawo = wPrawoArg\n        self.wDol = wDolArg\n        self.wLewo = wLewoArg\n        self.zostaje = zostajeArg\n        self.wspolczynnik = wspolczynnikArg\n\n#========================================================================================================================================================================================\n#========================================================================================================================================================================================\n\n\ndef wGoreMetoda(pole, akcja):\n    if int(pole.wiersz) == 0:\n        akcja.zostaje = akcja.zostaje + 0.8\n    else:\n        if int(tablicaTablicObiektow[pole.wiersz - 1][pole.kolumna].typ) != 0:\n            akcja.wGore = akcja.wGore + 0.8\n        else:\n            akcja.zostaje = akcja.zostaje + 0.8\n    if int(pole.kolumna) == 0 or int(tablicaTablicObiektow[pole.wiersz][pole.kolumna - 1].typ) == 0:\n        akcja.zostaje = akcja.zostaje + 0.1\n    else:\n        akcja.wLewo = akcja.wLewo + 0.1\n    if int(pole.kolumna) == iloscKolumn - 1 or int(tablicaTablicObiektow[pole.wiersz][pole.kolumna + 1].typ) == 0:\n        akcja.zostaje = akcja.zostaje + 0.1\n    else:\n        akcja.wPrawo = akcja.wPrawo + 0.1\n    return akcja\npass\n\n\ndef wPrawoMetoda(pole, akcja):\n    if int(pole.kolumna) == iloscKolumn - 1:\n        akcja.zostaje = akcja.zostaje + 0.8\n    else:\n        if int(tablicaTablicObiektow[pole.wiersz][pole.kolumna + 1].typ) != 0:\n            akcja.wPrawo = akcja.wPrawo + 0.8\n        else:\n            akcja.zostaje = akcja.zostaje + 0.8\n    if int(pole.wiersz) == 0 or int(tablicaTablicObiektow[pole.wiersz - 1][pole.kolumna].typ) == 0:\n        akcja.zostaje = akcja.zostaje + 0.1\n    else:\n        akcja.wGore = akcja.wGore + 0.1\n    if int(pole.wiersz) == iloscWierszy - 1 or int(tablicaTablicObiektow[pole.wiersz + 1][pole.kolumna].typ) == 0:\n        akcja.zostaje = akcja.zostaje + 0.1\n    else:\n        akcja.wDol = akcja.wDol + 0.1\n    return akcja\npass\n\n\ndef wDolMetoda(pole, akcja):\n    if int(pole.wiersz) == iloscWierszy - 1:\n        
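# bottom edge: the 0.8 probability mass of the "down" move stays on the current cell\n        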
akcja.zostaje = akcja.zostaje + 0.8\n else:\n if int(tablicaTablicObiektow[pole.wiersz + 1][pole.kolumna].typ) != 0:\n akcja.wDol = akcja.wDol + 0.8\n else:\n akcja.zostaje = akcja.zostaje + 0.8\n if int(pole.kolumna) == 0 or int(tablicaTablicObiektow[pole.wiersz][pole.kolumna - 1].typ) == 0:\n akcja.zostaje = akcja.zostaje + 0.1\n else:\n akcja.wLewo = akcja.wLewo + 0.1\n if int(pole.kolumna) == iloscKolumn - 1 or int(tablicaTablicObiektow[pole.wiersz][pole.kolumna + 1].typ) == 0:\n akcja.zostaje = akcja.zostaje + 0.1\n else:\n akcja.wPrawo = akcja.wPrawo + 0.1\n return akcja\npass\n\n\ndef wLewoMetoda(pole, akcja):\n if int(pole.kolumna) == 0:\n akcja.zostaje = akcja.zostaje + 0.8\n else:\n if int(tablicaTablicObiektow[pole.wiersz][pole.kolumna - 1].typ) != 0:\n akcja.wLewo = akcja.wLewo + 0.8\n else:\n akcja.zostaje = akcja.zostaje + 0.8\n if int(pole.wiersz) == 0 or int(tablicaTablicObiektow[pole.wiersz - 1][pole.kolumna].typ) == 0:\n akcja.zostaje = akcja.zostaje + 0.1\n else:\n akcja.wGore = akcja.wGore + 0.1\n if int(pole.wiersz) == iloscWierszy - 1 or int(tablicaTablicObiektow[pole.wiersz + 1][pole.kolumna].typ) == 0:\n akcja.zostaje = akcja.zostaje + 0.1\n else:\n akcja.wDol = akcja.wDol + 0.1\n return akcja\npass\n\n#============================================================================================#============================================================================================\n#============================================================================================#============================================================================================\n\nfor x in range(len(mapkaZPliku)):\n if x > int(0) and x <= int(iloscWierszy) :\n mapaPol.append(mapkaZPliku[x].split())\n if x > int(iloscWierszy) + 1:\n mapaNagrod.append(mapkaZPliku[x].split())\nfor numerWiersza in range(iloscWierszy):\n tablicaMozliwychAkcji.append([])\n tablicaPotencjalow.append([])\n staraTablicaPotencjalow.append([])\n for numerKolumny in range(iloscKolumn):\n tablicaPotencjalow[numerWiersza].append(float(0))\n staraTablicaPotencjalow[numerWiersza].append(float(0))\n tablicaMozliwychAkcji[numerWiersza].append([])\n tablicaPol.append(Pole(numerWiersza, numerKolumny, mapaPol[numerWiersza][numerKolumny], mapaNagrod[numerWiersza][numerKolumny], mapaNagrod[numerWiersza][numerKolumny]))\n tablicaPotencjalow[numerWiersza][numerKolumny] = mapaNagrod[numerWiersza][numerKolumny]\n staraTablicaPotencjalow[numerWiersza][numerKolumny] = mapaNagrod[numerWiersza][numerKolumny]\n tablicaTablicObiektow.append(tablicaPol)\n tablicaPol = []\nfor numerWiersza in range(iloscWierszy):\n tablicaRuchu.append([])\n for numerKolumny in range(iloscKolumn):\n akcjaWGore = Akcja(0, 0, 0, 0, 0,0)\n akcjaWPrawo = Akcja(0, 0, 0, 0, 0, 0)\n akcjaWDol = Akcja(0, 0, 0, 0, 0, 0)\n akcjaWLewo = Akcja(0, 0, 0, 0, 0, 0)\n if int(tablicaTablicObiektow[numerWiersza][numerKolumny].typ) == 1:\n tablicaRuchu[numerWiersza].append(int(1))\n tablicaMozliwychAkcji[numerWiersza][numerKolumny].append(wGoreMetoda(tablicaTablicObiektow[numerWiersza][numerKolumny], akcjaWGore))\n tablicaMozliwychAkcji[numerWiersza][numerKolumny].append(wPrawoMetoda(tablicaTablicObiektow[numerWiersza][numerKolumny], akcjaWPrawo))\n tablicaMozliwychAkcji[numerWiersza][numerKolumny].append(wDolMetoda(tablicaTablicObiektow[numerWiersza][numerKolumny], akcjaWDol))\n tablicaMozliwychAkcji[numerWiersza][numerKolumny].append(wLewoMetoda(tablicaTablicObiektow[numerWiersza][numerKolumny], akcjaWLewo))\n else:\n 
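# non-traversable cells (typ != 1) get no actions, only a 0 placeholder\n            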
tablicaRuchu[numerWiersza].append(int(0))\nfor x in range(1000):\n for numerWiersza in range(iloscWierszy):\n for numerKolumny in range(iloscKolumn):\n if float(tablicaTablicObiektow[numerWiersza][numerKolumny].typ) == float(1):\n najlepszaAkcja = float(0)\n for numerAkcji in range(len(tablicaMozliwychAkcji[numerWiersza][numerKolumny])):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik = float(0)\n if float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wGore) != float(0):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik += float(\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wGore) * float(\n tablicaPotencjalow[numerWiersza - 1][numerKolumny])\n if float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wPrawo) != float(0):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik += float(\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wPrawo) * float(\n tablicaPotencjalow[numerWiersza][numerKolumny + 1])\n if float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wDol) != float(0):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik += float(\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wDol) * float(\n tablicaPotencjalow[numerWiersza + 1][numerKolumny])\n if float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wLewo) != float(0):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik += float(\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wLewo) * float(\n tablicaPotencjalow[numerWiersza][numerKolumny - 1])\n if float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].zostaje) != float(0):\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik += float(\n tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].zostaje) * float(\n tablicaPotencjalow[numerWiersza][numerKolumny])\n tempTablicaAkcji.append(float(tablicaMozliwychAkcji[numerWiersza][numerKolumny][numerAkcji].wspolczynnik))\n najlepszaAkcja = float(max(tempTablicaAkcji))\n for y in range(len(tempTablicaAkcji)):\n if tempTablicaAkcji[y] == najlepszaAkcja:\n numerAkcji = y + 1\n break\n tablicaRuchu[numerWiersza][numerKolumny] = numerAkcji;\n staraTablicaPotencjalow[numerWiersza][numerKolumny] = tablicaPotencjalow[numerWiersza][numerKolumny]\n tablicaPotencjalow[numerWiersza][numerKolumny] = float(\n mapaNagrod[numerWiersza][numerKolumny]) + float(gamma) * float(najlepszaAkcja)\n tempTablicaAkcji = []\n zmiany = int(0)\n for numerWiersza in range(iloscWierszy):\n for numerKolumny in range(iloscKolumn):\n if float(tablicaPotencjalow[numerWiersza][numerKolumny]) - float(staraTablicaPotencjalow[numerWiersza][numerKolumny]) > float(0.0001):\n zmiany += int(1)\n if zmiany == 0:\n print(x)\n break\n\nprint('====== TABLICA RUCHU==========')\nprint(tablicaRuchu[0])\nprint(tablicaRuchu[1])\nprint(tablicaRuchu[2])\nprint('====== TABLICA POTENCJAŁÓW==========')\nprint(tablicaPotencjalow[0])\nprint(tablicaPotencjalow[1])\nprint(tablicaPotencjalow[2])\n","repo_name":"zbigniewkuminski/Robotyka","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11102,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17989505808","text":"'''\n\nHackerRank : https://www.hackerrank.com/challenges/collections-counter/problem\n\nSample Input\n\n10\n2 3 4 5 6 8 7 6 5 18\n6\n6 55\n6 45\n6 55\n4 
40\n18 60\n10 50\nSample Output\n\n200\nExplanation\n\nCustomer 1: Purchased size 6 shoe for $55.\nCustomer 2: Purchased size 6 shoe for $45.\nCustomer 3: Size 6 no longer available, so no purchase.\nCustomer 4: Purchased size 4 shoe for $40.\nCustomer 5: Purchased size 18 shoe for $60.\nCustomer 6: Size 10 not available, so no purchase.\n\nTotal money = 55+45+40+60 = 200\n\n'''\n\nn1=int(input())\nl1=list(map(int,input().split()))\nn2=int(input())\nprice=0\nl2=[]\nfor x in range(n2):\n    a,b=map(int,input().split())\n    t=(a,b)\n    l2.append(t)\nfor x in range(len(l2)):\n    if l2[x][0] in l1:\n        l1.remove(l2[x][0])\n        price+=l2[x][1]\nprint(price)\n    \n    \n","repo_name":"venkatesh799/DataStructures-Algorithms","sub_path":"Hackerrank/collections.Counter().py","file_name":"collections.Counter().py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31160386102","text":"\"\"\"\nThis module contains shared fixtures.\n\"\"\"\n\nimport pytest\nimport selenium.webdriver\nimport json\n\n\n@pytest.fixture(scope='session')\ndef config():  # read the configuration file once per session\n\n    # Read the file\n    with open('config.json') as config_file:\n        config = json.load(config_file)\n\n    assert config['browser'] in ['Chrome','Firefox','Headless Chrome']\n    assert isinstance(config['implicit_wait'],int)\n    assert config['implicit_wait'] > 0\n\n    # Return config so it can be used\n    return config\n\ndef getBrowser(config):\n    browser = config['browser']\n\n    if browser == \"Chrome\":\n        b = selenium.webdriver.Chrome()\n\n    elif browser == \"Firefox\": \n        b = selenium.webdriver.Firefox()\n\n    elif browser == \"Headless Chrome\":\n        opts = selenium.webdriver.ChromeOptions()\n        opts.add_argument('headless')\n        b = selenium.webdriver.Chrome(options=opts)\n    else:\n        raise Exception(f'Browser \"{config[\"browser\"]}\" is not supported')\n\n    return b\n\n\n\n@pytest.fixture\ndef browser(config):\n\n    b = getBrowser(config)\n\n    b.implicitly_wait(config['implicit_wait'])\n\n    yield b\n\n    b.quit()","repo_name":"Abdelbary/duckduck-web-SearchEnging-Test","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37460137451","text":"# beats 93% in time and 92% in space\n\nfrom typing import List\n\nclass Solution:\n    def vowelStrings(self, words: List[str], left: int, right: int) -> int:\n\n        vowels = set(['a', 'e', 'i', 'o', 'u'])\n        count = 0\n        for i in range(left, right + 1):\n            if len(words[i]) == 1 and words[i][0] in vowels:\n                count += 1\n                continue\n            if words[i][0] in vowels and words[i][len(words[i]) - 1] in vowels:\n                count += 1\n\n        return count","repo_name":"DavidSober/Data-Structures-and-Algorithms-","sub_path":"LeetcodeProblems/2609.py","file_name":"2609.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25301455807","text":"# CodeUp 4037: prime factorization problem\n\nn = int(input())\ns = 2 \narray = [] \n\nwhile n>1 :\n    if n % s != 0 :\n        s += 1 \n        continue\n\n    if n % s == 0 :\n        n //= s  # integer division keeps n an int\n        array.append(s)\n    \n    s = 2 \n    \nfor i in array : \n    print(i,end= \" \")","repo_name":"rhdtn311/Coding-practice","sub_path":"studyCode25.py","file_name":"studyCode25.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16298735969","text":"import re\nfrom django.db import 
connections\nimport Results.models\nfrom ADSMSettings.utils import scenario_filename\n\n\ndef camel_case_spaces(name_with_spaces):\n r = re.sub(r' +(\\w)', lambda match: match.group(1).upper(), name_with_spaces).strip() # upper case first letter of any word\n if r:\n r = r[0].upper() + r[1:] # upper case first letter\n r = r.replace(' ', '')\n return r\n\n\ndef number(string):\n try:\n return int(string)\n except:\n try:\n return float(string)\n except:\n return -1\n\n\ndef build_composite_field_map( table):\n road_map = {}\n for prefix, field in table:\n if prefix not in ('iteration', 'day', 'production_type', 'zone'): # skip the selector fields\n road_map[prefix] = prefix\n\n return road_map\n\n\nclass DailyParser(object):\n def __init__(self, header_line, production_types, zones):\n self.production_types = production_types\n self.zones = zones\n self.headers = header_line.strip().split(',') # there was a trailing /r/n to remove\n self.possible_zones = {x[1] for x in zones}.union({'Background'})\n self.possible_pts = {x[1] for x in production_types}.union({''})\n self.failures = set()\n\n def populate_tables_with_matching_fields(self, model_class_name, instance_dict, sparse_info):\n \"\"\"Populates all combinations of a particular table in one go. This method must be called once for each\n model class that you want populated.\n model_class_name: named of table defined in Results.models\n instance_dict: a dictionary containing one instance of every combination of parameters. Keys are the \"suffix\" e.g. _Bull_HighRisk\n sparse_info: Dictionary containing all the key, value pairs that the simulation put out\n field_map: Keys are all column names to match to (prefix only), values are exact field name in that model. The distinction allows\n the program to map multiple columns onto the same field. There are some special cases where column name is not exactly field + suffix.\n \"\"\"\n field_map = build_composite_field_map(getattr(Results.models, model_class_name)() ) # creates a table instance\n keys_to_delete = []\n for suffix_key, instance in instance_dict.items(): # For each combination: DailyByZoneAndProductionType with (Bull_HighRisk), (Swine_MediumRisk), etc.\n instance_needed = False\n for column_name, model_field in field_map.items():\n if column_name + suffix_key in sparse_info:\n setattr(instance, model_field, sparse_info[column_name + suffix_key])\n instance_needed = True\n try:\n self.failures.remove(column_name + suffix_key)\n except KeyError:\n print('Error: Column was assigned twice. Second copy in %s.%s for output column %s.' % (model_class_name, model_field, column_name + suffix_key))\n else:\n pass # It's okay for the model to specify a field that the C Engine doesn't output. No harm done\n if not instance_needed:\n keys_to_delete.append(suffix_key)\n for suffix_key in keys_to_delete:\n del instance_dict[suffix_key] \n return [instance for key, instance in instance_dict.items()]\n\n def construct_combinatorial_instances(self, day, iteration, last_line):\n \"\"\"This constructs a mapping between the name of the column 'suffix' for example: 'BackgroundCattle' and maps it\n to the appropriate Django query settings to grab the matching model instance. 
For 'BackgroundCattle' the query\n should be `DailyByZoneAndProductionType(production_type__name=Cattle, zone=None, ...`.\n This handles the special blank case for both \"All ProductionType\" = '' and \"Background Zone\" = None.\n \n It returns a dict which is the collection of all the model instances which will need to be populated each day:\n 1 DailyControls\n 1*pt DailyByProductionType\n zones*pt DailyByZoneAndProductionType\n zones*1 DailyByZone\n \"\"\"\n daily_instances = {table_name:{} for table_name in [\"DailyByProductionType\", \"DailyByZone\", \"DailyByZoneAndProductionType\", \"DailyControls\"]}\n\n daily_by_pt = daily_instances[\"DailyByProductionType\"]\n for pt_name in self.possible_pts:\n try:\n pt = [x[0] for x in self.production_types if x[1] == pt_name][0]\n except IndexError:\n pt = None\n daily_by_pt[camel_case_spaces(pt_name)] = \\\n Results.models.DailyByProductionType(production_type_id=pt, iteration=iteration, day=day, last_day=last_line)\n\n daily_instances[\"DailyByZone\"] = {}\n for zone_name in self.possible_zones:\n try:\n zone = [x[0] for x in self.zones if x[1] == zone_name][0]\n except IndexError:\n zone = None\n instance = Results.models.DailyByZone(zone_id=zone, iteration=iteration, day=day, last_day=last_line)\n daily_instances[\"DailyByZone\"][camel_case_spaces(zone_name)] = instance\n\n daily_by_pt_zone = daily_instances[\"DailyByZoneAndProductionType\"]\n for pt_name in self.possible_pts:\n try:\n pt = [x[0] for x in self.production_types if x[1] == pt_name][0]\n except IndexError:\n pt = None\n for zone_name in self.possible_zones:\n try:\n zone = [x[0] for x in self.zones if x[1] == zone_name][0]\n except IndexError:\n zone = None\n daily_by_pt_zone[camel_case_spaces(zone_name) + camel_case_spaces(pt_name)] = \\\n Results.models.DailyByZoneAndProductionType(production_type_id=pt, zone_id=zone, iteration=iteration, day=day, last_day=last_line)\n\n daily_instances[\"DailyControls\"] = {'': Results.models.DailyControls(iteration=iteration, day=day, last_day=last_line)} # there's only one of these\n return daily_instances\n\n def populate_db_from_daily_report(self, sparse_info, last_line):\n \"\"\"Parses the C Engine stdout and populates the appropriate models with the information. 
Takes one line\n at a time, representing one DailyReport.\"\"\"\n assert isinstance(sparse_info, dict)\n # sparse_info = literal_eval(report.sparse_dict)\n # print(sparse_info)\n try:\n iteration = sparse_info['Run']\n del sparse_info['Run']\n day = sparse_info['Day']\n del sparse_info['Day']\n del sparse_info['versionMajor']\n del sparse_info['versionMinor']\n del sparse_info['versionRelease']\n scenario_name = scenario_filename()\n if last_line:\n print(\"%s - Finished Iteration %i: %i Days\" % (scenario_name, iteration, day))\n except:\n return []\n self.failures = set(sparse_info.keys()) # whatever is left is a failure\n\n #construct the set of tables we're going to use for this day\n daily_instances = self.construct_combinatorial_instances(day, iteration, last_line)\n\n results = []\n for class_name in daily_instances:\n result = self.populate_tables_with_matching_fields(class_name, daily_instances[class_name], sparse_info) # there was a lot of preamble to get this line to work\n results.extend(result)\n\n if len(self.failures) and day == 1:\n print('Unable to match columns: ', len(self.failures), sorted(self.failures))\n return results\n\n def parse_daily_strings(self, cmd_string, last_line=False, create_version_entry=False):\n results = []\n if cmd_string:\n values = cmd_string.split(',')\n if len(values):\n pairs = zip(self.headers, values)\n sparse_values = {a: number(b) for a, b in pairs}\n if create_version_entry:\n version = Results.models.ResultsVersion()\n version.versionMajor = sparse_values['versionMajor']\n version.versionMinor = sparse_values['versionMinor']\n version.versionRelease = sparse_values['versionRelease']\n version.id = 1\n results.extend([version])\n results.extend(self.populate_db_from_daily_report(sparse_values, last_line))\n return results\n\n @staticmethod\n def parse_unit_stats_string(cmd_string):\n values = []\n for substring in cmd_string.split(','):\n try:\n values.append(int(substring))\n except ValueError:\n values.append(0)\n if len(values) == 5 and (values[1] or values[2] or values[3] or values[4]):\n unit = values[0]\n was_infected, was_zone_focus, was_vaccinated, was_destroyed = values[1], values[2], values[3], values[4]\n command = 'UPDATE Results_unitstats ' \\\n ' SET cumulative_infected=cumulative_infected+%i,' \\\n ' cumulative_zone_focus=cumulative_zone_focus+%i,' \\\n ' cumulative_vaccinated=cumulative_vaccinated+%i,' \\\n ' cumulative_destroyed=cumulative_destroyed+%i' \\\n ' WHERE unit_id=%i' % (1 if was_infected else 0, 1 if was_zone_focus else 0, 1 if was_vaccinated else 0, 1 if was_destroyed else 0, unit)\n try:\n cursor = connections['scenario_db'].cursor()\n cursor.execute(command)\n\n return True\n except BaseException as e:\n print(\"Command Failure\", e, command)\n return False\n","repo_name":"NAVADMC/ADSM","sub_path":"Results/output_parser.py","file_name":"output_parser.py","file_ext":"py","file_size_in_byte":9784,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"7850459963","text":"from powerservice import trading\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport csv\n\n\n\n\ntodaytrades = trading.get_trades(date='21/01/2022')\nyesterdaytrades = trading.get_trades(date='20/01/2022')\n\ndftoday= pd.DataFrame(todaytrades[1])\n\ntodaytime = pd.to_datetime(dftoday.time)\n\ntodaytimegrouped = dftoday.groupby([todaytime.dt.hour]).agg(volume=('volume', 'sum'))\n\noutput = todaytimegrouped.reset_index()[['time', 'volume']]\n\n\"\"\"convert time 
column to datetime 24 hour format, shifting las row to first\"\"\"\noutput['time'] = pd.to_datetime(output.time, format='%H')\noutput['time'] = output['time'].dt.strftime('%H:%M')\noutput=output.apply(np.roll, shift=1)\n\noutput2 = output[[\"time\",\"volume\"]]\n\n\"\"\"get current date/time and create file name\"\"\" \ndateTimeObj = datetime.now()\ndt_string = dateTimeObj.strftime(\"%Y%m%d_%H%M\")\n\n\"\"\"CSV file\"\"\"\nfile_name=\"PowerService_\" + dt_string + \".csv\"\noutput2.to_csv(file_name, index=False)\ndatetime_series = pd.to_datetime(dftoday['time'])\ndatetime_index = pd.DatetimeIndex(datetime_series.values)\ndf3=dftoday.set_index(datetime_index)\n\ndf4 = df3.resample('5T').mean()\ndf3=df3.set_index(df4.index)\ndf3=df3.set_index(df4.index).reset_index()\ndf3 = df3.fillna(0)\ndf_quality1= df3[df3['time'] == 0]\ndf_quality1['index'] = df_quality1['index'].dt.strftime('%H:%M')\n\ndf_quality1.rename(columns = {'index':'missed_intervals'}, inplace = True)\ndf_quality1 = df_quality1[[\"date\",\"missed_intervals\",\"id\"]]\n\n\"\"\"CSV data quality file\"\"\"\nfile_name=\"PowerService_\" + dt_string + \"_data_quality.csv\"\n\nfields=['<< TIME INTERVAL VALIDATION >>']\nwith open(file_name, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([fields])\n \n\"\"\"export csv file\"\"\"\ndf_quality1.to_csv(file_name, mode='a', index=False)\n\ndf_quality2=df3[(df3['volume'] == 0) & (df3['time'] != 0)]\ndf_quality2 = df_quality2[[\"date\",\"time\",\"volume\",\"id\"]]\n\nfields=['<< MISSING VALUES VALIDATION >>']\nwith open(file_name, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([fields])\n\n\"\"\"export csv file\"\"\"\ndf_quality2.to_csv(file_name, mode='a', index=False)\n\n\"\"\"Print the datetime_index\"\"\"\nprint(df4.describe())\n\n\"\"\"Start/End time validation\"\"\"\nif output2.time[0] == '23:00' and output2.time[23] == '22:00':\n print('START AND END TIME: CORRECT')\n validationtime = 'START AND END TIME: CORRECT'\nelse:\n print('START AND END TIME: INCORRECT')\n validationtime ='START AND END TIME: INCORRECT'\n\nwith open(file_name, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([validationtime])\n\ntry:\n pd.to_datetime(output2['time'], format='%H:%M', errors='raise')\n print('Valid')\n validationformat = 'TIME FORMAT: VALID'\nexcept ValueError:\n print('Invalid')\n validationformat = 'TIME FORMAT: INVALID'\n\nwith open(file_name, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([validationformat])\n\n\nfile_name2=\"PowerService_\" + dt_string + \"_data_profiling.csv\"\nfields=file_name2\nwith open(file_name2, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([fields])","repo_name":"Jasemma/PowerServiceChallenge","sub_path":"python-powerservice/src/powerservice/csvvalidate.py","file_name":"csvvalidate.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2317385649","text":"import contextlib\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom pathlib import Path\n\nfrom invoke import task\n\n\nWINDOWS = sys.platform.startswith(\"win\")\nROOT_DIR = Path(__file__).parent\nBIN_DIR = Path(sys.executable).parent\nTHEME_DIR = ROOT_DIR / \"theme\"\nOUTPUT_DIR = ROOT_DIR / \"output\"\nSERVE_PORT = 8000\nSSH_CONFIG = ROOT_DIR / \"private\" / \"ssh.config\"\nSSH_KEY = ROOT_DIR / \"private\" / \"id_rsa\"\nDEPLOY_PATH = \"/var/www/html/main/\"\n\n\n@task\ndef requirements(ctx):\n \"\"\"Update requirements*.txt from requirements*.in.\"\"\"\n in_files = 
ROOT_DIR.glob(\"requirements*.in\")\n for in_file in in_files:\n ctx.run(\n f'{BIN_DIR / \"pip-compile\"} {in_file}',\n env={\"CUSTOM_COMPILE_COMMAND\": \"inv requirements\"},\n )\n\n\n@task\ndef build_content(ctx):\n \"\"\"Build the site content.\"\"\"\n ctx.run(\n f'{BIN_DIR / \"pelican\"} --fatal warnings ' f'-s {ROOT_DIR / \"pelicanconf.py\"}'\n )\n\n\n@task\ndef build_theme(ctx):\n \"\"\"Build the site theme.\"\"\"\n scss_file = ROOT_DIR / \"theme\" / \"scss\" / \"main.scss\"\n scss_mtime = os.stat(scss_file).st_mtime\n css_file = ROOT_DIR / \"theme\" / \"static\" / \"css\" / \"main.css\"\n css_mtime = os.stat(css_file).st_mtime\n if scss_mtime >= css_mtime:\n ctx.run(f'{BIN_DIR / \"pysassc\"} --sourcemap {scss_file} {css_file}')\n\n\n@task(pre=[build_theme, build_content])\ndef build(_ctx):\n \"\"\"Build everything.\"\"\"\n\n\n@task\ndef clean(_ctx):\n \"\"\"Clean the build output.\"\"\"\n with contextlib.suppress(FileNotFoundError):\n shutil.rmtree(OUTPUT_DIR)\n OUTPUT_DIR.mkdir()\n\n\n@task\ndef serve(ctx):\n \"\"\"Start a web server to serve up the site (blocks).\"\"\"\n print(\"Serving on http://localhost:\", SERVE_PORT, sep=\"\")\n ctx.run(f'{BIN_DIR / \"pelican\"} --listen --port {SERVE_PORT}')\n\n\n@task\ndef deploy(ctx):\n \"\"\"Deploy latest build to production.\"\"\"\n print(\"Copying files...\")\n source = str(OUTPUT_DIR) + \"/*\"\n if WINDOWS:\n ctx.run(f'scp -F \"{SSH_CONFIG}\" -r \"{source}\" web:{DEPLOY_PATH}')\n else:\n ctx.run(\n \"rsync --delete --stats -pthrzv -c \"\n f'-e \"ssh -F {SSH_CONFIG} -oStrictHostKeyChecking=no\" '\n f\"{source} web:{DEPLOY_PATH}\"\n )\n\n\n@task(help={\"host\": \"Host from ssh.config to which to connect.\"})\ndef ssh(_ctx, host=\"web\"):\n \"\"\"ssh into the blog's host.\"\"\"\n user_ssh_dir = Path.home() / \".ssh\" # Ensure .ssh dir exists.\n user_ssh_dir.mkdir(mode=0o700, exist_ok=True)\n with chdir(ROOT_DIR):\n command = [\"ssh\", \"-F\", f\"{SSH_CONFIG}\", f\"{host}\"]\n print(\" \".join(command))\n subprocess.run(command)\n\n\n@contextlib.contextmanager\ndef chdir(path):\n old = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(old)\n","repo_name":"genericmoniker/blog","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13562806914","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'George J. London'\nSITENAME = u'George J. 
London'\nTAGLINE = 'Tinkerer/Thinkerer.'\nSITEURL = 'http://rogueleaderr.github.io'\n\nTIMEZONE = 'America/New_York'\n\nDEFAULT_LANG = u'en'\n\n\n# can be useful in development, but set to False when you're ready to publish\nRELATIVE_URLS = True\n\nGITHUB_URL = 'http://github.com/rogueleaderr/'\nDISQUS_SITENAME = \"blog-rogueleaderr\"\nPDF_GENERATOR = False\nREVERSE_CATEGORY_ORDER = True\nDEFAULT_DATE = (2012, 3, 2, 14, 1, 1)\n\nFEED_ALL_RSS = 'feeds/all.rss.xml'\nCATEGORY_FEED_RSS = 'feeds/%s.rss.xml'\nTHEME = './themes/svbtle'\nOUTPUT_PATH = 'output'\nPATH = 'content'\n\nARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{slug}/'\nARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{slug}/index.html'\n\n# Custom Home page\nPAGE_DIR = 'pages'\nPAGES_DIR = 'pages'\n\nDIRECT_TEMPLATES = (('index', 'tags', 'categories', 'archives'))\n#PAGINATED_DIRECT_TEMPLATES = (('blog',))\nTEMPLATE_PAGES = {'home.html': 'index.html',}\n\nGENERATED_PAGES = ()\nDISPLAY_PAGES_ON_MENU = True\n\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\n# Blogroll\n#LINKS = (('LinerNotes', 'http://www.linernotes.com'),)\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n\nSOCIAL = (('twitter', 'http://twitter.com/rogueleaderr'),)\n\n# global metadata to all the contents\nDEFAULT_METADATA = (('yeah', 'it is'),)\n\n# # path-specific metadata\nEXTRA_PATH_METADATA = {\n 'extra/robots.txt': {'path': 'robots.txt'},\n 'extra/.htaccess': {'path': '.htaccess'},\n\n }\n#\n# # static paths will be copied without parsing their contents\nSTATIC_PATHS = [\n 'pictures',\n 'extra/robots.txt',\n 'themes/svbtle'\n ]\n#\n# custom page generated with a jinja2 template\n#TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'}\n#\n# # code blocks with line numbers\nPYGMENTS_RST_OPTIONS = {'linenos': 'table'}\n#\n# # foobar will not be used, because it's not in caps. 
All configuration keys\n# # have to be in caps\n# foobar = \"barbaz\"","repo_name":"gjlondon/rogueleaderr.github.io","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6783538784","text":"import marshmallow as ma\nfrom oarepo_model_builder.datatypes import ModelDataType\n\n\nclass DraftDataType(ModelDataType):\n model_type = \"draft_record\"\n\n class ModelSchema(ModelDataType.ModelSchema):\n type = ma.fields.Str(\n load_default=\"draft_record\",\n required=False,\n validate=ma.validate.Equal(\"draft_record\"),\n )\n\n def prepare(self, context):\n self.published_record = context[\"published_record\"]\n super().prepare(context)\n","repo_name":"oarepo/oarepo-model-builder-drafts","sub_path":"oarepo_model_builder_drafts/datatypes/draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14756857490","text":"from graphics import *\nfrom src.Peer import Peer\nimport graphics as gf\nimport time\nfrom tkinter import *\nfrom tkinter import ttk, messagebox\nimport threading\nfrom src.UserInterface import UserInterface\n\n\ndef exit_program():\n window.destroy()\n window.quit()\n os._exit(10)\n return\n\n\nclass MyGraphWin(gf.GraphWin):\n def getMouse(self):\n \"\"\"Wait for mouse click and return Point object representing\n the click\"\"\"\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n self.update()\n #_tkCall(self.update)\n if self.isClosed():\n return 0\n time.sleep(.1) # give up thread\n x, y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return gf.Point(x, y)\n\n\n\"\"\" \nazed variables: \n Peer.network_graph \n right_child, left_child\n root_address=(\"127.0.0.1\", 10000) \n\"\"\" # COPS FROM AZED\n\n\nCIRCLE_R = 15\nroot = None\n\n\nclass GNode:\n def __init__(self, address=None, name=\"\", is_root=False):\n self.port = None\n self.address = None\n self.name = name\n if address is None:\n self.peer = None\n else:\n if is_root:\n if root is not None:\n print(\"Root Already Exists\")\n return\n self.create_server()\n else:\n self.port = int(address[1])\n self.create_client()\n\n #self.address = (str(address[0]), str(address[1]))\n self.neighbors = []\n self.location = None\n\n def create_server(self):\n self.peer = Peer(\"127.0.0.1\", 10000, is_root=True)\n self.port = self.peer.server_port\n self.address = (self.peer.server_ip, self.peer.server_port)\n self.peer.UI.name = self.name\n print(\"SERVER ADDED\")\n thread = threading.Thread(target=self.peer.run)\n thread.start()\n\n def create_client(self):\n self.peer = Peer(\"127.0.0.1\", self.port, is_root=False, root_address=(\"127.0.0.1\", 10000))\n self.port = self.peer.server_port\n self.address = (self.peer.server_ip, self.peer.server_port)\n self.peer.UI.name = self.name\n print(\"CLIENT ADDED\")\n thread = threading.Thread(target=self.peer.run)\n thread.start()\n\n def connect_to(self, g_node):\n if g_node in self.neighbors:\n pass\n else:\n self.neighbors.append(g_node)\n g_node.connect_to(self)\n\n def show(self, graphics_window, location=None, color=None):\n if color is None:\n if self.peer.registered:\n color = \"green\"\n else:\n color = \"white\"\n\n if location is None:\n location = self.location\n else:\n self.location = location\n circle = gf.Circle(location, 
CIRCLE_R)\n circle.setFill(color)\n circle.setOutline(\"yellow\")\n circle.draw(graphics_window)\n label = gf.Text(location, self.name)\n label.setTextColor('red')\n label.setSize(CIRCLE_R - CIRCLE_R//4)\n label.draw(graphics_window)\n\n def is_inside(self, point):\n if (point.x - self.location.x)**2 + (point.y - self.location.y)**2 <= (CIRCLE_R + 1)**2:\n return True\n return False\n\n\nclass Tree:\n def __init__(self, root):\n self.root = root\n self.nodes = None\n\n def show(self, graphics_window):\n marked = [self.root]\n x, y = graphics_window.width/2, CIRCLE_R + 10\n queue = [(self.root, gf.Point(x, y), 0)]\n self.root.show(graphics_window, gf.Point(x, y))\n while len(queue) > 0:\n node, location, depth = queue.pop(0)\n i = 0\n for child in node.neighbors:\n if child not in marked:\n marked.append(child)\n new_location = gf.Point(location.x + (2*i-1)*graphics_window.width/(2**(depth+2)),\n location.y + 50)\n queue.append((child, new_location, depth+1))\n line = gf.Line(gf.Point(location.x, location.y + CIRCLE_R),\n gf.Point(new_location.x, new_location.y - CIRCLE_R))\n line.setFill('yellow')\n line.setOutline('yellow')\n line.draw(graphics_window)\n child.show(graphics_window, new_location)\n i += 1\n self.nodes = marked\n\n\nwindow = Tk()\nwindow.protocol('WM_DELETE_WINDOW', exit_program)\nwindow.title(\"\")\nwindow.geometry('650x450')\nnodes = {}\ntabs = ttk.Notebook(window)\nactions = ttk.Frame(tabs)\ntabs.add(actions, text='actions')\n#page2 = ttk.Frame(tabs)\n#tabs.add(page2, text='console')\ntabs.pack(expand=1, fill='both')\nLabel(actions).grid(column=0, row=0) # blank\n\n\ndef add_client():\n if PORT.get() == \"\":\n messagebox.showinfo(\"No Port Given!\", \"please fill the port field\")\n return\n for address in nodes:\n if address[1] == PORT.get():\n messagebox.showinfo(\"Client Already Exists!\", \"the input port is in use\")\n #Label(actions, text=\"client with IP:'127.0.0.1' and PORT:'\" + PORT.get() + \"' already exists!\").grid(column=5, row=1)\n return\n name = \"N\" + len(nodes).__str__()\n node = GNode((IP.get(), PORT.get()), name)\n nodes[node.address] = node\n #nodes.append(GNode((IP.get(), PORT.get()), name))\n Label(actions, text=\"added client with IP:'127.0.0.1' and PORT:'\" + PORT.get() + \"' as \" + name).grid(\n column=5, row=1)\n\n\ndef add_root():\n global root\n if root is not None:\n messagebox.showinfo(\"Root Already Exists!\", \"can't add more than one root\")\n return\n root = GNode((\"127.000.000.001\", \"10000\"), \"R\", is_root=True) # needs checking if IP and PORT are valid\n Label(actions, text=\"added root with IP:'127.0.0.1' and PORT:'10000' as R\").grid(\n column=5, row=1)\n #messagebox.showinfo(\"Root Added!\", \"added root with IP:'127.0.0.1' and PORT:'10000' as R\")\n nodes[root.address] = root\n nodes[root.address] = root\n #nodes.append(root)\n\n\nLabel(actions, text=\"IP:\").grid(column=0, row=1)\nLabel(actions, text=\"PORT:\").grid(column=0, row=2)\nIP = Entry(actions)\nIP.insert(END, \"(optional)\")\nIP.grid(column=1, row=1)\nPORT = Entry(actions)\nPORT.grid(column=1, row=2)\n\n\nadd_client_button = Button(actions, text=\"add client\", command=add_client)\nadd_client_button.grid(column=3, row=1)\n\nadd_root_button = Button(actions, text=\"add root\", command=add_root)\nadd_root_button.grid(column=4, row=1)\n\n\nclass RefreshButton:\n def __init__(self):\n self.x1 = 10\n self.x2 = 80\n self.y1 = 10\n self.y2 = 30\n self.rect = gf.Rectangle(gf.Point(self.x1, self.y1), gf.Point(self.x2, self.y2))\n self.rect.setOutline(\"red\")\n 
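# text label centered on top of the button rectangle\n        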
self.message = gf.Text(Point(45, 20), \"REFRESH\")\n self.message.setTextColor('black')\n self.message.setSize(10)\n\n def show(self, graph_win, color=\"white\"):\n try:\n self.rect.undraw()\n self.message.undraw()\n except Exception:\n pass\n self.rect.setFill(color)\n self.rect.draw(graph_win)\n self.message.draw(graph_win)\n\n def is_inside(self, point):\n if point.x > self.x1 and point.y > self.y1 and point.x < self.x2 and point.y < self.y2:\n return True\n return False\n\n\ndef build_root_tree(root_node):\n network_graph = root.peer.graph\n\n # if graph nodes is list\n for address in nodes:\n for node in network_graph.nodes:\n if node.address == address:\n if node.left_child is not None:\n nodes[address].connect_to(nodes[node.left_child.address])\n if node.right_child is not None:\n nodes[address].connect_to(nodes[node.right_child.address])\n\n # if graph nodes is dictionary\n \"\"\"for address in nodes:\n if network_graph.nodes.__contains__(address):\n if network_graph.nodes[address].left_child is not None:\n nodes[address].connect_to(nodes[network_graph.nodes[address].left_child.address])\n if network_graph.nodes[address].right_child is not None:\n nodes[address].connect_to(nodes[network_graph.nodes[address].right_child.address])\"\"\"\n return Tree(root_node)\n\n\ndef show_network():\n win = gf.GraphWin(\"main\", 700, 500, autoflush=False)\n win.setBackground('black')\n exit = False\n while exit is False:\n win.update()\n refresh = RefreshButton()\n refresh.show(win)\n if root is not None:\n tree = build_root_tree(root)\n tree.show(win)\n tree_nodes = tree.nodes\n else:\n tree_nodes = []\n i = 0\n for address in nodes:\n is_in_tree = False\n for node in tree_nodes:\n if address == node.address:\n is_in_tree = True\n if not is_in_tree:\n nodes[address].show(win, gf.Point(3*(i+1)*CIRCLE_R, win.height - (CIRCLE_R + 10)))\n i += 1\n try:\n clicked_point = win.getMouse()\n if clicked_point is None:\n pass\n elif refresh.is_inside(clicked_point):\n refresh.show(win, \"grey\")\n for item in win.items[:]:\n item.undraw()\n win.update()\n refresh.show(win, \"white\")\n else:\n for address in nodes:\n if nodes[address].is_inside(clicked_point):\n print(nodes[address].name)\n nodes[address].show(win, color=\"grey\")\n nodes[address].peer.UI.name = nodes[address].name\n nodes[address].peer.UI.open_window()\n\n \"\"\"\n new_window = MyGraphWin(\"SAG\", 100, 100)\n new_window.getMouse()\n new_window.close()\n \"\"\"\n nodes[address].show(win)\n except gf.GraphicsError as err:\n exit = True\n break\n win.close()\n\n\nshow_network_button = Button(actions, text=\"Show Network\", command=show_network)\nshow_network_button.grid(column=1, row=3)\n\nexit_button = Button(actions, text=\"Exit\", command=exit_program)\nexit_button.place(relx=1.0, rely=1.0, anchor=SE)\n\nwindow.mainloop()\n\n\n\n\n","repo_name":"pajouheshgar/CN_Project","sub_path":"src/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"74305962454","text":"class Solution:\n def removeDuplicates(self, nums: list[int]) -> int:\n last = None\n idx = 0\n while idx < len(nums):\n if nums[idx]!=last:\n last = nums[idx]\n idx += 1\n else:\n del nums[idx]\n print(nums)\n return len(nums)\n\n def removeDuplicates_best(self, nums: list[int]) -> int:\n '''\n \n '''\n l = r = 1\n for i in range(1, len(nums)):\n if nums[i] != nums[i-1]:\n nums[l] = nums[i]\n l += 1\n else:\n r += 1\n return l\n\nif __name__==\"__main__\":\n 
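# smoke test: the sample array has 5 unique values, so k should be 5\n    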
solution = Solution()\n input = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n k = solution.removeDuplicates(input)\n print(k)","repo_name":"timsu-98/Leetcode","sub_path":"python3/26_remove_duplicates_from_sorted_array.py","file_name":"26_remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30287688234","text":"import os\nimport yaml\n\nfrom pathlib import Path\nfrom great_expectations.core.batch import BatchRequest\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n CheckpointConfig\n)\n\nbase_path = Path(__file__).parents[3]\ndata_dir = os.path.join(base_path, \"include\", \"data\")\nge_root_dir = os.path.join(base_path, \"include\", \"great_expectations\")\nconnection_string = \"\"\n\n# Note: The user must first configure a `config_variable.yml` file for this to work\n# The file is not included with this repo.\nwith open(\n f\"{ge_root_dir}/uncommitted/config_variables.yml\",\n \"r\",\n) as f:\n connection_string = yaml.safe_load(f).get(\"my_snowflake_db\")\n\nsnowflake_data_context_config = DataContextConfig(\n **{\n \"config_version\": 3.0,\n \"datasources\": {},\n \"config_variables_file_path\": os.path.join(\n ge_root_dir, \"uncommitted\", \"config_variables.yml\"\n ),\n \"stores\": {\n \"expectations_store\": {\n \"class_name\": \"ExpectationsStore\",\n \"store_backend\": {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(ge_root_dir, \"expectations\"),\n },\n },\n \"validations_store\": {\n \"class_name\": \"ValidationsStore\",\n \"store_backend\": {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(\n ge_root_dir, \"uncommitted\", \"validations\"\n ),\n },\n },\n \"evaluation_parameter_store\": {\"class_name\": \"EvaluationParameterStore\"},\n \"checkpoint_store\": {\n \"class_name\": \"CheckpointStore\",\n \"store_backend\": {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"suppress_store_backend_id\": True,\n \"base_directory\": os.path.join(ge_root_dir, \"checkpoints\"),\n },\n },\n },\n \"expectations_store_name\": \"expectations_store\",\n \"validations_store_name\": \"validations_store\",\n \"evaluation_parameter_store_name\": \"evaluation_parameter_store\",\n \"checkpoint_store_name\": \"checkpoint_store\",\n \"data_docs_sites\": {\n \"local_site\": {\n \"class_name\": \"SiteBuilder\",\n \"show_how_to_buttons\": True,\n \"store_backend\": {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(\n ge_root_dir, \"uncommitted\", \"data_docs\", \"local_site\"\n ),\n },\n \"site_index_builder\": {\"class_name\": \"DefaultSiteIndexBuilder\"},\n }\n },\n \"anonymous_usage_statistics\": {\n \"data_context_id\": \"abcdabcd-1111-2222-3333-abcdabcdabcd\",\n \"enabled\": False,\n },\n \"notebooks\": None,\n \"concurrency\": {\"enabled\": False},\n }\n)\n\nsnowflake_checkpoint_config = CheckpointConfig(\n **{\n \"name\": \"taxi.pass.chk\",\n \"config_version\": 1.0,\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"class_name\": \"Checkpoint\",\n \"run_name_template\": \"%Y%m%d-%H%M%S-my-run-name-template\",\n \"expectation_suite_name\": \"taxi.demo\",\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": 
\"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\", \"site_names\": []},\n },\n ],\n \"evaluation_parameters\": {},\n \"runtime_configuration\": {},\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_snowflake_datasource\",\n \"data_connector_name\": \"default_inferred_data_connector_name\",\n \"data_asset_name\": \"yellow_tripdata\",\n },\n }\n ],\n \"profilers\": [],\n \"ge_cloud_id\": None,\n \"expectation_suite_ge_cloud_id\": None,\n }\n)\n\nsnowflake_audit_checkpoint_config = CheckpointConfig(\n **{\n \"name\": \"taxi.pass.chk\",\n \"config_version\": 1.0,\n \"template_name\": None,\n \"class_name\": \"Checkpoint\",\n \"run_name_template\": \"%Y%m%d-%H%M%S-my-run-name-template\",\n \"expectation_suite_name\": \"taxi.demo\",\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\", \"site_names\": []},\n },\n ],\n \"evaluation_parameters\": {},\n \"runtime_configuration\": {},\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_snowflake_datasource\",\n \"data_connector_name\": \"default_inferred_data_connector_name\",\n \"data_asset_name\": \"yellow_tripdata_audit\",\n },\n }\n ],\n \"profilers\": [],\n \"ge_cloud_id\": None,\n \"expectation_suite_ge_cloud_id\": None,\n }\n)\n\nsnowflake_batch_request = BatchRequest(\n **{\n \"datasource_name\": \"my_snowflake_db\",\n \"data_connector_name\": \"default_inferred_data_connector_name\",\n \"data_asset_name\": \"yellow_tripdata_sample_2019-01.csv\",\n \"data_connector_query\": {\"index\": -1},\n }\n)\n","repo_name":"astronomer/airflow-data-quality-demo","sub_path":"include/great_expectations/configs/snowflake_configs.py","file_name":"snowflake_configs.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"67"} +{"seq_id":"909003871","text":"from graphics import *\nimport time\nimport os\n\n#This method will draw the clock face.\ndef drawClock( win ):\n #Draw outer circle\n outline= Circle(Point(250,250), 200)\n outline.setWidth(2)\n outline.draw(win)\n\n center= Circle(Point(250,250), 5)\n center.setWidth(2)\n center.draw(win)\n #@secHand: var for the second hand on clock\n secHand = Line(Point(250, 250), Point(250,450))\n secHand.setWidth(2)\n secHand.draw(win)\n #@minHand: var for the minute hand on clock\n minHand = Line(Point(250, 250), Point(250,450))\n minHand.setWidth(3)\n minHand.draw(win)\n #@hrHand: var for the hour hand on clock\n hrHand= Line(Point(250,250), Point(250,350))\n hrHand.setFill('red')\n hrHand.setWidth(6)\n hrHand.draw(win)\n #Draw hour hand, minute hand, second hand\n secHand.delete(self.id)\n minHand.delete(self.id)\n hrHand.delete(self.id)\n secHand.undraw()\n minHand.undraw()\n hrHand.undraw()\n \ndef drawSecPic( win ):\n outline= Circle(Point(250,250), 200)\n outline.setWidth(2)\n outline.draw(win)\n\n center= Circle(Point(250,250), 5)\n center.setWidth(2)\n center.draw(win)\n\n secHand = Line(Point(250, 250), Point(350,300))\n secHand.setWidth(2)\n secHand.draw(win)\n\n minHand = Line(Point(250, 250), Point(100,450))\n minHand.setWidth(3)\n minHand.draw(win)\n\n hrHand= 
Line(Point(250,250), Point(300,200))\n    hrHand.setFill('red')\n    hrHand.setWidth(6)\n    hrHand.draw(win)\n\n#def cls():\n    #os.system(\"clear\")\n\ndef main():\n    # run the demo: draw the clock, wait, then show the second picture\n    win = GraphWin(\"Greatest Clock Ever\", 500, 500)\n    win.yUp()\n    drawClock(win)\n    time.sleep(4)\n    #cls()\n    drawSecPic(win)\n\n    win.promptClose(win.getWidth()/2, 20)\n\nmain()\n","repo_name":"hazelv/Python","sub_path":"Clock/TestClock.py","file_name":"TestClock.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37128734832","text":"#encoding:utf-8\r\nimport os\r\nimport numpy as np\r\nimport json\r\nimport torch\r\nfrom ..utils.util import ensure_dir\r\n\r\nclass ModelCheckpoint(object):\r\n\r\n    def __init__(self, checkpoint_dir,\r\n                 monitor,\r\n                 logger,\r\n                 save_best_only=False,\r\n                 mode='min',\r\n                 epoch_freq=1,\r\n                 arch='ckpt',\r\n                 best=None):\r\n        self.monitor = monitor\r\n        self.checkpoint_dir = checkpoint_dir\r\n        self.save_best_only = save_best_only\r\n        self.epoch_freq = epoch_freq\r\n        self.arch = arch\r\n        self.logger = logger\r\n        # comparison mode\r\n        if mode == 'min':\r\n            self.monitor_op = np.less\r\n            self.best = np.Inf\r\n        elif mode == 'max':\r\n            self.monitor_op = np.greater\r\n            self.best = -np.Inf\r\n        # mainly for when the model is reloaded:\r\n        # re-assign best here\r\n        if best:\r\n            self.best = best\r\n        ensure_dir(checkpoint_dir)\r\n\r\n    def step(self, state, current):\r\n        # checkpoint file name\r\n        filename = os.path.join(self.checkpoint_dir, '{}-checkpoint-epoch{}.pth'.format(state['arch'], state['epoch']))\r\n        # keep only the best model?\r\n        if self.save_best_only:\r\n            if self.monitor_op(current, self.best):\r\n                self.logger.info('\\nEpoch %05d: %s improved from %0.5f to %0.5f'% (state['epoch'], self.monitor, self.best,\r\n                                                                                   current))\r\n                self.best = current\r\n                state['best'] = self.best\r\n                best_path = os.path.join(self.checkpoint_dir, '{}-model_best.pth'.format(state['arch']))\r\n                torch.save(state, best_path)\r\n        # otherwise save the model every epoch_freq epochs\r\n        else:\r\n            if state['epoch'] % self.epoch_freq == 0:\r\n                self.logger.info(\"\\nEpoch %05d: save model to disk.\"%(state['epoch']+1))\r\n                torch.save(state, filename)","repo_name":"lonePatient/pytorch_fashionMNIST_practice","sub_path":"pytorchFashion/callback/modelcheckpoint.py","file_name":"modelcheckpoint.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"}
{"seq_id":"7722620401","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport inspect\nimport math\nimport tensorflow as tf\n\n\ndef cutout(image, pad_size, replace=0):\n    'Apply cutout (https://arxiv.org/abs/1708.04552) to image.\\n\\n    This operation applies a (2*pad_size x 2*pad_size) mask of zeros to\\n    a random location within `img`. The pixel values filled in will be of the\\n    value `replace`. The location where the mask will be applied is randomly\\n    chosen uniformly over the whole image.\\n\\n    Args:\\n    image: An image Tensor of type uint8.\\n    pad_size: Specifies how big the zero mask that will be generated is that\\n    is applied to the image. 
The mask will be of size\\n (2*pad_size x 2*pad_size).\\n replace: What pixel value to fill in the image in the area that has\\n the cutout mask applied to it.\\n\\n Returns:\\n An image Tensor that is of type uint8.\\n '\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n cutout_center_height = tf.random_uniform(shape=[], minval=0, maxval=image_height, dtype=tf.int32)\n cutout_center_width = tf.random_uniform(shape=[], minval=0, maxval=image_width, dtype=tf.int32)\n lower_pad = tf.maximum(0, (cutout_center_height - pad_size))\n upper_pad = tf.maximum(0, ((image_height - cutout_center_height) - pad_size))\n left_pad = tf.maximum(0, (cutout_center_width - pad_size))\n right_pad = tf.maximum(0, ((image_width - cutout_center_width) - pad_size))\n cutout_shape = [(image_height - (lower_pad + upper_pad)), (image_width - (left_pad + right_pad))]\n padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]\n mask = tf.pad(tf.zeros(cutout_shape, dtype=image.dtype), padding_dims, constant_values=1)\n mask = tf.expand_dims(mask, (- 1))\n mask = tf.tile(mask, [1, 1, 3])\n image = tf.where(tf.equal(mask, 0), (tf.ones_like(image, dtype=image.dtype) * replace), image)\n return image\n","repo_name":"menna161/API-Wizard","sub_path":"Dataset/Dataset/tf.pad/snippets/snippet930220.py","file_name":"snippet930220.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33516287958","text":"import os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom datetime import timedelta, date, datetime\n\n\n# paths\nROOT_PATH = Path('/opt')\nroot_folder = ROOT_PATH / 'covid19-public'\nvax_national_csv = root_folder / 'vaccination' / 'vax_malaysia.csv'\nvax_state_csv = root_folder / 'vaccination' / 'vax_state.csv'\nreg_national_csv = root_folder / 'registration' / 'vaxreg_malaysia.csv'\nreg_state_csv = root_folder / 'registration' / 'vaxreg_state.csv'\nstatic_pop = root_folder / 'static' / 'population.csv'\n\nDATA_EXPORT_PATH = f'{str(ROOT_PATH)}/vaxapp-prod/data/data3.json'\n\nHERD_TARGET_PCT = 0.8\nPHASE2_TARGET_PCT = 0.2\nPHASE3_TARGET_PCT = 0.4\nPHASE4_TARGET_PCT = 0.6\nNINETY_TARGET_PCT = 0.9\nFULL_TARGET_PCT = 1.0\nPERIOD_WINDOW = 14 # for daily doses data\nROLL_WINDOW = 7 # for vaccination rate\nN_TOP_STATES = 5\n\n# proportion of AZ in total vaccine supply (updated 3/8)\nPROP_AZ = 0.0857\nPROP_OTHERS = 0.9143\n# weighted average of dose interval based on 3 weeks pfizer/sinovac and 9 weeks AZ\nAVG_DOSE_INT = round((21*PROP_OTHERS) + (63*PROP_AZ))\nPFSN_DOSE_INT = 21\nAZ_DOSE_INT = 63\n\nstate_abbr = {'Johor': 'JHR',\n 'Kedah': 'KDH',\n 'Kelantan': 'KTN',\n 'Melaka': 'MLK',\n 'Negeri Sembilan': 'NSN',\n 'Pahang': 'PHG',\n 'Perak': 'PRK',\n 'Perlis': 'PLS',\n 'Pulau Pinang': 'PNG',\n 'Sabah': 'SBH',\n 'Sarawak': 'SWK',\n 'Terengganu': 'TRG',\n 'W.P. Labuan': 'LBN',\n 'Klang Valley': 'KV',\n 'Malaysia': 'MY'}\n\nsummarized_states = ['W.P. Kuala Lumpur', 'W.P. 
Putrajaya', 'Selangor']\n\n# for console printing\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKCYAN = '\\033[96m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\n\ndef preprocess_csv(national_csv, state_csv, dfpop):\n    \"\"\"\n    Main pre-processing function to combine the national and state CSVs.\n    National-level data is treated as a state.\n    Returns an aggregated summary by state for the latest date in the data set.\n    For the vaccination CSV, also returns doses data by state and target hits.\n    \"\"\"\n\n    # read national csv\n    dfvn = pd.read_csv(national_csv)\n    dfvn['state'] = 'Malaysia'\n\n    # read state csv\n    dfvs = pd.read_csv(state_csv)\n    dfvs = pd.concat([dfvs, dfvn]) # concat national and state\n    dfvs['date_dt'] = pd.to_datetime(dfvs.date, format='%Y-%m-%d')\n    dfvs.set_index(['date_dt', 'state'], inplace=True)\n\n    # create KV summary\n    dfvs_kv = dfvs.xs('Selangor', level=1) + dfvs.xs('W.P. Kuala Lumpur', level=1) + dfvs.xs('W.P. Putrajaya', level=1)\n    dfvs_kv['state'] = 'Klang Valley'\n    dfvs_kv['date'] = dfvs.xs('Selangor', level=1)['date'] # fix the summed-up date column\n    dfvs_kv.set_index('state', append=True, inplace=True)\n    dfvs = pd.concat([dfvs, dfvs_kv]).sort_index()\n\n    # get latest day slice\n    dfvs_dateindex = dfvs.index.get_level_values('date_dt')\n    dfvs_dateindex = pd.DatetimeIndex(dfvs_dateindex)\n    latest_date = dfvs_dateindex.max() # USE the latest date in the dataset!\n\n    # vax rate by state - only for vax dataset\n    state_doses_data_byvax = {}\n    state_target_hits = {}\n    if 'pfizer1' in dfvs.columns.tolist():\n        # cumulative by vax type\n        pfizer1_cumul = dfvs.groupby('state')['pfizer1'].sum()\n        pfizer2_cumul = dfvs.groupby('state')['pfizer2'].sum()\n        sinovac1_cumul = dfvs.groupby('state')['sinovac1'].sum()\n        sinovac2_cumul = dfvs.groupby('state')['sinovac2'].sum()\n        astra1_cumul = dfvs.groupby('state')['astra1'].sum()\n        astra2_cumul = dfvs.groupby('state')['astra2'].sum()\n        cansino2_cumul = dfvs.groupby('state')['cansino'].sum()\n\n        dfvs['cumul_full_adult'] = dfvs.cumul_full - dfvs.cumul_full_child\n        dfvs['cumul_partial_adult'] = dfvs.cumul_partial - dfvs.cumul_partial_child\n        dfvs['dose2_pct_adult'] = dfvs.cumul_full_adult/dfpop['pop_18']\n        dfvs['dose2_pct_child'] = dfvs.cumul_full_child/dfpop['pop_12']\n        dfvs['dose2_pct_total'] = dfvs.cumul_full/dfpop['pop']\n        dfvs['pfsn1'] = dfvs['pfizer1'] + dfvs['sinovac1']\n\n        dfvs['daily_partial_child'] = dfvs.groupby('state')['cumul_partial_child'].diff()\n        dfvs['daily_full_child'] = dfvs.groupby('state')['cumul_full_child'].diff()\n\n        latest_dfv = dfvs.loc[latest_date]\n        latest_lastday_dfv = dfvs.loc[latest_date - timedelta(days=1)]\n\n        # extract milestones that were hit: when (date) and doses administered\n        for pop_level in ['adult', 'total', 'child']:\n            state_target_hits[pop_level] = {}\n            for state_name, state_period in dfvs.groupby('state'):\n                state_target_hits[pop_level][state_name] = state_target_hits[pop_level].get(state_name, {})\n\n                for target in [PHASE2_TARGET_PCT, PHASE3_TARGET_PCT, PHASE4_TARGET_PCT, HERD_TARGET_PCT, NINETY_TARGET_PCT, FULL_TARGET_PCT]:\n                    if pop_level == 'adult':\n                        dose2_pct = state_period.dose2_pct_adult\n                    elif pop_level == 'child':\n                        dose2_pct = state_period.dose2_pct_child\n                    else:\n                        dose2_pct = state_period.dose2_pct_total\n\n                    target_period = state_period[dose2_pct > target]\n                    if not target_period.empty:\n                        target_hit_date = datetime.combine(\n                            
target_period[:1].iloc[0].name[0], datetime.min.time())\n                        if pop_level == 'adult':\n                            target_hit_dose2 = target_period[:1].iloc[0].cumul_full_adult\n                        elif pop_level == 'child':\n                            target_hit_dose2 = target_period[:1].iloc[0].cumul_full_child\n                        else:\n                            target_hit_dose2 = target_period[:1].iloc[0].cumul_full\n\n                        state_target_hits[pop_level][state_name][target] = (target_hit_date, target_hit_dose2)\n                        print(f'{state_name} hit {target} target at {target_hit_date} achieving {target_hit_dose2}')\n\n        # calculate the average daily dose rate over the last ROLL_WINDOW days\n        dfvs_lastweek = dfvs[latest_date - pd.offsets.Day(ROLL_WINDOW - 1):latest_date]\n        avg_dose1_rate = dfvs_lastweek.groupby('state')['daily_partial'].mean()\n        avg_dose2_rate = dfvs_lastweek.groupby('state')['daily_full'].mean()\n        avg_total_rate = dfvs_lastweek.groupby('state')['daily'].mean()\n\n        avg_pf_rate = dfvs_lastweek.groupby('state')['pfizer1'].mean()\n        avg_sn_rate = dfvs_lastweek.groupby('state')['sinovac1'].mean()\n        avg_pfsn_rate = dfvs_lastweek.groupby('state')['pfsn1'].mean()\n        avg_az_rate = dfvs_lastweek.groupby('state')['astra1'].mean()\n        avg_child_rate = dfvs_lastweek.groupby('state')['daily_partial_child'].mean()\n\n        # compare with the same window shifted back one day (old rate)\n        dfvs_lastweek_shifted = dfvs[latest_date - pd.offsets.Day(ROLL_WINDOW):latest_date - pd.offsets.Day(1)]\n        avg_dose1_rate_shifted = dfvs_lastweek_shifted.groupby('state')['daily_partial'].mean()\n\n        # from the latest date in the dataset, project the next AVG_DOSE_INT days of dose 2 from the last AVG_DOSE_INT days of dose 1\n        dfvs_dose_interval = dfvs[latest_date - pd.offsets.Day(AVG_DOSE_INT - 1):]\n        states_projected_dose2_total = dfvs_dose_interval.groupby('state')['daily_partial'].sum()\n        states_projected_dose2_total_list = dfvs_dose_interval.groupby('state')['daily_partial'].apply(list)\n\n        states_pf_dose2_list = dfvs[latest_date - pd.offsets.Day(PFSN_DOSE_INT - 1):latest_date].groupby('state')['pfizer1'].apply(list).fillna(0)\n        states_sn_dose2_list = dfvs[latest_date - pd.offsets.Day(PFSN_DOSE_INT - 1):latest_date].groupby('state')['sinovac1'].apply(list).fillna(0)\n        states_pfsn_dose2_list = dfvs[latest_date - pd.offsets.Day(PFSN_DOSE_INT - 1):latest_date].groupby('state')['pfsn1'].apply(list).fillna(0)\n        states_az_dose2_list = dfvs[latest_date - pd.offsets.Day(AZ_DOSE_INT - 1):latest_date].groupby('state')['astra1'].apply(list).fillna(0)\n\n        latest_dfv.loc[:, 'avg_dose1_rate'] = avg_dose1_rate\n        latest_dfv.loc[:, 'avg_dose2_rate'] = avg_dose2_rate\n        latest_dfv.loc[:, 'avg_total_rate'] = avg_total_rate\n        latest_dfv.loc[:, 'avg_pfsn_rate'] = avg_pfsn_rate\n        latest_dfv.loc[:, 'avg_az_rate'] = avg_az_rate\n        latest_dfv.loc[:, 'projected_dose2_total_list'] = states_projected_dose2_total_list\n        latest_dfv.loc[:, 'states_pfsn_dose2_list'] = states_pfsn_dose2_list\n        latest_dfv.loc[:, 'states_az_dose2_list'] = states_az_dose2_list\n\n        latest_dfv.loc[:, 'is_daily_rate_incr'] = latest_dfv.daily > latest_lastday_dfv.daily\n        latest_dfv.loc[:, 'is_avg_rate_incr'] = avg_dose1_rate > avg_dose1_rate_shifted\n\n        latest_dfv.loc[:, 'pfizer1_cumul'] = pfizer1_cumul\n        latest_dfv.loc[:, 'pfizer2_cumul'] = pfizer2_cumul\n        latest_dfv.loc[:, 'sinovac1_cumul'] = sinovac1_cumul\n        latest_dfv.loc[:, 'sinovac2_cumul'] = sinovac2_cumul\n        latest_dfv.loc[:, 'astra1_cumul'] = astra1_cumul\n        latest_dfv.loc[:, 'astra2_cumul'] = astra2_cumul\n        latest_dfv.loc[:, 'cansino2_cumul'] = cansino2_cumul\n\n        # aggregate daily doses data by state\n        dfvs_period_window = dfvs[latest_date -\n                                  
pd.offsets.Day(PERIOD_WINDOW - 1):]\n for state_name, state_period in dfvs_period_window.groupby('state'):\n state_doses_data_byvax[state_name] = prepare_doses_byvax_data(\n state_period, avg_pf_rate[state_name], avg_sn_rate[state_name], avg_az_rate[state_name], states_pf_dose2_list[state_name], states_sn_dose2_list[state_name], states_az_dose2_list[state_name])\n else:\n latest_dfv = dfvs.loc[latest_date]\n date_lastday_idx_slice = latest_date - timedelta(days=1)\n # workaround for missing dates in index\n # would've filled na if not for this bug: https://github.com/pandas-dev/pandas/issues/25460\n if date_lastday_idx_slice in dfvs.index.get_level_values(0):\n latest_lastday_dfv = dfvs.loc[date_lastday_idx_slice]\n else: \n latest_lastday_dfv = pd.DataFrame(\n data=0, columns=dfvs.columns, index=list(state_abbr.keys()))\n\n latest_dfv['date_dt'] = pd.to_datetime(\n latest_dfv.date, format='%Y-%m-%d', errors='ignore')\n latest_dfv = latest_dfv.drop(summarized_states)\n return latest_dfv, state_doses_data_byvax, state_target_hits\n\n\n\ndef prepare_doses_byvax_data(dfvn, avg_pf_rate, avg_sn_rate, avg_az_rate, pf_dose2_list, sn_dose2_list, az_dose2_list):\n daily_data = []\n last_date = None\n for _, day_row in dfvn[-PERIOD_WINDOW:].iterrows():\n daily_dict = {\n 'date': day_row['date'],\n 'dose1_pfizer': day_row['pfizer1'],\n 'dose1_sino': day_row['sinovac1'],\n 'dose1_astra': day_row['astra1'],\n 'dose1_display': f\"{day_row['daily_partial']:,}\",\n 'dose2_pfizer': day_row['pfizer2'],\n 'dose2_sino': day_row['sinovac2'],\n 'dose2_astra': day_row['astra2'],\n 'dose2_cansino': day_row['cansino'],\n 'dose2_display': f\"{day_row['daily_full']:,}\",\n 'full_display': f\"{day_row['daily']:,}\",\n }\n daily_data.append(daily_dict)\n last_date = day_row.name[0]\n\n for ind in range(7):\n avg_rate_total = int(round(avg_pf_rate + avg_sn_rate + avg_az_rate,0))\n dose2_total = pf_dose2_list[ind] + sn_dose2_list[ind] + az_dose2_list[ind]\n full_total = avg_rate_total + dose2_total\n daily_dict = {\n 'date': (last_date + timedelta(days=ind+1)).strftime(\"%Y-%m-%d\"),\n 'dose1_pfizer': round(avg_pf_rate,0),\n 'dose1_sino': round(avg_sn_rate,0),\n 'dose1_astra': round(avg_az_rate,0),\n 'dose1_display': f\"{avg_rate_total:,}\",\n 'dose2_pfizer': pf_dose2_list[ind],\n 'dose2_sino': sn_dose2_list[ind],\n 'dose2_astra': az_dose2_list[ind],\n 'dose2_cansino': 0, #TODO: include average rate of cansino doses\n 'dose2_display': f\"{dose2_total:,}\",\n 'full_display': f\"{full_total:,}\",\n 'projection': True\n }\n daily_data.append(daily_dict)\n return daily_data\n\n\ndef estimate_complete_by_target(target_pct, target_pop, pfsn_vax_rate, az_vax_rate, current_vax_total, pfsn_dose2_list=[], az_dose2_list=[], start_date=date.today()):\n \"\"\"\n Given target percent, target pop and current progress and rate, \n calculate days remaining to hit target.\n \"\"\"\n target_pop = target_pop*target_pct\n\n az_dose2_list_21 = az_dose2_list[:PFSN_DOSE_INT]\n az_dose2_list_beyond_21 = az_dose2_list[PFSN_DOSE_INT:]\n\n remaining = 0\n days_remaining = 0\n days_remaining_21d_after = 0\n # project 21 days and beyond\n projected_sum_21d = current_vax_total + \\\n sum(pfsn_dose2_list) + sum(az_dose2_list_21)\n days_remaining_21d = 0\n if projected_sum_21d < target_pop:\n days_remaining_21d = PFSN_DOSE_INT\n current_vax_total = projected_sum_21d\n remaining = target_pop - current_vax_total\n else:\n #\n for i in range(PFSN_DOSE_INT):\n current_vax_total = current_vax_total + \\\n pfsn_dose2_list[i] + az_dose2_list[i]\n\n if 
current_vax_total > target_pop:\n break\n else:\n remaining = target_pop - current_vax_total\n days_remaining_21d += 1\n # print(\n # f'Day {days_remaining_21d}: Fully vaxed: {current_vax_total}, remaining {remaining}')\n # print(\n # f'End of {days_remaining_21d} days: Fully vaxed: {current_vax_total}, remaining {remaining}')\n\n # project next 42 days (21 days to 63 days)\n if remaining > 0:\n # target 42 days and beyond\n projected_sum_63d = current_vax_total + \\\n sum(az_dose2_list_beyond_21) + \\\n (pfsn_vax_rate*(AZ_DOSE_INT-PFSN_DOSE_INT))\n days_remaining_21d_after = 0\n if projected_sum_63d < target_pop:\n days_remaining_21d_after = AZ_DOSE_INT-PFSN_DOSE_INT\n current_vax_total = projected_sum_63d\n remaining = target_pop - current_vax_total\n\n # if still remaining beyond 63 days\n if remaining > 0:\n days_remaining = remaining/(pfsn_vax_rate + az_vax_rate)\n remaining = 0\n # print(f'End of {days_remaining+days_remaining_21d+days_remaining_21d_after} days (++ {days_remaining} days): Fully vaxed: {current_vax_total}, remaining {remaining}')\n\n else:\n # target within next 42 days\n for i in range(AZ_DOSE_INT-PFSN_DOSE_INT):\n current_vax_total = current_vax_total + pfsn_vax_rate + \\\n az_dose2_list_beyond_21[i]\n if current_vax_total > target_pop:\n break\n else:\n remaining = target_pop - current_vax_total\n\n days_remaining_21d_after += 1\n # print(\n # f'Day {days_remaining_21d+days_remaining_21d_after}: Fully vaxed: {round(current_vax_total,2)}, pfsn: {round(pfsn_vax_rate,2)}, az2: {round(az_dose2_list_beyond_21[i],2)}, remaining {round(remaining,2)}')\n # print(f'End of {days_remaining_21d+days_remaining_21d_after} days (+ {days_remaining_21d_after} days): Fully vaxed: {current_vax_total}, remaining {remaining}')\n\n days_remaining = days_remaining+days_remaining_21d+days_remaining_21d_after\n target_date = start_date + timedelta(days=days_remaining + 1)\n\n if target_date <= date.today():\n print(f'\\t{bcolors.OKBLUE}Check{bcolors.ENDC}: Target date has passed')\n\n return days_remaining, target_date\n\n\ndef summary_by_state(state_name, dfpop, dfvs, dfrs, pop_level='adult', state_target_hits={}):\n \"\"\"Calculate progress summary and projections by state\"\"\"\n dfr = dfrs.loc[state_name]\n \n if pop_level == 'adult':\n total_pop = dfpop.loc[state_name]['pop_18']\n total_reg = dfr.total - dfr.children\n elif pop_level == 'child':\n total_pop = dfpop.loc[state_name]['pop_12']\n total_reg = dfr.children\n else:\n total_pop = dfpop.loc[state_name]['pop']\n total_reg = dfr.total\n\n # get latest values\n dfv = dfvs.loc[state_name]\n progress_data = {}\n progress_data[pop_level], pfsn_vax_rate, az_vax_rate, pfsn_dose2_list, az_dose2_list, latest_dose2_total = calculate_overall_progress(\n total_pop, total_reg, dfv, pop_level)\n\n # projection_start_date = date.today() + timedelta(AVG_DOSE_INT-1)\n projection_start_date = dfv.date_dt\n\n # build timeline data\n milestones = {}\n milestones[pop_level], herd_date_total, herd_days_total = calculate_milestone_projections(state_name,\n pop_level, total_pop, pfsn_vax_rate, az_vax_rate, latest_dose2_total, pfsn_dose2_list, az_dose2_list, projection_start_date, state_target_hits[state_name])\n\n \n # visualize next 7 days\n first_dose_7d = [round(pfsn_vax_rate + az_vax_rate,0)] * 7\n second_dose_7d = list(np.add(pfsn_dose2_list[:7], az_dose2_list[:7]))\n\n # don't abs this, if passed leave it as negative so frontend can handle\n progress_data[pop_level]['herd_days'] = int(herd_days_total)\n progress_data[pop_level]['herd_date_dp'] = 
herd_date_total.strftime(\n '%d %B %Y')\n\n # build state chart data\n state_chart_data = {\n 'full': progress_data[pop_level]['full'],\n 'full_display': progress_data[pop_level]['full_dp'],\n 'full_count': progress_data[pop_level]['full_count_dp'],\n 'partial': progress_data[pop_level]['partial'],\n 'partial_display': progress_data[pop_level]['partial_dp'],\n 'partial_count': progress_data[pop_level]['partial_count_dp'],\n 'reg': progress_data[pop_level]['reg'],\n 'reg_display': progress_data[pop_level]['reg_dp'],\n 'reg_count': progress_data[pop_level]['reg_count_dp'],\n 'unreg': progress_data[pop_level]['unreg'],\n 'unreg_display': progress_data[pop_level]['unreg_dp'],\n 'unreg_count': progress_data[pop_level]['unreg_count_dp'],\n 'name': state_name,\n 'name_abbr': state_abbr[state_name],\n 'herd_n_days': progress_data[pop_level]['herd_days'],\n 'herd_date_dp': progress_data[pop_level]['herd_date_dp']\n }\n\n # exceed bar charts - fix by chipping the extra off the FULL bar\n sum_pct = sum([state_chart_data['full'], state_chart_data['partial'],\n state_chart_data['reg'], state_chart_data['unreg']])\n if sum_pct > 1.0:\n exceed = sum_pct - 1\n if 'unreg' in state_chart_data.keys() and state_chart_data['unreg'] > 0 and abs(exceed) > 0:\n if state_chart_data['unreg'] > 0 and exceed > state_chart_data['unreg']:\n exceed -= state_chart_data['unreg']\n state_chart_data['unreg'] = 0\n else:\n state_chart_data['unreg'] -= exceed\n exceed = 0\n\n if 'reg' in state_chart_data.keys() and state_chart_data['reg'] > 0 and abs(exceed) > 0:\n if abs(exceed) > state_chart_data['reg']:\n exceed -= state_chart_data['reg']\n state_chart_data['reg'] = 0\n else:\n state_chart_data['reg'] -= exceed\n exceed = 0\n if 'partial' in state_chart_data.keys() and state_chart_data['partial'] > 0 and abs(exceed) > 0:\n if abs(exceed) > state_chart_data['partial']:\n exceed -= state_chart_data['partial']\n state_chart_data['partial'] = 0\n else:\n state_chart_data['partial'] -= exceed\n exceed = 0\n\n if 'full' in state_chart_data.keys() and state_chart_data['full'] > 0 and abs(exceed) > 0:\n if abs(exceed) > state_chart_data['full']:\n exceed -= state_chart_data['full']\n state_chart_data['full'] = 0\n else:\n state_chart_data['full'] -= abs(exceed)\n exceed = 0\n\n sum_pct_new = sum([state_chart_data['full'], state_chart_data['partial'],\n state_chart_data['reg'], state_chart_data['unreg']])\n\n print(f'State chart: {state_name} ori sum_pct {sum_pct} new sum_pct {sum_pct_new}')\n\n return progress_data, milestones, state_chart_data, herd_date_total, first_dose_7d, second_dose_7d\n\n\ndef calculate_overall_progress(total_pop, total_reg, dfvn, pop_level):\n \"\"\"\n State level progress milestones and rates based on latest data.\n Takes in filtered `total_pop` and `total_reg` based on total or adult level.\n Returns: \n progress data dictionary\n projected_dose2_date: \n Average dose 1 rate: projection based on dose 1 + dose interval = future dose 2 rate\n projected_dose2_total:\n Project total of dose 2 in next dose interval days based on dose 1 + latest actual dose 2\n \"\"\"\n # get latest values\n latest_total = dfvn.cumul # total administered\n latest_cansino_cumul = dfvn.cansino2_cumul\n\n # cumul_partial is now unique individuals vaxxed (incl at least dose 1, cansino)\n if pop_level == 'adult':\n latest_dose1_total = dfvn.cumul_partial_adult\n latest_dose2_total = dfvn.cumul_full_adult\n elif pop_level == 'child':\n latest_dose1_total = dfvn.cumul_partial_child\n latest_dose2_total = dfvn.cumul_full_child\n 
else:\n latest_dose1_total = dfvn.cumul_partial\n latest_dose2_total = dfvn.cumul_full\n \n # received only one dose (partially vaxxed) - waiting for 2nd dose \n # cansino gets cancelled out here\n latest_partial_vax = latest_dose1_total - latest_dose2_total \n latest_daily_rate = dfvn.daily\n latest_daily_dose1 = dfvn.daily_partial\n latest_daily_dose2 = dfvn.daily_full\n latest_date = dfvn.index.max()\n\n # boolean to indicate increase or decrease in daily rate\n is_daily_rate_incr = dfvn.is_daily_rate_incr\n latest_rate_per_100 = latest_daily_rate/total_pop*100\n\n projected_dose2_total_list = dfvn.projected_dose2_total_list\n\n avg_dose1_rate = dfvn.avg_dose1_rate\n avg_dose2_rate = dfvn.avg_dose2_rate\n avg_total_rate = dfvn.avg_total_rate\n avg_rate_per_100 = dfvn.avg_total_rate/total_pop*100\n\n is_avg_rate_incr = dfvn.is_avg_rate_incr\n\n # calculating percentages - vax type breakdown pct wrt to dose group\n dose2_pct = latest_dose2_total/total_pop # fully vaxxed\n dose2_pf_pct = dfvn.pfizer2_cumul/latest_dose2_total # fully vaxxed\n dose2_sn_pct = dfvn.sinovac2_cumul/latest_dose2_total # fully vaxxed\n dose2_az_pct = dfvn.astra2_cumul/latest_dose2_total # fully vaxxed\n dose2_cn_pct = dfvn.cansino2_cumul/latest_dose2_total # fully vaxxed\n partial_pct = latest_partial_vax/total_pop # partially vaxxed\n\n # if pop_level == \"adult\":\n # # assuming kids are vax mostly with pfizer\n # partial_pf = dfvn.pfizer1_cumul - dfvn.pfizer2_cumul - dfvn.cumul_partial_child\n # else:\n partial_pf = dfvn.pfizer1_cumul - dfvn.pfizer2_cumul\n\n partial_pf_pct = partial_pf/latest_partial_vax # partially vaxxed\n partial_sn_pct = (dfvn.sinovac1_cumul - dfvn.sinovac2_cumul) / \\\n latest_partial_vax # partially vaxxed\n partial_az_pct = (dfvn.astra1_cumul - dfvn.astra2_cumul) / \\\n latest_partial_vax # partially vaxxed\n\n # percentages wrt full progress bar\n dose2_sn_bar_pct = dfvn.sinovac2_cumul/total_pop\n dose2_pf_bar_pct = dfvn.pfizer2_cumul/total_pop\n dose2_az_bar_pct = dfvn.astra2_cumul/total_pop\n dose2_cn_bar_pct = dfvn.cansino2_cumul/total_pop\n partial_pf_bar_pct = partial_pf / \\\n total_pop\n partial_sn_bar_pct = (dfvn.sinovac1_cumul - dfvn.sinovac2_cumul) / \\\n total_pop\n partial_az_bar_pct = (dfvn.astra1_cumul - dfvn.astra2_cumul) / \\\n total_pop\n\n # registered but unvaccinated\n # this should contrasted from latest cumul_partial\n total_reg_unvaxed = max(total_reg - latest_dose1_total, 0)\n total_reg_unvaxed_pct = max(total_reg_unvaxed/total_pop, 0)\n\n total_unreg = max(total_pop - total_reg, 0)\n total_unreg_pct = max(total_unreg/total_pop, 0)\n\n # adjust for more than 100% - else graphs will break\n partial_pct_disp = None\n sum_pct = sum(\n [dose2_pct, partial_pct, total_reg_unvaxed_pct, total_unreg_pct])\n if sum_pct > 1.0:\n print(f'sum_pct: {sum_pct}')\n # adjust unreg_pct if not zero\n if total_unreg_pct > 0:\n print('\\t Adjusting total_unreg_pct')\n total_unreg_pct = max(total_unreg_pct - (sum_pct - 1.0), 0)\n elif total_reg_unvaxed_pct > 0:\n print('\\t Adjusting total_reg_unvaxed_pct')\n total_reg_unvaxed_pct = total_reg_unvaxed_pct - (sum_pct - 1.0)\n if total_reg_unvaxed_pct < 0:\n diff = abs(total_reg_unvaxed_pct)\n total_reg_unvaxed_pct = 0\n # partial_pct_disp = partial_pct \n # partial_pf_pct_disp = partial_pf_pct*partial_pct_disp\n # partial_sn_pct_disp = partial_sn_pct*partial_pct_disp\n # partial_az_pct_disp = partial_az_pct*partial_pct_disp\n print(\n f'\\tNew sum_pct {sum([dose2_pct, partial_pct, total_reg_unvaxed_pct, total_unreg_pct])}')\n\n # build 
json\n progress_data = {\n 'today_date_dp': dfvn.date_dt.strftime('%d %b'),\n 'total_pop_dp': f'{total_pop:,}',\n\n 'full': dose2_pct,\n 'full_dp': f'{dose2_pct*100:.1f}%',\n 'full_count_dp': f'{latest_dose2_total:,}',\n\n 'full_pf': round(dose2_pf_pct, 3),\n 'full_pf_bar': round(dose2_pf_bar_pct, 3),\n 'full_pf_bar_dp': f'{dose2_pf_bar_pct*100:.1f}%',\n 'full_pf_dp': f'{dose2_pf_pct*100:.1f}%',\n 'full_pf_count_dp': f'{dfvn.pfizer2_cumul:,}',\n\n 'full_sn': round(dose2_sn_pct, 3),\n 'full_sn_bar': round(dose2_sn_bar_pct, 3),\n 'full_sn_bar_dp': f'{dose2_sn_bar_pct*100:.1f}%',\n 'full_sn_dp': f'{dose2_sn_pct*100:.1f}%',\n 'full_sn_count_dp': f'{dfvn.sinovac2_cumul:,}',\n\n 'full_az': round(dose2_az_pct, 3),\n 'full_az_bar': round(dose2_az_bar_pct, 3),\n 'full_az_bar_dp': f'{dose2_az_bar_pct*100:.1f}%',\n 'full_az_dp': f'{dose2_az_pct*100:.1f}%',\n 'full_az_count_dp': f'{dfvn.astra2_cumul:,}',\n\n 'full_cn': round(dose2_cn_pct, 3),\n 'full_cn_bar': round(dose2_cn_bar_pct, 3),\n 'full_cn_bar_dp': f'{dose2_cn_bar_pct*100:.1f}%',\n 'full_cn_dp': f'{dose2_cn_pct*100:.1f}%',\n 'full_cn_count_dp': f'{dfvn.cansino2_cumul:,}',\n\n # if partial_pct_disp is None else partial_pct_disp,\n 'partial': round(partial_pct, 3),\n 'partial_dp': f'{partial_pct*100:.1f}%',\n 'partial_count_dp': f'{latest_partial_vax:,}',\n\n 'partial_pf': round(partial_pf_pct, 3),\n 'partial_pf_bar': round(partial_pf_bar_pct, 3),\n 'partial_pf_bar_dp': f'{partial_pf_bar_pct*100:.1f}%',\n 'partial_pf_dp': f'{partial_pf_pct*100:.1f}%',\n 'partial_pf_count_dp': f'{partial_pf:,}',\n\n 'partial_sn': round(partial_sn_pct, 3),\n 'partial_sn_bar': round(partial_sn_bar_pct, 3),\n 'partial_sn_bar_dp': f'{partial_sn_bar_pct*100:.1f}%',\n 'partial_sn_dp': f'{partial_sn_pct*100:.1f}%',\n 'partial_sn_count_dp': f'{(dfvn.sinovac1_cumul - dfvn.sinovac2_cumul):,}',\n\n 'partial_az': round(partial_az_pct, 3),\n 'partial_az_bar': round(partial_az_bar_pct, 3),\n 'partial_az_bar_dp': f'{partial_az_bar_pct*100:.1f}%',\n 'partial_az_dp': f'{partial_az_pct*100:.1f}%',\n 'partial_az_count_dp': f'{(dfvn.astra1_cumul - dfvn.astra2_cumul):,}',\n\n 'total_count_dp': f'{latest_total:,}',\n 'total_dose1_dp': f'{latest_dose1_total:,}',\n\n 'reg': round(total_reg_unvaxed_pct, 3),\n 'reg_dp': f'{total_reg_unvaxed_pct*100:.1f}%',\n 'reg_count_dp': f'{total_reg_unvaxed:,}',\n 'total_reg_count_dp': f'{total_reg:,}',\n\n 'unreg': round(total_unreg_pct, 3),\n 'unreg_dp': f'{total_unreg_pct*100:.1f}%',\n 'unreg_dp_tw': f'w-[{total_unreg_pct*100:.1f}%]',\n 'unreg_count_dp': f'{total_unreg:,}',\n\n 'rate_latest': f'{latest_daily_rate:,}',\n 'rate_latest_d1': f'{latest_daily_dose1:,}',\n 'rate_latest_d2': f'{latest_daily_dose2:,}',\n 'rate_latest_100': f'{latest_rate_per_100:.1f}',\n 'is_rate_latest_incr': bool(is_daily_rate_incr),\n\n 'rate_avg': f'{int(avg_total_rate):,}',\n 'rate_avg_d1': f'{int(avg_dose1_rate):,}',\n 'rate_avg_d2': f'{int(avg_dose2_rate):,}',\n 'rate_avg_100': f'{avg_rate_per_100:.1f}',\n 'is_rate_avg_incr': bool(is_avg_rate_incr),\n }\n return progress_data, dfvn.avg_pfsn_rate, dfvn.avg_az_rate, dfvn.states_pfsn_dose2_list, dfvn.states_az_dose2_list, latest_dose2_total\n\n\ndef calculate_milestone_projections(state_name, pop_level, total_pop, pfsn_vax_rate, az_vax_rate, latest_dose2_total, pfsn_dose2_list=[], az_dose2_list=[], start_date=datetime.today(), target_hits={}):\n \"\"\"\n Run estimations for each milestone to build timeline data\n Returns estimation projection results for herd target for progress_data\n \"\"\"\n milestones = {} # (days 
remaining, target date, dose2)\n for target in [PHASE2_TARGET_PCT, PHASE3_TARGET_PCT, PHASE4_TARGET_PCT, HERD_TARGET_PCT, NINETY_TARGET_PCT, FULL_TARGET_PCT]:\n if target in target_hits.keys(): # (date hit, dose 2)\n milestones[target] = ((target_hits[target][0] - pd.Timestamp(datetime.today())).days + 1, # 'subtract' one day here as past date + extra hours counted as one day\n target_hits[target][0], int(target_hits[target][1]))\n else:\n # return - (days remaining, target date)\n days_remaining, target_date = estimate_complete_by_target(\n target, total_pop, pfsn_vax_rate, az_vax_rate, latest_dose2_total, pfsn_dose2_list, az_dose2_list, start_date)\n milestones[target] = (days_remaining, target_date, None)\n print(\n f'{milestones[target][0]} days to target {target} ({milestones[target][1]}). ')\n\n # build dict\n milestones_list = [\n {\n 'name': 'begin',\n 'name_display': 'Start',\n 'date': datetime(2021, 2, 24),\n 'x_pct': '20%', # fixed\n 'x_pct_ori': 0.0,\n 'n_days': abs(datetime.today() - datetime(2021, 2, 24)).days,\n 'n_count': 0,\n },\n {\n 'name': '20pct',\n 'name_display': '20%',\n 'date': milestones[PHASE2_TARGET_PCT][1],\n 'x_pct_ori': 0.2,\n 'n_days': int(milestones[PHASE2_TARGET_PCT][0]),\n 'n_count': milestones[PHASE2_TARGET_PCT][2]\n # 'n_count': \"3,190,789\", source: https://www.theedgemarkets.com/article/ten-cent-population-fully-vaccinated-%E2%80%94-khairy\n },\n {\n 'name': '40pct',\n 'name_display': '40%',\n 'date': milestones[PHASE3_TARGET_PCT][1],\n 'x_pct_ori': 0.4,\n 'n_days': int(milestones[PHASE3_TARGET_PCT][0]),\n 'n_count': milestones[PHASE3_TARGET_PCT][2]\n },\n {\n 'name': '60pct',\n 'name_display': '60%',\n 'date': milestones[PHASE4_TARGET_PCT][1],\n 'x_pct_ori': 0.6,\n 'n_days': int(milestones[PHASE4_TARGET_PCT][0]),\n 'n_count': milestones[PHASE4_TARGET_PCT][2]\n },\n {\n 'name': '80pct',\n 'name_display': '80%',\n 'date': milestones[HERD_TARGET_PCT][1],\n 'x_pct_ori': 0.8,\n 'n_days': int(milestones[HERD_TARGET_PCT][0]),\n 'n_count': milestones[HERD_TARGET_PCT][2]\n }\n ]\n if pop_level == 'adult':\n milestones_list.append({\n 'name': '90pct',\n 'name_display': '90%',\n 'date': milestones[NINETY_TARGET_PCT][1],\n 'x_pct_ori': 0.9,\n 'n_days': int(milestones[NINETY_TARGET_PCT][0]),\n 'n_count': milestones[NINETY_TARGET_PCT][2]\n })\n\n # calculate timeline data for drawing\n # length of full timeline in days VERSION 2\n max_date = milestones[FULL_TARGET_PCT][1]\n min_date = milestones[PHASE2_TARGET_PCT][1]\n full_date_range = milestones[FULL_TARGET_PCT][1] - min_date\n past_range = datetime.today() - min_date # left of timeline\n\n def scale_to_range(num, target_min, target_max, ori_min, ori_max):\n return (target_max - target_min) * (num - ori_min) / (ori_max - ori_min) + target_min\n\n # post processing milestones\n for ind, milestone in enumerate(milestones_list):\n milestones_list[ind]['date_display'] = milestone['date'].strftime(\n '%d %b')\n\n if milestone['name'] == 'begin':\n pct = 0.2\n else:\n days_since_min_date = (milestone['date'] - min_date).days\n if datetime.today() < milestone['date']:\n # future - RHS of timeline scale to 50%-80%\n pct = scale_to_range(days_since_min_date, 0.5,\n 0.8, past_range.days, full_date_range.days)\n else:\n # past - LHS of timeline scale to 30%-49%\n pct = scale_to_range(days_since_min_date, 0.3,\n 0.5, 0, past_range.days)\n\n milestones_list[ind]['x_pct'] = f'{pct*100:.1f}%'\n milestones_list[ind]['x_pct_val'] = round(pct, 1)\n\n milestones_list[ind]['has_past'] = datetime.today(\n ) >= milestone['date']\n\n 
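# scale_to_range() above is a plain linear map; e.g. scale_to_range(7, 0.5, 0.8, 5, 20)\n        # = (0.8 - 0.5) * (7 - 5) / (20 - 5) + 0.5 = 0.54, i.e. a milestone 7 days past\n        # min_date on a 5..20-day original range sits 54% of the way along the drawn timeline.\n        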
if milestones_list[ind]['n_count'] is None:\n # unreached milestones - calculate target population\n milestones_list[ind]['n_count'] = int(\n milestone['x_pct_ori']*total_pop)\n milestones_list[ind]['n_count'] = f\"{milestones_list[ind]['n_count']:,}\"\n\n milestones_list[ind]['n_days'] = abs(milestones_list[ind]['n_days'])\n del milestones_list[ind]['date']\n\n if milestones_list[4]['has_past']:\n # if 80pct target reached, remove 40% 60%\n del milestones_list[2:4]\n\n # if pop_level == 'adult' and state_name in ['Malaysia', 'Negri Sembilan', 'Melaka', 'Klang Valley', 'Terrengganu', 'Perlis','W.P. Labuan']:\n # return milestones_list, milestones[NINETY_TARGET_PCT][1], milestones[NINETY_TARGET_PCT][0]\n # else:\n return milestones_list, milestones[HERD_TARGET_PCT][1], milestones[HERD_TARGET_PCT][0] \n\n\nif __name__ == \"__main__\":\n\n # prepare population data\n dfpop = pd.read_csv(static_pop)\n # create klang valley population\n kv_pop = dfpop[(dfpop.state == 'Selangor') | (\n dfpop.state == 'W.P. Kuala Lumpur') | (dfpop.state == 'W.P. Putrajaya')].sum()\n kv_pop.state = 'Klang Valley'\n kv_pop.name = 17\n dfpop = dfpop.append(kv_pop)\n dfpop.set_index('state', inplace=True)\n\n # preprocess vax and reg CSVs\n latest_dfv, state_doses_data_byvax, state_target_hits = preprocess_csv(\n vax_national_csv, vax_state_csv, dfpop)\n latest_dfr, _, _ = preprocess_csv(\n reg_national_csv, reg_state_csv, dfpop)\n\n # START BUILDING JSON DATA\n data_levels = ['total', 'adult']\n state_charts_data = {}\n top_states_data = {}\n by_state_data = {}\n for pop_level in data_levels:\n # PROCESS ALL STATES\n states_list = []\n state_charts_data[pop_level] = []\n for state_name, _ in latest_dfv.iterrows():\n print(\n f'Processing state: {bcolors.WARNING}{state_name} ({pop_level}){bcolors.ENDC}')\n by_state_data[state_name] = by_state_data.get(state_name, {})\n progress_data, milestones_data, state_chart_data, herd_date, first_dose_7d, second_dose_7d = summary_by_state(\n state_name, dfpop, latest_dfv, latest_dfr, pop_level, state_target_hits[pop_level])\n\n if state_name != \"Malaysia\":\n state_charts_data[pop_level].append(state_chart_data)\n by_state_data[state_name]['progress'] = by_state_data[state_name].get(\n 'progress', {})\n by_state_data[state_name]['progress'].update(progress_data)\n\n by_state_data[state_name]['doses_byvax'] = by_state_data[state_name].get('doses_byvax', {\n })\n by_state_data[state_name]['doses_byvax'] = state_doses_data_byvax[state_name]\n\n by_state_data[state_name]['timeline'] = by_state_data[state_name].get(\n 'timeline', {})\n by_state_data[state_name]['timeline'].update(milestones_data)\n\n if int(progress_data[pop_level]['herd_days']) >= 0 and state_name != 'Malaysia':\n states_list.append({'name': state_name, 'herd_n_days': progress_data[pop_level]['herd_days'], 'herd_date_dp': herd_date.strftime(\n '%d %b')}) # for top states\n\n # sort state_charts_data\n state_charts_data[pop_level] = sorted(\n state_charts_data[pop_level], key=lambda state_chart: state_chart['full'], reverse=True)\n # sort top states data\n top_states_data[pop_level] = sorted(\n states_list, key=lambda state: state['herd_n_days'])[:5]\n\n all_data = {\n 'by_state': by_state_data, # combined progress, timeline, doses\n 'top_states': top_states_data,\n 'state': state_charts_data\n }\n\n with open(DATA_EXPORT_PATH, 'w') as fp:\n json.dump(all_data, 
fp)\n","repo_name":"shenghann/my-vax-progress","sub_path":"loader/scriptv2.py","file_name":"scriptv2.py","file_ext":"py","file_size_in_byte":38086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27103519087","text":"import argparse\nimport os\nimport sys\nimport os.path as osp\nimport subprocess\nsys.path.append('./')\nimport pickle\nimport cv2\nimport time\t\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom easydict import EasyDict as edict\nfrom torchvision import transforms as T\nfrom torchvision.models.detection import fasterrcnn_resnet50_fpn\n\nfrom hybrik.models import builder\nfrom hybrik.utils.config import update_config\nfrom hybrik.utils.presets import SimpleTransform3DSMPLCam\nfrom hybrik.utils.render import SMPLRenderer\nfrom hybrik.utils.render_pytorch3d import render_mesh\nfrom hybrik.utils.vis import get_max_iou_box, get_one_box, vis_2d, vis_smpl_3d\n\ndet_transform = T.Compose([T.ToTensor()])\ntorch.set_grad_enabled(False)\n\n# Index pairs representing connections between joints\nbones_jts_29 = np.array([ \n [0, 3],\t # pelvis -> spine1 # Color 1 (verde) [self.JOINT_NAMES.index('pelvis'),self.JOINT_NAMES.index('spine1')\n [3, 6],\t # spine1 -> spine2\n [6, 9], \t# spine2 -> spine3\n [9, 12],\t# spine3 -> neck\n [12, 15],\t# neck -> jaw\n # [15, 24],\t# jaw -> head \n [0, 1], \t# pelvis -> left_hip # Color 2.1 (azul)\n [1, 4],\t # left_hip -> left_knee\n [4, 7],\t # left_knee -> left_ankle\n [7, 10],\t# left_ankle -> left_foot\n # [10, 27],\t# left_foot -> left_bigtoe\n [9, 13],\t# spine3 -> left_collar # Color 2.2 (azul)\n [13, 16],\t# left_collar -> left_shoulder\n [16, 18],\t# left_shoulder -> left_elbow\n [18, 20],\t# left_elbow -> left_wrist\n [20, 22],\t# left_wrist -> left_thumb \n # [22, 25], # left_thumb -> left_middle\n [0, 2],\t # pelvis -> right_hip # Color 3.1 (vermelho)\n [2, 5],\t # right_hip -> right_knee\n [5, 8],\t # right_knee -> right_ankle\n [8, 11],\t# right_ankle -> right_foot\n # [11, 28],\t# right_foot -> right_bigtoe\n [9, 14],\t# spine3 -> right_collar # Color 3.2 (vermelho)\n [14, 17],\t# right_collar -> right_shoulder\n [17, 19],\t# right_shoulder -> right_elbow\n [19, 21],\t# right_elbow -> right_wrist\n [21, 23],\t# right_wrist -> right_thumb \n # [23, 26],\t# right_thumb -> right_middle \n])\ncolors = ['green'] * 5 + ['blue'] * 9 + ['red'] * 9\n\ndef xyxy2xywh(bbox):\n x1, y1, x2, y2 = bbox\n\n cx = (x1 + x2) / 2\n cy = (y1 + y2) / 2\n w = x2 - x1\n h = y2 - y1\n return [cx, cy, w, h]\n\ndef cxcywh2xyxy(bbox):\n\n cx, cy, w, h = bbox\n x1 = cx - w/2\n y1 = cy - h/2\n x2 = cx + w/2\n y2 = cy + h/2\n \n return [x1, y1, x2, y2]\n\n\ndef images_to_video(img_dir, out_path, img_fmt=\"%06d.jpg\", fps=30, crf=25, verbose=True):\n os.makedirs(osp.dirname(out_path), exist_ok=True)\n FFMPEG_PATH = '/usr/bin/ffmpeg' if osp.exists('/usr/bin/ffmpeg') else 'ffmpeg'\n cmd = [FFMPEG_PATH, '-y', '-r', f'{fps}', '-f', 'image2', '-start_number', '1',\n '-i', f'{img_dir}/{img_fmt}', '-vcodec', 'libx264', '-crf', f'{crf}', '-pix_fmt', 'yuv420p', out_path]\n if not verbose:\n cmd += ['-hide_banner', '-loglevel', 'error']\n subprocess.run(cmd)\n\nparser = argparse.ArgumentParser(description='HybrIK Demo')\n\nparser.add_argument('--gpu',\n help='gpu',\n default=0,\n type=int)\nparser.add_argument('--multi',\n help='multi-person',\n default=0,\n type=int)\nparser.add_argument('--img_folder',\n help='images path ',\n 
default='out/glamr_static/workout_5s/pose_est/frames',\n type=str)\nparser.add_argument('--out_dir',\n help='output folder',\n default='out/glamr_static/workout_5s/pose_est',\n type=str)\nparser.add_argument('--MPT_method',\n help='strongsort, deepocsort, ocsort, bytetrack, botsort or sort (original)',\n default='ocsort',\n type=str)\nparser.add_argument('--person_detection_method',\n help='strongsort, deepocsort, ocsort, bytetrack, botsort or fasterrcnn_resnet50 (original)',\n default='ocsort',\n type=str)\nparser.add_argument('--high_performance', \n help='dont plot 2D poses and dont reconstruct mesh', \n default=False)\n\nopt = parser.parse_args()\n\n\ncfg_file = 'configs/256x192_adam_lr1e-3-hrw48_cam_2x_w_pw3d_3dhp.yaml'\n# cfg_file = 'configs/256x192_adam_lr1e-3-hrw48_cam_2x_wo_pw3d.yaml'\nCKPT = './pretrained_models/hybrik_hrnet48_w3dpw.pth'\n# CKPT = './pretrained_models/hybrik_hrnet48_wo3dpw.pth'\ncfg = update_config(cfg_file)\n\nbbox_3d_shape = getattr(cfg.MODEL, 'BBOX_3D_SHAPE', (2000, 2000, 2000))\nbbox_3d_shape = [item * 1e-3 for item in bbox_3d_shape]\ndummpy_set = edict({\n 'joint_pairs_17': None,\n 'joint_pairs_24': None,\n 'joint_pairs_29': None,\n 'bbox_3d_shape': bbox_3d_shape\n})\n\ntransformation = SimpleTransform3DSMPLCam(\n dummpy_set, scale_factor=cfg.DATASET.SCALE_FACTOR,\n color_factor=cfg.DATASET.COLOR_FACTOR,\n occlusion=cfg.DATASET.OCCLUSION,\n input_size=cfg.MODEL.IMAGE_SIZE,\n output_size=cfg.MODEL.HEATMAP_SIZE,\n depth_dim=cfg.MODEL.EXTRA.DEPTH_DIM,\n bbox_3d_shape=bbox_3d_shape,\n rot=cfg.DATASET.ROT_FACTOR, sigma=cfg.MODEL.EXTRA.SIGMA,\n train=False, add_dpg=False,\n loss_type=cfg.LOSS['TYPE'])\n\nhybrik_model = builder.build_sppe(cfg.MODEL)\n\nprint(f'Loading model from {CKPT}...')\nsave_dict = torch.load(CKPT, map_location='cpu')\nif type(save_dict) == dict:\n model_dict = save_dict['model']\n hybrik_model.load_state_dict(model_dict)\nelse:\n hybrik_model.load_state_dict(save_dict)\n\nhybrik_model.cuda(opt.gpu)\nhybrik_model.eval()\n\nres_images_path = os.path.join(opt.out_dir, 'res_images')\nres_2D_poses_images_path = os.path.join(opt.out_dir, 'res_2D_poses_images')\nres_3D_poses_images_path = os.path.join(opt.out_dir, 'res_3D_poses_images')\n\nos.makedirs(res_images_path, exist_ok=True)\nos.makedirs(res_2D_poses_images_path, exist_ok=True)\nos.makedirs(res_3D_poses_images_path, exist_ok=True)\n\n\nfiles = os.listdir(f'{opt.img_folder}')\nfiles.sort()\n\nimg_path_list = []\nfor file in tqdm(files):\n if not os.path.isdir(file) and file[-4:] in ['.jpg', '.png']:\n img_path = os.path.join(opt.img_folder, file)\n img_path_list.append(img_path)\n\nif opt.multi:\n\n if torch.cuda.is_available():\n device = torch.device('cuda:{}'.format(opt.gpu)) \n print(f'-> {torch.cuda.get_device_name(0)} available!\\nRunning inference on GPU :) ')\n else:\n device = torch.device('cpu') \n print(f\"-> NVIDIA not available! 
-> Running inference on CPU :'(\")\n\n    if opt.MPT_method == \"sort\":\n        # load multi-person tracking model\n        # mot = MPT(\n        #     device=device,\n        #     batch_size=4,\n        #     display=False,\n        #     detection_threshold=0.7,\n        #     detector_type='yolo',\n        #     output_format='dict',\n        #     yolo_img_size=416,\n        # )\n        # print('\\n### Run MPT...')\n        # tracking_results = mot(opt.img_folder)\n        # offset_frames=0\n        print(\"Sort is not implemented; choose another method (e.g., ocsort)\")\n    else: # strongsort, deepocsort, ocsort, bytetrack or botsort\n        # from src.MPT.posprocess_human_track import convert_track_info\n        track_pkl_file = os.path.join(opt.out_dir, 'track')\n        tracking_results = pickle.load(open(f'{track_pkl_file}/mpt.pkl', 'rb'))\n        if opt.MPT_method == 'strongsort':\n            offset_frames = 2 # Required to initialize the model: https://github.com/mikel-brostrom/yolo_tracking/issues/379 https://github.com/mikel-brostrom/yolo_tracking/blob/8885642c9d049c933c6e1df1d05478dab4a0c37c/deep_sort/configs/deep_sort.yaml#L6\n            img_1 = cv2.imread(img_path_list[0]) # See file: src/yolo_tracking/boxmot/strongsort/configs/strongsort.yaml\n            img_2 = cv2.imread(img_path_list[1])\n            text = f'Model initialization ({opt.MPT_method}): frame 1'\n            textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]\n            textX = (img_1.shape[1] - textsize[0]) / 2\n            textY = (img_1.shape[0]/5 - textsize[1])\n            cv2.putText(img_1, text, (int(textX), int(textY)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n            cv2.putText(img_2, f'Model initialization ({opt.MPT_method}): frame 2', (int(textX), int(textY)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n            cv2.putText(img_1, text, (int(textX+2), int(textY+2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n            cv2.putText(img_2, f'Model initialization ({opt.MPT_method}): frame 2', (int(textX+2), int(textY+2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n            save_dirs = [res_images_path, res_2D_poses_images_path] # res_3D_poses_images_path\n            for save_dir in save_dirs:\n                img_1_path = os.path.join(save_dir, '000000.jpg')\n                img_2_path = os.path.join(save_dir, '000001.jpg')\n                cv2.imwrite(img_1_path, img_1)\n                cv2.imwrite(img_2_path, img_2)\n        else:\n            offset_frames = 0\n\n    detection_all = defaultdict(dict)\n    for person_id in tracking_results: # Cycle through all detected people\n        frames_ids = tracking_results[person_id]['frames']\n        for idx in range(len(frames_ids)): # Cycle through all frames that detect the same person -> Conversion from cx,cy,w,h to x1,y1,x2,y2\n            frames_id = frames_ids[idx]\n            cx, cy, w, h = tracking_results[person_id]['bbox'][idx]\n            x1, y1, x2, y2 = max(0, cx-w//2), max(0, cy-h//2), cx+w//2, cy+h//2\n            detection_all[frames_id][person_id-1] = [x1, y1, x2, y2] # detection_all -> frame -> person ID -> person bbox\n\n    out_dict = defaultdict(lambda: defaultdict(list)) # Dictionary of lists (index must be int)\n    bbox_exist = defaultdict(list) # person id -> per-frame existence flags\n    bboxes = defaultdict(list)\n    poses_dict = defaultdict(lambda: defaultdict(list))\n\n    time_oper_by_person = []\n    averages_times_by_frame = []\n    cnt_num_persons = 0 # number of people in a given frame\n\n    # initialize\n    for person_id in tracking_results:\n        bbox_exist[person_id-1] = [0 for _ in range(len(img_path_list))]\n\n    #####################\n\n    print('\\n### Run HybrIK (HRnet-48) multi person...')\n    for frame_idx in tqdm(range(len(img_path_list)-offset_frames)):\n\n        frame_idx += offset_frames\n        img_path = img_path_list[frame_idx]\n        # dirname = os.path.dirname(img_path)\n        # basename = os.path.basename(img_path)\n\n        input_image = 
cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)\n        image = input_image.copy()\n        image_pose_2d = input_image.copy()\n\n        if frame_idx in detection_all:\n            # For each detected person, starting from 0,1,...\n            for idx in detection_all[frame_idx]:\n                tight_bbox = detection_all[frame_idx][idx]\n                bbox_exist[idx][frame_idx] = 1.0\n                cnt_num_persons += 1 # increases the number of people detected\n\n                # Run HybrIK\n                pose_input, bbox, img_center = transformation.test_transform(input_image, tight_bbox) # Used to prepare HybrIK model input\n                pose_input = pose_input.to(opt.gpu)[None, :, :, :]\n                start_timer = time.time()\n                pose_output = hybrik_model( # ModelOutput = namedtuple(typename='ModelOutput',\n                    pose_input, flip_test=True, # field_names=['pred_shape', 'pred_theta_mats', 'pred_phi', 'pred_delta_shape', 'pred_leaf',\n                    bboxes=torch.from_numpy(np.array(bbox)).to(pose_input.device).unsqueeze(0).float(), # 'pred_uvd_jts', 'pred_xyz_jts_29', 'pred_xyz_jts_24', 'pred_xyz_jts_24_struct',\n                    img_center=torch.from_numpy(img_center).to(pose_input.device).unsqueeze(0).float() # 'pred_xyz_jts_17', 'pred_vertices', 'maxvals', 'cam_scale', 'cam_trans', 'cam_root',\n                )\n                time_oper_by_person.append((time.time()-start_timer)) # 'uvd_heatmap', 'transl', 'img_feat']\n                uv_3D_29 = pose_output.pred_uvd_jts.reshape(29, 3)\n                uv_29 = uv_3D_29[:, :2] # Keep only the (x, y) coordinates of the 29 joints from the 29x3 (x, y, z) matrix pose_output.pred_uvd_jts\n\n                # Convert poses to image and save to figure\n\n                # Visualization\n                img_size = (image.shape[0], image.shape[1])\n                focal = np.array([1000, 1000])\n                bbox_xywh = xyxy2xywh(bbox)\n                # princpt = [bbox_xywh[0], bbox_xywh[1]]\n                princpt = [img_center[0], img_center[1]]\n                # SMPL renderer built from the camera principal point, mesh faces and focal length\n                renderer = SMPLRenderer(faces=hybrik_model.smpl.faces,\n                                        img_size=img_size, focal=focal,\n                                        princpt=princpt)\n                transl = pose_output.transl.detach()\n                transl_camsys = transl.clone()\n                transl_camsys = transl_camsys * 256 / bbox_xywh[2] # Attention here, new transl_camsys nomenclature\n                transl = pose_output.transl.detach().cpu().numpy().squeeze()\n                transl[2] = transl[2] * 256 / bbox_xywh[2]\n\n                # vis 3d\n                # res_path = os.path.join(res_3D_poses_images_path, f'{frame_idx:06d}.jpg')\n                pts_3D = uv_3D_29 * bbox_xywh[2]\n                pts_3D[:, 0] = pts_3D[:, 0] + bbox_xywh[0] # shift x from bbox\n                pts_3D[:, 1] = pts_3D[:, 1] + bbox_xywh[1] # shift y from bbox\n                # draw_3D_skeleton(pts_3D, res_path, bones = bones_jts_29, colors = colors, show_image = True)\n\n                # vis 2d\n                pts = uv_29 * bbox_xywh[2]\n                pts[:, 0] = pts[:, 0] + bbox_xywh[0] # shift x from bbox\n                pts[:, 1] = pts[:, 1] + bbox_xywh[1] # shift y from bbox\n\n                bboxes[idx].append(np.array(bbox_xywh))\n\n                image_pose_2d, tamanho_texto, espessura = vis_2d(image_pose_2d, tight_bbox, pts, idx,\n                                                                 bones=bones_jts_29, extended=True)\n\n                # Generate image with smpl rendering of human in 2D image\n                image = vis_smpl_3d(\n                    pose_output, image, cam_root=transl, bbox_xywh=bbox_xywh,\n                    f=focal, c=princpt, renderer=renderer, color_id=idx)\n                cv2.putText(image, f'{idx}', (int(pts[24][0])-5, int(pts[24][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, tamanho_texto, (0, 0, 0), espessura)\n                cv2.putText(image, f'{idx}', (int(pts[24][0])-4, int(pts[24][1]) - 14), cv2.FONT_HERSHEY_SIMPLEX, tamanho_texto, (255, 255, 255), espessura)\n\n                new_princpt = np.array([image.shape[1], image.shape[0]]) * 0.5\n                transl[:2] += (np.array(princpt) - new_princpt) * transl[2] / np.array(focal)\n                
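# Pinhole model: u = f * x / z + c, so moving the principal point from c to the\n                # image center c' while keeping the same pixel location requires shifting the\n                # root translation laterally by (c - c') * z / f, which is what the line above does.\n                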
princpt = new_princpt\n\n                # save to dict\n                K = np.eye(3)\n                K[[0, 1], [0, 1]] = focal\n                K[:2, 2] = princpt\n                out_dict[idx]['smpl_pose_quat_wroot'].append(pose_output.pred_theta_mats[0].cpu().numpy().reshape(-1, 4)) # Joint rotations stored as quaternions, not rotation matrices\n                out_dict[idx]['smpl_beta'].append(pose_output.pred_shape[0].cpu().numpy())\n                out_dict[idx]['root_trans'].append(transl)\n                out_dict[idx]['kp_2d'].append(pts.cpu().numpy())\n                out_dict[idx]['cam_K'].append(K.astype(np.float32))\n                # Added to save data rendered by hybrik\n                out_dict[idx]['kp_3d'].append(pts_3D.cpu().numpy()) # KEYPOINTS 3D (pixel size)\n                # out_dict[idx]['uv_3D_29'].append(uv_3D_29.cpu().numpy()) # KEYPOINTS 3D (camera reference frame)\n                # out_dict[idx]['heatmaps'].append(pose_output.uvd_heatmap.cpu().numpy())\n                # out_dict[idx]['renderer_hybrik_faces'].append(renderer.faces)\n                # out_dict[idx]['renderer_hybrik_focal'].append(renderer.focal)\n                # out_dict[idx]['renderer_hybrik_h'].append(renderer.h)\n                # out_dict[idx]['renderer_hybrik_w'].append(renderer.w)\n                # out_dict[idx]['renderer_hybrik_princpt'].append(renderer.princpt)\n                # out_dict[idx]['renderer_hybrik_vertices_mesh'].append(pose_output.pred_vertices.detach().cpu().numpy().squeeze()) # 6890 vertices generated in the hybrik smpl mesh (ready to use in vis_smpl_3d() and replace the first line of code in that function)\n\n            averages_times_by_frame.append((sum(time_oper_by_person) / cnt_num_persons)) # append the average per-person time for this frame (accounts for the number of people detected)\n            cnt_num_persons = 0 # resets number of people counter\n            time_oper_by_person = [] # resets the timer for each person\n\n        image_vis = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n        image_pose_2d = cv2.cvtColor(image_pose_2d, cv2.COLOR_RGB2BGR)\n\n        res_path = os.path.join(res_images_path, f'{frame_idx+1:06d}.jpg')\n        cv2.imwrite(res_path, image_vis)\n        res_path = os.path.join(res_2D_poses_images_path, f'{frame_idx+1:06d}.jpg')\n        cv2.imwrite(res_path, image_pose_2d)\n\n    if len(averages_times_by_frame) > 1:\n        averages_times_by_frame[0] = averages_times_by_frame[1]\n    average_time_by_frame = sum(averages_times_by_frame) / (len(img_path_list)-offset_frames)\n\n    mot_bboxes = defaultdict(dict)\n    for idx in bbox_exist:\n        mot_bboxes[idx]['id'] = idx\n        mot_bboxes[idx]['bbox'] = np.stack(bboxes[idx])\n        mot_bboxes[idx]['exist'] = np.array(bbox_exist[idx])\n\n        find = np.where(mot_bboxes[idx]['exist'])[0]\n        mot_bboxes[idx]['id'] = idx\n        mot_bboxes[idx]['start'] = find[0]\n        mot_bboxes[idx]['end'] = find[-1]\n        mot_bboxes[idx]['num_frames'] = mot_bboxes[idx]['exist'].sum()\n        mot_bboxes[idx]['exist_frames'] = find\n\n    for idx, pose_dict in out_dict.items():\n        for key in pose_dict.keys():\n            pose_dict[key] = np.stack(pose_dict[key])\n        pose_dict['frames'] = mot_bboxes[idx]['exist_frames'] # out_dict[idx]['frames']\n        pose_dict['frame2ind'] = {f: i for i, f in enumerate(pose_dict['frames'])} # out_dict[idx]['frame2ind']\n        pose_dict['bboxes_dict'] = mot_bboxes[idx]\n\n    #####################\n\nelse:\n    # load detection model\n    if opt.person_detection_method == \"fasterrcnn_resnet50\":\n        det_model = fasterrcnn_resnet50_fpn(pretrained=True)\n        det_model.cuda(opt.gpu)\n        det_model.eval()\n    print('\\n### Run HybrIK (HRnet-48) single person...')\n\n    prev_box = None\n    # renderer = None\n    out_dict = defaultdict(lambda: defaultdict(list))\n    idx = 0 # single person id\n\n    # frame_idx = 0\n\n    bbox_exist = []\n    bboxes = []\n    time_oper = []\n    # smpl_faces = torch.from_numpy(hybrik_model.smpl.faces.astype(np.int32))\n\n    
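# When an external tracker is selected, per-frame boxes come from the precomputed\n    # mpt.pkl in (cx, cy, w, h) form and are converted with cxcywh2xyxy() below,\n    # e.g. cxcywh2xyxy([50, 40, 20, 10]) -> [40.0, 35.0, 60.0, 45.0].\n    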
if not opt.person_detection_method==\"fasterrcnn_resnet50\": \n track_pkl_file = os.path.join(opt.out_dir, 'track') \n tracking_results = pickle.load(open(f'{track_pkl_file}/mpt.pkl', 'rb'))\n\n for frame_idx,img_path in enumerate(tqdm(img_path_list)):\n\n with torch.no_grad():\n input_image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)\n if opt.person_detection_method==\"fasterrcnn_resnet50\":\n # dirname = os.path.dirname(img_path)\n # basename = os.path.basename(img_path)\n # Run Detection\n det_input = det_transform(input_image).to(opt.gpu)\n det_output = det_model([det_input])[0]\n\n if prev_box is None:\n tight_bbox = get_one_box(det_output) # xyxy\n if tight_bbox is None:\n continue\n else:\n tight_bbox = get_max_iou_box(det_output, prev_box) # xyxy\n\n if tight_bbox is None:\n bbox_exist.append(0.0)\n continue\n else:\n bbox_exist.append(1.0)\n else: \n # track_pkl_file = os.path.join(opt.out_dir, 'track')\n # tracking_results = pickle.load(open(f'{track_pkl_file}/mpt.pkl', 'rb'))\n if frame_idx in tracking_results[1]['frames']:\n bbox_idx = np.where(np.array(tracking_results[1]['frames']) == frame_idx)[0]\n tight_bbox = cxcywh2xyxy(tracking_results[1]['bbox'][bbox_idx[0]])\n bbox_exist.append(1.0)\n else:\n bbox_exist.append(0.0)\n tight_bbox= None\n res_path = os.path.join(res_2D_poses_images_path, f'{frame_idx+1:06d}.jpg')\n cv2.imwrite(res_path, cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))\n res_path = os.path.join(res_images_path, f'{frame_idx+1:06d}.jpg')\n cv2.imwrite(res_path, cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))\n continue\n\n prev_box = tight_bbox\n\n # Run HybrIK\n # bbox: [x1, y1, x2, y2]\n pose_input, bbox, img_center = transformation.test_transform(input_image, tight_bbox)\n pose_input = pose_input.to(opt.gpu)[None, :, :, :] # pose_output with structure defined at the end of def forward(self, x, flip_item=None, flip_output=False, **kwargs)\n start_timer = time.time()\n pose_output = hybrik_model( # ModelOutput = namedtuple(typename='ModelOutput',\n pose_input, flip_test=True, # field_names=['pred_shape', 'pred_theta_mats', 'pred_phi', 'pred_delta_shape', 'pred_leaf',\n bboxes=torch.from_numpy(np.array(bbox)).to(pose_input.device).unsqueeze(0).float(), # 'pred_uvd_jts', 'pred_xyz_jts_29', 'pred_xyz_jts_24', 'pred_xyz_jts_24_struct',\n img_center=torch.from_numpy(img_center).to(pose_input.device).unsqueeze(0).float() # 'pred_xyz_jts_17', 'pred_vertices', 'maxvals', 'cam_scale', 'cam_trans', 'cam_root',\n )\n time_oper.append((time.time()-start_timer)) # 'uvd_heatmap', 'transl', 'img_feat'] \n uv_3D_29 = pose_output.pred_uvd_jts.reshape(29, 3)\n uv_29 = uv_3D_29[:, :2]\n \n # Convert poses to image and save to figure\n\n # Visualization\n image = input_image.copy()\n image_pose_2d = input_image.copy()\n img_size = (image.shape[0], image.shape[1])\n focal = np.array([1000, 1000])\n bbox_xywh = xyxy2xywh(bbox)\n # princpt = [bbox_xywh[0], bbox_xywh[1]]\n princpt = [img_center[0], img_center[1]]\n # focal = focal / 256 * bbox_xywh[2]\n # focal_length=((2 * focal_length / min(height, width), 2 * focal_length / min(height, width)),),\n transl = pose_output.transl.detach()\n transl_camsys = transl.clone()\n transl_camsys = transl_camsys * 256 / bbox_xywh[2] # Attention here, new transl_camsys nomenclature\n\n # SMPL Render from main point, faces and camera focal length\n renderer = SMPLRenderer(faces=hybrik_model.smpl.faces,\n img_size=img_size, focal=focal,\n princpt=princpt)\n\n transl = pose_output.transl.detach().cpu().numpy().squeeze()\n transl[2] = 
transl[2] * 256 / bbox_xywh[2]\n\n # frame_idx += 1\n\n pts = uv_29 * bbox_xywh[2]\n pts[:, 0] = pts[:, 0] + bbox_xywh[0] # shift x from bbox\n pts[:, 1] = pts[:, 1] + bbox_xywh[1] # shift y from bbox\n\n bboxes.append(np.array(bbox_xywh))\n bbox_img, tamanho_texto, espessura = vis_2d(image, tight_bbox, pts, idx, bones = bones_jts_29, extended=False) \n bbox_img = cv2.cvtColor(bbox_img, cv2.COLOR_RGB2BGR) \n res_path = os.path.join(res_2D_poses_images_path, f'{frame_idx+1:06d}.jpg')\n cv2.imwrite(res_path, bbox_img)\n\n image_vis = vis_smpl_3d(\n pose_output, image_pose_2d, cam_root=transl, bbox_xywh=bbox_xywh,\n f=focal, c=princpt, renderer=renderer)\n cv2.putText(image_vis, f'{idx}', (int(pts[24][0])-15, int(pts[24][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, tamanho_texto, (0, 0, 0), espessura)\n cv2.putText(image_vis, f'{idx}', (int(pts[24][0])-14, int(pts[24][1]) - 14), cv2.FONT_HERSHEY_SIMPLEX, tamanho_texto, (255, 255, 255), espessura)\n image_vis = cv2.cvtColor(image_vis, cv2.COLOR_RGB2BGR)\n res_path = os.path.join(res_images_path, f'{frame_idx+1:06d}.jpg')\n cv2.imwrite(res_path, image_vis)\n\n # vertices = pose_output.pred_vertices.detach()\n # vis 3d \n # res_path = os.path.join(res_3D_poses_images_path, f'{frame_idx:06d}.jpg')\n pts_3D = uv_3D_29 * bbox_xywh[2]\n pts_3D[:, 0] = pts_3D[:, 0] + bbox_xywh[0] # shift x from bbox\n pts_3D[:, 1] = pts_3D[:, 1] + bbox_xywh[1] # shift y from bbox\n # draw_3D_skeleton(pts_3D, bones = bones_jts_29, colors = colors, save_path = res_path, show_image = False)\n\n new_princpt = np.array([image.shape[1], image.shape[0]]) * 0.5\n transl[:2] += (np.array(princpt) - new_princpt) * transl[2] / np.array(focal) \n princpt = new_princpt\n\n # save to dict\n K = np.eye(3)\n K[[0, 1], [0, 1]] = focal\n K[:2, 2] = princpt\n out_dict[idx]['smpl_pose_quat_wroot'].append(pose_output.pred_theta_mats[0].cpu().numpy().reshape(-1, 4)) # QUATERNIONS HYBRIK\n out_dict[idx]['smpl_beta'].append(pose_output.pred_shape[0].cpu().numpy()) # BETA HYBRIK\n out_dict[idx]['root_trans'].append(transl) # ROOT TRANSLATION GLAMR\n out_dict[idx]['kp_2d'].append(pts.cpu().numpy()) # KEYPOINTS 2D\n out_dict[idx]['cam_K'].append(K.astype(np.float32)) # INTRINSICS GLAMR\n out_dict[idx]['kp_3d'].append(pts_3D.cpu().numpy()) # KEYPOINTS 3D (pixel units)\n # out_dict[idx]['uv_3D_29'].append(uv_3D_29.cpu().numpy()) # KEYPOINTS 3D (camera reference frame)\n out_dict[idx]['heatmaps'].append(pose_output.uvd_heatmap.cpu().numpy())\n out_dict[idx]['maxvals'].append(pose_output.maxvals.cpu().numpy()) \n mot_bboxes = {\n 0: {\n 'id': idx,\n 'bbox': np.stack(bboxes),\n 'exist': np.array(bbox_exist),\n }\n }\n # mot_bboxes[0]['bbox'][numero_box] # holds centre coordinates (x,y,w,h) -> from bbox_xywh\n find = np.where(mot_bboxes[idx]['exist'])[0]\n mot_bboxes[idx]['id'] = idx\n mot_bboxes[idx]['start'] = find[0] # first frame with a detection (was find[idx], a latent bug for idx != 0)\n mot_bboxes[idx]['end'] = find[-1]\n mot_bboxes[idx]['num_frames'] = mot_bboxes[idx]['exist'].sum()\n mot_bboxes[idx]['exist_frames'] = find\n for idx, pose_dict in out_dict.items():\n for key in pose_dict.keys():\n pose_dict[key] = np.stack(pose_dict[key])\n pose_dict['frames'] = mot_bboxes[idx]['exist_frames']\n pose_dict['frame2ind'] = {f: i for i, f in enumerate(pose_dict['frames'])}\n pose_dict['bboxes_dict'] = mot_bboxes[idx]\n\n if len(time_oper) > 1:\n time_oper[0] = time_oper[1]\n average_time_by_frame = sum(time_oper) / len(img_path_list) \n\n# average_time_by_person= sum(time_oper) / len(img_path_list) \nprint(f\"Average running time by frame: {average_time_by_frame:.4f} seconds 
({1/average_time_by_frame:.2f} fps)\")\n\nnew_dict = dict()\nfor k in sorted(out_dict.keys()): # out_dict.keys() has people identifiers (basketball scenario -> out_dict.keys() = [1, 0, 2])\n v = out_dict[k] # Information by person ID (output dictionary)\n new_dict[k] = dict() # Dictionary within a dictionary\n for ck, cv in v.items(): # ck = field name (e.g. 'cam_K', 'bboxes_dict', 'frames') and cv = the value stored under that field\n new_dict[k][ck] = cv \npickle.dump(new_dict, open(f'{opt.out_dir}/pose.pkl', 'wb')) \n\nimages_to_video(res_images_path, f'{opt.out_dir}/render.mp4', img_fmt='%06d.jpg')\nimages_to_video(res_2D_poses_images_path, f'{opt.out_dir}/render_2D_pose.mp4', img_fmt='%06d.jpg')\n# images_to_video(res_3D_poses_images_path, f'{opt.out_dir}/render_3D_pose.mp4', img_fmt='%06d.jpg')\n# shutil.rmtree(f'{opt.out_dir}/res_images')\n","repo_name":"AndreOliveira00/3D-Pose-and-Shape-Estimation-with-a-Camera-System","sub_path":"pose_est/hybrik_demo/demo_hrnet48.py","file_name":"demo_hrnet48.py","file_ext":"py","file_size_in_byte":29585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38663882859","text":"'''\nimport_sensor_data.py\n\nThis reads sensor data from a root directory in specific sensor formats.\nTwo formats are currently supported: (1) van Essen \"Diver\" sensor native\nfiles and (2) Star-ODDI sensor native files.\n\nThis code might be modified to optionally drop and recreate the output table,\nnamed lcroyster_buoyobservation, before importing the data from the input files.\nHowever, mysql workbench can be used to delete rows from the table.\n\nBy not dropping the current database table, this allows the user to keep\nthe current data in the table while importing the input files. 
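A simple alternative, when a full re-import is wanted, is to clear the table first with SQL (e.g. \"TRUNCATE TABLE lcroyster_buoyobservation;\" run from mysql workbench). 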
However,\nif any sensor readings in the input files are duplicates of readings already\nin the database, the log will present warning messages that the duplicates\nwere not inserted, and this will also significantly slow down execution\ntime of the total import process.\nIt may be easier and faster to drop the database table and always\nimport all the data than to jockey potential input files around in your file\nspace to ensure only the 'new readings' will be included.\nIf there are no duplicate readings, this import process should import\nabout 15,000 readings/rows per minute.\n\nPython 3.6+ code\n'''\n\nimport sys, os, os.path, platform\n\ndef register_modules():\n platform_name = platform.system().lower()\n if platform_name == 'linux':\n modules_root = '/home/robert/'\n #raise ValueError(\"MISSING: Enter code here to define modules_root\")\n else:\n # assume rvp office pc running windows\n modules_root=\"C:\\\\rvp\\\\\"\n sys.path.append('{}'.format(modules_root))\n sys.path.append('{}git/citrus/modules'.format(modules_root))\n return platform_name\n\n#platform_name=register_modules()\n\nimport os, sys, os.path\nMY_SECRETS_FOLDER = os.environ['MY_SECRETS_FOLDER']\n\nprint(\"Using MY_SECRETS_FOLDER={}\".format(MY_SECRETS_FOLDER))\n\nsys.path.append(os.path.abspath(MY_SECRETS_FOLDER))\nprint(\"Using system.path={}\".format(sys.path))\n\n#from etl import sequence_paths\nfrom pathlib import Path\nfrom collections import OrderedDict\n\n#### Sqlalchemy\nfrom sqlalchemy import (\n Boolean, create_engine,\n CheckConstraint, Column,\n Date, DateTime,Float, FLOAT, ForeignKeyConstraint,\n inspect, Integer,\n MetaData, Sequence, String, Table, Text, UniqueConstraint,\n )\n\nfrom sqlalchemy.schema import CreateTable\n#from my_secrets.settings_sqlalchemy import get_engine_spec_by_name\n\nfrom sqlalchemy.sql import select\nimport sqlalchemy.sql.sqltypes\nfrom sqlalchemy.dialects.postgresql import ARRAY\n#from sqlalchemy_tools.core.utils import drop_if_exists\n\n#import regex\nimport re\nimport datetime\nimport math\n\n'''\nGeneral notes - context about the Oyster files.\n\nAssumptions: From this import process, the output table receives only sensor\ndata and the table has already been created, so it is just inserted into.\n\nEach given file is either a 'diver sensor' or an 'oddi star sensor'\ntext file of readings data, which adheres to the strict format expected here.\n\nThe header lines of a sensor data file provide the\nsensor id and the channel/axis/column data names,\nand they are in order, in both the [Logger settings] section and\nthe [Series settings] section as: PRESSURE (in cm), TEMPERATURE, CONDUCTIVITY.\n\nOther header values are ignored (not checked) unless and until use\ncases arise that require them to be checked.\n\nAlso the sensor location is NOT authoritative, because\nUF WEC registers location in external files or logs and does\nnot practice updating the sensors so that the\nsensor headers always accurately display where\nthey are placed.\n\nThe MON file is a text file with latin1 encoding.\n\nThe [Data] has a line after it with a single integer that is ignored.\nSubsequent lines in the data section each conform to this format:\n\n(Date) 4 digit year, slash(forward), 2 digit month, 2 digit day of month,\nspace:\n(Time) 2 digit military time hour of day, colon, 2 digit minute, colon,\n2 digit second, period, 1 digit of seconds precision\none or more spaces,\n(first measurement): a string of decimal digits followed by a period,\n followed by 3 digits of 
precision.\n(second measurement): a string of decimal digits followed by a period,\nfollowed by 3 digits of precision:\n(third measurement): a string of decimal digits followed by a period,\nfollowed by 3 digits of precision:\n(one or more spaces followed by end of line)\n\nThe last line in the file is a sentinel line with the text:\nEND OF DATA FILE OF DATALOGGER FOR WINDOWS\n\nEach data line is used to insert a row in an observations table.\nThat table must have a unique index on the composite (sensor_id, date, time);\nif a MON file row duplicates those values, it is skipped and not inserted,\nthough a log file line is issued to log any consecutive range of line numbers\nwithin the input MON file that has duplicate (already inserted) data.\n\n\nregular expression for float:\nfloat_rx = r'[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?'\n\nfloat_pattern = re.compile(float_rx)\n\nregular expression for sensor data line:\nalso see: https://stackoverflow.com/questions/6260777/python-regex-to-parse-string-and-return-tuple#6260945\n\ndata_line_pattern = re.compile(\n r\"\"\"\\s*(?P<date>.*?)\\s*(?P<time>.*?)\\s*(?P<pressure_cm>
.*?)\n \\s*(?P<temperature_c>.*)\n \\s*(?P<conductivity_mS_cm>.*)\n \\s*(?P<rest>.*)\\s*\"\"\", re.VERBOSE\n )\n\n----------------\nsample sensor data line to date, time and 3 floats for\n\n(1)pressure_cm, (2)temperature_c, (3)conductivity_mS_cm\n2017/08/11 12:00:00.0 1106.592 29.100 1.472\n----------------\n\n'''\n'''Class SalinityPsuCalculatorDouglass2010\n\nCalculate salinity from parameters, with formulae and constant-value defaults\ngiven by a spreadsheet UF-emailed from Joe Aufmuth 20180510.\n\n'''\nclass SalinityPsuCalculatorDouglass2010():\n '''\n\n '''\n def __init__(self,standard=42.9, verbosity=0):\n self.me = 'SalinityPsuCalculatorDouglass2010'\n self.verbosity = verbosity\n # may make a param later - reusing referenced cell names as var\n # names here too\n self.temperature_c = None # can be set from a10 value later\n self.conductivity_mS_cm= None # can be set from b10 value later\n\n self.c10 = standard\n self.ref_cond_at_35psu_15c = self.c10\n\n\n if verbosity > 0:\n print(\"{}: Initializing salinity_calculator:\"\n .format(self.me))\n #end if\n # End def __init__\n\n def from_temperature_c_conductivity_mS_cm(self,temperature_c=None,\n conductivity_mS_cm=None\n ):\n # Calculate and return salinity in Practical Salinity Unit value\n # First calculate sub-terms to clarify the formula\n temperature_c = float(temperature_c)\n conductivity_mS_cm = float(conductivity_mS_cm)\n self.a10 = temperature_c\n self.b10 = conductivity_mS_cm\n\n #conductivity_ratio is measured conductivity/reference conductivity\n self.d10 = self.b10/self.c10\n\n #self.g10 is rt calculation based on a10\n self.g10 = ( 0.6766097 + 0.0200564*self.a10\n + 0.0001104259 * self.a10 ** 2\n + (-6.9698*10**-7) * self.a10**3\n + (1.0031*10**-9) * self.a10**4\n )\n\n # self.e10 is Conductivity ratio/rt\n self.e10 = self.d10/self.g10\n\n #self.f10 is dS\n self.f10 = (\n ((self.a10 -15)/(1 + 0.0162 * (self.a10-15)))\n * (0.0005\n + (-0.0056) * self.e10**0.5\n + (-0.0066) * self.e10\n + (-0.0375) * self.e10**1.5\n + (0.0636) * self.e10**2\n + (-0.0144) * self.e10**2.5\n )\n )\n\n #self.h10 is calculated salinity in psu (practical salinity units)\n self.h10 = ( 0.008\n + (-0.1692 * self.e10**0.5)\n + 25.3851 * self.e10\n + 14.0941 * self.e10**1.5\n + (-7.0261) * self.e10**2\n + 2.7081 * self.e10**2.5\n + self.f10\n )\n return self.h10\n # end def from_temperature_c_conductivity_mS_cm\n# end class salinity_calculator1\n\ndef sequence_paths(input_folders=None, input_path_globs=None, verbosity=0):\n # NOTE: I changed arg input_path_glob to input_path_globs\n # apologies to callers that need to adapt\n me = 'sequence_paths'\n if (input_folders is None or input_path_globs is None):\n msg = \"Missing param input_folders or input_path_glob\"\n raise ValueError(msg)\n\n # compose input_path_list over multiple input_folders\n for input_folder in input_folders:\n for input_path_glob in input_path_globs:\n paths = list(Path(input_folder).glob(input_path_glob))\n if (verbosity > 0):\n print(\"{}: Found {} files in input_folder='{}'\"\n \" that match {}\\n\"\n .format(me, len(paths), input_folder, input_path_glob))\n\n #input_path_list.extend(list(Path(input_folder).glob(input_path_glob)))\n for path in paths:\n yield path\n # end for path\n #end for input_path_glob\n # end for input folder\n# end def sequence_paths\n\nclass OysterProject():\n ''' Get sensor serial numbers into useful list\n Future: create a generator function to take engine and table name\n and create a generator of rows, and use that.\n '''\n def get_d_serial_sensor(self, table_name='lcroyster_sensor'):\n me = 
'get_d_serial_sensor'\n metadata = MetaData()\n engine = self.engine\n verbosity = self.verbosity\n log_file = self.log_file\n table_deploy = Table(table_name, metadata, autoload=True,\n autoload_with=engine)\n s = select([table_deploy])\n conn = engine.connect()\n result = conn.execute(s)\n self.d_serial_sensor = {}\n\n for row in result:\n self.d_serial_sensor[row['serial_number']] = row['sensor_id']\n if verbosity > 0:\n msg=(\"Got sensor serial_number '{}' with sensor_id {}\"\n .format(row['serial_number'], row['sensor_id']))\n return\n # end def get_d_serial_sensor\n\n\n ''' Get sensor deployments into useful dictionary\n '''\n def get_d_sensor_deployment(self):\n me = 'get_d_sensor_deployment'\n metadata = MetaData()\n engine = self.engine\n verbosity = self.verbosity\n log_file = self.log_file\n table_deploy = Table('lcroyster_sensordeploy', metadata, autoload=True,\n autoload_with=engine)\n s = select([table_deploy])\n conn = engine.connect()\n result = conn.execute(s)\n\n l_rows = []\n if verbosity > 1 or 1 ==1:\n print(\"{}: Showing lcroyster_sensordeploy rows:\".format(me),\n file=log_file)\n for count,row in enumerate(result):\n print (\"Got row='{}'\".format(row),file=log_file)\n l_row = {\n 'sensor_id': row['sensor_id'],\n 'event_date': row['deploy_datetime'],\n 'location_id': row['location_id'],\n }\n print(\"Got l_row = {}\".format(l_row),file=log_file)\n l_rows.append(l_row)\n\n # d_sensor_deployment{} key is sensor, value is dict keyed by unique\n # dates, each with a location id (deployment location) value.\n d_sensor_deployment = {}\n for d_row in l_rows:\n if self.verbosity > 0:\n print(\"Sensor deployment: Using d_row='{}'\".format(d_row)\n ,file=self.log_file)\n sensor_id = d_row['sensor_id']\n if d_sensor_deployment.get(sensor_id, None) is None:\n d_sensor_deployment[sensor_id] = dict()\n d_date_loc = d_sensor_deployment[sensor_id];\n\n #dt = datetime.datetime.strptime(d_row['event_date'],\"%Y-%m-%d %H:%M:%S\")\n dt = d_row['event_date']\n\n if dt in d_date_loc.keys():\n raise ValueError(\n \"Sensor {} has duplicate sensor datetime {}\"\n .format(sensor_id, repr(dt)))\n\n d_date_loc[dt] = d_row['location_id']\n\n # Replace each d_date_loc with an orderedDict to\n # support faster downstream processes\n for sensor_id, d_date_loc in d_sensor_deployment.items():\n # Sort each sensor_deployment dict by date keys\n d_sensor_deployment[sensor_id] = OrderedDict(\n { key:d_date_loc[key] for key in sorted(d_date_loc.keys()) })\n\n if self.verbosity > 1 or 1 == 1:\n print(\"Got final d_sensor_deployment = {}\"\n .format(repr(d_sensor_deployment)),file=self.log_file)\n for sensor_id, d_date_loc in d_sensor_deployment.items():\n print(\"{}={}\".format(sensor_id,d_date_loc))\n\n self.d_sensor_deployment = d_sensor_deployment\n return\n #end def get_d_sensor_deployment\n\n '''\n get_in_service_location():\n Potential speedup:\n Put the increasing times in a list rather than a dict so they can be\n indexed and return the index of deployed so the caller can state it as\n a param on a successive call to save time.. 
because the caller's inputs\n are sorted such that the time is always increasing within a file.\n '''\n\n def get_in_service_location(\n self,sensor_id=None, observation_datetime=None):\n # Return True if this observation falls in a period of a valid\n # deployment to a project location\n try:\n od_datetime_loc = self.d_sensor_deployment[sensor_id]\n except:\n msg=(\"\\n*********************\\n\"\n \"FATAL ERROR: Sensor_id '{}' has raw data but no deployments.\"\n \"\\n*********************\\n\"\n .format(repr(sensor_id)))\n print(msg, file=self.log_file)\n\n # NOTE: log file output shows newlines, but ValueError shows \\n\n raise ValueError(\"**** FATAL ERROR: See log_file={}\"\n .format(self.log_file.name))\n #Find whether this date is covered by a valid deployment\n in_service = 0\n found_location_id = 0\n for deployed, location_id in od_datetime_loc.items():\n if deployed > observation_datetime:\n #This deployment is in the future beyond this observation,\n #so just break with the current in_service_value\n break;\n if observation_datetime >= deployed and location_id != 0:\n # 0 is the 'unknown' or invalid location\n found_location_id = location_id\n in_service = 1\n return in_service, found_location_id\n\n #end def get_in_service() of class OysterProject\n\n #class OysterProject\n def __init__(self, engine=None, observations_table_name=None,\n log_file=None, verbosity=1, max_exception_logs_per_file=10):\n me='OysterProject.__init__'\n # Initialize some central data, later read some from db\n\n\n if verbosity > 0:\n print(\"{}: starting\".format(me), file=log_file)\n self.verbosity = verbosity\n self.SalinityPsuCalculator = SalinityPsuCalculatorDouglass2010(verbosity=1)\n if verbosity > 0:\n print(\"{}: Back from constructing SalinityPsuCalculator\"\n .format(me), file=log_file)\n\n if engine is None:\n raise ValueError(\"Missing engine parameter\")\n self.engine = engine\n self.connection = engine.connect()\n self.trans = self.connection.begin()\n self.max_exception_logs_per_file = max_exception_logs_per_file\n self.log_file = log_file\n\n self.sa_metadata = MetaData()\n\n # Get engine table object for water_observation\n self.observations_table = Table(observations_table_name,\n self.sa_metadata, autoload=True, autoload_with=engine)\n\n if log_file is None:\n self.log_file = sys.stdout\n else:\n self.log_file = log_file\n\n #Get some reference database table data\n self.get_d_serial_sensor()\n self.get_d_sensor_deployment()\n #print(\"Test print to log file.\", file=log_file)\n\n return\n # end def __init__\n#end class OysterProject\n\n'''\nclass Diver():\n\nThis class represents water quality sensors of type \"Diver\".\n\nNote: on 20180218 the current sub-folders with sample diver files are:\n[ 'LC-WQ1','LC-WQ3' ]\n\n'''\n\nclass Diver():\n\n # class Diver():\n def __init__(self,project=None,input_file_folders=None,\n input_file_globs=None, engine=None, log_file=None):\n\n if project is None:\n # eg an instance of the OysterProject() class\n raise ValueError(\"project not given\")\n\n self.project = project\n self.max_exception_logs_per_file = project.max_exception_logs_per_file\n self.log_file = log_file if log_file is not None else project.log_file\n\n self.input_file_folders = input_file_folders\n\n if input_file_globs is None:\n self.input_file_globs = ['**/*.MON']\n else:\n self.input_file_globs = input_file_globs\n\n # We require that all sensors, regardless of sensor type, maintain\n # unique sensor serial_number ids, so we can use project\n # level sensor dictionary here.\n 
self.d_serial_sensor = project.d_serial_sensor\n\n if engine is None:\n self.engine = self.project.engine\n engine = self.engine\n else:\n self.engine = engine\n\n # Example:'2017/12/21 21:00:00.0 1110.675 20.263 12.508'\n self.rx_diver_reading = (\n r\"(?P<y4>.*)/(?P<mm>.*)/(?P<dd>
.*)\"\n r\"\\s\\s*(?P<hr>
.*):(?P<min>.*):(?P<sec>(\\d+))\\.\\d*\"\n r\"\\s*(?P<pressure_cm>(\\d+(\\.\\d*)))\\s*(?P<temperature_c>\\d+(\\.\\d*))\"\n r\"\\s*(?P<conductivity_mS_cm>\\d+(\\.\\d*))\"\n )\n # rx based on Dr. Pine's group, implied by the IDs they manually record.\n #self.rx_serial_number = r\"\\s*Serial number\\s*.*-(?P<serial_number>.*) .*\"\n\n self.rx_serial_number = (\n r\"\\s*Serial number[^-]*-(?P<serial_number>[^\\s]*)\\s*.*\")\n #end def __init__ for class Diver()\n\n #class Diver\n def parse_files(self, verbosity=1):\n me = 'parse_files'\n file_count = 0\n\n total_file_rows = 0\n log_file = self.log_file\n\n gpaths = sequence_paths(input_folders=self.input_file_folders,\n input_path_globs=self.input_file_globs)\n\n paths = []\n total_inserts = 0\n total_exceptions = 0\n for path in gpaths:\n if path in paths:\n # gpaths could have duplicates when multiple globs\n # were used to generate the gpaths, so skip dups\n # If carefully chosen to guarantee the globs have no dups,\n # one can bypass this checking\n continue\n #Store this path to reject future duplicates in the sequence\n paths.append(path)\n\n file_count += 1\n\n input_file_name = path.resolve()\n if verbosity > 0:\n print(\"{}: parsing input file '{}'\"\n .format(me, input_file_name),flush=True\n , file=log_file )\n\n n_rows, n_inserts, n_exceptions = self.import_file(\n input_file_name=input_file_name ,verbosity=verbosity)\n\n total_file_rows += n_rows\n total_inserts += n_inserts\n total_exceptions += n_exceptions\n #l_rows = ['one']\n if verbosity > 0:\n print(\n \"{}: Parsed file {}={} with {} 'readings' rows\"\n .format(me, file_count, input_file_name, n_rows)\n ,file=log_file)\n\n # end for path in paths\n\n if verbosity > 0:\n print(\"{}:Diver Files - Ending with {} files found and parsed.\".format(me,file_count),\n file=log_file)\n\n return file_count, total_file_rows, total_inserts, total_exceptions\n # end def parse_files\n\n ''' Diver class: def import_file'''\n def import_file(self, input_file_name=None, verbosity=1):\n\n me='import_file'\n log_file = self.log_file\n l_rows = []\n #rx_diver_reading = self.d_name_rx['data_reading']\n rx_diver_reading = self.rx_diver_reading\n if verbosity > 1:\n print(\"rx_diver_reading='{}'\"\n .format(rx_diver_reading), file=log_file)\n\n with open(input_file_name, 'r', encoding='latin1') as ifile:\n for line_count, line in enumerate(ifile, start = 1):\n # Nip pesky ending newline\n line = line[:len(line)-1]\n if verbosity > 1:\n print(\"Parsing line {} ='{}'\".format(line_count,line)\n ,file=log_file)\n if line.startswith('END OF') :\n # Expected end of data LINES\n break\n\n if line_count == 13:\n #rx = self.d_name_rx['serial_number']\n #r'Serial number =(?P<serial_number>.*)'\n rx_serial_number = (\n r'Serial number[^-]*-(?P<serial_number>[^\\s]*).*'\n )\n match = re.search(rx_serial_number,line)\n # Check the serial number of this diver sensor device\n try:\n serial_number = match.group(\"serial_number\")\n except Exception as ex:\n msg=(\"rx_serial_number={}, line={}, no serial part\"\n .format(rx_serial_number,line))\n print(msg, file=log_file)\n raise ValueError(msg)\n\n d_serial_sensor = self.d_serial_sensor\n if serial_number not in d_serial_sensor.keys():\n msg=(\"Input_file_name: {}\\n\"\n \"Found serial number '{}' not in '{}'\"\n .format(input_file_name, serial_number,\n repr(d_serial_sensor.keys())))\n raise ValueError(msg)\n\n sensor_id = d_serial_sensor[serial_number]\n\n if verbosity > 0:\n msg=(\"Diver input file '{}',\\n line13='{}',\\n\"\n \" serial_number='{}', sensor_id='{}'\"\n .format(input_file_name, line,\n serial_number, 
sensor_id))\n print(msg, file=log_file)\n\n if line_count < 67:\n #Skip constant sensor header information\n continue\n\n # Now read and parse this data line and create output d_row\n d_row = {}\n l_rows.append(d_row)\n d_row['sensor_id'] = sensor_id\n\n try:\n data_match = re.search(rx_diver_reading, line)\n except Exception as ex:\n msg=('line={}, data reading fails'.format(line_count))\n raise ValueError(msg)\n\n y4 = data_match.group(\"y4\")\n mm = data_match.group(\"mm\")\n dd = data_match.group(\"dd\")\n hr = data_match.group(\"hr\")\n minute = data_match.group(\"min\")\n sec = data_match.group(\"sec\")\n #frac = data_match.group(\"frac\")\n date_str = \"{}-{}-{} {}:{}:{}\".format(y4,mm,dd,hr,minute,sec)\n d_row['observation_datetime'] = date_str\n\n obs_dt = datetime.datetime.strptime(date_str,\"%Y-%m-%d %H:%M:%S\")\n # May check for None obs_dt here and skip?\n\n in_service, location_id = self.project.get_in_service_location(\n sensor_id=sensor_id, observation_datetime=obs_dt)\n\n d_row['in_service'] = in_service\n d_row['location_id'] = location_id\n\n if verbosity > 1:\n print(\"{}: input line {}='{}'\"\n .format(me, line_count, line),file=log_file)\n\n if verbosity > 2:\n d_row['date_str'] = date_str\n print(\"date_str='{}'\".format(date_str))\n print(\"in_service='{}'\".format(in_service))\n print(\"location_id='{}'\".format(location_id))\n\n # NOTE: field_names match columns in observations_table_name\n d_row['pressure_cm'] = data_match.group('pressure_cm')\n\n temperature_c = data_match.group('temperature_c')\n d_row['temperature_c'] = temperature_c\n\n conductivity_mS_cm = data_match.group('conductivity_mS_cm')\n d_row['conductivity_mS_cm'] = conductivity_mS_cm\n\n #calculate salinity\n d_row['salinity_psu_calculated'] = (\n self.project.SalinityPsuCalculator\n .from_temperature_c_conductivity_mS_cm(\n temperature_c, conductivity_mS_cm\n )\n )\n\n if verbosity > 2:\n for field_name in ['pressure_cm','temperature_c'\n ,'conductivity_mS_cm', 'salinity_psu_calculated']:\n print(\"Field_name='{}', value='{}'\"\n .format(field_name, d_row[field_name]))\n\n # Calculate salinity with the project's salinity_calculator\n\n # end line in input file\n # end with open.. 
input file_name\n\n # Insert rows to table water_observation from this input file\n n_exceptions = 0\n n_inserts = 0\n for row in l_rows:\n line_count += 1\n try:\n self.project.engine.execute(\n self.project.observations_table.insert(), row)\n n_inserts += 1\n except Exception as ex:\n n_exceptions += 1\n if n_exceptions < self.project.max_exception_logs_per_file:\n msg=(\"\\n***************\\n\"\n \"WARNING: Input file '{}',\\nline {} has error {}.\"\n \"\\n***************\\n\"\n .format(input_file_name, line_count,ex))\n print(msg, file=log_file)\n elif n_exceptions == self.project.max_exception_logs_per_file:\n msg = ('*** MADE MAXIMUM EXCEPTION REPORTS FOR THIS FILE.')\n print(msg, file=log_file)\n\n if verbosity > 0:\n print(\"{}:Parsed file {} of {} rows, did {} inserts, had {} exceptions:\"\n .format(me, input_file_name, line_count-1, n_inserts, n_exceptions)\n ,file=log_file)\n if verbosity > 1:\n print(\"Rows parsed were:\")\n for count, d_row in enumerate(l_rows, start=1):\n print(\"{}\\t{}\".format(count,d_row),file=log_file)\n\n return len(l_rows), n_inserts, n_exceptions\n # end def import_file()\n#end class Diver()\n\n'''\nHowever, this software is not dependent on that, though it\nmay facilitate locating test data to test modifications to this\nprogram.\n'''\n\n'''\nUsing a given list of folders and a list of globs, create a\ngenerator that yields:\n\nThe next path for a file under a given input folder that matches\na given glob.\n\nTODO: revert to only one glob per generator! Two globs may be given if\na list is used that 'reiterates' the same file name, NOT a good idea.\nThe caller may handle this separately, or need a new non-generator\napproach that keeps a dict of filenames and then tosses dups, and then\njust iterates the keys.. 
but since the dict is already in memory, it would\nNOT be a generator, just an iterable.\n\n'''\n\nclass Star():\n # This class represents water quality sensors of type \"Star-ODDI\".\n\n # class Star():\n def __init__(self,project=None, input_file_folders=None,\n input_file_globs=None, log_file=None, d_serial_location=None):\n me = \"Star.__init__\"\n if project is None:\n # e.g. an instance of the OysterProject() class\n raise ValueError(\"project not given\")\n\n self.project = project\n self.max_exception_logs_per_file = project.max_exception_logs_per_file\n self.log_file = project.log_file if log_file is None else log_file\n\n print(\"{}:Using log file {}\".format(me, self.log_file))\n print(\"{}:Using log file {}\".format(me, self.log_file), file=self.log_file)\n\n # We require that all sensors, regardless of sensor type, maintain\n # unique sensor serial_number ids, so we can use project\n # level d_serial_sensor\n # Later we may possibly need unique serial_numbers per type,\n # so maintain this data member here.\n self.d_serial_sensor = project.d_serial_sensor\n\n self.input_file_folders = input_file_folders\n\n rx_serial = '' #tbd\n\n if input_file_globs is None:\n self.input_file_globs = ['**/Star*WQ[0-9]']\n else:\n self.input_file_globs = input_file_globs\n\n #end def __init__\n\n # class Star():\n def parse_files(self, verbosity=1):\n me = 'parse_files'\n file_count = 0\n\n total_file_rows = 0\n log_file = self.log_file\n if verbosity > 0:\n print(\"{}:Starting with input_folders={},globs={}\"\n .format(me, self.input_file_folders, self.input_file_globs),\n file=log_file)\n\n paths = sequence_paths(input_folders=self.input_file_folders,\n input_path_globs=self.input_file_globs)\n total_inserts = 0\n total_exceptions = 0\n for path in paths:\n file_count += 1\n\n input_file_name = path.resolve()\n n_rows, n_inserts, n_exceptions = self.import_file(\n input_file_name=input_file_name,\n verbosity=verbosity)\n\n total_file_rows += n_rows\n total_inserts += n_inserts\n total_exceptions += n_exceptions\n\n # end for path in paths\n if verbosity > 0:\n print(\"{}:STAR Files - Ending with {} files found and parsed.\"\n .format(me, file_count), file=log_file)\n return file_count, total_file_rows, total_inserts, total_exceptions\n # end def parse_files\n\n # class Star():\n '''\n Return None if the re match failed, otherwise return d_row of name-value pairs.\n '''\n def update_row_by_match(self, sensor_id=None,match=None, d_row=None, verbosity=1):\n me = 'update_row_by_match'\n\n log_file = self.log_file\n\n try:\n y4 = match.group(\"y4\")\n mm = match.group(\"mm\")\n dd = match.group(\"dd\")\n hr = match.group(\"hr\")\n minute = match.group(\"min\")\n sec = match.group(\"sec\")\n # NOTE: field_names match columns in observations_table_name\n for field_name in ['temperature_c','salinity_psu',\n 'conductivity_mS_cm', 'sound_velocity_m_sec']:\n\n value = match.group(field_name)\n # Capitulation to inconsistent sea star sensor files.\n # Some use , some use . 
as decimal point\n value = value.replace(',','.')\n d_row[field_name] = value\n\n if verbosity > 2:\n print(\"Field_name='{}', value='{}'\"\n .format(field_name, value), file=log_file)\n # end for field_name\n\n # Calculate salinity\n d_row['salinity_psu_calculated'] = (\n self.project.SalinityPsuCalculator\n .from_temperature_c_conductivity_mS_cm(\n d_row['temperature_c'], d_row['conductivity_mS_cm']\n )\n )\n\n date_str = \"{}-{}-{} {}:{}:{}\".format(y4,mm,dd,hr,minute,sec)\n d_row['observation_datetime'] = date_str\n obs_dt = datetime.datetime.strptime(date_str,\"%Y-%m-%d %H:%M:%S\")\n\n in_service, location_id = self.project.get_in_service_location(\n sensor_id=sensor_id, observation_datetime=obs_dt)\n\n d_row['in_service'] = in_service\n d_row['location_id'] = location_id\n\n except Exception as ex:\n # Signal a parsing exception\n msg = (\"\\n------------------\\n{}:Got exception = {}\"\n .format(me, repr(ex)))\n\n print(msg)\n sys.stdout.flush()\n print(msg, file=log_file)\n d_row = None\n\n return d_row\n#end def update_row_by_match\n\n '''\n Line 19+: sample (Tab delimiters in raw file)\n 1\t26.10.2017 10:20:00\t17.98\t0.01\t0.00\t1475.31\n\n rx_line18_star_reading = (\n )\n rx_star = (\n r\"\\s*(?P<dd>
.*)\\.(?P<mm>.*)\\.(?P<y4>.*)\"\n r\"\\s\\s*(?P<hr>
\d):(?P<min>\\d.*):(?P<sec>(\\d+(\\.\\d*)))\"\n r\"\\s+(?P<temperature_c>(\\d+(\\.\\d*)))\"\n r\"\\s+(?P<salinity_psu>(\\d+(\\.\\d*)))\"\n r\"\\s+(?P<conductivity_mS_cm>(\\d+(\\.\\d*)))\"\n r\"\\s+(?P<sound_velocity_m_sec>(\\d+(\\.\\d*)))\"\n )\n\n rx_star_line19_reading = (\n # Date components\n r\"(/d)\\t(?P<dd>
\d+).(?P<mm>\\d+).(?P<y4>\\d+)\"\n r\"\\t(?P<hr>
\\d+):(?P\\d+.*):(?P(\\d+(\\.\\d*)))\"\n\n # Readings\n r\"\\t(?P(\\d+(\\.\\d*)))\"\n r\"\\t(?P(\\d+(\\.\\d*)))\"\n r\"\\t(?P\\d+(\\.\\d*))\"\n r\"\\t(?P(\\d+(\\.\\d*)))\"\n )\n '''\n # class Star():\n def import_file(self, input_file_name=None, verbosity=1):\n\n me='import_file'\n log_file = self.log_file\n l_rows = []\n # Date components\n # and Readings\n rx_star_serial_number_line16= (\n r\".*\\t(?P\\d+)\" )\n\n rx_star_line19_reading = (\n r\"(?P\\d+)\\s*(?P
\d+)\\.(?P<mm>\\d+)\\.(?P<y4>\\d+)\"\n r\"\\s\\s*(?P<hr>
\\d+):(?P\\d+):(?P(\\d+))\"\n r\"\\s+(?P(\\d+([.,]\\d*)))\"\n r\"\\s+(?P(\\d+([.,]\\d*)))\"\n r\"\\s+(?P(\\d+([.,]\\d*)))\"\n r\"\\s+(?P(\\d+([.,]\\d*)))\"\n )\n\n rx_star_line18_reading = (r\"#D\\s+Data:\\s+\"\n + rx_star_line19_reading )\n #but note sn is something else here...?\n\n if verbosity > 1:\n print(\"{}:rx_star_line19_reading='{}'\"\n .format(me, rx_star_line19_reading)\n ,file=log_file)\n\n with open(input_file_name, 'r', encoding='latin1') as ifile:\n for line_count, line in enumerate(ifile, start=1):\n # Nip pesky ending newline\n line = line[:len(line)-1]\n if verbosity > 1:\n print(\"Parsing line {} ='{}'\".format(line_count,line)\n ,file=log_file)\n\n if line_count == 16:\n # Get serial number\n rx_star_serial_number_line16= (\n r\".*\\t(?P\\d+)\" )\n try:\n match = re.search(rx_star_serial_number_line16,line)\n serial_number = match.group(\"serial_number\")\n # 20180424 - special need for Oyster Project, stick\n # or impfer an S prefix in front of serial numbers now.\n serial_number = 'S' + serial_number\n except:\n msg = (\"{}: input_file has {} no serial number\"\n .format(me, input_file_name))\n print(msg, file=log_file)\n raise\n\n d_serial_sensor = self.d_serial_sensor\n if serial_number not in d_serial_sensor.keys():\n msg=(\"ERROR: Input_file_name: {}\\n\"\n \"Found serial number '{}' not in '{}'\"\n .format(input_file_name, serial_number,\n repr(d_serial_sensor.keys())) )\n print(msg, file=log_file)\n raise ValueError(msg)\n\n sensor_id = self.d_serial_sensor[serial_number]\n\n if verbosity > 0:\n msg=(\"Star input file '{}',\\n line13='{}',\\n\"\n \" serial_number='{}', sensor_id='{}'\"\n .format(input_file_name, line,\n serial_number, sensor_id))\n print(msg, file=log_file)\n\n # end if line_count == 16 (We set sensor_id and location_id)\n\n if line_count < 19:\n # Skip constant sensor header information\n continue\n\n d_row = {}\n l_rows.append(d_row)\n\n d_row['sensor_id'] = sensor_id\n if verbosity > 1:\n print(\"{}: reading line {} = '{}'\"\n .format(me, line_count, line),file=log_file)\n\n # Here we have Line 19 and greater - regular-formatted data lines\n # read and parse this data line and create output d_row\n try:\n data_match = re.search(rx_star_line19_reading, line)\n except Exception as ex:\n msg=(\"ERROR: line number {}='{}', rx_star_line19_reading \"\n \"fails,ex={}\".format(line_count, line, repr(ex)))\n raise ValueError(msg)\n\n # Note: since the location depends on observation date of\n # the row, the next method also updates location_id\n d_row = self.update_row_by_match(sensor_id=sensor_id,\n match=data_match, d_row=d_row)\n if d_row is None:\n msg = (\n \"\\n*****************\\n\"\n \"Star.{}:PARSE ERROR:File name '{}', line count='{}':\"\n \"\\nLine='{}'\\n\"\n \"\\n*****************\\n\"\n .format(me, input_file_name, line_count, line))\n print(msg, file=log_file)\n raise ValueError(msg)\n\n if verbosity > 1:\n print(\"{}: input line {}='{}'\"\n .format(me, line_count, line), file=log_file)\n\n # end line in input file\n # end with open.. 
input file_name\n\n # Insert rows to table water_observation from this input file\n line_count = 18\n n_exceptions = 0\n n_inserts = 0\n for row in l_rows:\n line_count += 1\n try:\n self.project.engine.execute(\n self.project.observations_table.insert(), row)\n n_inserts += 1\n except Exception as ex:\n n_exceptions += 1\n if n_exceptions < self.project.max_exception_logs_per_file:\n msg=(\"\\n***************\\n\"\n \"WARNING: Input file {},\\ninsert line_count {} has error {}.\"\n \"\\n***************\\n\"\n .format(input_file_name, line_count,ex))\n print(msg, file=log_file)\n elif n_exceptions == self.project.max_exception_logs_per_file:\n msg=(\"\\n *** MADE MAXIMUM EXCEPTION REPORTS FOR THIS FILE\")\n print(msg, file=log_file)\n\n if verbosity > 0:\n print(\"{}:Parsed file {}\\nSUMMARY: {} lines, {} inserts, \"\n \"and {} exceptions found.\\n\\n\\n\"\n .format(me, input_file_name, line_count-1, n_inserts,\n n_exceptions) ,file=log_file)\n if verbosity > 1:\n print(\"Parsed rows were:\")\n for count,d_row in enumerate(l_rows, start=1):\n print(\"{}\\t{}\".format(count,d_row),file=log_file)\n\n return len(l_rows), n_inserts, n_exceptions\n #end def import_file\n\n#end class Star\n\n'''\nMay not need class Oyster_Sensor as we can do parsing with the\nDiver and Star classes, but if we add more fixed sensor classes\nlater, this class might be useful to serve some management\nfunctions.\n\nLeave this code here as a stub for possible later implementation.\n\n'''\nclass Oyster_Sensor():\n def __init__(self, d_serial_location=None):\n\n if d_serial_location is None:\n self.d_serial_location = {\n 'DST CTD 8814' : 2,\n 'DST CTD 9058' : 4, # LC-WQ4 folder on 20180218\n 'DST CTD 9060' : 5, # LC-WQ5 folder on 20180218\n 'DST CTD 9061' : 6, # LC-WQ6 folder on 20180218\n 'DST CTD 9035' : 7, # LC-WQ7 folder on 20180218\n 'DST CTD 9062' : 8, # LC-WQ8 folder on 20180218\n 'DST CTD 9036' : 9, # LC-WQ9 folder on 20180218\n # Stuck this in to test parsing 20180407\n 'DST CTD 9238' : 9,\n }\n else:\n self.d_serial_location = d_serial_location\n\n # Populate the location-indicator input folder names\n # for now manually by examining the input files Mel Moreno\n # made for Robert 2/14/2018 or so\n # Note: if an input file is found under a folder whose\n # sensor-location association does not\n # match this folder, a warning should be issued.\n self.d_folder_location_20180216 = {\n 'LC-WQ1' : 1 ,\n 'LC-WQ2' : 2,\n 'LC-WQ3' : 3,\n 'LC-WQ4' : 4,\n 'LC-WQ5' : 5,\n 'LC-WQ6' : 6,\n 'LC-WQ7' : 7,\n 'LC-WQ8' : 8,\n 'LC-WQ9' : 9,\n }\n self.l_sensor_serial_numbers = [\n '..02-V5602 317.',\n ''\n ]\n self.diver_glob = ['**/*.MON']\n self.star_globs = ['**/Star*WQ[0-9]']\n #Populate the valid sensor serial numbers (may read from db\n # later if needed)\n\n self.sensor_serial_numbers = [\n\n ]\n\n return\n#end class Oyster_Sensor\n\n'''\n\nNote: the input files to use were identified in an email from Mel\nMoreno to Robert Phillips 2018-02-12.\n\n'''\ndef get_lcroyster_settings(verbosity=1):\n\n # IMPORT SETTINGS FOR MARSHALING APPLICATION WEBSITE (MAW) settings\n import maw_settings\n settings_filename = '{}{}{}'.format(MY_SECRETS_FOLDER, os.sep, 'maw_settings.py')\n return maw_settings.my_project_params['lcroyster'], settings_filename\n\ndef run(input_folder=None,\n observations_table_name=None,\n log_file_name=None,\n max_exception_logs_per_file=5,\n skip_star=0, skip_diver=0, verbosity=1):\n me='run'\n\n d_lcroyster, settings_filename = get_lcroyster_settings()\n\n if input_folder is None:\n input_folder = 
d_lcroyster['sensor_observations_input_folder']\n\n if log_file_name is None:\n #datetime_string = datetime.datetime.utcnow().strftime(\"%Y%m%dT%H%M%SZ\")\n day_string = datetime.datetime.utcnow().strftime(\"%Y%m%dT%H%MZ\")\n log_file_name = (\"{}/import_buoy_sensor_data_{}.txt\"\n .format(input_folder,day_string))\n\n log_file = open(log_file_name, mode=\"w\", encoding='utf-8')\n\n print(\"STARTING: Using verbosity value={}\".format(verbosity)\n ,file=log_file)\n\n print(\"{}:Using settings_filename={}\".format(me, settings_filename)\n ,file=log_file)\n\n print(\"{}:Using data input_folder={}\".format(me, input_folder)\n ,file=log_file)\n\n # engine_spec = get_engine_spec_by_name(name=engine_nick_name)\n d_engine_info = d_lcroyster['database_connections']['lcroyster']\n print(\"Got d_engine_info of length={}\".format(len(d_engine_info))\n ,file=log_file)\n\n engine_spec = (d_engine_info['format'].format(**d_engine_info))\n engine = create_engine(engine_spec)\n\n # If indicated, delete the observations table rows first\n # Potential feature to add:\n '''\n if delete_observation_rows_first:\n if verbosity > 0:\n print(\"Deleting rows of table '{}' before importing input data.\"\n .format(observations_table_name),\n file=log_file)\n sa_metadata = MetaData()\n observations_table_object = Table(observations_table_name,\n sa_metadata, autoload=True, autoload_with=engine)\n observations_table_object.delete()\n '''\n\n oyster_project = OysterProject(engine=engine, log_file=log_file,\n observations_table_name=observations_table_name,verbosity=verbosity,\n max_exception_logs_per_file=max_exception_logs_per_file\n )\n\n # Create various sensor instances\n # for now, each class defines a glob to identify its files\n # and NO other sensor files.\n # This program ASSUMES/requires coordination/pre-enforcement\n # in file naming. 
All \"Diver\" raw sensor file names must\n # end in diver.MON and each Star raw sensor file name must end\n # in star.DAT\n # See the class code for exact 'glob' syntax used.\n\n input_file_folders = [input_folder]\n total_inserts = 0\n total_exceptions = 0\n\n if not skip_diver:\n diver = Diver(project=oyster_project,\n input_file_folders=input_file_folders,\n input_file_globs=['**/*diver.MON'])\n\n print(\"{}: calling diver parse_files\".format(me),file=log_file)\n file_count, n_file_rows, n_inserts, n_exceptions = diver.parse_files(\n verbosity=verbosity)\n\n print(\"{}: Parsed {} Diver files with {} inserts, {} exceptions.\"\n .format(me, file_count, n_inserts, n_exceptions),file=log_file)\n\n total_inserts += n_inserts\n total_exceptions += n_exceptions\n\n if not skip_star:\n star = Star(project=oyster_project,\n input_file_folders=input_file_folders,\n input_file_globs=['**/*star.DAT',]\n )\n\n file_count, n_file_rows, n_inserts, n_exceptions = star.parse_files(\n verbosity=verbosity)\n print(\"{}: Parsed {} Star files with {} inserts, {} exceptions.\"\n .format(me, file_count, n_inserts,n_exceptions),file=log_file)\n\n total_inserts += n_inserts\n total_exceptions += n_exceptions\n\n msg = (\"{}: ENDING: Did {} inserts, had {} exceptions.\\n\"\n \"See log file name='{}'\".format(me, total_inserts, total_exceptions,\n log_file_name))\n print(msg, file=log_file)\n print(msg)\n return\n\n#end def run()\n\n# end main code\n\n# Launch the run() method with parsed command line parameters\n\nif __name__ == \"__main__\":\n\n import argparse\n parser = argparse.ArgumentParser()\n\n #Hold of on making this a command line parameter for now.\n #It could allow for critical outages\n observations_table_name = 'lcroyster_buoyobservation'\n\n # Note: do default to get some settings from user\n # config file, like passwords\n # Add help to instruct about MY_SECRETS_FOLDER, etc.\n # Arguments\n\n '''\n parser.add_argument(\"-d\", \"--delete_observation_rows_first\",\n # type=bool, ## THere is no bool for add_argument, must use int\n #type=int,\n default=True, action='store_true',\n help=\"Defaults to True. This option deletes all table '{}' rows before \"\n 'importing data into the table.'.format(observations_table_name)\n )\n '''\n\n parser.add_argument(\"-v\", \"--verbosity\",\n type=int, default=1,\n help=\"output verbosity integer (0 to 2)\")\n\n parser.add_argument(\"-i\", \"--input_folder\",\n #required=True,\n # default=\"U:\\\\data\\\\elsevier\\\\output_exoldmets\\\\test_inbound\\\\\",\n help='All .DAT and .MON files anywhere under this folder will be read '\n 'for imports. The import program will here create the file or '\n 'overwrite a previous import log file.' )\n\n parser.add_argument(\"-x\", \"--max_exception_logs_per_file\",\n type=int, default=5,\n help='Maxiumum number of insert exceptions to report per input file.' )\n\n\n parser.add_argument(\"-l\", \"--log_file_name\",\n #required=True,\n # default=\"U:\\\\data\\\\elsevier\\\\output_exoldmets\\\\test_inbound\\\\\",\n help='This is the name of the output log file to be placed under your '\n 'input folder. 
If not given, a timestamped log file name is '\n 'generated under the input folder.'\n )\n\n args = parser.parse_args()\n\n run(input_folder=args.input_folder,\n observations_table_name=observations_table_name,\n log_file_name=args.log_file_name,\n max_exception_logs_per_file=args.max_exception_logs_per_file,\n skip_star=0,\n skip_diver=0,\n verbosity=args.verbosity)\n\n#end if __name__ == \"__main__\"\n\n#END FILE\n","repo_name":"kshitijvr93/Django-Work-Library","sub_path":"projects/lone_cabbage_2017/data_management/import_buoy_sensor_data.py","file_name":"import_buoy_sensor_data.py","file_ext":"py","file_size_in_byte":48633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25008420518","text":"n = int(input())\n \nnumbers = list(map(int, input().rstrip().split(\" \")))\n \ncounter, s, d = 0, 0, 0\n \nwhile len(numbers): \n \n m = max(numbers[0], numbers[-1])\n \n if counter % 2 == 0: s += m\n else : d += m\n \n if m == numbers[0]: numbers.pop(0) # pop by position; remove(value) could delete a wrong duplicate\n else : numbers.pop()\n \n counter+=1\n \nprint(f\"{s} {d}\")\n","repo_name":"faizurrahman1998/problem_solving_w_cpp_and_python","sub_path":"serejaAndDima/serejaAndDima.py","file_name":"serejaAndDima.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18178529351","text":"from __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\n\nimport numpy as np\n\nfrom torch.utils.data import Dataset, DataLoader\n\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass SoilMoistureDataset(Dataset):\n def __init__(self, csv_file, transform = None, include_features = None, include_static = None, unit = 'month'):\n \"\"\"\n\n :param csv_file: Path to the csv file\n :param transform: Optional transform to be applied on a sample\n \"\"\"\n\n\n data = pd.read_csv(csv_file)\n data = data.iloc[:946080, ]\n data['formatted_date'] = pd.to_datetime(data.Date, format='%Y%m%d')\n data['mask'] = 1\n data['mask'][pd.isna(data.SMAP_1km)] = 0\n data['month'] = data['formatted_date'].dt.month\n data['year'] = data['formatted_date'].dt.year\n data['SMAP_1km'][pd.isna(data['SMAP_1km'])] = 0\n data['index'] = list(range(data.shape[0]))\n\n # time varying features\n features = data[['prcp', 'srad', 'tmax', 'tmin', 'vp']]\n # normalize the features\n features = (features - features.mean()) / features.std()\n\n # time independent features\n static = data[['elevation', 'slope', 'aspect', 'hillshade', 'clay', 'sand', 'bd', 'soc', 'LC']]\n # normalize the features\n static = (static - static.mean()) / static.std()\n\n\n\n\n self.data = data\n self.features = features\n self.static = static\n\n\n if unit == \"month\":\n # use one month's data\n self.ind_list = data['index'][~data.duplicated(subset=['POINTID', 'month', 'year'])]\n elif unit == \"year\":\n # use one year's data\n self.ind_list = data['index'][~data.duplicated(subset=['POINTID', 'year'])]\n else:\n raise ValueError(\"Time unit not valid\")\n\n self.ind_list = list(self.ind_list) + [data.shape[0]]\n self.transform = transform\n self.include_features = include_features\n self.include_static = include_static\n\n def __len__(self):\n return len(self.ind_list) - 1\n\n def __getitem__(self,idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n mask = self.data['mask'].iloc[self.ind_list[idx]:self.ind_list[idx+1]].values\n x = 
self.data['SMAP_1km'].iloc[self.ind_list[idx]:self.ind_list[idx+1]].values\n start = 0\n ind = 0\n while ind < len(mask):\n if mask[ind] == 1:\n break\n else:\n ind += 1\n\n\n\n x = x[ind:]\n mask = mask[ind:]\n\n if self.include_features and self.include_static:\n features = self.features.iloc[self.ind_list[idx]:self.ind_list[idx+1],:].values\n static = self.static.iloc[self.ind_list[idx]:self.ind_list[idx+1],:].values\n features = features[ind:,:]\n static = static[ind:,:]\n sample = (x, mask, features,static)\n elif self.include_features:\n features = self.features.iloc[self.ind_list[idx]:self.ind_list[idx+1],:].values\n features = features[ind:,:]\n sample = (x, mask, features)\n elif self.include_static:\n static = self.static.iloc[self.ind_list[idx]:self.ind_list[idx+1],:].values\n static = static[ind:,:]\n sample = (x, mask, static)\n else:\n sample = (x, mask)\n\n\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n\nif __name__ == \"__main__\":\n\n mydata = SoilMoistureDataset(\"../data/SMAP_Climate_In_Situ.csv\", None, True, True, 'year')\n (x, mask, features, static) = mydata[0]\n print(x.shape)\n print(mask.shape)\n print(features.shape)\n print(static.shape)\n print(\"x is\", x)\n print(\"mask is\", mask)\n print(\"features are\", features)\n print(\"static features are\", static)\n print(len(mydata))\n","repo_name":"KEHUIYAO/soil_moisture_project","sub_path":"temp/load_data_2.py","file_name":"load_data_2.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43039635780","text":"import os\ndata_root = \"./data/wrench_data\"\ndatasets = [\n \"youtube\",\n \"sms\",\n \"imdb\",\n \"yelp\",\n \"agnews\",\n \"trec\",\n \"chemprot\",\n \"cdr\",\n \"spouse\",\n \"semeval\"\n]\n\nimbalanced_datasets = [\"sms\", \"cdr\", \"spouse\"]\nlabel_models = [\"Snorkel\"]\nend_models = [\"logistic\"]\n\nfor dataset in datasets:\n if dataset in imbalanced_datasets:\n metric = \"f1\"\n else:\n metric = \"acc\"\n\n for lm in label_models:\n for em in end_models:\n cmd = f\"python main.py --dataset-name {dataset} --lf-agent wrench --label-model {lm} --end-model {em} \" \\\n f\"--tune-metric {metric} --save-wandb\"\n print(cmd)\n os.system(cmd)\n","repo_name":"Gnaiqing/LLMDP","sub_path":"batch_wrench.py","file_name":"batch_wrench.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23792588941","text":"import copy\nimport numpy as np\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\nfrom numpy.random import normal\n\nA = np.array(\n [[1, 1],\n [0, 1]], dtype=np.float32)\n\nH = np.array(\n [[1, 0],\n [0, 1]], dtype=np.float32)\n\nX_real = np.array(\n [[0],[1]], dtype=np.float32)\n\nI = np.array(\n [[1, 0],\n [0, 1]], dtype=np.float32)\n\n# Covariance of the process noise\nQ = np.array(\n [[0.1, 0], [0, 0.1]], dtype=np.float32)\n\n# Covariance of the measure noise\nR = np.array(\n [[1., 0], [0, 1.]], dtype=np.float32)\n\n# Initialize the covariance of the error between \n# evaluated value and the real value.\nP = np.array(\n [[1, 0],[0, 1]], dtype=np.float32)\n\npos_real_list = []\nvel_real_list = []\nmeasure_pose_list = []\nmeasure_vel_list = []\neval_pos_list = []\neval_vel_list = []\n\nSTEP=20\n\nfor i in range(STEP):\n W = normal(loc=0.0, scale=0.316, size=(2, 1))\n V = normal(loc=0.0, scale=1.0, size=(2, 1))\n X_pre_eval = np.dot(A, X_real) \n X_real = 
np.dot(A, X_real) + W\n Z = np.dot(H, X_real) + V\n\n Pk_1 = copy.deepcopy(P)\n P_pre_k = np.dot(np.dot(A, Pk_1), A.transpose()) + Q\n K = np.dot(np.dot(P_pre_k, H.T), inv(np.dot(np.dot(H, P_pre_k), H.T) + R))\n X_eval = X_pre_eval + np.dot(K, Z - np.dot(H, X_pre_eval))\n\n P = np.dot(I - np.dot(K, H), P_pre_k) # standard covariance update: P = (I - K H) P_pre\n\n pos_real_list.append(X_real[0, 0])\n vel_real_list.append(X_real[1, 0])\n measure_pose_list.append(Z[0, 0])\n measure_vel_list.append(Z[1, 0])\n eval_pos_list.append(X_eval[0, 0])\n eval_vel_list.append(X_eval[1, 0])\n\n\nplt.plot([i for i in range(STEP)], pos_real_list, label=\"pos_real\")\nplt.plot([i for i in range(STEP)], measure_pose_list, label='pos_mea')\nplt.plot([i for i in range(STEP)], eval_pos_list, label='eval_pos')\nplt.legend()\n\nplt.figure()\nplt.plot([i for i in range(STEP)], vel_real_list, label='vel_real')\nplt.plot([i for i in range(STEP)], measure_vel_list, label='vel_mea')\nplt.plot([i for i in range(STEP)], eval_vel_list, label='eval_vel')\n\nplt.legend()\nplt.show()","repo_name":"Kin9L/KalmanFilter","sub_path":"kalmanfilter.py","file_name":"kalmanfilter.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10272587068","text":"import numpy as np\nfrom sklearn.base import BaseEstimator\n\nfrom sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n\n'''\nUsage:\n-----\n## Find model with best param 'delta' and 'max_depth' using a GridSearchCV or a StratifiedKFold\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nimport RegularDecisionTree as rdt\nmodels = [i['class'] for i in instances]\nlist_nodes = list(G.nodes)\nflow_array = [[] for i in range(len(models))]\nfor i in range(len(models)):\n for n in list_nodes:\n flow_array[i].append(flow[n][i])\nnb_sensors = 3\nreg_parameters = {'delta': [0.1+0.1*i for i in range(10)], 'max_depth': [4,6,8,10]}\nreg_model = rdt.RegularizedDecisionTreeClassifier(max_n_features=nb_sensors)\nskf = StratifiedKFold(n_splits=5)\ngrid_reg = GridSearchCV(reg_model, reg_parameters, cv = skf, n_jobs= 4)\ngrid_reg.fit(flow_array,models)\nprint(grid_reg.best_params_)\nprint(grid_reg.best_score_)\nselected_nodes = []\nfor id_n in list(grid_reg.best_estimator_.selected_features_):\n selected_nodes.append(list_nodes[id_n])\n## Feedback: use previously observed nodes\n## by keeping the list of INDEXES of the previously selected nodes\nhist_observed_nodes = grid_reg.best_estimator_.selected_features_ \n## Then it can be passed as parameter of the fit(.) 
function\ngrid_reg.fit(new_flow_array,new_models,hist_observed_nodes)\n## WARNING: the order of nodes in flow_array should be the same between each iteration\n## of the feedback loop\n## Newly selected nodes will not contain elements of hist_observed_node\n## You have to keep track of the previously observed nodes indexes.\ngrid_reg.best_estimator_.selected_features_ \n'''\n\n\nclass Node:\n def __init__(self, depth = 0, counts = None, impurity = None): \n self.depth = depth \n self.counts = counts\n self.impurity = impurity\n # links to the left and right child nodes\n self.right = None\n self.left = None \n # derived from splitting criteria\n self.column = None\n self.threshold = None\n # counts for sample inside the node to belong for each of the given classes\n # depth of the given node\n self.is_terminal = False\n\n\n def print(self,tab=''):\n if not self.is_terminal:\n print(f'{tab}X[{self.column}] <= {round(self.threshold,3)} I:{round(self.impurity,4)} {list(self.counts)}') \n self.left.print(tab+' ')\n self.right.print(tab+' ')\n else:\n print(f'{tab}Leaf I:{round(self.impurity,4)} {list(self.counts)}')\n\nclass RegularizedDecisionTreeClassifier(BaseEstimator):\n '''\n Usage:\n ------\n import numpy as np\n import RegularDecisionTree as rdt\n nodes_list = list(G.nodes())\n print(len(nodes_list))\n models = [i['class'] for i in instances]\n flow_lists = [[] for i in range(len(models))]\n for i in range(len(models)):\n for n in nodes_list: \n flow_lists[i].append(flow[n][i])\n reg_model = rdt.RegularizedDecisionTreeClassifier(delta= 1., max_n_features=3, max_depth = 10, min_samples_leaf=1, min_samples_split=2)\n reg_model.fit(flow_lists, models)\n Then use as a regular Decision Tree Classifier\n '''\n def __init__(self, delta=1.0, max_n_features=3, max_depth = 3, min_samples_leaf = 1, min_samples_split = 2):\n self.delta = delta\n self.depth = 0\n self.max_n_features = max_n_features \n self.max_depth = max_depth\n self.min_samples_leaf = min_samples_leaf\n self.min_samples_split = min_samples_split\n\n def nodeCounts(self,y):\n '''\n Calculates Classes counts in a given node\n ''' \n counts = []\n for one_class in range(len(self.classes_labels_)):\n c = y[y == one_class].shape[0]\n counts.append(c)\n return np.asarray(counts)\n\n def gini(self, y):\n '''\n Calculates gini criterion\n '''\n n_labels = len(y)\n if n_labels <= 1:\n return 0\n _,counts = np.unique(y, return_counts=True)\n probas = counts / n_labels \n return 1 - np.sum(probas**2)\n\n def entropy(self, y):\n '''\n Calculates entropy criterion\n '''\n n_labels = len(y)\n if n_labels <= 1:\n return 0\n _,counts = np.unique(y, return_counts=True)\n probas = counts / n_labels\n n_classes = np.count_nonzero(probas)\n if n_classes <= 1:\n return 0\n\n ent = 0.\n # Compute entropy\n for i in probas:\n if i > 0.:\n ent -= i * np.log2(i)\n return ent\n \n def calcImpurity(self, y):\n '''\n Wrapper for the impurity calculation.\n '''\n # return self.gini(y)\n return self.entropy(y)\n \n def candidateFeatures(self, cols):\n '''\n Output list of features that can be used to compute best splits\n If max_n_features reached max_n_features then only output\n a list with already selected features + the default features (parameter of init())\n '''\n candidates = [] \n ## Cannot use any more features \n if len(self.selected_features_) == self.max_n_features:\n candidates = list(self.selected_features_)\n ## add default features\n candidates.extend(self.default_features_)\n else:\n candidates = cols\n \n 
np.random.shuffle(np.array(candidates))\n return(candidates)\n \n def calcBestSplit(self, X, y):\n '''\n Calculates the best possible split for the current node of the tree\n ''' \n bestSplitCol = None\n bestThresh = None\n bestInfoGain = -999\n \n impurityBefore = self.calcImpurity(y)\n \n # List of possible feature for split\n candidates_features = self.candidateFeatures(range(X.shape[1]))\n\n # For each possible feature\n for col in candidates_features:\n x_col = X[:, col]\n ## if only single value\n # print(x_col)\n if np.min(x_col) == np.max(x_col):\n continue\n\n factor_reg = 1.\n # If the feature was never used before\n if col not in self.selected_features_:\n factor_reg = self.delta\n \n ## sort x_col\n # s_x_col = np.sort(x_col)\n s_x_col = np.unique(x_col)\n\n # for each sorted value in the column (expect the last) \n for i in range(s_x_col.shape[0]-1):\n \n threshold = s_x_col[i]/2. + s_x_col[i+1]/2.\n\n y_right = y[x_col > threshold]\n y_left = y[x_col <= threshold]\n if y_right.shape[0] < self.min_samples_leaf or y_left.shape[0]< self.min_samples_leaf:\n continue\n\n # calculate impurity for the right and left nodes\n impurityRight = self.calcImpurity(y_right)\n impurityLeft = self.calcImpurity(y_left)\n\n # calculate information gain\n infoGain = impurityBefore\n infoGain -= impurityLeft * y_left.shape[0] / y.shape[0]\n infoGain -= impurityRight * y_right.shape[0] / y.shape[0]\n infoGain *= factor_reg \n \n # is this infoGain better then all other?\n if infoGain >= bestInfoGain:\n bestSplitCol = col\n bestThresh = threshold\n bestInfoGain = infoGain\n\n\n # if we still didn't find the split\n if bestInfoGain == -999:\n return None, None, None, None, None, None\n \n # making the best split \n x_col = X[:, bestSplitCol]\n x_left, x_right = X[x_col <= bestThresh, :], X[x_col > bestThresh, :]\n y_left, y_right = y[x_col <= bestThresh], y[x_col > bestThresh]\n\n return bestSplitCol, bestThresh, x_left, y_left, x_right, y_right \n \n def splitNode(self, X, y, node):\n # checking for the terminal conditions \n if node.depth >= self.max_depth:\n node.is_terminal = True\n return None, None, None, None\n\n if X.shape[0] < self.min_samples_split:\n node.is_terminal = True\n return None, None, None, None\n\n if np.unique(y).shape[0] == 1:\n node.is_terminal = True\n return None, None, None, None\n\n # calculating current split\n splitCol, thresh, x_left, y_left, x_right, y_right = self.calcBestSplit(X, y)\n \n if splitCol is None:\n node.is_terminal = True\n return None, None, None, None \n\n # Do the child nodes have enough samples?\n if x_left.shape[0] < self.min_samples_leaf or x_right.shape[0] < self.min_samples_leaf:\n node.is_terminal = True\n return None, None, None, None \n\n if splitCol not in self.default_features_:\n self.selected_features_.add(splitCol) \n \n self.depth = max(self.depth,node.depth+1)\n node.column = splitCol\n node.threshold = thresh\n return x_left, y_left, x_right, y_right\n \n def fit(self, X, y, default_selected = []):\n '''\n Standard fit function to run all the model training\n Input\n -----\n X: Features\n y: classes to predicts\n prev_selected: list(int) indexes of variable \n ''' \n\n ## Get indexes of prev selected variables\n self.default_features_ = default_selected \n\n ## Convert X,y into correct type\n X,y = check_X_y(X,y)\n self.n_features_in_ = X.shape[1]\n \n self.classes_labels_, self.classes_ = np.unique(y, return_inverse=True)\n self.selected_features_ = set()\n\n if True in np.iscomplex(y):\n raise ValueError('Complex data not 
supported')\n if X.shape[0]!= self.classes_.shape[0]:\n raise ValueError('X and y should have the same length') \n\n # Root node creation\n self.tree_ = Node(0,self.nodeCounts(self.classes_),self.calcImpurity(self.classes_))\n\n ## Tree construction using BFS\n next_level = [(self.tree_, X, self.classes_)]\n while len(next_level) > 0:\n\n cur_level = next_level\n np.random.shuffle(cur_level)\n next_level = []\n\n for node, Xn, yn in cur_level:\n\n Xn_left, yn_left, Xn_right, yn_right = self.splitNode(Xn, yn, node)\n \n if not node.is_terminal:\n node.left = Node(node.depth + 1,self.nodeCounts(yn_left),self.calcImpurity(yn_left)) \n node.right = Node(node.depth + 1,self.nodeCounts(yn_right),self.calcImpurity(yn_right))\n\n next_level.append((node.left, Xn_left, yn_left))\n next_level.append((node.right, Xn_right, yn_right))\n\n return self\n \n def predictSample(self, x, node):\n '''\n Passes one object through decision tree and return the probability of it to belong to each class\n '''\n # if we have reached the terminal node of the tree\n if node.is_terminal:\n ## return probability vector\n c = node.counts\n return c / sum(c)\n \n if x[node.column] > node.threshold:\n probas = self.predictSample(x, node.right)\n else:\n probas = self.predictSample(x, node.left) \n return probas \n \n def predict(self, X):\n '''\n Returns the predicted labels for each X \n X = np array\n ''' \n check_is_fitted(self)\n\n X = check_array(X,dtype='numeric')\n \n predictions = []\n for x in X:\n pred = np.argmax(self.predictSample(x, self.tree_))\n predictions.append(self.classes_labels_[pred])\n \n return np.asarray(predictions)\n\n def predict_proba(self, X):\n '''\n Returns the probabilities for each X to belong to \n each class \n X = np array\n ''' \n check_is_fitted(self)\n\n X = check_array(X,dtype='numeric')\n\n probas = []\n for x in X:\n p_x = self.predictSample(x, self.tree_)\n probas.append(p_x)\n return np.asarray(probas)\n\n\n def score(self, X, y):\n '''\n Return the average accuracy of the predictions for data X w.r.t. 
y\n '''\n\n check_is_fitted(self)\n\n X,y = check_X_y(X,y)\n\n n_good_predict = 0\n for i in range(X.shape[0]):\n pred = np.argmax(self.predictSample(X[i,:], self.tree_))\n if self.classes_labels_[pred] == y[i] :\n n_good_predict += 1\n return n_good_predict / X.shape[0]\n","repo_name":"ruishibasaki/FlotesAppPub","sub_path":"main/RegularDecisionTree.py","file_name":"RegularDecisionTree.py","file_ext":"py","file_size_in_byte":12920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1533850377","text":"from django.contrib import admin\nfrom django.urls import path\nfrom LMS import views\n\nurlpatterns = [\n path(\"\", views.index, name='home'),\n path(\"teacher\", views.teacher, name='teacher'),\n path(\"student\", views.student, name='student'),\n path(\"parent\", views.parent, name='parent'),\n path(\"Courses\", views.Coursesviews, name='Courses'),\n path(\"Assignment/\", views.Assignmentviews, name='Assignment'),\n path(\"Marks/\", views.Marksview, name='Marks'),\n path(\"studentdetails/\", views.studentdetailsview, name='studentdetails')\n]\n","repo_name":"manas2599/Practise","sub_path":"LMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5128489382","text":"with open(\"./rosalind_iev.txt\") as f:\n l = f.readline().strip().split(\" \")\n\np = [float(i) for i in l]\n\n\nP = [1., 1., 1., 3/4, 1/2, 0.]\n\noff = 0\nfor i in range(len(P)):\n off += 2.0 * P[i]*p[i]\n\nprint(off)\n\n","repo_name":"lucaparmigiani/Rosalind","sub_path":"013.IEV/013.IEV.py","file_name":"013.IEV.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31555743329","text":"# coding=utf-8\nfrom menu.models import MenuItem\nfrom users.models import SAWPermission\nfrom users.groups import GUEST, MEMBER, BOARD_MEMBER\n\nDESCRIPTION = \"Create events that people can sign up for\"\n\nCAN_CREATE_EVENTS = \"can_create_events\"\nCAN_VIEW_SIGNUP_INFO = \"can_view_signup_info\"\n\nCAN_VIEW_AND_JOIN_PUBLIC_EVENTS = \"can_view_and_join_public_events\"\nCAN_VIEW_AND_JOIN_MEMBER_EVENTS = \"can_view_and_join_member_events\"\nCAN_VIEW_AND_JOIN_BOARD_MEMBER_EVENTS = \"can_view_and_join_board_member_events\"\n\n\ndef get_menu_items():\n \"\"\"\n :return: a tuple ([main menu items], [settings menu items], [others])\n \"\"\"\n item, created = MenuItem.get_or_create(identifier=\"events_home\",\n app_name=__package__,\n display_name=\"Events\",\n reverse_string=\"events_home\",\n permission=SAWPermission.get(CAN_VIEW_AND_JOIN_PUBLIC_EVENTS))\n return ([item],\n None,\n None)\n\n\ndef get_urls():\n \"\"\"\n :returns: A tuple of regexes describing what URLs the top-level URL dispatcher should associate with this module\n \"\"\"\n return r\"^events/\",\n\n\ndef get_permissions():\n \"\"\"\n :return: a list of tuples containing the permissions of this module and their default group\n \"\"\"\n return (\n (CAN_VIEW_AND_JOIN_PUBLIC_EVENTS, GUEST, \"Can view and join public events\"),\n (CAN_VIEW_AND_JOIN_MEMBER_EVENTS, MEMBER, \"Can view and join member events\"),\n (CAN_VIEW_AND_JOIN_BOARD_MEMBER_EVENTS, BOARD_MEMBER, \"Can view and join board member events\"),\n (CAN_CREATE_EVENTS, BOARD_MEMBER, \"Can create and edit events\"),\n (CAN_VIEW_SIGNUP_INFO, BOARD_MEMBER, \"Can view normally hidden info about signed up people\"),\n 
)\n","repo_name":"Lundis/SAW","sub_path":"studassweb/events/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9620813520","text":"print('Эта программа поможет вам вычислить возможный доход за год, от вложенной вами суммы в различных банках')\nper_cent = {'ТКБ': 5.6, 'СКБ': 5.9, 'ВТБ': 4.28, 'СБЕР': 4.0}\nname = list(per_cent.keys())\nd = list(per_cent.values())\ndeposit = []\nmoney = float(input('Введите сумму в рублях котроую вы хотите вложить ' ))\nwhile money == 0 or money <0:\n print('Введенная вами сумма не должна быть равна или меньше 0')\n money = float(input('Введите сумму в рублях котроую вы хотите вложить '))\nfor x in d:\n n = round((x * money / 100),2)\n deposit.append(n)\nprint ('Доход за год составит ')\ns = 0\nfor u in deposit:\n print (name[s], u, end=' рубля(ей), ')\n s = s + 1\nprint(' ')\nprint ('Максимальная сумма, кото'\n 'рую вы можете заработать ', max(deposit), 'рубля(ей)')\n","repo_name":"AndrewFuraev/Zadanie17.7.3","sub_path":"bankcounter.py","file_name":"bankcounter.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9994199040","text":"from nanpy.ad9850 import AD9850\nfrom nanpy.serialmanager import SerialManager\n\n# http://nr8o.dhlpilotcentral.com/?p=83\nW_CLK = 'A5' # Pin 8 - connect to AD9850 module word load clock pin (CLK)\nFQ_UD = 'A4' # Pin 9 - connect to freq update pin (FQ)\nDATA = 'A3' # Pin 10 - connect to serial data load pin (DATA)\nRESET = 'A2' # Pin 11 - connect to reset pin (RST).\n\nF = 440 # Hz\n\n\ndef dds():\n connection = SerialManager()\n dds = AD9850([W_CLK, FQ_UD, DATA, RESET], connection=connection)\n\n dds.setup()\n dds.write_frequency(F)\n\n\nif __name__ == '__main__':\n dds()\n","repo_name":"nanpy/nanpy","sub_path":"nanpy/examples/dds.py","file_name":"dds.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"67"} +{"seq_id":"3606309842","text":"from django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.template import RequestContext\nfrom rest_framework.documentation import include_docs_urls\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.views.generic.base import RedirectView\nimport curate.views as views\nimport curate.views.api as api\nfrom curate.views.author_embed import author_embed\n\n\nurlpatterns = [\n path('', RedirectView.as_view(url='/app', permanent=False), name='index'),\n re_path(r'^app/(.*)$', views.router_index),\n path('admin/', admin.site.urls),\n #path('accounts/', include('django.contrib.auth.urls')),\n path('accounts/', include('allauth.urls')),\n path('invitations/', include('invitations.urls', namespace='invitations')),\n] + static(\"/dist/\", document_root=\"dist\") + static(\"/sitestatic/\", document_root=\"sitestatic\")\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\nurlpatterns += [\n path('api/accounts/', api.list_accounts, name='api-list-accounts'),\n path('api/accounts//', api.view_user, name='api-view-user'),\n path('api/', api.index, name='api-index'),\n path('api/docs/', include_docs_urls(title=\"Curate Science API\")),\n path('api/schema/', api.schema, name='api-schema'),\n 
path('api/invitations/create/', api.create_invitation, name='api-create-invitation'),\n # Author paths\n path('api/authors/', api.list_authors, name='api-list-authors'),\n path('api/authors/autocomplete/', api.AuthorAutocomplete.as_view(), name='author-autocomplete'),\n path('api/authors/create/', api.create_author, name='api-create-author'),\n path('api/authors//', api.view_author, name='api-view-author'),\n path('api/authors//update/', api.update_author, name='api-update-author'),\n path('api/authors//delete/', api.delete_author, name='api-delete-author'),\n # Article paths\n path('api/articles/', api.list_articles, name='api-list-articles'),\n path('api/authors//articles/', api.list_articles_for_author, name='api-list-articles-for-author'),\n path('api/authors//articles/linkage/', api.link_articles_to_author, name='api-link-articles-to-author'),\n path('api/articles/autocomplete/', api.ArticleAutocomplete.as_view(), name='article-autocomplete'),\n path('api/articles/create/', api.create_article, name='api-create-article'),\n path('api/articles/search/', api.search_articles, name='api-search-articles'),\n path('api/articles//', api.view_article, name='api-view-article'),\n path('api/articles//update/', api.update_article, name='api-update-article'),\n path('api/articles//delete/', api.delete_article, name='api-delete-article'),\n\n # Key figure paths\n path('api/articles//key_figures/', api.list_key_figures_for_article,\n name='api-list-key-figures-for-article'),\n path('api/articles//key_figures/upload/',\n api.ImageUploadView.as_view(), name='api-create-key-figure'),\n path('api/key_figures//', api.view_key_figure, name='api-view-key-figure'),\n path('api/key_figures//delete/', api.delete_key_figure, name='api-delete-key-figure'),\n\n # Commentary paths\n path('api/commentaries/', api.list_commentaries, name='api-list-commentaries'),\n path('api/commentaries/create/', api.create_commentary, name='api-create-commentary'),\n path('api/commentaries//', api.view_commentary, name='api-view-commentary'),\n path('api/commentaries//update/', api.update_commentary, name='api-update-commentary'),\n path('api/commentaries//delete/', api.delete_commentary, name='api-delete-commentary'),\n path('api/search/', api.search_articles_and_authors, name='api-search-articles-and-authors'),\n\n # Author embed\n path('author-embed/.js', author_embed, name='author-embed-script')\n]\n","repo_name":"ScienceCommons/curate_science","sub_path":"curate_science/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"19738917789","text":"def sorteio(lista):\n for num, elem in enumerate(lista):\n #print(elem, num)\n if elem == num + 1:\n return elem\n\nteste = 1\nwhile True:\n n = int(input())\n if n == 0:\n break\n l = list(map(int, input().split()))\n print(\"Teste %d\" % teste)\n if n == l[-1]:\n print(n)\n else:\n print(sorteio(l))\n teste += 1","repo_name":"Filipelion/Competitive-programming","sub_path":"Contests/Roteiro UFMG/Roteiro 0/QUERM.py","file_name":"QUERM.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27057821230","text":"import requests\r\nfrom .login import *\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\n\r\n\r\nimage_number = 0\r\nstorage_number = 1\r\nlink = f'https://zastavok.net'\r\n\r\nfor storage in range(1):\r\n response = 
requests.get(f'{link}/{storage_number}').text\r\n soup = BeautifulSoup(response, 'lxml')\r\n block = soup.find('div', class_='block-photo')\r\n all_images = block.find_all('div', class_='short_full')\r\n\r\n for image in all_images:\r\n image_link = image.find('a').get('href')\r\n download_storage = requests.get(f'{link}/{image_link}').text\r\n download_soup = BeautifulSoup(download_storage, 'lxml')\r\n download_block = download_soup.find('div', class_='image_data').find('div', class_='block_down')\r\n result_link = download_block.find('a').get('href')\r\n\r\n h1_name = download_soup.find('div', class_='wall_page-speedbar').find('h1')\r\n image_name = h1_name.string\r\n image_name = image_name[1:-1]\r\n\r\n image_bytes = requests.get(f'{link}{result_link}').content\r\n\r\n with open(f'images/{image_name}.jpg', 'wb') as file:\r\n file.write(image_bytes)\r\n\r\n print(f'{image_name}.jpg Image successfully downloaded')\r\n\r\n storage_number += 1\r\n\r\n#print(all_images[0])\r\n\r\n","repo_name":"Nalivator3000/bs4","sub_path":"get_files.py","file_name":"get_files.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2561815122","text":"# Exercise 8 : How Old Are You On Jupiter?\n# Instructions\n# Given an age in seconds, calculate how old someone would be on:\n# Earth: orbital period 365.25 Earth days, or 31557600 seconds\n# Mercury: orbital period 0.2408467 Earth years\n# Venus: orbital period 0.61519726 Earth years\n# Mars: orbital period 1.8808158 Earth years\n# Jupiter: orbital period 11.862615 Earth years\n# Saturn: orbital period 29.447498 Earth years\n# Uranus: orbital period 84.016846 Earth years\n# Neptune: orbital period 164.79132 Earth years\n# So if you are told someone is 1,000,000,000 seconds old, the function should output that they are 31.69 Earth-years old.\n\nimport datetime\n\n\ndef age_planet(orbital_period):\n planets = {\n 1: 'Earth',\n 0.2408467: 'Mercury',\n 0.61519726: 'Venus',\n 1.8808158: 'Mars',\n 11.862615: 'Jupiter',\n 29.447498: 'Saturn',\n 84.016846: 'Uranus',\n 164.79132: 'Neptune'}\n\n if orbital_period not in planets:\n return 'Unknown data'\n\n user = datetime.datetime.strptime(input('please enter you birth date (format: day/month/year): '), '%d/%m/%Y')\n age_seconds = (datetime.datetime.now() - user).days * 86400 + (datetime.datetime.now() - user).seconds\n return f\"Your age on {planets[orbital_period]} is {round(age_seconds / 31557600 * orbital_period, 2)} years\"\n\nprint(age_planet(84.016846))\n","repo_name":"alegofrenicht/DI_Bootcamp","sub_path":"Week10/Day2/Exercises/Ex8/exercise8.py","file_name":"exercise8.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26387699263","text":"from launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, OpaqueFunction, RegisterEventHandler\nfrom launch.event_handlers import OnProcessExit\nfrom launch.conditions import IfCondition\n# from launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution\nfrom launch_ros.actions import Node\nfrom launch_ros.substitutions import FindPackageShare\n\ndef launch_setup(context, *args, **kwargs):\n description_package = FindPackageShare('indy_description')\n indy_driver_package = FindPackageShare('indy_driver')\n\n # Initialize Arguments\n 
name = LaunchConfiguration(\"name\")\n indy_ip = LaunchConfiguration(\"indy_ip\")\n indy_type = LaunchConfiguration(\"indy_type\")\n indy_eye = LaunchConfiguration(\"indy_eye\")\n indy_sw = LaunchConfiguration(\"indy_sw\")\n prefix = LaunchConfiguration(\"prefix\")\n launch_rviz = LaunchConfiguration(\"launch_rviz\")\n\n if (indy_type.perform(context) == 'indyrp2') or (indy_type.perform(context) == 'indyrp2_v2'):\n initial_joint_controllers = PathJoinSubstitution(\n [indy_driver_package, \"controller\", \"indy_controllers_7dof.yaml\"]\n )\n else:\n initial_joint_controllers = PathJoinSubstitution(\n [indy_driver_package, \"controller\", \"indy_controllers_6dof.yaml\"]\n )\n\n robot_description_content = Command(\n [\n PathJoinSubstitution([FindExecutable(name=\"xacro\")]),\n \" \",\n PathJoinSubstitution([description_package, \"urdf\", \"indy.urdf.xacro\"]),\n \" \",\n \"name:=\",\n name,\n \" \",\n \"indy_type:=\",\n indy_type,\n \" \",\n \"indy_eye:=\",\n indy_eye,\n \" \",\n \"prefix:=\",\n prefix,\n ]\n )\n robot_description = {\"robot_description\": robot_description_content}\n\n rviz_config_file = PathJoinSubstitution(\n [description_package, \"rviz_config\", \"indy.rviz\"]\n )\n\n indy_control_node = Node(\n package=\"controller_manager\",\n executable=\"ros2_control_node\",\n parameters=[robot_description, initial_joint_controllers],\n output=\"screen\",\n )\n\n robot_state_publisher_node = Node(\n package=\"robot_state_publisher\",\n executable=\"robot_state_publisher\",\n output=\"screen\",\n parameters=[robot_description],\n )\n\n indy_driver = Node(\n package=\"indy_driver\",\n executable=\"indy_driver.py\",\n name=\"indy_driver\",\n output=\"screen\",\n emulate_tty=True,\n parameters=[\n {'indy_ip': indy_ip.perform(context)},\n {'indy_type': indy_type.perform(context)},\n {'indy_sw': indy_sw.perform(context)},\n ],\n )\n\n rviz_node = Node(\n condition=IfCondition(launch_rviz),\n package=\"rviz2\",\n executable=\"rviz2\",\n name=\"rviz2\",\n output=\"log\",\n arguments=[\"-d\", rviz_config_file],\n )\n\n # Delay rviz\n delay_rviz2_spawner = RegisterEventHandler(\n event_handler=OnProcessExit(\n target_action=robot_state_publisher_node,\n on_exit=[rviz_node],\n )\n )\n\n nodes_to_start = [\n indy_driver,\n # indy_control_node,\n robot_state_publisher_node,\n # delay_rviz2_spawner\n rviz_node\n ]\n\n return nodes_to_start\n\n\ndef generate_launch_description():\n declared_arguments = []\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"name\",\n default_value=\"indy\"\n )\n )\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"indy_ip\", \n description=\"IP address for real robot\"\n )\n )\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"indy_type\",\n default_value=\"indy7\",\n description=\"Type of Indy robot.\",\n choices=[\"indy7\", \"indy7_v2\" , \"indy12\", \"indy12_v2\", \"indyrp2\", \"indyrp2_v2\"]\n )\n )\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"indy_eye\",\n default_value=\"false\",\n description=\"Work with Indy Eye\",\n )\n )\n \n declared_arguments.append(\n DeclareLaunchArgument(\n \"indy_sw\",\n description=\"Software Version\",\n default_value=\"2\",\n choices=[\"2\", \"3\"]\n )\n )\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"prefix\",\n default_value='\"\"',\n description=\"Prefix of the joint names, useful for multi-robot setup. 
\\\n If changed than also joint names in the controllers configuration have to be updated.\"\n )\n )\n\n declared_arguments.append(\n DeclareLaunchArgument(\n \"launch_rviz\", \n default_value=\"true\", \n description=\"Launch RViz?\"\n )\n )\n\n return LaunchDescription(declared_arguments + [OpaqueFunction(function=launch_setup)])\n","repo_name":"neuromeka-robotics/indy-ros2","sub_path":"indy_driver/launch/indy_bringup.launch.py","file_name":"indy_bringup.launch.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19459163105","text":"import pandas as pd\r\n\r\ninputFilePath = \"Self_Care_Project_DB - Resource_DB.csv\"\r\n\r\ndatabase = pd.read_csv(inputFilePath)\r\ndatabase = database.fillna('')\r\n\r\noutput_json_str = \"{\"\r\nfor i in range(0,len(database)):\r\n\r\n entry_str = f'\"{i}\":'+str(\"{\")\r\n entry_str+=f'\"Resource_ID\":\"{database.loc[i][0]}\",'\r\n entry_str+=f'\"Domain_ID\":\"{database.loc[i][1]}\",'\r\n entry_str+=f'\"Domain\":\"{database.loc[i][2]}\",'\r\n entry_str+=f'\"Resource_Name\":\"{database.loc[i][3]}\",'\r\n entry_str+=f'\"Resource_Description\":\"{database.loc[i][4]}\",'\r\n entry_str+=f'\"Resource_type\":\"{database.loc[i][5]}\",'\r\n entry_str+=f'\"Resource_Redirect_URL\":\"{database.loc[i][6]}\",'\r\n entry_str+=f'\"Category\":\"{database.loc[i][7]}\",'\r\n entry_str+=f'\"Resource_icon\":\"{database.loc[i][8]}\"'\r\n \r\n if i == len(database)-1:\r\n entry_str+= \"}\"\r\n else:\r\n entry_str+= \"},\"\r\n output_json_str+=entry_str\r\n\r\noutput_json_str += \"}\"\r\noutputFile_path = str('Converted_Database.json')\r\nprint(outputFile_path)\r\n\r\nwith open(outputFile_path,\"a+\") as outputFile:\r\n outputFile.write(output_json_str)","repo_name":"Aryan360/Selfcare_Application","sub_path":"UtilityScripts/Convert_DB_to_json/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21573086219","text":"class ChannelJoinError(Exception):\n pass\n\n\nclass ChannelManager():\n def __init__(self, max_channels, max_members):\n self.max_channels = max_channels\n self.max_members = max_members\n self.channels = {}\n\n # Return True if join succesful (socket not joined already), otherwise return False\n def join(self, socket, channel):\n # If channel exists make socket join it\n if channel in self.channels:\n # Check that socket not already joined\n if socket not in self.channels[channel]:\n if len(self.channels[channel]) < self.max_members:\n self.channels[channel].append(socket)\n return True\n else:\n raise ChannelJoinError(\"Too many members on channel. Unable to add more.\")\n # If channel doesn't exist create the channel list and add socket as the only subscriber\n else:\n if len(self.channels) < self.max_channels:\n self.channels[channel] = [socket]\n return True\n else:\n raise ChannelJoinError(\"Too many channels. 
Can't create more.\")\n\n return False\n\n # Part socket from channel but doesn't delete channel if it is empty.\n # Instead returns false if channel is empty and has to be deleted, otherwise returns true\n def __part_but_dont_delete_channel(self, socket, channel):\n if channel in self.channels:\n if socket in self.channels[channel]:\n self.channels[channel].remove(socket)\n\n if self.channels[channel] == []:\n return False\n return True\n\n # return True if part succesful (client had joined first), return False if unsuccesful\n def part(self, socket, channel):\n if socket in self.channels[channel]:\n if not self.__part_but_dont_delete_channel(socket, channel):\n del self.channels[channel]\n return True\n return False\n\n\n # return a list of all the parted channels\n def part_all(self, socket):\n empty_channels = []\n sockets_channels = []\n for channel in self.channels:\n if socket in self.channels[channel]:\n sockets_channels.append(channel)\n if not self.__part_but_dont_delete_channel(socket, channel):\n empty_channels.append(channel)\n for empty_channel in empty_channels:\n del self.channels[empty_channel]\n return sockets_channels\n\n # Returns list of sockets that are subscribed to channel.\n # Returns empty list if channel doesn't exist\n def get(self, channel):\n if self.channels.get(channel) is None:\n return []\n return self.channels.get(channel)\n\n def get_channels_of_socket(self, sock):\n socket_channels = []\n for channel in self.channels:\n if sock in self.channels[channel]:\n socket_channels.append(channel)\n return socket_channels","repo_name":"jjaanila/mChat","sub_path":"src/server/channelmanager.py","file_name":"channelmanager.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7678138700","text":"st = {'item1', 'item2', 'item3', 'item4'}\nst.add('aaaa')\n#st.update(['item5', 'item6', 'item7'])\n\n#{'item5', 'item6', 'item3', 'item1', 'aaaa', 'item7', 'item4', 'item2'}\nprint(st)\n#ad(), frutas.pop() # remove um item aleatório do conjunto, st.clear() \n# > limpar td a set print(fruits) # set(), st3 = st1.union(st2)\n\nconjunto = {'a', 'b', 'c', 'd'}\nuniao = st.union(conjunto)#cria um novo conjunto\nprint(uniao)\n\n# syntax\nst1 = {'item1', 'item2', 'item3', 'item4'}\nst2 = {'item3', 'item2', 'ffawwaef', 'faweadf', 'item1'}\nst3 = st1.intersection(st2) # {'item3', 'item2'}\nprint(st3)\n\n#super conjunto do outro\nwhole_numbers = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}\neven_numbers = {0, 2, 4, 6, 8, 10}\nwhole_numbers.issubset(even_numbers) # False, because it is a super set\nwhole_numbers.issuperset(even_numbers) # True\n\npython = {'p', 'y', 't', 'h', 'o', 'n'}\ndragon = {'d', 'r', 'a', 'g', 'o', 'n'}\npython.issubset(dragon) # False\n\n#.difference(),\nwhole_numbers = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}\nsome_numbers = {1, 2, 3, 4, 5}\nwhole_numbers.symmetric_difference(some_numbers) # {0, 6, 7, 8, 9, 10}\n\n#se e conjunto ou disjunto\neven_numbers = {0, 2, 4, 6, 8}\nodd_numbers = {1, 3, 5, 7, 9}\neven_numbers.isdisjoint(odd_numbers) # True, because no common item\n\npython = {'p', 'y', 't', 'h', 'o', 'n'}\ndragon = {'d', 'r', 'a', 'g', 'o', 'n'}\npython.isdisjoint(dragon) # False, there are common items {'o', 'n'}\n","repo_name":"pollyanarocha416/Python-30Day","sub_path":"Day07.py","file_name":"Day07.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"928471261","text":"from vkbottle.bot import Blueprint, Message, rules\n\nfrom texts import MESSAGES, BUTTONS\nfrom states import Registration, Main\n\nfrom db import DBApi\n\n\nclass UserInfoRule(rules.ABCRule[Message]):\n async def check(self, message: Message) -> dict:\n user = (await bp.api.users.get(message.from_id))[0]\n return {\"user\": user}\n\n\nbp = Blueprint(\"for menu\")\nbp.labeler.vbml_ignore_case = True\n\n\n@bp.on.message(text=BUTTONS['main']['1'])\nasync def get_today_timetable(message: Message):\n async with DBApi() as db:\n timetables = await db.get_user_today_timetable(message.from_id)\n msg = MESSAGES['main']['2']\n for n, timetable in enumerate(timetables, start=1):\n msg = MESSAGES['main']['3'].format(n, timetable.subject, timetable.start, timetable.end,\n timetable.subject.lecturer)\n if timetable.cabinet != \"-\":\n msg += MESSAGES['main']['4'].format(timetable.cabinet)\n else:\n msg += MESSAGES['main']['5']\n msg += \"\\n\\n\"\n\n await message.answer(msg)\n\n","repo_name":"progerg/TimetableBot","sub_path":"bot/blueprints/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28291928807","text":"from config import Config\n\nimport time\nimport hmac\nimport hashlib\nimport base64\nimport requests\nimport json\n\n\nclass SliceIdGenerator:\n \"\"\"slice id生成器\"\"\"\n def __init__(self):\n self.__ch = 'aaaaaaaaa`'\n\n def get_next_id(self):\n ch = self.__ch\n j = len(ch) - 1\n while j >= 0:\n cj = ch[j]\n if cj != 'z':\n ch = ch[:j] + chr(ord(cj) + 1) + ch[j+1:]\n break\n else:\n ch = ch[:j] + 'a' + ch[j+1:]\n j = j -1\n self.__ch = ch\n return self.__ch\n\n\nclass RequestApi:\n def __init__(self, file):\n self.app_id = Config.API_APP_ID\n self.secret_key = Config.API_SECRET_KEY\n self.file = file\n self.file_name = file.filename\n self.file_len = 0\n self.file_hash = ''\n self.task_id = ''\n self.file_slices = {}\n self.slice_num = 0\n self.handle_file()\n\n @classmethod\n def get_common_params(cls):\n app_id = Config.API_APP_ID\n secret_key = Config.API_SECRET_KEY\n ts = str(int(time.time()))\n md5 = hashlib.md5()\n md5.update((app_id + ts).encode('utf-8'))\n md = bytes(md5.hexdigest(), encoding='utf-8')\n signa = hmac.new(secret_key.encode('utf-8'), md, hashlib.sha1).digest()\n signa = base64.b64encode(signa)\n signa = str(signa, 'utf-8')\n return {\n 'app_id': app_id,\n 'signa': signa,\n 'ts': ts,\n }\n\n @staticmethod\n def post(api_name, data, files=None, headers=None):\n url = Config.UPLOAD_PATH_BASE + api_name\n response = requests.post(url, data, files=files, headers=headers)\n result = json.loads(response.text)\n if result[\"ok\"] == 0:\n print(\"{} success:\".format(api_name) + str(result))\n else:\n print(\"{} error:\".format(api_name) + str(result))\n return result\n\n def handle_file(self):\n file_piece_size = Config.FILE_PIECE_SIZE\n sig = SliceIdGenerator()\n md5 = hashlib.md5()\n\n while True:\n content = self.file.read(file_piece_size)\n if not content or len(content) == 0:\n break\n self.slice_num += 1\n self.file_len += len(content)\n self.file_slices[sig.get_next_id()] = content\n md5.update(content)\n self.file_hash = md5.hexdigest()\n\n def prepare(self):\n params = self.get_common_params()\n params['file_len'] = str(self.file_len)\n params['file_name'] = self.file_name\n params['slice_num'] = self.slice_num\n result = self.post('/prepare', params)\n self.task_id = result['data']\n\n def upload(self):\n 
params = self.get_common_params()\n params['task_id'] = self.task_id\n for slice_id, content in self.file_slices.items():\n param = params.copy()\n param['slice_id'] = slice_id\n self.post('/upload', param, files={'filename': slice_id, 'content': content})\n\n def merge(self):\n params = self.get_common_params()\n params['file_name'] = self.file_name\n params['task_id'] = self.task_id\n self.post('/merge', params)\n\n @classmethod\n def get_progress(cls, task_id):\n params = cls.get_common_params()\n params['task_id'] = task_id\n return cls.post('/getProgress', params)\n\n @classmethod\n def get_result(cls, task_id):\n params = cls.get_common_params()\n params['task_id'] = task_id\n return cls.post('/getResult', params)\n\n def call(self):\n self.prepare()\n self.upload()\n self.merge()\n","repo_name":"JoJoJoJoJoJoJo/MeetingHelper","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"25159625560","text":"\"\"\"\n(c) RIKEN 2015. All rights reserved. \nAuthor: Keitaro Yamashita\n\nThis software is released under the new BSD License; see LICENSE.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport sys\n\nfrom cctbx.array_family import flex\nfrom cctbx import miller\n\nfrom yamtbx.dataproc.xds.xds_ascii import XDS_ASCII\n\ndef run(lstin):\n data = []\n for l in open(lstin):\n xdsasc = l.strip()\n xa = XDS_ASCII(xdsasc, sys.stdout, i_only=True)\n ma = miller.array(miller_set=xa.as_miller_set(anomalous_flag=False),\n data=xa.iobs)\n data.append((xdsasc, ma))\n\n print(\"index filename\")\n for i, d in enumerate(data):\n print(i, d[0])\n\n print(\"i j n.i n.j n.common cc\")\n for i in range(len(data)-1):\n for j in range(i+1, len(data)):\n di, dj = data[i][1].common_sets(data[j][1], assert_is_similar_symmetry=False)\n print(i, j, data[i][1].data().size(), data[j][1].data().size(), end=' ') \n if len(di.data()) == 0:\n print(0, \"nan\")\n else:\n corr = flex.linear_correlation(di.data(), dj.data())\n assert corr.is_well_defined()\n cc = corr.coefficient()\n print(len(di.data()), cc)\n# run()\n\nif __name__ == \"__main__\":\n lst = sys.argv[1]\n run(lst)\n","repo_name":"keitaroyam/yamtbx","sub_path":"yamtbx/dataproc/xds/command_line/xds_pairwise_cc.py","file_name":"xds_pairwise_cc.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"24792970418","text":"import numpy as np\nimport math\nimport random\n\nfrom model import *\nfrom fitnessMaps import *\n\nMAX_EVAL = 3750000 #max number of evaluations\nNUM_BINS = 3125 #number of bins\nSTART_POP = 750 #starting population\nBATCH_SZ = 75 #size of each batch\nALPHA = 0.2 #constant used in crossover to generate offspring\n#length of arm is 8.125 fully stretched\nSAMPLE = 100 #the sample period\nSEED = 8 #np.ramdom seed\nRADIUS = -0.01 #radius around goal point for assessing solutions\n\n#function to generate the population\ndef generate():\n goal = np.array([[0.7], [0.7], [1]]) #target point\n startBase = np.array([[0.6], [0.3], [0]]) #base start position\n startEnd = np.array([[1.0264], [0.4369], [0.3177]]) #end effector start position\n\n #archive to store fitest individuals\n archive = {}\n #number of evaluations\n random.seed(SEED)\n seeds = [random.randint(0, 200000) for iter in range(40000)]\n numEval = 0\n c = 0\n seedIndex = 0\n generations = 0\n avgFitList = []\n 
bestFitList = []\n coverList = []\n while numEval < MAX_EVAL:\n #list of individuals to evaluate fitness\n to_eval = []\n #check that the inital population has been generated\n if len(archive) < START_POP:\n #generate the initial population\n i = 0\n c = 0\n #create list of seeds to iterate through to generate the random initial population\n random.seed(seeds[seedIndex])\n stepSeeds = [random.randint(0, 200000) for iter in range(60000)]\n while i < BATCH_SZ:\n #generate a matrix of random numbers for the joint angles\n z = 0\n ind = np.empty((11,1))\n while z < 7:\n np.random.seed(stepSeeds[c])\n randNum = (np.random.uniform(-5, 5, size=1)).round(8)\n validJoint = testIndividual(randNum, False, goal, True, z)\n if validJoint:\n ind[z] = randNum\n z += 1\n #endif\n c += 1\n #endloop\n\n #generate a matrix of random numbers for the base position, base orientation and torso height\n np.random.seed(stepSeeds[c])\n ind[7] = (np.random.uniform(0, 2, size=1)).round(8)\n c += 1\n np.random.seed(stepSeeds[c])\n ind[8] = (np.random.uniform(0, 2, size=1)).round(8)\n c += 1\n np.random.seed(stepSeeds[c])\n ind[9] = (np.random.uniform(0, 6.283, size=1)).round(8)\n c += 1\n np.random.seed(stepSeeds[c])\n ind[10] = (np.random.uniform(0, 0.35, size=1)).round(8)\n c += 1\n\n #check that the generated individual is valid\n valid, endPosition = testIndividual(ind, False, goal, False, 0)\n if valid:\n print(\"Valid start individual: \")\n print(\"archive length: \", len(archive))\n i += 1\n to_eval.append((ind, endPosition))\n #endif\n #endloop\n seedIndex += 1\n\n else:\n #parent selection and variation\n pIdentifier = list(archive.keys())\n #generate two arrays of random numbers that decide sets of parents\n randList1 = np.random.randint(0, len(pIdentifier), size=BATCH_SZ*3)\n randList2 = np.random.randint(0, len(pIdentifier), size=BATCH_SZ*3)\n i = 0\n #variable added so child is only mutated twice to try and achieve a valid solution\n mutated = 0\n while i < BATCH_SZ:\n parent1 = archive[pIdentifier[randList1[i]]][0]\n parent2 = archive[pIdentifier[randList2[i]]][0]\n child = generateChild(parent1, parent2)\n mutated += 1\n\n #check that the generated individual is valid\n valid, endPosition = testIndividual(child, False, goal, False, 0)\n if valid:\n mutated = 0\n i += 1\n #send individual to physics model\n to_eval.append((child, endPosition))\n elif mutated > 2:\n mutated = 0\n i += 1\n #endif\n #endloop\n generations += 1\n print(\"Generation: \", generations, \". 
Number of Evaluations: \", numEval)\n #endif\n\n fitnessList = []\n #calculate the fitness of all the individuals in to_eval\n for i in range(len(to_eval)):\n fitness = calculateFitness(to_eval[i], goal, startBase, startEnd)\n fitnessList.append((to_eval[i][0], to_eval[i][1], fitness))\n #endloop\n\n #add the contents of to_eval to archive, if the fitness of an individual is greater than that currently in the bin add to the archive\n for i in range(len(fitnessList)):\n bin, dimensions = determineBin(fitnessList[i][0], fitnessList[i][1])\n extend = list(fitnessList[i])\n extend.append(dimensions)\n fitnessList[i] = tuple(extend)\n addToArchive(fitnessList[i], bin, archive)\n #endloop\n if generations != 0:\n numEval += len(to_eval)\n #endif\n\n #sample and save the current population after every SAMPLE generations\n if (generations%SAMPLE == 0) and (generations != 0):\n avgFitList, bestFitList, coverList = sample(archive, avgFitList, bestFitList, coverList, SEED)\n #endif\n #endloop\n\n #sample at the end\n avgFitList, bestFitList, coverList = sample(archive, avgFitList, bestFitList, coverList, SEED)\n #send final set of policies to model\n pIdentifier = list(archive.keys())\n print(\"number of filled bins: \", len(pIdentifier))\n for i in range(0, len(pIdentifier), 200):\n print(\"i: \", i)\n print(\"fitness: \", archive[pIdentifier[i]][2])\n testIndividual(archive[pIdentifier[i]][0], True, goal, False, 0)\n print(archive[pIdentifier[i]][0])\n #endloop\n\n #plot fitness map\n generateMap(archive)\n #plot the avg fitness, best fitness and coverage over generateCuboidCoordinates\n plotMeasures(avgFitList, bestFitList, coverList, SAMPLE)\n print(\"Suitable solutions: \", assessMap(archive, RADIUS))\n writeMapFile(archive, \"FINALMAPS2/finalMapSeed\"+str(SEED)+\".txt\")\n\n#function to generate the offspring from two parents\ndef generateChild(parent1, parent2):\n #use \"whole\" arithmetic crossover to generate offspring\n child = np.empty((11,1))\n i = 0\n while i < 11:\n p1 = parent1[i][0]\n p2 = parent2[i][0]\n res = (ALPHA*p1 + (1-ALPHA)*p2).round(4)\n child[i] = [res]\n i += 1\n #endloop\n #mutate a genome\n mutate = np.random.randint(1, 40, size=1)\n if mutate[0] < 29:\n element = np.random.randint(0, 10, size=1)\n if 0 <= element[0] < 7:\n row = random.randint(0, 6)\n newValue = np.random.randint(-5, 5, size=1)\n child[row] = newValue[0]\n elif element[0] == 7 or element[0] == 8:\n newPos = random.randint(0, 1)\n newValue = np.random.randint(0, 2, size=1)\n child[7+newPos] = newValue\n elif element[0] == 9:\n newValue = np.random.uniform(0, 6.283, size=1)\n child[9] = newValue.round(8)\n elif element[0] == 10:\n newValue = np.random.uniform(0, 0.35, size=1)\n child[10] = newValue.round(8)\n #endif\n\n return child\n\n#find the distance between two points\ndef findDistance(pointA, pointB, dimensions):\n if dimensions == 3:\n x = pow(pointB[0][0]-pointA[0][0], 2)\n y = pow(pointB[1][0]-pointA[1][0], 2)\n z = pow(pointB[2][0]-pointA[2][0], 2)\n distance = math.sqrt(x+y+z)\n return distance\n elif dimensions == 2:\n x = pow(pointB[0][0]-pointA[0][0], 2)\n y = pow(pointB[1][0]-pointA[1][0], 2)\n distance = math.sqrt(x+y)\n return distance\n #endif\n\n#function to calculate the fitness of an individual\ndef calculateFitness(ind, goal, startB, startE):\n dGoalE = findDistance(ind[1], goal, 3)\n fitness = -1*dGoalE\n return round(fitness, 6)\n\n#function to normalise input value\ndef normalise(value, min, max):\n normaliseVal = (value-min)/(max-min)\n return round(normaliseVal, 8)\n\ndef 
splitOrientation(angle):\n return math.sin(angle), math.cos(angle)\n\n#function to calculate the variance between policy angles\ndef calculateVariance(angles):\n mean = (angles[0][0]+angles[1][0]+angles[2][0]+angles[3][0]+angles[4][0]+angles[5][0]+angles[6][0])/7\n total = 0\n for i in range(len(angles)):\n total += pow((angles[i][0]-mean), 2)\n #endloop\n variance = total/(len(angles)-1)\n return variance\n\n#function to determine what bin an individual belongs to\ndef determineBin(ind, endPoint):\n #dim2=arm extension, dim4=variance of joint angles, dim1=shoulder joint angle\n #dim3=elbow joint angle, dim5=wrist joint angle\n torso = np.array([[ind[7][0]], [ind[8][0]], [0]])\n distanceTA = findDistance(torso, endPoint, 2)\n distanceTA -= 0.13693\n dim2 = normalise(abs(distanceTA), 0.2, 0.78)\n var = calculateVariance(ind)\n dim4 = normalise(var, 1, 8.5)\n dim1 = normalise(ind[1][0], -3.142, -0.480)\n dim3 = normalise(ind[3][0], -1.964, 0.785)\n dim5 = normalise(ind[5][0], -1.414, 1.414)\n dimensions = (dim1, dim2, dim3, dim4, dim5)\n\n bin = []\n for i in dimensions:\n if 0<=i<0.2:\n bin.append(0)\n elif 0.2<=i<0.4:\n bin.append(1)\n elif 0.4<=i<0.6:\n bin.append(2)\n elif 0.6<=i<0.8:\n bin.append(3)\n else:\n bin.append(4)\n #endif\n #endloop\n\n return (bin[0], bin[1], bin[2], bin[3], bin[4]), np.array([[dim1], [dim2], [dim3], [dim4], [dim5]])\n #return (bin[0], bin[1], bin[2], bin[3], bin[4], bin[5])\n\n#function to add an individual to the archive if its fitness is greater than the individual currently in the bin\ndef addToArchive(ind, bin, archive):\n #ind contains the policy, endPosition and fitness\n if bin in archive:\n if ind[2] > archive[bin][2]:\n archive[bin] = ind\n #endif\n else:\n archive[bin] = ind\n #endif\n\n#main\ngenerate()\n","repo_name":"ecorkill/map_elites_3YP","sub_path":"map_elites.py","file_name":"map_elites.py","file_ext":"py","file_size_in_byte":10405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6532465010","text":"import abc\nfrom django.db.models.aggregates import Count\nfrom rest_framework import status\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework.mixins import DestroyModelMixin\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.mixins import RetrieveModelMixin\nfrom rest_framework.response import Response\n\n\nclass CustomCreateModelMixin(CreateModelMixin):\n def perform_create(self, serializer):\n return serializer.save()\n\n def create(self, request, *args, **kwargs):\n serializer_class = self.get_serializer_class('create')\n serializer = self.get_serializer(serializer_class, data=request.data)\n serializer.is_valid(raise_exception=True)\n instance = self.perform_create(serializer)\n serializer_class = self.get_serializer_class('retrieve')\n self.kwargs['pk'] = instance.id\n obj = self.get_object(serializer_class) # Adds the joins\n serializer = self.get_serializer(serializer_class, obj)\n data = serializer.data\n headers = self.get_success_headers(data)\n return Response(data, status=status.HTTP_201_CREATED, headers=headers)\n\n\nclass CustomListModelMixin(ListModelMixin):\n def paginate(self, queryset, rollup=None):\n page = self.paginate_queryset(queryset)\n serializer_class = self.get_serializer_class('list')\n serializer = self.get_serializer(serializer_class, page, many=True)\n data = self.get_paginated_response(serializer.data)\n data.data.update({\"rollup\": rollup})\n return data\n\n def dotstyle(self, dict):\n retdict = {}\n for 
key, value in dict.items():\n retdict[key.replace(\".\", \"__\")] = value\n return retdict\n\n def get_rollup(self, request, queryset):\n rollup = {}\n if True if request.query_params.get(\"rollup\", False) in [\n \"True\", \"true\", \"t\", \"T\"] else False:\n for key, filter_ in self.ROLLUP.items():\n rollup[key] = queryset.filter(filter_).aggregate(count=Count('id'))['count']\n return rollup\n\n def list(self, request, *args, **kwargs):\n serializer_class = self.get_serializer_class('list')\n queryset = self.get_queryset(serializer_class=serializer_class)\n if not kwargs.pop('skip_ordering', False):\n queryset = self.filter_queryset(queryset)\n\n post_filter = kwargs.get('post_filter')\n if post_filter:\n queryset = queryset.filter(post_filter)\n\n rollup = self.get_rollup(request, queryset)\n\n return self.paginate(queryset=queryset, rollup=rollup)\n\n\nclass CustomSearchModelMixin(object):\n\n @abc.abstractmethod\n def search(self, request, keyword):\n pass\n\n def _search(self, request):\n keyword = request.GET.get('keyword')\n if keyword:\n return self.search(request, keyword)\n\n def list(self, request, *args, **kwargs):\n return super().list(request, post_filter=self._search(request), *args, **kwargs)\n\n\nclass CustomRetrieveModelMixin(RetrieveModelMixin):\n def retrieve(self, request, *args, **kwargs):\n serializer_class = self.get_serializer_class('retrieve')\n instance = self.get_object(serializer_class=serializer_class)\n serializer = self.get_serializer(serializer_class, instance)\n data = serializer.data\n return Response(data)\n\n\nclass CustomUpdateModelMixin:\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', False)\n serializer_class = self.get_serializer_class('update')\n instance = self.get_object(serializer_class=serializer_class)\n serializer = self.get_serializer(\n serializer_class, instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n\n self.perform_update(serializer)\n obj = self.get_object(serializer_class) # Adds the joins\n serializer_class = self.get_serializer_class('retrieve')\n serializer = self.get_serializer(serializer_class, obj)\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n data = serializer.data\n return Response(data)\n\n def perform_update(self, serializer):\n return serializer.save()\n\n\nclass CustomDestroyModelMixin(DestroyModelMixin):\n\n def destroy(self, request, *args, **kwargs):\n serializer_class = self.get_serializer_class('destroy')\n instance = self.get_object(serializer_class=serializer_class)\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n","repo_name":"GovReady/govready-q","sub_path":"api/base/views/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"67"} +{"seq_id":"28724782019","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd \nimport numpy as np\nfrom matplotlib import pyplot\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Bidirectional\nfrom keras.utils import np_utils\nimport keras\n\n\n# In[2]:\n\n\n# read training data\ndf = pd.read_csv('train_test.csv')\npredict_df = pd.read_csv('predict.csv')\n\n# read testing data\nreboot_df 
= pd.read_csv('reboot_time.csv')\npredict_reboot_df = pd.read_csv('reboot_time(predict).csv')\n\n# parameter setting\nn_epochs = 5\noversampling_type = 2\nlabel_type = 3\nmodel_type = 2\nfilter_out_breakpoint = 'yes'\n\n\n# In[3]:\n\n\npredict_df\n\n\n# In[4]:\n\n\n# extract all reboot index\nreboot = reboot_df['reboot'].values\npredict_reboot = predict_reboot_df['reboot'].values\n\ndef extract_reboot_time(reboot):\n reboot_index = []\n for i,v in enumerate(reboot):\n if v == 1:\n reboot_index.append(i)\n return reboot_index\n\nt1 = extract_reboot_time(reboot)\nt2 = extract_reboot_time(predict_reboot) \n\n\n# In[5]:\n\n\n# number of samples for each label class\ncount_class_0, count_class_1, count_class_2 = df['label'].value_counts()\nprint(count_class_0, count_class_1, count_class_2)\n\n\n# In[6]:\n\n\n# create model input/output data\ny = df['label'].values\nx_df = df.drop('label',axis=1)\nx = x_df[::].values\nprint(x.shape,type(x),len(x))\nprint(y.shape,type(y),len(y))\n\ntruth = predict_df['label'].values\npredict = predict_df.drop('label',axis = 1)\npredict_data = predict[::].values\nprint(predict_data.shape,type(predict_data),len(predict_data))\nprint(truth.shape,type(truth),len(truth))\n\n\n# filter out reboot point\ndef filterout_breakpoint(t,x,y):\n x2,y2 = [],[]\n for i in range(len(x)):\n if i in t:\n continue\n else:\n x2.append(x[i])\n y2.append(y[i])\n \n return np.array(x2), np.array(y2) \n \n\nx_train, y_train = x, y\nx_test, y_test = predict_data, truth\n\n\n# filter out reboot point in training \nif filter_out_breakpoint == 'yes':\n x_train, y_train = filterout_breakpoint(t1,x_train,y_train)\n \n\n \n# handle imbalanced dataset by oversampling minority data\nfrom imblearn.over_sampling import (RandomOverSampler, SMOTE, ADASYN)\n\nif oversampling_type == 1:\n sm = RandomOverSampler(random_state=42)\nelif oversampling_type == 2:\n sm = SMOTE(random_state=42)\n \nx_train_res, y_train_res = sm.fit_sample(x_train, y_train)\ncnt1,cnt2, cnt3 = 0, 0, 0 \nfor i in y_train_res:\n if i == 0:\n cnt1 += 1\n elif i == 1:\n cnt2 += 1\n else:\n cnt3 += 1\nprint(cnt1,cnt2,cnt3)\n\n\n# In[7]:\n\n\npredict_data\n\n\n# In[8]:\n\n\nprint(len(x_train_res)) #number of sliding windows in train\nprint(len(x_train_res[0])) # size of each sliding window\nprint(x_train_res)\nprint(y_train_res)\nprint('------------------------------------------------')\nprint(len(x_test)) #number of sliding windows in test\nprint(len(x_test[0])) # size of each sliding window\nprint(x_test)\nprint(y_test)\n\n\n# In[9]:\n\n\n# store ground-truth labels for later comparison with y_pred\ntmp_y_test = y_test\n\n\n# In[10]:\n\n\n# reshape input \nx_train_res = np.reshape(x_train_res, (x_train_res.shape[0],1,x_train_res.shape[1]))\nx_test= np.reshape(x_test, (x_test.shape[0],1,x_test.shape[1]))\n\npredict_data = np.reshape(predict_data, (predict_data.shape[0],1,predict_data.shape[1]))\npredict_data.shape\n\n\n# In[11]:\n\n\n# number of samples after oversampling\nx_train_res.shape\n\n\n# In[12]:\n\n\n# np.shape example\na = np.zeros([2,12])\nprint(a)\n\n\n# In[13]:\n\n\n# LSTM model types:\n# each model performs at least 90% in f1-score:\nif model_type == 1:\n # Single cell LSTM\n model = Sequential()\n model.add(LSTM(128,input_shape=(1, 12)))\n model.add(Dense(label_type,activation = 'sigmoid'))\nif model_type == 2:\n # Stacked LSTM (3 cells)\n model = Sequential()\n model.add(LSTM(128,input_shape=(1, 12), return_sequences=True))\n model.add(LSTM(128, return_sequences=True))\n model.add(LSTM(128))\n model.add(Dense(label_type, activation = 'sigmoid'))\nif model_type == 3:\n # 
Bidirectional LSTM\n model = Sequential()\n model.add(Bidirectional(LSTM(128), input_shape=(1,12)))\n model.add(Dense(label_type,activation = 'sigmoid'))\n\n\n# In[14]:\n\n\nif label_type > 2:\n model.compile(optimizer = 'adam',loss = 'categorical_crossentropy', metrics = ['accuracy'])\nelse:\n model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\ny_train_res = np_utils.to_categorical(y_train_res)\ny_test = np_utils.to_categorical(y_test)\nprint(model.summary())\n\n\n# In[15]:\n\n\nhistory = model.fit(x_train_res, y_train_res, validation_split = 0.2, epochs = n_epochs, batch_size = 32 ,verbose = 1)\n\n# evaluate model score\nscore = model.evaluate(x_test,y_test)\nprint('test loss:', score[0])\nprint('test accuracy:',score[1])\n\n\n# In[ ]:\n\n\n# model Evaluation\nfrom sklearn.metrics import precision_recall_curve, auc, confusion_matrix, accuracy_score, classification_report\ny_pred = model.predict_classes(x_test)\nprint('Check shape is equal or not:')\nprint(y_pred.shape)\nprint(tmp_y_test.shape,'\\n')\nprint('Confusion matrix:')\nprint(confusion_matrix(tmp_y_test,y_pred),'\\n')\nprint('Model performance:')\nprint(classification_report(tmp_y_test,y_pred))\n\n\n# In[ ]:\n\n\n# Visualization\nimport matplotlib.pyplot as plt\ndef plotting(time,value,code,re_indx,title=\"\", xlabel='Time', ylabel='Value', dpi=200):\n if code == 'truth':\n plt.figure(figsize=(16,8), dpi=dpi)\n plt.plot(time,value, color='tab:blue', linewidth=0.5)\n plt.plot(re_indx,value[re_indx],'ks')\n plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)\n elif code == 'pred':\n plt.figure(figsize=(16,8), dpi=dpi)\n plt.plot(time,value, color='tab:green', linewidth=0.5)\n plt.plot(re_indx,value[re_indx],'ks')\n plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)\n plt.show()\n\n\n# In[ ]:\n\n\ntime = np.arange(len(truth))\nplotting(time,tmp_y_test, code = 'truth',re_indx=t2,title= 'mip-svc-push-notify-counter(truth)')\nplotting(time,y_pred, code = 'pred',re_indx=t2,title= 'mip-svc-push-notify-counter(pred)')\n\n\n# In[ ]:\n\n\n# masked_plot(time,truth, title= 'mip-svc-push-notify-counter(0727-0729)')\n# masked_plot(time,pred, title= 'mip-svc-push-notify-counter(0727-0729)')\n\n","repo_name":"alvinyee860120/AIOps-anomaly-detection","sub_path":"AIOps_project.py","file_name":"AIOps_project.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"70251543895","text":"import asyncio\nimport discord\n\nclass SoundVoice:\n async def playEffect(ctx, file):\n try:\n if not client.is_voice_connected(ctx.message.server):\n voice = await client.join_voice_channel(ctx.message.author.voice_channel)\n else:\n voice = client.voice_client_in(ctx.message.server)\n \n player = await voice.create_ytdl_player(url, after=toggle_next)\n await songs.put(player)\n \n client.loop.create_task(audio_player_task())\n except:\n return\n \n player = voice.create_ffmpeg_player(file)\n player.start()\n if player.is_playing():\n await asyncio.sleep(5)\n await voice.disconnect() \n\n","repo_name":"Jetidball/DIBS2.1","sub_path":"SoundVoice.py","file_name":"SoundVoice.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42368698739","text":"from __future__ import annotations\n\nimport random # For Task 2\nfrom typing import Any, Optional, List, Tuple\n\n\nclass Tree:\n \"\"\"A recursive tree data structure.\n\n Note 
the relationship between this class and RecursiveList; the only major\n difference is that _rest has been replaced by _subtrees to handle multiple\n recursive sub-parts.\n \"\"\"\n # === Private Attributes ===\n # The item stored at this tree's root, or None if the tree is empty.\n _root: Optional[Any]\n # The list of all subtrees of this tree.\n _subtrees: List[Tree]\n\n # === Representation Invariants ===\n # - If self._root is None then self._subtrees is an empty list.\n # This setting of attributes represents an empty tree.\n #\n # Note: self._subtrees may be empty when self._root is not None.\n # This setting of attributes represents a tree consisting of just one\n # node.\n\n def __init__(self, root: Optional[Any], subtrees: List[Tree]) -> None:\n \"\"\"Initialize a new Tree with the given root value and subtrees.\n\n If is None, the tree is empty.\n Precondition: if is None, then is empty.\n \"\"\"\n self._root = root\n self._subtrees = subtrees\n\n def is_empty(self) -> bool:\n \"\"\"Return whether this tree is empty.\n\n >>> t1 = Tree(None, [])\n >>> t1.is_empty()\n True\n >>> t2 = Tree(3, [])\n >>> t2.is_empty()\n False\n \"\"\"\n return self._root is None\n\n def __len__(self) -> int:\n \"\"\"Return the number of items contained in this tree.\n\n >>> t1 = Tree(None, [])\n >>> len(t1)\n 0\n >>> t2 = Tree(3, [Tree(4, []), Tree(1, [])])\n >>> len(t2)\n 3\n \"\"\"\n if self.is_empty():\n return 0\n else:\n size = 1 # count the root\n for subtree in self._subtrees:\n size += subtree.__len__() # could also do len(subtree) here\n return size\n\n def __contains__(self, item: Any) -> bool:\n \"\"\"Return whether is in this tree.\n\n >>> t = Tree(1, [Tree(2, []), Tree(5, [])])\n >>> 1 in t # Same as t.__contains__(1)\n True\n >>> 5 in t\n True\n >>> 4 in t\n False\n \"\"\"\n if self.is_empty():\n return False\n\n # item may in root, or subtrees\n if self._root == item:\n return True\n else:\n for subtree in self._subtrees:\n if item in subtree:\n return True\n return False\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of this tree.\n\n For each node, its item is printed before any of its\n descendants' items. 
The output is nicely indented.\n\n You may find this method helpful for debugging.\n \"\"\"\n return self._str_indented()\n\n def _str_indented(self, depth: int = 0) -> str:\n \"\"\"Return an indented string representation of this tree.\n\n The indentation level is specified by the parameter.\n \"\"\"\n if self.is_empty():\n return ''\n else:\n s = ' ' * depth + str(self._root) + '\\n'\n for subtree in self._subtrees:\n # Note that the 'depth' argument to the recursive call is\n # modified.\n s += subtree._str_indented(depth + 1)\n return s\n\n def average(self) -> float:\n \"\"\"Return the average of all the values in this tree.\n\n Return 0 if this tree is empty.\n\n Precondition: this is a tree of numbers.\n\n >>> Tree(None, []).average()\n 0.0\n >>> t = Tree(13, [Tree(2, []), Tree(6, [])])\n >>> t.average()\n 7.0\n >>> lt = Tree(2, [Tree(4, []), Tree(5, [])])\n >>> rt = Tree(3, [Tree(6, []), Tree(7, []), Tree(8, []), Tree(9, []),\\\n Tree(10, [])])\n >>> t = Tree(1, [lt, rt])\n >>> t.average()\n 5.5\n \"\"\"\n if self.is_empty():\n return 0.0\n\n total, count = self._average_helper()\n return total / count\n\n def _average_helper(self) -> Tuple[int, int]:\n \"\"\"Return a tuple (x,y) where:\n\n x is the total values in this tree, and\n y is the size of this tree.\n \"\"\"\n if self.is_empty():\n return 0, 0\n else:\n total = self._root\n number = 1\n for subtree in self._subtrees:\n child_total, child_number = subtree._average_helper()\n total += child_total\n number += child_number\n return (total, number)\n\n def delete_item(self, item: Any) -> bool:\n \"\"\"Delete *one* occurrence of the given item from this tree.\n\n Return True if was deleted, and False otherwise.\n Do not modify this tree if it does not contain .\n\n **NOTE**\n This code is incomplete in one subtle way: it leaves empty trees\n in the list self._subtrees! This might cause some unexpected behaviour\n in some other tree methods. We'll discuss this more on Friday's lecture.\n \"\"\"\n if self.is_empty():\n # The item is not in the tree.\n return False\n elif self._root == item:\n # We've found the item: now delete it.\n self._delete_root()\n return True\n else:\n # Loop through each subtree, and stop the first time\n # the item is deleted. (This is why a boolean is returned!)\n for subtree in self._subtrees:\n deleted = subtree.delete_item(item)\n if deleted:\n return True\n else:\n # No item was deleted. Continue onto the next subtree.\n # Note that this branch is unnecessary; we've only shown\n # it to write comments.\n pass\n\n # If we don't return inside the loop, the item is not deleted\n # from any of the subtrees. In this case, the item does not\n # appear in this tree.\n return False\n\n def _delete_root(self) -> None:\n \"\"\"Delete the root of this tree.\n\n Precondition: this tree is non-empty.\n \"\"\"\n if self._subtrees == []:\n # This is a leaf. Deleting the root gives and empty tree.\n self._root = None\n else:\n # This tree has more than one value!\n # Can't just set self._root = None, need to REPLACE it.\n\n # Strategy 1: \"Promote\" a subtree.\n # 1. Remove the rightmost subtree.\n last_subtree = self._subtrees.pop()\n\n # 2. Update self._root\n self._root = last_subtree._root\n\n # 3. Update self._subtrees\n self._subtrees += last_subtree._subtrees\n\n # Strategy 2: Replace with a leaf.\n # 1. Extract the leftmost leaf (using another helper).\n # leaf = self._extract_leaf()\n #\n # 2. Update self._root. 
(Note that self._subtrees remains the same.)\n # self._root = leaf\n\n def _extract_leaf(self) -> Any:\n \"\"\"Remove and return the leftmost leaf in a tree.\n\n Precondition: this tree is non-empty.\n \"\"\"\n if self._subtrees == []:\n old_root = self._root\n self._root = None\n return old_root\n else:\n return self._subtrees[0]._extract_leaf()\n\n # ------------------------------------------------------------------------\n # Lab Task 1: Non-mutating tree methods\n # ------------------------------------------------------------------------\n\n def branching_factor(self) -> float:\n \"\"\"Return the average branching factor of this tree's internal values.\n\n Return 0.0 if this tree does not have internal values.\n\n >>> Tree(None, []).branching_factor()\n 0.0\n >>> t = Tree(1, [Tree(2, []), Tree(5, [])])\n >>> t.branching_factor()\n 2.0\n >>> lt = Tree(2, [Tree(4, []), Tree(5, [])])\n >>> rt = Tree(3, [Tree(6, []), Tree(7, []), Tree(8, []), Tree(9, []),\\\n Tree(10, [])])\n >>> t = Tree(1, [lt, rt])\n >>> t.branching_factor()\n 3.0\n \"\"\"\n if self.is_empty():\n return 0.0\n total, count = self._branching_factor_helper()\n return total / count\n\n def _branching_factor_helper(self) -> Tuple[float, int]:\n \"\"\"\n >>> Tree(None, [])._branching_factor_helper()\n (0.0, 0)\n >>> t = Tree(1, [Tree(2, []), Tree(5, [])])\n >>> t._branching_factor_helper()\n (2.0, 1)\n >>> lt = Tree(2, [Tree(4, []), Tree(5, [])])\n >>> rt = Tree(3, [Tree(6, []), Tree(7, []), Tree(8, []), Tree(9, []),\\\n Tree(10, [])])\n >>> t = Tree(1, [lt, rt])\n >>> t._branching_factor_helper()\n (9.0, 3)\n \"\"\"\n if self.is_empty():\n return 0.0, 0\n elif self._root == []:\n return 0.0, 0\n else:\n total = 0.0\n number = 0\n if self._subtrees != []:\n number += 1\n for subtree in self._subtrees:\n\n total += 1\n child_total, child_number = subtree._branching_factor_helper()\n total += child_total\n number += child_number\n return total, number\n\n def items_at_depth(self, d: int) -> List:\n \"\"\"Return a list of the values in this tree at the given depth.\n\n Precondition: d >= 1. (Depth 1 is the root of the tree.)\n\n We've provided some doctests for the empty and size-one tree cases.\n You'll want to write more doctests when working on the recursive case.\n\n >>> t1 = Tree(None, [])\n >>> t1.items_at_depth(2)\n []\n >>> t2 = Tree(5, [])\n >>> t2.items_at_depth(1)\n [5]\n >>> t2 = Tree(5, [Tree(5, []),Tree(5, [])])\n >>> t2.items_at_depth(2)\n [5, 5]\n >>> t2 = Tree(5, [Tree(5, [Tree(5, [Tree(2, []),Tree(1, [])])]),Tree(5, [])])\n >>> t2.items_at_depth(4)\n [2, 1]\n \"\"\"\n if self.is_empty():\n return []\n elif d == 1:\n return [self._root]\n else:\n lst = []\n lvl = 1\n for subtree in self._subtrees:\n lvl += 1\n lst.extend(subtree.items_at_depth(d - 1))\n\n return lst\n\n # ------------------------------------------------------------------------\n # Lab Task 2: Tree insertion\n # ------------------------------------------------------------------------\n def insert(self, item: Any) -> None:\n \"\"\"Insert into this tree using the following algorithm.\n\n 1. If the tree is empty, is the new root of the tree.\n 2. If the tree has a root but no subtrees, create a\n new tree containing the item, and make this new tree a subtree\n of the original tree.\n 3. 
Otherwise, pick a random number between 1 and 3 inclusive.\n - If the random number is 3, create a new tree containing\n the item, and make this new tree a subtree of the original.\n - If the random number is a 1 or 2, pick one of the existing\n subtrees at random, and *recursively insert* the new item\n into that subtree.\n\n >>> t = Tree(None, [])\n >>> t.insert(1)\n >>> 1 in t\n True\n >>> lt = Tree(2, [Tree(4, []), Tree(5, [])])\n >>> rt = Tree(3, [Tree(6, []), Tree(7, []), Tree(8, []), Tree(9, []),\\\n Tree(10, [])])\n >>> t = Tree(1, [lt, rt])\n >>> t.insert(100)\n >>> 100 in t\n True\n \"\"\"\n if self.is_empty():\n self._root = item\n elif self._subtrees == []:\n self._subtrees.append(Tree(item, []))\n else:\n tmp = int(random.randint(1, 3))\n if tmp == 3:\n self._subtrees.append(Tree(item, []))\n else:\n for subtree in self._subtrees:\n subtree.insert(item)\n\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n # import python_ta\n # python_ta.check_all(config={'extra-imports': ['random']})","repo_name":"EroSkulled/CSC148","sub_path":"labs/lab8/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":12418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"72892789333","text":"import socket, threading\r\n\r\nUDP_IP = \"localhost\"\r\nUDP_PORT = 1234\r\n\r\n\r\ndef handle_request(client_address, client_data):\r\n response_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n print(\"Incoming message from \" + str(client_address))\r\n response_socket.sendto((\"Hello, %s!\" % client_data.decode(\"UTF-8\")).encode(\"UTF-8\"), client_address)\r\n\r\n response_socket.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n server_socket.bind((UDP_IP, UDP_PORT))\r\n\r\n while True:\r\n data, address = server_socket.recvfrom(1024)\r\n\r\n threading.Thread(target=handle_request, args=(address, data)).start()\r\n","repo_name":"Akitektuo/University","sub_path":"2nd year/CN/Labs/Lab4/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"10267492354","text":"#continue practice using while\n#Printing 1 to n using while: Only Even numbers\n \ndef forward_Printing(n):\n for i in range(1,n+1):\n if i%2 == 1:\n continue\n print('Value of i =', i)\n \n \n \n \nn = int(input())\n \nprint('************Forward Printing using for with continue statement**********')\nforward_Printing(n)\n \n \n#continue practice using while\n#Printing n to 1 using while: Only odd numbers\ndef backward_Printing(n):\n for i in range(n,0,-1): #[n to 1]\n if i%2 == 0:\n continue\n print('Value of i =', i)\n \n \n \nprint('************Backward Printing using for with continue statement**********')\nbackward_Printing(n)\n","repo_name":"ehteshamkaushik/Python_Practice","sub_path":"continue using for.py","file_name":"continue using for.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1320205275","text":"import tkinter as tk\nimport time\nimport threading\n\nroot = tk.Tk()\n\nvar = tk.StringVar()\nvar1 = tk.StringVar()\nvar.set('0')\nvar1.set('1')\nprint(var)\nlab = tk.Label(root, textvariable=var).grid(row=0, column=0)\nlab1 = tk.Label(root, textvariable=var1).grid(row=1, column=0)\nprint(lab)\ndef change():\n for i in 
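The delete_item docstring in the Tree class above warns that deletion leaves empty Tree objects inside self._subtrees. One possible fix, a sketch rather than the course's official solution, prunes a subtree as soon as it empties:

    def delete_item(self, item: Any) -> bool:
        """Delete *one* occurrence of the given item, pruning emptied subtrees."""
        if self.is_empty():
            return False
        elif self._root == item:
            self._delete_root()
            return True
        else:
            for subtree in self._subtrees:
                if subtree.delete_item(item):
                    if subtree.is_empty():
                        self._subtrees.remove(subtree)  # prune the empty leaf
                    return True
            return False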
range(100):\n var.set(format(i, '.2f'))\n var1.set(format(-i, '.2f'))\n root.update()\n time.sleep(0.1)\n\nthreads = []\nt1 = threading.Thread(target=change)\n\nthreads.append(t1)\n\nif __name__ == '__main__':\n for t in threads:\n t.setDaemon(True)\n t1.start()\n\n root.mainloop()","repo_name":"ZhaoChaoHua/GimbalMonitor","sub_path":"tkinterT2.py","file_name":"tkinterT2.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15223618149","text":"import math, sys, os, random\nimport numpy as np\n\noutlist = []\ntime = 0\ndef makeRoundCurtain(btype, pos, angle, num, radius=40) :\n\ttheta = angle[0]\n\tdeltaAngle = (angle[1]-angle[0])/num\n\twhile theta <= angle[1] :\n\t\toutlist.append([time, btype, pos[0] + radius*math.cos(theta), pos[1] + radius*math.sin(theta), theta])\n\t\ttheta += deltaAngle\n\ndef save() :\n\toutstring = ''\n\tfor li in outlist :\n\t\tfor ch in li :\n\t\t\toutstring += str(ch) + ' '\n\t\toutstring = outstring[:-1]\n\t\toutstring += '\\n'\n\toutstring = outstring[:-1]\n\tofile = open('Data/BulletScenario/output.txt', 'w')\n\tofile.write(outstring)\n\tofile.close()\n\nif __name__ == '__main__' :\n\t############# 0\n\ttime = 0\n\tfor x in range(410, 800, 70) :\n\t\toutlist.append([time, 1, x, 210, math.pi/2])\n\t############ 1000~10000\n\tfor tm in np.arange(1000, 10001, 500) :\n\t\ttime = tm\n\t\tmakeRoundCurtain(1, (random.randint(400,800), random.randint(200,600)), (0, math.pi*2), 10, 40)\n\t############ 10000~20000\n\tfor tm in np.arange(10000, 20000, 500) :\n\t\ttime = tm\n\t\tfor i in range(5) :\n\t\t\toutlist.append([time, 0, random.randint(400,800), 205, math.pi/2])\n\t\n\t# End\n\tsave()\n\n\n\n","repo_name":"smilu97/noname","sub_path":"EtcBulletScenarioMaker.py","file_name":"EtcBulletScenarioMaker.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44183397022","text":"import cv2 as cv\n\nimg = cv.imread(\"Lenna.png\")\n\ncv.namedWindow(\"FAST\", cv.WINDOW_NORMAL)\n\n\ndef f(x):\n return\n\n\n# Agast detector types\ndetector_types = {\n 0: cv.FAST_FEATURE_DETECTOR_TYPE_5_8,\n 1: cv.FAST_FEATURE_DETECTOR_TYPE_7_12,\n 2: cv.FAST_FEATURE_DETECTOR_TYPE_9_16,\n}\n\n# Set Agast detector parameter callbacks\ncv.createTrackbar(\"Threshold\", \"FAST\", 15, 50, f)\ncv.createTrackbar(\"Non Max Suppression\", \"FAST\", 1, 1, f)\ncv.createTrackbar(\"Type\", \"FAST\", 0, len(detector_types) - 1, f)\n\nwhile True:\n fast_threshold = cv.getTrackbarPos(\"Threshold\", \"FAST\")\n non_max_suppression = cv.getTrackbarPos(\"Non Max Suppression\", \"FAST\")\n current_type = cv.getTrackbarPos(\"Type\", \"FAST\")\n\n # Initiate FAST object with default values\n fast = cv.FastFeatureDetector_create(\n threshold=fast_threshold,\n nonmaxSuppression=non_max_suppression,\n type=detector_types[current_type],\n )\n\n # find and draw the keypoints\n kp = fast.detect(img, None)\n\n # Flags:\n # cv.DRAW_MATCHES_FLAGS_DEFAULT,\n # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\n # cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,\n # cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS\n img2 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))\n\n cv.imshow(\"FAST\", img2)\n if cv.waitKey(10) & 0xFF == 27:\n 
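Tkinter widgets are not thread-safe; the background thread in the record above mutates StringVars and calls root.update directly, which happens to work on some platforms but can crash on others. A sketch of the safer `root.after` scheduling pattern, same countdown, no extra thread:

import tkinter as tk

root = tk.Tk()
var = tk.StringVar(value='0')
tk.Label(root, textvariable=var).grid(row=0, column=0)

def tick(i=0):
    var.set(format(i, '.2f'))
    if i < 100:
        root.after(100, tick, i + 1)   # re-schedule on the Tk event loop

tick()
root.mainloop()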
break\n\ncv.destroyAllWindows()\n","repo_name":"isLinXu/CVProcessLib","sub_path":"core/cv/features/featureDetector/detector_fast.py","file_name":"detector_fast.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"39724993646","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef print_obj(head):\n nums = []\n current = head\n while current:\n nums.append(current.val)\n current = current.next\n return \"->\".join(str(num) for num in nums)\n\nnode1 = ListNode(1)\nnode1.next = ListNode(4)\nnode1.next.next = ListNode(3)\n\nless_head = ListNode(0)\nprint(print_obj(less_head))\nless_ptr = less_head\nless_ptr.next = node1\nless_ptr = node1\nprint(print_obj(node1))\nprint(print_obj(less_ptr))\nprint(print_obj(less_head))\n\n\n","repo_name":"philiptam/Algorithm","sub_path":"python对象解惑.py","file_name":"python对象解惑.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21128665938","text":"class Node:\n def __init__(self) -> None:\n self.children = defaultdict(Node)\n self.sum = 0\n\nclass MapSum:\n def __init__(self):\n self.root = Node()\n self.kv = defaultdict(int)\n\n def insert(self, key: str, val: int) -> None:\n delta = val - self.kv[key]\n self.kv[key] = val\n p = self.root\n for c in key:\n p = p.children[c]\n p.sum += delta\n\n def sum(self, prefix: str) -> int:\n p = self.root\n for c in prefix:\n p = p.children[c]\n return p.sum\n\n# Your MapSum object will be instantiated and called as such:\n# obj = MapSum()\n# obj.insert(key,val)\n# param_2 = obj.sum(prefix)","repo_name":"fxrcode/FG","sub_path":"677-map-sum-pairs/677-map-sum-pairs.py","file_name":"677-map-sum-pairs.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"31958985164","text":"import filemanager\nimport datetime\ndef main():\n\n targetDirs = [r'C:\\Live\\英文书籍']\n\n start = datetime.datetime.now()\n fm = filemanager.FileManager()\n for p in targetDirs:\n fm.scan(p)\n \n fm.dump_duplicated(\"dup.csv\")\n end = datetime.datetime.now()\n print('Total time is: ' + str(end - start))\n\n\nif __name__ == '__main__':\n main()","repo_name":"coralcoffee/filemanager","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44739374292","text":"def base_convert(data, base, base_out):\n digits = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n valueof = {k:v for v, k in enumerate(digits)}\n assert 2 <= base <= 36 and 2 <= base_out <= 36\n s, scale = 0, 1\n for d in reversed(data):\n s += scale * digits.index(d)\n scale *= base\n data_out = \"0\" if s==0 else \"\"\n while s > 0:\n data_out = digits[s%base_out] + data_out\n s //= base_out\n return data_out\n\nprint(base_convert(\"EF\", 16, 2))","repo_name":"cloud1974/combination","sub_path":"base_convert.py","file_name":"base_convert.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9285661179","text":"from flask import Blueprint,abort,render_template,redirect,url_for\nfrom flask_login import login_required\nfrom zoo.utils.access_control import admin_required\nfrom zoo.group.models import Group\nfrom zoo.activity.models 
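The MapSum record above uses `defaultdict` without importing it. A self-contained version with the presumably intended import, plus a brief usage check:

from collections import defaultdict

class Node:
    def __init__(self) -> None:
        self.children = defaultdict(Node)   # auto-creates child trie nodes
        self.sum = 0

class MapSum:
    def __init__(self):
        self.root = Node()
        self.kv = defaultdict(int)

    def insert(self, key: str, val: int) -> None:
        delta = val - self.kv[key]          # re-inserting a key replaces its value
        self.kv[key] = val
        p = self.root
        for c in key:
            p = p.children[c]
            p.sum += delta

    def sum(self, prefix: str) -> int:
        p = self.root
        for c in prefix:
            p = p.children[c]
        return p.sum

obj = MapSum()
obj.insert("apple", 3)
obj.insert("app", 2)
print(obj.sum("ap"))  # 5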
import Activity\nfrom zoo.category.models import Category\nfrom zoo.extensions import db\nfrom zoo.message.models import Message\nfrom zoo.user.models import User\nfrom sqlalchemy import or_\n\n\nadmin = Blueprint(\"admin\", __name__)\n\n@admin.route(\"/verify\")\n@login_required\n@admin_required\ndef admin_verify():\n groups = Group.query.filter(Group.active == False).all()\n return render_template(\"admin/verify.html\", groups=groups)\n\n@admin.route(\"/group/agree/\")\n@login_required\n@admin_required\ndef group_agree(group_id):\n group = Group.query.get(group_id)\n if group:\n group.active = True\n group.creator.role = 2\n group.save()\n group.join(group.creator.id)\n message = Message(user=group.creator, content=\"你创建的小组\"+group.name+\"已经通过审核!\")\n message.save()\n return redirect(url_for('admin.admin_verify'))\n else:\n abort(404)\n\n@admin.route(\"/group/deny/\")\n@login_required\n@admin_required\ndef group_deny(group_id):\n group = Group.query.get(group_id)\n if group and not group.active:\n db.session.delete(group)\n db.session.commit()\n return redirect(url_for('admin.admin_verify'))\n else:\n abort(404)\n\n@admin.route(\"/group/manage\", methods=['GET'])\n@login_required\n@admin_required\ndef group_manage():\n groups = Group.query.order_by(Group.created_at).all()\n return render_template('admin/group.html', groups=groups)\n\n@admin.route(\"/acivity/manage\", methods=['GET'])\n@login_required\n@admin_required\ndef activities_manage():\n activities = Activity.query.all()\n return render_template('admin/activities.html', activities=activities)\n\n@admin.route(\"/category/manage\", methods=['GET'])\n@login_required\n@admin_required\ndef category_manage():\n categories = Category.query.all()\n return render_template('admin/category.html', categories=categories)\n\n@admin.route(\"/user/manage\", methods=['GET'])\n@login_required\n@admin_required\ndef user_manage():\n users = User.query.filter(or_(User.role == 2, User.role == 3))\n return render_template('admin/user.html', users=users)\n\n\n","repo_name":"highsoul/dky","sub_path":"zoo/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42373174900","text":"import logging\nimport random\nimport threading\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom flectra import api, fields, models, tools\nfrom flectra.tools import exception_to_unicode\nfrom flectra.tools.translate import _\n\n_logger = logging.getLogger(__name__)\n\n_INTERVALS = {\n 'hours': lambda interval: relativedelta(hours=interval),\n 'days': lambda interval: relativedelta(days=interval),\n 'weeks': lambda interval: relativedelta(days=7*interval),\n 'months': lambda interval: relativedelta(months=interval),\n 'now': lambda interval: relativedelta(hours=0),\n}\n\n\nclass EventTypeMail(models.Model):\n \"\"\" Template of event.mail to attach to event.type. Those will be copied\n upon all events created in that type to ease event creation. 
\"\"\"\n _name = 'event.type.mail'\n _description = 'Mail Scheduling on Event Category'\n\n event_type_id = fields.Many2one(\n 'event.type', string='Event Type',\n ondelete='cascade', required=True)\n notification_type = fields.Selection([('mail', 'Mail')], string='Send', default='mail', required=True)\n interval_nbr = fields.Integer('Interval', default=1)\n interval_unit = fields.Selection([\n ('now', 'Immediately'),\n ('hours', 'Hours'), ('days', 'Days'),\n ('weeks', 'Weeks'), ('months', 'Months')],\n string='Unit', default='hours', required=True)\n interval_type = fields.Selection([\n ('after_sub', 'After each registration'),\n ('before_event', 'Before the event'),\n ('after_event', 'After the event')],\n string='Trigger', default=\"before_event\", required=True)\n template_id = fields.Many2one(\n 'mail.template', string='Email Template',\n domain=[('model', '=', 'event.registration')], ondelete='restrict',\n help='This field contains the template of the mail that will be automatically sent')\n\n @api.model\n def _get_event_mail_fields_whitelist(self):\n \"\"\" Whitelist of fields that are copied from event_type_mail_ids to event_mail_ids when\n changing the event_type_id field of event.event \"\"\"\n return ['notification_type', 'template_id', 'interval_nbr', 'interval_unit', 'interval_type']\n\n\nclass EventMailScheduler(models.Model):\n \"\"\" Event automated mailing. This model replaces all existing fields and\n configuration allowing to send emails on events since Flectra 9. A cron exists\n that periodically checks for mailing to run. \"\"\"\n _name = 'event.mail'\n _rec_name = 'event_id'\n _description = 'Event Automated Mailing'\n\n event_id = fields.Many2one('event.event', string='Event', required=True, ondelete='cascade')\n sequence = fields.Integer('Display order')\n notification_type = fields.Selection([('mail', 'Mail')], string='Send', default='mail', required=True)\n interval_nbr = fields.Integer('Interval', default=1)\n interval_unit = fields.Selection([\n ('now', 'Immediately'),\n ('hours', 'Hours'), ('days', 'Days'),\n ('weeks', 'Weeks'), ('months', 'Months')],\n string='Unit', default='hours', required=True)\n interval_type = fields.Selection([\n ('after_sub', 'After each registration'),\n ('before_event', 'Before the event'),\n ('after_event', 'After the event')],\n string='Trigger ', default=\"before_event\", required=True)\n template_id = fields.Many2one(\n 'mail.template', string='Email Template',\n domain=[('model', '=', 'event.registration')], ondelete='restrict',\n help='This field contains the template of the mail that will be automatically sent')\n scheduled_date = fields.Datetime('Scheduled Sent Mail', compute='_compute_scheduled_date', store=True)\n mail_registration_ids = fields.One2many('event.mail.registration', 'scheduler_id')\n mail_sent = fields.Boolean('Mail Sent on Event', copy=False)\n done = fields.Boolean('Sent', compute='_compute_done', store=True)\n\n @api.depends('mail_sent', 'interval_type', 'event_id.registration_ids', 'mail_registration_ids')\n def _compute_done(self):\n for mail in self:\n if mail.interval_type in ['before_event', 'after_event']:\n mail.done = mail.mail_sent\n else:\n mail.done = len(mail.mail_registration_ids) == len(mail.event_id.registration_ids) and all(mail.mail_sent for mail in mail.mail_registration_ids)\n\n @api.depends('event_id.date_begin', 'interval_type', 'interval_unit', 'interval_nbr')\n def _compute_scheduled_date(self):\n for mail in self:\n if mail.interval_type == 'after_sub':\n date, sign = 
mail.event_id.create_date, 1\n elif mail.interval_type == 'before_event':\n date, sign = mail.event_id.date_begin, -1\n else:\n date, sign = mail.event_id.date_end, 1\n\n mail.scheduled_date = date + _INTERVALS[mail.interval_unit](sign * mail.interval_nbr) if date else False\n\n def execute(self):\n for mail in self:\n now = fields.Datetime.now()\n if mail.interval_type == 'after_sub':\n # update registration lines\n lines = [\n (0, 0, {'registration_id': registration.id})\n for registration in (mail.event_id.registration_ids - mail.mapped('mail_registration_ids.registration_id'))\n ]\n if lines:\n mail.write({'mail_registration_ids': lines})\n # execute scheduler on registrations\n mail.mail_registration_ids.execute()\n else:\n # Do not send emails if the mailing was scheduled before the event but the event is over\n if not mail.mail_sent and mail.scheduled_date <= now and mail.notification_type == 'mail' and \\\n (mail.interval_type != 'before_event' or mail.event_id.date_end > now):\n mail.event_id.mail_attendees(mail.template_id.id)\n mail.write({'mail_sent': True})\n return True\n\n @api.model\n def _warn_template_error(self, scheduler, exception):\n # We warn ~ once by hour ~ instead of every 10 min if the interval unit is more than 'hours'.\n if random.random() < 0.1666 or scheduler.interval_unit in ('now', 'hours'):\n ex_s = exception_to_unicode(exception)\n try:\n event, template = scheduler.event_id, scheduler.template_id\n emails = list(set([event.organizer_id.email, event.user_id.email, template.write_uid.email]))\n subject = _(\"WARNING: Event Scheduler Error for event: %s\", event.name)\n body = _(\"\"\"Event Scheduler for:\n - Event: %(event_name)s (%(event_id)s)\n - Scheduled: %(date)s\n - Template: %(template_name)s (%(template_id)s)\n\nFailed with error:\n - %(error)s\n\nYou receive this email because you are:\n - the organizer of the event,\n - or the responsible of the event,\n - or the last writer of the template.\n\"\"\",\n event_name=event.name,\n event_id=event.id,\n date=scheduler.scheduled_date,\n template_name=template.name,\n template_id=template.id,\n error=ex_s)\n email = self.env['ir.mail_server'].build_email(\n email_from=self.env.user.email,\n email_to=emails,\n subject=subject, body=body,\n )\n self.env['ir.mail_server'].send_email(email)\n except Exception as e:\n _logger.error(\"Exception while sending traceback by email: %s.\\n Original Traceback:\\n%s\", e, exception)\n pass\n\n @api.model\n def run(self, autocommit=False):\n schedulers = self.search([\n ('event_id.active', '=', True),\n ('done', '=', False),\n ('scheduled_date', '<=', datetime.strftime(fields.datetime.now(), tools.DEFAULT_SERVER_DATETIME_FORMAT))\n ])\n for scheduler in schedulers:\n try:\n with self.env.cr.savepoint():\n # Prevent a mega prefetch of the registration ids of all the events of all the schedulers\n self.browse(scheduler.id).execute()\n except Exception as e:\n _logger.exception(e)\n self.invalidate_cache()\n self._warn_template_error(scheduler, e)\n else:\n if autocommit and not getattr(threading.currentThread(), 'testing', False):\n self.env.cr.commit()\n return True\n\n\nclass EventMailRegistration(models.Model):\n _name = 'event.mail.registration'\n _description = 'Registration Mail Scheduler'\n _rec_name = 'scheduler_id'\n _order = 'scheduled_date DESC'\n\n scheduler_id = fields.Many2one('event.mail', 'Mail Scheduler', required=True, ondelete='cascade')\n registration_id = fields.Many2one('event.registration', 'Attendee', required=True, ondelete='cascade')\n 
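A quick illustration of the _INTERVALS table that drives _compute_scheduled_date above: a before_event trigger applies a negative sign to the interval. The dates here are made up:

from datetime import datetime
from dateutil.relativedelta import relativedelta

_INTERVALS = {'weeks': lambda interval: relativedelta(days=7 * interval)}

date_begin = datetime(2023, 6, 15)          # event start
sign, interval_nbr = -1, 2                  # 'before_event' => sign = -1
print(date_begin + _INTERVALS['weeks'](sign * interval_nbr))  # 2023-06-01 00:00:00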
scheduled_date = fields.Datetime('Scheduled Time', compute='_compute_scheduled_date', store=True)\n mail_sent = fields.Boolean('Mail Sent')\n\n def execute(self):\n now = fields.Datetime.now()\n todo = self.filtered(lambda reg_mail:\n not reg_mail.mail_sent and \\\n reg_mail.registration_id.state in ['open', 'done'] and \\\n (reg_mail.scheduled_date and reg_mail.scheduled_date <= now) and \\\n reg_mail.scheduler_id.notification_type == 'mail'\n )\n for reg_mail in todo:\n organizer = reg_mail.scheduler_id.event_id.organizer_id\n company = self.env.company\n author = self.env.ref('base.user_root')\n if organizer.email:\n author = organizer\n elif company.email:\n author = company.partner_id\n elif self.env.user.email:\n author = self.env.user\n\n email_values = {\n 'author_id': author.id,\n }\n if not reg_mail.scheduler_id.template_id.email_from:\n email_values['email_from'] = author.email_formatted\n reg_mail.scheduler_id.template_id.send_mail(reg_mail.registration_id.id, email_values=email_values)\n todo.write({'mail_sent': True})\n\n @api.depends('registration_id', 'scheduler_id.interval_unit', 'scheduler_id.interval_type')\n def _compute_scheduled_date(self):\n for mail in self:\n if mail.registration_id:\n date_open = mail.registration_id.date_open\n date_open_datetime = date_open or fields.Datetime.now()\n mail.scheduled_date = date_open_datetime + _INTERVALS[mail.scheduler_id.interval_unit](mail.scheduler_id.interval_nbr)\n else:\n mail.scheduled_date = False\n","repo_name":"flectra-hq/flectra","sub_path":"addons/event/models/event_mail.py","file_name":"event_mail.py","file_ext":"py","file_size_in_byte":10862,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"38715358702","text":"\"\"\"\nProblem:\n\nA network consists of nodes labeled 0 to N. You are given a list of edges (a, b, t),\ndescribing the time t it takes for a message to be sent from node a to node b. 
Whenever\na node receives a message, it immediately passes the message on to a neighboring node,\nif possible.\n\nAssuming all nodes are connected, determine how long it will take for every node to\nreceive a message that begins at node 0.\n\nFor example, given N = 5, and the following edges:\n\nedges = [\n (0, 1, 5),\n (0, 2, 3),\n (0, 5, 4),\n (1, 3, 8),\n (2, 3, 1),\n (3, 5, 10),\n (3, 4, 5)\n]\nYou should return 9, because propagating the message from 0 -> 2 -> 3 -> 4 will take\nthat much time\n\"\"\"\n\nfrom sys import maxsize\nfrom typing import Dict, List, Optional, Tuple\n\nfrom DataStructures.Graph import GraphDirectedWeighted\nfrom DataStructures.PriorityQueue import MinPriorityQueue\n\n\ndef dijkstra(\n graph: GraphDirectedWeighted, start: int\n) -> Tuple[Dict[int, int], Dict[int, Optional[int]]]:\n dist = {node: maxsize for node in graph.connections}\n parent = {node: None for node in graph.connections}\n dist[start] = 0\n priority_queue = MinPriorityQueue()\n [priority_queue.push(node, weight) for node, weight in dist.items()]\n while not priority_queue.isEmpty():\n node = priority_queue.extract_min()\n for neighbour in graph.connections[node]:\n if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:\n dist[neighbour] = dist[node] + graph.connections[node][neighbour]\n priority_queue.update_key(neighbour, dist[neighbour])\n parent[neighbour] = node\n return dist, parent\n\n\ndef get_propagation_time(edges: List[Tuple[int, int, int]]) -> int:\n graph = GraphDirectedWeighted()\n for src, dest, wt in edges:\n graph.add_edge(src, dest, wt)\n\n time, _ = dijkstra(graph, 0)\n return max(time.values())\n\n\nif __name__ == \"__main__\":\n edges = [\n (0, 1, 5),\n (0, 2, 3),\n (0, 5, 4),\n (1, 3, 8),\n (2, 3, 1),\n (3, 5, 10),\n (3, 4, 5),\n ]\n print(get_propagation_time(edges))\n\n\n\"\"\"\nSPECS:\n\nTIME COMPLEXITY: O(v + e x log(v))\nSPACE COMPLEXITY: O(v)\n\"\"\"\n","repo_name":"ruppysuppy/Daily-Coding-Problem-Solutions","sub_path":"Solutions/270.py","file_name":"270.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"67"} +{"seq_id":"33659124340","text":"#Imports\r\nimport pandas as pd\r\nimport numpy as np\r\nimport scipy.stats as stats\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.decomposition import PCA\r\nfrom matplotlib.pyplot import cm\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport pickle\r\nimport geopy.distance\r\n\r\ndef savemodel(object,filename='object_to_store.obj'):\r\n\t#store this object file \r\n with open(filename, 'wb') as file:\r\n pickle.dump(object, file)\r\n\r\ndef loadmodel(filename):\r\n\twith open(filename, 'rb') as file:\r\n\t\tobj = pickle.load(file)\r\n\treturn obj\r\n\r\ndef generateXY(filtered_df):\r\n\tX = filtered_df[['Longitude','Latitude']].to_numpy()\r\n\tY = df[['OFFENCE']].loc[filtered_df.index].to_numpy()\r\n\treturn X,Y\r\n\r\ndef filter_df(geo, range_in_km):\r\n\tGeo_feature = ['Latitude','Longitude']\r\n\tgeo_data = df[Geo_feature]\r\n\tfiltered_df = calculate_distances(geo, geo_data, range_in_km)\r\n\treturn filtered_df\r\n\r\ndef group_by_geo():\r\n\tres2 = df.groupby([\"Longitude\", \"Latitude\"]).size().reset_index(name=\"Occurences\").sort_values(by=['Occurences'],ascending=False)\r\n\treturn res2\r\n\r\ndef Kmeans(k, df_var):\r\n\tkmeans = KMeans(n_clusters=k, random_state=0, 
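For readers without the custom DataStructures package imported in the record above, an equivalent single-source shortest-path pass using only the standard library reproduces the same answer on the sample input:

import heapq
from collections import defaultdict

def propagation_time(edges, start=0):
    graph = defaultdict(list)
    for src, dest, wt in edges:
        graph[src].append((dest, wt))
    dist = {start: 0}
    heap = [(0, start)]
    while heap:
        d, node = heapq.heappop(heap)
        if d > dist.get(node, float('inf')):
            continue                          # stale heap entry
        for nxt, wt in graph[node]:
            if d + wt < dist.get(nxt, float('inf')):
                dist[nxt] = d + wt
                heapq.heappush(heap, (dist[nxt], nxt))
    return max(dist.values())

edges = [(0, 1, 5), (0, 2, 3), (0, 5, 4), (1, 3, 8), (2, 3, 1), (3, 5, 10), (3, 4, 5)]
print(propagation_time(edges))  # 9, via 0 -> 2 -> 3 -> 4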
n_init=\"auto\")\r\n\tkmeans.fit(df_var)\r\n\treturn kmeans\t\r\n\r\ndef Kmeans_predict(kmeans, new_df):\r\n\tpredict = kmeans.predict(new_df)\r\n\tlabel = kmeans.labels_\r\n\tcentroids = kmeans.cluster_centers_\r\n\treturn predict, label, centroids\r\n\r\ndef get4ptrs(centroid,delta):\r\n #top left\r\n topleft = (centroid[0]-delta,centroid[1]+delta)\r\n topright = (centroid[0]+delta,centroid[1]+delta)\r\n downleft = (centroid[0]-delta,centroid[1]-delta)\r\n downright= (centroid[0]+delta,centroid[1]-delta)\r\n return [topleft,topright,downleft,downright]\r\n\r\ndef calculate_distances(centroid, dataframe, threshold):\r\n # Calculate distances\r\n dataframe['Distance'] = dataframe.apply(lambda row: geopy.distance.distance(centroid, (row['Latitude'], row['Longitude'])).km, axis=1)\r\n # Filter dataframe based on distance threshold\r\n filtered_dataframe = dataframe[dataframe['Distance'] < threshold]\r\n return filtered_dataframe\r\n\r\ndef KNN(k,X,Y):\r\n\tmodel = KNeighborsClassifier(n_neighbors=3,weights='distance')\r\n\tmodel.fit(X,Y)\r\n\treturn model\r\n\r\ndef KNN_predict(model, newpoint):\r\n\tdistances, indices = model.kneighbors([newpoint])\r\n\treturn distances, indices \r\n\r\n#main script\r\nif __name__ == '__main__':\r\n df = pd.read_csv('.\\Dataset_June_8.csv')\r\n # build the dictionary:\r\n dict_offense = {'Assault': 9, 'Break and Enter': 1, 'Robbery': 5, 'Auto Theft': 1, 'Homicide': 15, 'Theft Over': 10, 'Pedestrian Collision': 6}\r\n # replace the column values:\r\n df2=df.replace({\"OFFENCE\": dict_offense})\r\n\r\n geo_loc = (43.609206, -79.514755)\r\n #177-137 Horner Ave, Etobicoke, ON\r\n\r\n filtered_df = filter_df(geo_loc, 5)\r\n x, y = generateXY(filtered_df)\r\n\r\n #Trained your KNN\r\n knn = KNN(100,x,y)\r\n newpoint = (43.616875, -79.519073)\r\n distances, indices = KNN_predict(knn,newpoint) \r\n savemodel(knn,'100nbrs.obj')\r\n #reload KNN:\r\n KNN100 = loadmodel('100nbrs.obj')\r\n print(\"success\")\r\n\t","repo_name":"SamiraFairuz/SafeT.First","sub_path":"KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41211998808","text":"from qtpy.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout, QLabel, QSplitter)\n\nfrom bioimageit_framework.framework import BiComponent\n\nfrom ._tools_widget import BiDesignerToolsArea, BiDesignerTools\nfrom ._editor_widget import BiDesignerEditorWidget\nfrom ._scene import BiDesignerView\n\n\nclass BiDesigner(BiComponent):\n def __init__(self):\n super().__init__()\n self._object_name = 'BiDesignerComponent'\n self.widget = BiDesignerWidget()\n\n\nclass BiDesignerWidget(QWidget):\n def __init__(self):\n super().__init__()\n\n self.toolbar_widget = BiDesignerToolsArea() \n self.toolbar_widget.add_widget('Tools', BiDesignerTools())\n self.toolbar_widget.show_widget('Tools')\n self.editor_widget = BiDesignerEditorWidget() \n self.editor_widget.view.added_node.connect(self._add_node_widget)\n self.editor_widget.view.show_node_widget.connect(self._show_node_widget)\n\n self.widget = QSplitter()\n self.widget.addWidget(self.toolbar_widget)\n self.widget.addWidget(self.editor_widget)\n self.widget.setStretchFactor(0, 1)\n self.widget.setStretchFactor(1, 7)\n\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.widget)\n self.setLayout(layout)\n\n def _add_node_widget(self, id, widget):\n self.toolbar_widget.add_widget(id, widget)\n\n def _show_node_widget(self, id):\n 
print('BiDesignerWidget: _show_node_widget: ', id)\n self.toolbar_widget.show_widget(id) \n","repo_name":"bioimageit/bioimageit_gui","sub_path":"bioimageit_gui/designer/_components.py","file_name":"_components.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"575059131","text":"import datetime\nfrom sys import platform\nimport logging as logme\nfrom urllib.parse import urlencode\nfrom urllib.parse import quote\n\nmobile = \"https://mobile.twitter.com\"\nbase = \"https://api.twitter.com/2/search/adaptive.json\"\n\n\ndef _sanitizeQuery(_url, params):\n _serialQuery = \"\"\n _serialQuery = urlencode(params, quote_via=quote)\n _serialQuery = _url + \"?\" + _serialQuery\n return _serialQuery\n\n\ndef _formatDate(date):\n if \"win\" in platform:\n return f'\\\"{date.split()[0]}\\\"'\n try:\n return int(datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\").timestamp())\n except ValueError:\n return int(datetime.datetime.strptime(date, \"%Y-%m-%d\").timestamp())\n\n\nasync def Favorites(username, init):\n logme.debug(__name__ + ':Favorites')\n url = f\"{mobile}/{username}/favorites?lang=en\"\n\n if init != '-1':\n url += f\"&max_id={init}\"\n\n return url\n\n\nasync def Followers(username, init):\n logme.debug(__name__ + ':Followers')\n url = f\"{mobile}/{username}/followers?lang=en\"\n\n if init != '-1':\n url += f\"&cursor={init}\"\n\n return url\n\n\nasync def Following(username, init):\n logme.debug(__name__ + ':Following')\n url = f\"{mobile}/{username}/following?lang=en\"\n\n if init != '-1':\n url += f\"&cursor={init}\"\n\n return url\n\n\nasync def MobileProfile(username, init):\n logme.debug(__name__ + ':MobileProfile')\n url = f\"{mobile}/{username}?lang=en\"\n\n if init != '-1':\n url += f\"&max_id={init}\"\n\n return url\n\n\nasync def Search(config, init):\n logme.debug(__name__ + ':Search')\n url = base\n tweet_count = 100\n q = \"\"\n params = [\n # ('include_blocking', '1'),\n # ('include_blocked_by', '1'),\n # ('include_followed_by', '1'),\n # ('include_want_retweets', '1'),\n # ('include_mute_edge', '1'),\n # ('include_can_dm', '1'),\n ('include_can_media_tag', '1'),\n # ('skip_status', '1'),\n # ('include_cards', '1'),\n ('include_ext_alt_text', 'true'),\n ('include_quote_count', 'true'),\n ('include_reply_count', '1'),\n ('tweet_mode', 'extended'),\n ('include_entities', 'true'),\n ('include_user_entities', 'true'),\n ('include_ext_media_availability', 'true'),\n ('send_error_codes', 'true'),\n ('simple_quoted_tweet', 'true'),\n ('count', tweet_count),\n # ('query_source', 'typed_query'),\n # ('pc', '1'),\n ('cursor', str(init)),\n ('spelling_corrections', '1'),\n ('ext', 'mediaStats%2ChighlightedLabel'),\n ('tweet_search_mode', 'live'), # this can be handled better, maybe take an argument and set it then\n ]\n if not config.Popular_tweets:\n params.append(('f', 'tweets'))\n if config.Lang:\n params.append((\"l\", config.Lang))\n params.append((\"lang\", \"en\"))\n if config.Query:\n q += f\" from:{config.Query}\"\n if config.Username:\n q += f\" from:{config.Username}\"\n if config.Geo:\n config.Geo = config.Geo.replace(\" \", \"\")\n q += f\" geocode:{config.Geo}\"\n if config.Search:\n\n q += f\" {config.Search}\"\n if config.Year:\n q += f\" until:{config.Year}-1-1\"\n if config.Since:\n q += f\" since:{_formatDate(config.Since)}\"\n if config.Until:\n q += f\" until:{_formatDate(config.Until)}\"\n if config.Email:\n q += ' \"mail\" OR \"email\" OR'\n q += ' \"gmail\" 
OR \"e-mail\"'\n if config.Phone:\n q += ' \"phone\" OR \"call me\" OR \"text me\"'\n if config.Verified:\n q += \" filter:verified\"\n if config.To:\n q += f\" to:{config.To}\"\n if config.All:\n q += f\" to:{config.All} OR from:{config.All} OR @{config.All}\"\n if config.Near:\n q += f' near:\"{config.Near}\"'\n if config.Images:\n q += \" filter:images\"\n if config.Videos:\n q += \" filter:videos\"\n if config.Media:\n q += \" filter:media\"\n if config.Replies:\n q += \" filter:replies\"\n # although this filter can still be used, but I found it broken in my preliminary testing, needs more testing\n if config.Native_retweets:\n q += \" filter:nativeretweets\"\n if config.Min_likes:\n q += f\" min_faves:{config.Min_likes}\"\n if config.Min_retweets:\n q += f\" min_retweets:{config.Min_retweets}\"\n if config.Min_replies:\n q += f\" min_replies:{config.Min_replies}\"\n if config.Links == \"include\":\n q += \" filter:links\"\n elif config.Links == \"exclude\":\n q += \" exclude:links\"\n if config.Source:\n q += f\" source:\\\"{config.Source}\\\"\"\n if config.Members_list:\n q += f\" list:{config.Members_list}\"\n if config.Filter_retweets:\n q += f\" exclude:nativeretweets exclude:retweets\"\n if config.Custom_query:\n q = config.Custom_query\n\n q = q.strip()\n params.append((\"q\", q))\n _serialQuery = _sanitizeQuery(url, params)\n return url, params, _serialQuery\n\n\ndef SearchProfile(config, init=None):\n logme.debug(__name__ + ':SearchProfile')\n _url = 'https://api.twitter.com/2/timeline/profile/{user_id}.json'.format(user_id=config.User_id)\n tweet_count = 100\n params = [\n # some of the fields are not required, need to test which ones aren't required\n ('include_profile_interstitial_type', '1'),\n ('include_blocking', '1'),\n ('include_blocked_by', '1'),\n ('include_followed_by', '1'),\n ('include_want_retweets', '1'),\n ('include_mute_edge', '1'),\n ('include_can_dm', '1'),\n ('include_can_media_tag', '1'),\n ('skip_status', '1'),\n ('cards_platform', 'Web - 12'),\n ('include_cards', '1'),\n ('include_ext_alt_text', 'true'),\n ('include_quote_count', 'true'),\n ('include_reply_count', '1'),\n ('tweet_mode', 'extended'),\n ('include_entities', 'true'),\n ('include_user_entities', 'true'),\n ('include_ext_media_color', 'true'),\n ('include_ext_media_availability', 'true'),\n ('send_error_codes', 'true'),\n ('simple_quoted_tweet', 'true'),\n ('include_tweet_replies', 'true'),\n ('count', tweet_count),\n ('ext', 'mediaStats%2ChighlightedLabel'),\n ]\n\n if type(init) == str:\n params.append(('cursor', str(init)))\n _serialQuery = _sanitizeQuery(_url, params)\n return _url, params, _serialQuery\n","repo_name":"twintproject/twint","sub_path":"twint/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","stars":15298,"dataset":"github-code","pt":"67"} +{"seq_id":"27193558698","text":"import asyncio\nimport json\n\nimport aiohttp\nfrom .http_exceptions import HTTP_EXCEPTIONS, NsqHttpException\nfrom ..utils import _convert_to_str\n\n\nclass NsqHTTPConnection:\n \"\"\"XXX\"\"\"\n\n def __init__(self, host='127.0.0.1', port=4150, *, loop):\n self._loop = loop\n self._endpoint = (host, port)\n self._base_url = 'http://{0}:{1}/'.format(*self._endpoint)\n\n self._session = aiohttp.ClientSession(connector=aiohttp.TCPConnector(),\n loop=self._loop)\n\n @property\n def endpoint(self):\n return 'http://{0}:{1}'.format(*self._endpoint)\n\n async def close(self):\n return await self._session.close()\n\n async def 
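The builders above all funnel through _sanitizeQuery, which is just urlencode with quote as the quoting function. A standalone illustration with an invented handle and filters:

from urllib.parse import urlencode, quote

base = "https://api.twitter.com/2/search/adaptive.json"
params = [("count", 100), ("tweet_search_mode", "live"),
          ("q", "from:nasa since:2020-01-01 filter:images")]
print(base + "?" + urlencode(params, quote_via=quote))
# .../adaptive.json?count=100&tweet_search_mode=live&q=from%3Anasa%20since%3A2020-01-01%20filter%3Aimages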
perform_request(self, method, url, params, body):\n _body = _convert_to_str(body) if body else body\n url = self._base_url + url\n print(method, url, params, _body)\n try:\n resp = await self._session.request(method, url,\n params=params,\n data=_body)\n except Exception as tmp:\n print('exception', tmp)\n resp_body = await resp.text()\n try:\n response = json.loads(resp_body)\n except ValueError:\n return resp_body\n\n if not (200 <= resp.status <= 300):\n extra = None\n try:\n extra = json.loads(resp_body)\n except ValueError:\n pass\n exc_class = HTTP_EXCEPTIONS.get(resp.status, NsqHttpException)\n raise exc_class(resp.status, resp_body, extra)\n return response\n\n def __repr__(self):\n cls_name = self.__class__.__name__\n return '<{}: {}>'.format(cls_name, self._endpoint)\n","repo_name":"slowpilot/asyncnsq","sub_path":"asyncnsq/http/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"1481739768","text":"\n'''\n\tGillespie algorithm (Stochastic simulation algorithm)\n\n\tNext-reaction method : J. Phys. Chem. A 104, 1876 (2000)\n\t\n\t0. initialize dependance graph G, propensity function a, putative time tau, indexed priority queue P\n\t1. Choose the reaction having the smallest putative time and corresponding time\n\t2. Update number of molecules and t=tau\n\t3. Record trajectory \n\t4. Update P and return to 1\n\n\tUpdate of P\n\t3-1. Update a on G\n\t3-2. For the fired reaction, sample new random number based on new a so taui=new value+t\n\t3-2. Otherwise, taui= a_old/a_new(taui-t)+t\n\t3-3. Update values in P\n\t\n'''\nimport numpy as np\nfrom copy import deepcopy\n\nclass indexed_priority_queue:\n\t'''\n\t\tindexed priority queue\n\t\tref: WilliamFiset Youtube\n\n\t\ti\t\t\n\t\tvalues[i]: value of key\n\t\tpm[i]\t: position map. heap position of key i\n\t\tim[i]\t: inverse map. 
key of heap position i\n\t'''\n\tdef __init__(self,values=[],keys=[],size=0):\n\t\tself.size=size\n\t\tself.im=[]\n\t\tself.pm=[]\n\t\tif len(keys)!=0 and len(values)!=0:\n\t\t\tself.build(keys,values)\n\t\n\tdef insert(self,ki,value):\n\t\t#Insert value into key index (not in previous heap)\n\t\tself.values[ki]=value\n\t\tself.pm[ki]=self.size\n\t\tself.im[self.size]=ki\n\t\tself.swim(self.size)\n\t\tself.size=self.size+1\n\t\treturn\n\t\n\tdef add(self,value):\n\t\t#Add new value at the end of heap\n\t\tself.insert(self.size+1,value)\n\t\treturn\n\n\tdef swap(self,i,j):\n\t\tself.pm[self.im[i]]=j\t\n\t\tself.pm[self.im[j]]=i\t\n\t\ttmp=self.im[i]\n\t\tself.im[i]=self.im[j]\n\t\tself.im[j]=tmp\n\t\treturn\n\n\tdef parent(self,i):\n\t\treturn int((i-1)/2) \n\tdef left(self,i):\n\t\treturn int(2*i+1)\n\tdef right(self,i):\n\t\treturn int(2*i+2)\n\n\tdef less(self,i,j):\n\t\treturn self.values[self.im[i]]<self.values[self.im[j]]\n\n\tdef swim(self,i):\n\t\tn=i\n\t\tp=self.parent(n)\n\t\twhile n>0 and self.less(n,p):\n\t\t\tself.swap(n,p)\n\t\t\tn=p\n\t\t\tp=self.parent(n)\n\t\treturn\n\n\tdef sink(self,i):\n\t\tn=i\n\t\twhile True:\n\t\t\tl=self.left(n)\n\t\t\tr=self.right(n)\n\t\t\ts=l\n\n\t\t\tif r<self.size and self.less(r,l):\n\t\t\t\ts=r\n\n\t\t\tif l>=self.size or self.less(n,s):\n\t\t\t\tbreak\n\t\n\t\t\tself.swap(s,n)\n\t\t\tn=s\n\t\treturn\n\n\tdef build(self,keys,values):\t\n\t\tself.keys=deepcopy(keys)\n\t\tself.values=deepcopy(values)\n\t\tself.pm=np.arange(self.size).astype(int)\n\t\tself.im=np.arange(self.size).astype(int)\n\t\tfor i in np.arange(0,self.size).astype(int)[::-1]:\n\t\t\tself.sink(i)\n\t\t\tself.swim(i)\n\t\treturn\n\t\n\tdef update(self,key,value):\n\t\tself.values[key]=value\t\n\t\tn=self.pm[key]\n\t\tself.sink(n)\n\t\tself.swim(n)\n\t\treturn\n\n\tdef print(self):\n\t\tprint(\"values \",self.values)\n\t\tprint(\"pm \",self.pm)\n\t\tprint(\"im \",self.im)\n\t\treturn\n\nimport itertools\t\n\nclass next_reaction:\n\tdef __init__(self,model):\t\n\t\t#The model should have 'proFunc','rxnOrder' and 'changeVec'\n\t\tself.model=model\n\t\tself.generate_depG()\n\t\tself.ipq=indexed_priority_queue(size=len(self.model.proFunc))\n\t\t\t\n\tdef generate_depG(self):\n\t\tlf=len(self.model.proFunc)\n\t\tself.depGraph=np.zeros((lf,lf)).astype(bool)\n\t\tfor i,j in itertools.product(range(lf),range(lf)):\n\t\t\t\tself.depGraph[i,j]=np.any(np.logical_and(self.model.changeVec[i]!=0 ,self.model.rxnOrder[j]!=0))\n\t\treturn\n\t\n\tdef build_IPQ(self,X):\n\t\t\n\t\t#Calculate propensity function and corresponding putative time\n\t\tnrxn=len(self.model.proFunc)\n\t\tself.a=np.zeros(nrxn)\n\t\ttaus=np.zeros(nrxn)\n\t\tfor i in range(nrxn):\n\t\t\tself.a[i]=self.model.proFunc[i](X)\n\t\t\ttaus[i]=np.random.exponential(1./(self.a[i]+1e-12))\n\n\t\t#Insert data to ipq and build\n\t\tself.ipq.build(np.arange(nrxn).astype(int),taus)\n\n\t\treturn\n\n\tdef step(self,X):\n\t\t#Choose fired reaction\n\t\trxn=self.ipq.im[0]\n\t\ttau=self.ipq.values[rxn]\n\t\t\n\t\treturn tau, rxn\n\t\n\tdef run(self,Xini,tini,tmax,maxstep=None):\t\n\t\tif len(Xini) != np.shape(self.model.changeVec)[-1]:\n\t\t\tprint(\"State and change vectors have different dimensions!\")\n\t\t\treturn tini,Xini\n\t\tT=np.array([tini])\n\t\tX=np.array(np.array([Xini]).T)\n\t\t\n\t\t#Step 0. Initialization\n\t\tx=Xini\n\t\tself.build_IPQ(Xini)\n\t\tt=tini\n\t\tstep=0\n\t\tgoahead=True\n\t\twhile t<tmax and goahead:\n\t\t\t#Step 1. Choose the reaction with the smallest putative time\n\t\t\ttau,ind=self.step(x)\n\t\t\t\n\t\t\t#Step 2. Update number of molecules and t=tau\n\t\t\tx=x+self.model.changeVec[ind]\n\t\t\tt=tau\n\t\t\t\n\t\t\t#Step 3. Record trajectory\n\t\t\tT=np.append(T,t)\n\t\t\tX=np.hstack((X,np.array([x]).T))\n\t\t\t\n\t\t\tif maxstep!=None and step>=maxstep:\n\t\t\t\tbreak\n\t\t\tstep=step+1\t\n\t\t\t\n\t\t\t#Step 4. 
Update IPQ\t\n\t\t\tupdate_cand=np.argwhere(self.depGraph[ind])[:,0]\n\t\t\tfor i in update_cand:\n\t\t\t\tanew=self.model.proFunc[i](x)\n\t\t\t\tif ind != i:\n\t\t\t\t\ttaunew=(self.a[i]/anew)*(self.ipq.values[i]-t)+t\n\t\t\t\telse:\n\t\t\t\t\ttaunew=np.random.exponential(scale=1./(anew+1e-12))+t\n\t\t\t\tself.a[i]=anew\n\t\t\t\tself.ipq.update(i,taunew)\n\t\t\t\n\t\t#After finishing generation, print data\n\t\treturn T,X\n\t\t\t\t\nif __name__==\"__main__\":\n\tprint(\"Test code for next_reaction.py\")\t\t\n\tfrom model import model\n\t'''\t\n\tproFunc=[\n\tlambda x: x[0]*x[1],\n\tlambda x: x[1]*x[2],\n\tlambda x: x[3]*x[4],\n\tlambda x: x[5],\n\tlambda x: x[4]*x[6]\n\t]\n\tchangeVec=np.array([[-1,-1,1,0,0,0,0],\n\t\t\t\t\t\t[0,-1,-1,1,0,0,0],\n\t\t\t\t\t\t[0,0,0,-1,0,1,0],\n\t\t\t\t\t\t[0,0,0,1,0,-1,1],\n\t\t\t\t\t\t[1,0,0,0,-1,0,-1]]).astype(int)\n\trxnOrder=np.array([[1,1,0,0,0,0,0],\n\t\t\t\t\t\t[0,1,1,0,0,0,0],\n\t\t\t\t\t\t[0,0,0,1,1,0,0],\n\t\t\t\t\t\t[0,0,0,0,0,1,0],\n\t\t\t\t\t\t[0,0,0,0,1,0,1]]).astype(int)\n\t\n\ttestmodel=model(proFunc=proFunc,changeVec=changeVec,rxnOrder=rxnOrder)\n\n\tsolver=next_reaction(model=testmodel)\t\n\n\tXini=[1,2,3,4,3,2,1]\n\n\n\tsolver.build_IPQ(Xini)\t\n\tsolver.ipq.print()\n\t'''\n\t'''\t\n\tfrom direct import direct\n\tproFunc=[\n\t\tlambda x: x[0],\n\t\tlambda x: x[1]\n\t]\n\tchangeVec=np.array([[-1,1,0],[0,-1,1]]).astype(int)\n\trxnOrder=np.array([[1,0,0],[0,1,0]]).astype(int)\n\tXini=np.array([10000,1,0])\n\t\n\tfrom model import model\n\ttestmodel=model(proFunc=proFunc,rxnOrder=rxnOrder,changeVec=changeVec)\n\tsolver1=next_reaction(model=testmodel)\n\tsolver2=direct(model=testmodel)\n\tX1s1=[]\n\tX2s1=[]\n\tX3s1=[]\n\tX1s2=[]\n\tX2s2=[]\n\tX3s2=[]\n\t\n\timport time\n\timport matplotlib.pyplot as plt\n\tt1=time.time()\n\tfor i in range(10**2):\n\t\tT,X=solver2.run(Xini,0,2.0)\n\t\tif i<100:\n\t\t\tplt.plot(T,X[1],lw=1,c='b')\n\t\t#plt.plot(T,oc[1],ls='--',lw=1,c='orange')\n\t\tX1s2.append(X[0,-1])\n\t\tX2s2.append(X[1,-1])\n\t\tX3s2.append(X[2,-1])\n\tt2=time.time()\n\tfor i in range(10**2):\n\t\tT,X=solver1.run(Xini,0,2.0)\n\t\tif i<100:\n\t\t\tplt.plot(T,X[1],ls='--',lw=1,c='r')#,marker='o',ms=2)\n\t\t#plt.plot(T,oc[1],lw=1,c='b')\n\t\tX1s1.append(X[0,-1])\n\t\tX2s1.append(X[1,-1])\n\t\tX3s1.append(X[2,-1])\n\tt3=time.time()\n\n\tplt.ylabel(\"X[1]\")\t\n\tplt.show()\n\t\n\t#x=np.arange(25,70)\n\tplt.hist(X1s1,bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(X1s2,bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\tplt.show()\n\tplt.hist(X2s1,bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(X2s2,bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\tplt.show()\n\tplt.hist(X3s1,bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(X3s2,bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\t'''\n\n\t#Gamma distribution example\n\tfrom direct import direct\n\tproFunc=[\n\t\tlambda x: 0.001*x[0]*x[1],\n\t\tlambda x: x[2],\n\t\tlambda x: x[3],\n\t\tlambda x: x[4]\n\t]\n\tchangeVec=np.array([[-1,-1,1,0,0,0,],\n\t\t\t\t\t\t[0,0,-1,1,0,0],\n\t\t\t\t\t\t[0,0,0,-1,1,0],\n\t\t\t\t\t\t[1,0,0,0,-1,1]]).astype(int)\n\trxnOrder=np.array([[1,1,0,0,0,0],\n\t\t\t\t\t\t[0,0,1,0,0,0],\n\t\t\t\t\t\t[0,0,0,1,0,0],\n\t\t\t\t\t\t[0,0,0,0,1,0]]).astype(int)\n\tXini=np.array([1000,1000,0,0,0,0])\n\t\n\tfrom model import 
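A quick sanity check of the indexed_priority_queue defined above: build from putative times, peek the minimum key, then decrease one key and peek again (requires numpy, as in the module itself):

ipq = indexed_priority_queue(size=4)
ipq.build(keys=[0, 1, 2, 3], values=[5.0, 1.0, 3.0, 4.0])
print(ipq.im[0])     # 1: the reaction with the smallest putative time
ipq.update(3, 0.5)   # reaction 3 just got a smaller putative time
print(ipq.im[0])     # 3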
model\n\ttestmodel=model(proFunc=proFunc,rxnOrder=rxnOrder,changeVec=changeVec)\n\tsolver1=next_reaction(model=testmodel)\n\tprint(solver1.depGraph)\n\tsolver2=direct(model=testmodel)\n\tXs2=np.array([Xini])\n\tXs1=np.array([Xini])\n\t\t\n\timport time\n\timport matplotlib.pyplot as plt\n\tt1=time.time()\n\tfor i in range(10):\n\t\tT,X=solver2.run(Xini,0,2.0)\n\t\tif i<100:\n\t\t\tplt.plot(T,X[4],lw=1,c='b')\n\t\t#plt.plot(T,oc[1],ls='--',lw=1,c='orange')\n\t\tXs2=np.vstack((Xs2,X[:,-1]))\n\tt2=time.time()\n\tfor i in range(10):\n\t\tT,X=solver1.run(Xini,0,2.0)\n\t\tif i<100:\n\t\t\tplt.plot(T,X[4],ls='--',lw=1,c='r')#,marker='o',ms=2)\n\t\t#plt.plot(T,oc[1],lw=1,c='b')\n\t\tXs1=np.vstack((Xs1,X[:,-1]))\n\tt3=time.time()\n\n\tplt.ylabel(\"X[4]\")\t\n\tplt.show()\n\t'''\t\n\t#x=np.arange(25,70)\n\tplt.hist(Xs1[0],bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(Xs2[0],bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\tplt.show()\n\tplt.hist(X2s1,bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(X2s2,bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\tplt.show()\n\tplt.hist(X3s1,bins=40,label=r'$Nxt-%fs$'%(t3-t2),histtype=u'step',density=True)\n\tplt.hist(X3s2,bins=40,label=r'$Dir-%fs$'%(t2-t1),histtype=u'step',density=True)\n\tplt.legend(frameon=False)\n\n\tplt.show()\n\t'''\n","repo_name":"schwarzg/gillespie","sub_path":"next_reaction.py","file_name":"next_reaction.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29432919067","text":"from ..core import Module\n\nclass Scraper(Module):\n    \"\"\"\n    \"\"\"\n\n    # Module metainfo\n    meta = {\n        \"name\": \"\",\n        \"author\": \"\",\n        \"supertype\": None,\n        \"type\": \"scraper\",\n        \"required_keys\": [],\n        \"short_description\": \"\",\n        \"long_description\": \"\",\n        \"tags\": []\n    }\n\n    options = {}\n\n    def __init__(self):\n        super(Scraper, self).__init__()\n\n        # Extracted data\n        self.extracted_data = {\n            'type': '',\n            'steps': []\n        }\n        self.max_steps = 5\n\n    def run(self, *args, **kwargs):\n        self.setup(*args, **kwargs)\n        for _ in range(self.max_steps):\n            result = self.step()\n            self.extracted_data['steps'].append(result)\n            self._update_step_state()\n        self.freeze()\n        self.finish()\n","repo_name":"vurmux/gorynych","sub_path":"gorynych/modules/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14808344164","text":"import os\nimport sys\nimport unittest\nfrom google.appengine.ext import testbed\n\n\nclass TestTool(unittest.TestCase):\n    \"\"\"Test set up tools for unit testing\"\"\"\n    lib_path = os.path.abspath(os.path.join(__file__, '..', '..', 'py'))\n    sys.path.append(lib_path)\n    test_email = \"test@example.com\"\n    user_id = '123'\n\n    def set_test(self):\n        os.environ['ENV'] = 'prod'\n        self.testbed = testbed.Testbed()\n        self.testbed.activate()\n        self.testbed.setup_env(USER_EMAIL=self.test_email, USER_ID=self.user_id,\n                               USER_IS_ADMIN='1', overwrite=True)\n        self.testbed.init_user_stub()\n        self.testbed.init_datastore_v3_stub()\n        self.testbed.init_memcache_stub()\n\n    def tearDown(self):\n        self.testbed.deactivate()\n\n    def set_user(self, email, id):\n        self.test_email = email\n        self.user_id = id\n        self.testbed.setup_env(USER_EMAIL=email, USER_ID=id)\n\n    def 
set_non_admin(self):\n self.testbed.setup_env(USER_IS_ADMIN='0')\n","repo_name":"YiningGuo/Habit-Tracker","sub_path":"tests/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73754457814","text":"from flask import abort\n\nfrom app.credentials.domain.Credential import Credential\nfrom app.credentials.domain.CredentialId import CredentialId\nfrom app.credentials.domain.CredentialStructure import CredentialStructure\nfrom app.credentials.interface.FirestoreRepository import CredentialRepository\nfrom app.auth_types.interface.FirestoreRepository import AuthTypeRepository\nfrom app.users.interface.FirestoreRepository import UserRepository\n\n\nclass ShowCredentials(CredentialStructure):\n\n def setAuthTypeId(self, credential_auth_type_id):\n auth_repo = AuthTypeRepository()\n result = auth_repo.listAuthTypeById(credential_auth_type_id)\n if result.exists is False:\n abort(404, \"Auth Type not found\")\n\n self.setAuthTypeRef(result.reference)\n\n def setUserId(self, user_id):\n user_repo = UserRepository()\n result = user_repo.listUsersById(user_id)\n if result.exists is False:\n abort(404, \"User not found\")\n\n self.setUserRef(result.reference)\n\n def execute(self, fill_id=None):\n repo = CredentialRepository()\n credentials = []\n if fill_id is not None:\n credential_id = CredentialId(fill_id)\n if credential_id.is_valid() is False:\n abort(400)\n\n result = repo.listCredentialsById(credential_id.value)\n if result.exists:\n credential = Credential()\n credential.from_firestore_document(result)\n params = credential.to_dict()\n params['user_id'] = params['user'].id\n params.pop('user')\n params['auth_type_id'] = params['auth_type'].id\n params.pop('auth_type')\n credentials.append(params)\n else:\n result = repo.listCredentials(self.getUserRef(), self.getAuthTypeRef(), self.getUsername(), self.getToken())\n for doc in result:\n if doc.exists:\n credential = Credential()\n credential.from_firestore_document(doc)\n params = credential.to_dict()\n params['user_id'] = params['user'].id\n params.pop('user')\n params['auth_type_id'] = params['auth_type'].id\n params.pop('auth_type')\n credentials.append(params)\n\n return credentials\n","repo_name":"JeanPiffaut/Oauth2-API-REST","sub_path":"app/credentials/application/ShowCredentials.py","file_name":"ShowCredentials.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11024000978","text":"import logging\nimport numpy as np\n\nfrom .signal import AcSignalAnalysis\nfrom ...data import NoiseDensity, MultiNoiseDensity, Series\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass AcNoiseAnalysis(AcSignalAnalysis):\n \"\"\"Small signal circuit analysis\"\"\"\n DEFAULT_INPUT_IMPEDANCE = 50\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._noise_sink = None\n\n @property\n def noise_sink(self):\n return self._noise_sink\n\n @noise_sink.setter\n def noise_sink(self, sink):\n if not hasattr(sink, \"name\"):\n # This is an element name. Get the object. 
We use the user-supplied circuit here because\n # the copy may not have been created by this point.\n sink = self.circuit.get_element(sink)\n self._noise_sink = sink\n\n def calculate(self, input_type, sink, impedance=None, incoherent_sum=False, input_refer=False,\n **kwargs):\n \"\"\"Calculate noise from circuit elements at a particular element.\n\n Parameters\n ----------\n input_type : str\n Input type, either \"voltage\" or \"current\".\n sink : str or :class:`.Component` or :class:`.Node`\n The element to calculate noise at.\n impedance : float or :class:`.Quantity`, optional\n Input impedance. If None, the default is used.\n incoherent_sum : :class:`bool` or :class:`dict`, optional\n Incoherent sum specification. If True, the incoherent sum of all noise in the circuit at\n the sink is calculated and added to the solution. Alternatively, this parameter can be\n specified as a dict containing labels as keys and sequences of noise sources as values.\n The noise sources can be either :class:`.NoiseDensity` objects or noise specifier\n strings as supported by :meth:`.Solution.get_noise`. The values may alternatively be the\n strings \"all\", \"allop\" or \"allr\" to compute noise from all components, all op-amps and\n all resistors, respectively. Sums are plotted in shades of grey determined by the\n plotting configuration's ``sum_greyscale_cycle_start``, ``sum_greyscale_cycle_stop`` and\n ``sum_greyscale_cycle_count`` values.\n input_refer : bool, optional\n Refer the noise to the input.\n\n Other Parameters\n ----------------\n frequencies : :class:`np.ndarray` or sequence\n The frequency vector to calculate the response with.\n node, node_p, node_n : :class:`.Node`\n The node or nodes to make the input. The `node` parameter sets a single, grounded input,\n whereas `node_p` and `node_n` together create a floating input.\n print_equations : :class:`bool`, optional\n Print the circuit equations.\n print_matrix : :class:`bool`, optional\n Print the circuit matrix.\n\n Returns\n -------\n :class:`~.solution.Solution`\n Solution containing noise spectra at the specified sink (or projected sink).\n \"\"\"\n self.noise_sink = sink\n if impedance is None:\n LOGGER.warning(f\"assuming default input impedance of {self.DEFAULT_INPUT_IMPEDANCE}\")\n impedance = self.DEFAULT_INPUT_IMPEDANCE\n self._do_calculate(input_type, impedance=impedance, is_noise=True, **kwargs)\n if incoherent_sum:\n self._compute_sums(incoherent_sum)\n if input_refer:\n self._refer_sink_noise_to_input()\n return self.solution\n\n def circuit_matrix(self, *args, **kwargs):\n \"\"\"Calculate and return matrix used to solve for circuit noise at a \\\n given frequency.\n\n Returns\n -------\n :class:`scipy.sparse.spmatrix`\n The circuit matrix.\n \"\"\"\n # Return the transpose of the response matrix.\n return super().circuit_matrix(*args, **kwargs).T\n\n @property\n def right_hand_side_index(self):\n \"\"\"Right hand side excitation component index\"\"\"\n return self.noise_element_index\n\n def _build_solution(self, noise_matrix):\n # empty noise sources\n empty = []\n\n # loop over circuit's noise sources\n for noise in self._current_circuit.noise_sources:\n # get this element's noise spectral density\n spectral_density = noise.spectral_density(frequencies=self.frequencies)\n\n if np.all(spectral_density) == 0:\n # null noise source\n empty.append(noise)\n\n if noise.element_type == \"component\":\n # noise is from a component; use its matrix index\n index = self.component_matrix_index(noise.component)\n elif 
noise.element_type == \"node\":\n # noise is from a node; use its matrix index\n index = self.node_matrix_index(noise.node)\n else:\n raise ValueError(\"unrecognised noise source present in circuit\")\n\n # get response from this element to every other\n response = noise_matrix[index, :]\n\n # multiply response from element to noise output element by noise entering\n # at that element, for all frequencies\n projected_noise = np.abs(response * spectral_density)\n\n # create series\n series = Series(x=self.frequencies, y=projected_noise)\n\n # add noise function to solution\n self.solution.add_noise(NoiseDensity(source=noise, sink=self.noise_sink, series=series))\n\n if empty:\n empty_sources = \", \".join([str(response) for response in empty])\n LOGGER.debug(f\"empty noise sources: {empty_sources}\")\n\n def _compute_sums(self, sum_spec):\n \"\"\"Compute incoherent noise sums and add them to the solution.\n\n Parameters\n ----------\n sum_spec : :class:`bool` or :class:`dict`\n Incoherent sum specification. If True, the incoherent sum of all noise in the circuit at\n the sink is calculated and added to the solution. Alternatively, this parameter can be\n specified as a dict containing labels as keys and sequences of noise sources as values.\n The noise sources can be either :class:`.NoiseDensity` objects or noise specifier\n strings as supported by :meth:`.Solution.get_noise`. The values may alternatively be the\n strings \"all\", \"allop\" or \"allr\" to compute noise from all components, all op-amps and\n all resistors, respectively. Sums are plotted in shades of grey determined by the\n plotting configuration's ``sum_greyscale_cycle_start``, ``sum_greyscale_cycle_stop`` and\n ``sum_greyscale_cycle_count`` values.\n \"\"\"\n if sum_spec is True:\n # Sum using all noise and the default MultiNoiseDensity label.\n sum_spec = {None: self.solution.noise[self.solution.DEFAULT_GROUP_NAME]}\n for label, spectra in sum_spec.items():\n if spectra is None:\n raise ValueError(\"noise sum spectra cannot be empty\")\n if isinstance(spectra, str):\n identifier = spectra.lower()\n if identifier == \"all\":\n constituents = self.solution.noise[self.solution.DEFAULT_GROUP_NAME]\n elif identifier == \"allop\":\n constituents = self.solution.opamp_noise[self.solution.DEFAULT_GROUP_NAME]\n elif identifier == \"allr\":\n constituents = self.solution.resistor_noise[self.solution.DEFAULT_GROUP_NAME]\n else:\n raise ValueError(f\"unrecognised noise collection '{spectra}'\")\n else:\n constituents = []\n for spectrum in spectra:\n if not isinstance(spectrum, NoiseDensity):\n spectrum = self.solution.get_noise(source=spectrum, sink=self.noise_sink)\n constituents.append(spectrum)\n\n self.solution.add_noise_sum(MultiNoiseDensity(constituents=constituents,\n sink=self.noise_sink, label=label))\n\n def _refer_sink_noise_to_input(self):\n \"\"\"Project the calculated noise to the input.\"\"\"\n LOGGER.info(\"projecting noise to input\")\n\n input_component = self._current_circuit.input_component\n if self.input_type == \"voltage\":\n input_element = input_component.node2\n else:\n input_element = input_component\n projection_analysis = self.to_signal_analysis()\n # Grab the input nodes from the noise circuit.\n node_n, node_p = input_component.nodes\n projection = projection_analysis.calculate(frequencies=self.frequencies,\n input_type=self.input_type, node_n=node_n,\n node_p=node_p)\n # Transfer function from input to noise sink.\n input_response = projection.get_response(source=input_element, 
sink=self.noise_sink)\n\n for __, noise_spectra in self.solution.noise.items():\n for noise in noise_spectra:\n self.solution.replace(noise, noise * input_response.inverse())\n\n for __, noise_sums in self.solution.noise_sums.items():\n for noise in noise_sums:\n self.solution.replace(noise, noise * input_response.inverse())\n\n def to_signal_analysis(self):\n \"\"\"Return a new signal analysis using the settings defined in the current analysis.\"\"\"\n return AcSignalAnalysis(self.circuit, print_progress=self.print_progress,\n stream=self.stream)\n\n @property\n def noise_element_index(self):\n \"\"\"Noise element matrix index\"\"\"\n try:\n return self.component_matrix_index(self.noise_sink)\n except ValueError:\n pass\n\n try:\n return self.node_matrix_index(self.noise_sink)\n except ValueError:\n pass\n\n raise ValueError(f\"noise output element '{self.noise_sink}' is not in the circuit\")\n","repo_name":"SeanDS/zero","sub_path":"zero/analysis/ac/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":10143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"2947106436","text":"# Factorio Calc Scraper - Marshall Ferguson - 8/2020\n\n# TODO: (DONE) Figure out template for url to get different results from calc\n\n# base url = https://kirkmcdonald.github.io/calc.html\n\n# data set = #data=\"inset data set\"\n# data set determines which version of Factorio the recipe is for\n# data set should be kept up to date in case of recipe changes\n\n# item = &items=\"inset item name\"\n# item determines which item the recipe is for\n# item names will name to exactly match a specific syntax\n\n# factories = :f:\"inset number of factories\"\n# factories determines the number of factories (assembly machines) working on the recipe\n# factories syntax defaults to assembly machine 1, another change to url is needed for assembly machines 2 and 3\n\n# rate = :r:\"inset rate\"\n# rate determines the rate at which the recipe is worked on\n# rate defaults to items/minute, another change to url is needed for items/seond and items/hour\n\n# example url = https://kirkmcdonald.github.io/calc.html#data=1-0-0&items=electronic-circuit:f:1\n# example url syntax = base url + data set + items + factories OR rate\n\n# In the future, this script will be able to handle different levels of assembly machines and rates, but for now it will focus on the defaults\n\n# Imports\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport mechanicalsoup\nimport time\nimport math\n\n# Output a menu to guide user's input\n\nprint(\"Welcome to the Factorio Calc Scraper!\")\ntime.sleep(1)\nprint(\"This script will scrape the site of the Factorio Calculator to calculate how many factories you need in total for a recipe.\")\ntime.sleep(2)\nprint(\"All you have to do is answer the following prompts.\")\ntime.sleep(2)\nitem_input = input(\"What item do you want to make? (If multiple words, must be in following syntax: word1-word2) \") \n# item_input = \"space-science-pack\"\nfactories_or_rate = input(\"Would you like to go by number of factories or rate of item per minute? 
(factories or rate) \")\n# factories_or_rate = \"factories\"\n# factories_input = \"1\"\n# rate_input = \"3000\"\nif factories_or_rate == \"factories\":\n factories_input = input(\"How many factories will be making the item? \")\nelif factories_or_rate == \"rate\":\n rate_input = input(\"At what rate do you want to make the item? (in items per minute) \")\nelse:\n print(\"\")\n\n# TODO: (DONE) Request web page\n\nexample_url = \"https://kirkmcdonald.github.io/calc.html#data=1-0-0&items=electronic-circuit:f:1\"\n# base_url = \"https://kirkmcdonald.github.io/calc.html\"\n# data_set = \"#data=1.0.0\"\n# # This will be a variable input from the prompt later on, but for now will be hard coded as this version\n# item = \"&items=\" + item_input\n# factories = \":f:\" + factories_input\n# rate = \":r:\" + rate_input \n\n# browser = mechanicalsoup.StatefulBrowser()\n# url = base_url + data_set + item + factories\nurl = example_url\n# print(url)\n# page = browser.get(url)\n# print(page)\n# print(page.soup)\n\n# TODO: (DONE) Automate inputting info into calc site \n\nPATH = \"C:\\\\Program Files (x86)\\\\chromedriver.exe\"\ndriver = webdriver.Chrome(PATH)\ndriver.get(url)\ndriver.implicitly_wait(10)\nactions = ActionChains(driver)\n\n# Not sure why this code doesn't work while the dropdownWrapper selection and clicking does.....\n\n# csv_link = driver.find_element_by_link_text(\"CSV\")\n# print(csv_link)\n# print(type(csv_link))\n# driver.implicitly_wait(5)\n# actions.click(csv_link).perform()\n\n# Find the desired HTML elements on the website with selenium\n\nitem_dropdown = driver.find_elements_by_class_name(\"dropdownWrapper\")[0]\n# print(item_dropdown)\n# print(type(item_dropdown))\nActionChains(driver).click(item_dropdown).perform()\nActionChains(driver).reset_actions()\n\nsearch_bar = driver.find_element_by_class_name(\"search\")\n# print(search_bar)\n# print(type(search_bar))\nActionChains(driver).send_keys(item_input).perform()\nActionChains(driver).reset_actions()\n\nitem_link = driver.find_element_by_xpath('//img[@alt=\"' + item_input + '\"]')\n# print(item_link)\n# print(type(item_link))\nActionChains(driver).click(item_link).perform()\nActionChains(driver).reset_actions()\n\nfactories_input_field = driver.find_element_by_xpath(\"/html/body/table/tbody/tr/td[@id='targetparent']/ul[@id='targets']/li[@class='target']/input[1]\")\nrate_input_field = driver.find_element_by_xpath(\"/html/body/table/tbody/tr/td[@id='targetparent']/ul[@id='targets']/li[@class='target']/input[2]\")\n# print(factories_input_field)\n# print(type(factories_input_field))\n# print(rate_input_field)\n# print(type(rate_input_field))\n\n# Input the user supplied input into a search bar\n\nif factories_or_rate == \"factories\":\n actions.send_keys_to_element(factories_input_field, u'\\ue005')\n actions.send_keys_to_element(factories_input_field, factories_input)\n actions.send_keys_to_element(factories_input_field, u'\\ue007')\n actions.perform()\n actions.reset_actions()\nelif factories_or_rate == \"rate\":\n actions.send_keys_to_element(rate_input_field, u'\\ue005')\n actions.send_keys_to_element(rate_input_field, rate_input)\n actions.send_keys_to_element(rate_input_field, u'\\ue007')\n actions.perform()\n actions.reset_actions()\nelse:\n print(\"\")\n\n# TODO: (DONE) Parse through HTML to get total number of assembly machinces needed\n # Figure out how to loop through all the XPaths and put the WebElements in a list\n # Figure out how to extract the text from the html of a list of WebElements \n\n# Create lists of 
WebElements\n\nrates_elem_list = []\nrates_elem_list.extend(driver.find_elements_by_xpath(\"//table[@id='totals']/tr/td[2]/tt\"))\n# print(all_rates_elem_list)\n# print(len(all_rates_elem_list))\nfactory_elem_list = []\nfactory_elem_list.extend(driver.find_elements_by_xpath(\"//td[@class='factory right-align'][1]/tt\"))\n# print(factory_elem_list)\n# print(len(factory_elem_list))\nfactory_img_elem_list = []\nfactory_img_elem_list.extend(driver.find_elements_by_xpath(\"//td[@class='pad factory right-align leftmost']/img[@class='icon display']\"))\n# print(factory_img_elem_list)\n# print(len(factory_img_elem_list))\nitem_img_elem_list = []\nitem_img_elem_list.extend(driver.find_elements_by_xpath(\"//td[@class='right-align']/img\"))\n# print(item_img_elem_list)\n# print(len(item_img_elem_list))\n\n# Loop through lists of WebElements and create lists of innerHTML(str's)\n\nrates_innerHTML_list = []\nfor i in rates_elem_list:\n rates_innerHTML_list.append(i.get_attribute(\"innerHTML\"))\n# print(\"rates_innerHTML_list is:\")\n# print(rates_innerHTML_list)\n# print(len(rates_innerHTML_list))\n\nfactory_elem_str_list = []\nfor i in factory_elem_list:\n factory_elem_str_list.append(i.get_attribute(\"innerHTML\"))\n# print(\"factory_elem_str_list is:\")\n# print(factory_elem_str_list)\n# print(len(factory_elem_str_list))\n\n# Clean out empty str's from list\n\nfactory_elem_str_list = ' '.join(factory_elem_str_list).split()\n\n# Loop through list of str's, remove encoding characters, and coerce to float\n\nfactory_elem_float_list = []\nfor i in factory_elem_str_list:\n i = i.rstrip(\" \")\n factory_elem_float_list.append(float(i))\n# print(\"factory_elem_float_list is:\")\n# print(factory_elem_float_list)\n# print(len(factory_elem_float_list))\n\n# Loop through list of floats and coerce to rounded up ints\n\nfactory_elem_int_list = []\nfor i in factory_elem_float_list:\n factory_elem_int_list.append(int(math.ceil(i)))\n# print(\"factory_elem_int_list is:\")\n# print(factory_elem_int_list)\n# print(len(factory_elem_int_list))\n\n# Loop through list of str's, remove encoding characters, and coerce to float\n\nrate_float_list = []\nfor i in rates_innerHTML_list:\n i = i.rstrip(\" \")\n rate_float_list.append(float(i))\n# print(\"rate_float_list is:\")\n# print(rate_float_list)\n# print(len(rate_float_list))\n\n# Loop through lists of WebElements and create lists of img alt text (str's)\n\nfactory_img_elem_alt_text_list = []\nfor i in factory_img_elem_list:\n factory_img_elem_alt_text_list.append(i.get_attribute(\"alt\"))\n# print(\"factory_img_elem_alt_text_list is:\")\n# print(factory_img_elem_alt_text_list)\n# print(len(factory_img_elem_alt_text_list))\n\nitem_img_elem_alt_text_list = []\nfor i in item_img_elem_list:\n item_img_elem_alt_text_list.append(i.get_attribute(\"alt\"))\n# print(\"item_img_elem_alt_text_list is:\")\n# print(item_img_elem_alt_text_list)\n# print(len(item_img_elem_alt_text_list))\n\n# Create an empty dictionary and use it to add the keys to the actual dictionary\n\nempty_dict = {}\nitem_dict = empty_dict.fromkeys(item_img_elem_alt_text_list)\n# print(item_dict)\n\n# Add values to dictionary from various lists\n\nfor a, b, c, d in zip(item_img_elem_alt_text_list, factory_img_elem_alt_text_list, factory_elem_float_list, rate_float_list):\n item_dict[a] = [b, c, d]\n# print(item_dict)\n\n# Loop through dictionary and print desired output to user\n # TEMPLATE: \"You will need {number_of_factories} {name_of_factories}'s making {item_name} at rate of {rate} items per minute\"\n # 
\"You will need {total_number_of_factories} factories in total to make {desired_item}\"\n\noil_in_dict = False\n\nif 'crude-oil' in item_dict:\n oil_in_dict = True\n oil_rate = rate_float_list[-1]\n item_dict.pop('crude-oil')\n\ndriver.quit()\n\nfor i in item_dict:\n print(f\"You will need {item_dict.get(i)[1]} {item_dict.get(i)[0]}'s making {i} at a rate of {item_dict.get(i)[2]} items per minute\")\nif oil_in_dict:\n print(f\"You will need to produce crude-oil at a rate of {oil_rate} items per minute\")\nprint(f\"You will need {sum(factory_elem_int_list)} factories in total to make {item_input}\")\n\n# TODO: (DONE) Clean up output\n# Format - \"You will need {num_of_assemblers} to create {item}\" repeat for item and each subitem and then \"You will need a total of {tot_num_of_assemblers}\"\n # TODO: (DONE) Cycle through item and subitem names similar to number of assemblers\n # TODO: (DONE) Cycle through assembler_elem_list and check to make sure it is not a liquid\n # Something like for item and subitem names but with image before assembler_count\n# TODO: (DONE) Feature - change the assembler count or rate with input from prompt\n# TODO: (DONE) Feature - ouput includes how many miners, furnaces, chemical plants, and oil refineries neeeded for recipe \n # Step 1: Chemical Plants\n # Step 2: Furnaces\n # Step 3: Miners\n # Step 4: Oil Refineries\n# TODO: Feature - ouput includes how much power each subrecipe will take and how much power the recipe will take in total\n# TODO: Feature - ouput includes how many belts of each material each subrecipe needs and how many belts of the item will be produced\n# TODO: (DONE) Feature - output includes rate at which each subitem will be made in order to supply recipe for main item\n# TODO: Add crude oil into output","repo_name":"moferg/factorio-calc-scraper","sub_path":"factorio_calc_scraper.py","file_name":"factorio_calc_scraper.py","file_ext":"py","file_size_in_byte":10918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26497636794","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\n\nis_small = list(0 for _ in range(n + 1))\n\nfor _ in range(m):\n is_small[int(sys.stdin.readline())] = 1\n\nminimum_jump = list(list(0x7fffffff for _ in range(n + 1)) for _ in range(200))\n\nif not is_small[2]:\n minimum_jump[1][2] = 1\n\nfor now_stone in range(2, n + 1):\n if is_small[now_stone]:\n continue\n for jump in range(1, min(199, n)):\n if jump > 1 and now_stone + jump - 1 <= n and not is_small[now_stone + jump - 1]:\n minimum_jump[jump - 1][now_stone + jump - 1] = min(\n minimum_jump[jump - 1][now_stone + jump - 1],\n minimum_jump[jump][now_stone] + 1\n )\n if jump + now_stone + 1 <= n and not is_small[now_stone + jump + 1]:\n minimum_jump[jump + 1][now_stone + jump + 1] = min(\n minimum_jump[jump + 1][now_stone + jump + 1],\n minimum_jump[jump][now_stone] + 1\n )\n if jump + now_stone <= n and not is_small[now_stone + jump]:\n minimum_jump[jump][now_stone + jump] = min(\n minimum_jump[jump][now_stone + jump],\n minimum_jump[jump][now_stone] + 1\n )\nresult = 0x7fffffff\nfor jump in range(1, 200):\n result = min(result, minimum_jump[jump][n])\n\nif result != 0x7fffffff:\n print(result)\nelse:\n print(-1)","repo_name":"kyhdudgns113/KraftonJungle","sub_path":"Krafton_Week_4_Red_2/1. 
DP/9_2253_점프.py","file_name":"9_2253_점프.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36424014009","text":"from rest_framework import status\n\nfrom rest_framework.decorators import action\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.permissions import IsAuthenticated, SAFE_METHODS\nfrom rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom django.db.models import Sum\nfrom django.http import HttpResponse\nfrom djoser.views import UserViewSet\nfrom rest_framework.exceptions import ValidationError\n\nfrom recipes.models import (\n Tag,\n Ingredient,\n Recipe,\n FavoriteRecipe,\n ShoppingList,\n RecipeIngredient\n)\nfrom users.models import User, Subscribe\nfrom api.serializers import (\n IngredientSerializer,\n ShortRecipeSerializer,\n TagSerializer,\n SubscriptionsSerializer,\n CustomUserSerializer,\n GetRecipeSerializer,\n CreateRecipeSerializer\n)\nfrom api.permissions import IsAdminOrReadOnly, IsAuthorOrReadOnly\nfrom api.pagination import Pagination\nfrom api.filters import RecipeFilter, IngredientFilter\n\n\nclass IngredientViewSet(ReadOnlyModelViewSet):\n queryset = Ingredient.objects.all()\n serializer_class = IngredientSerializer\n permission_classes = (IsAdminOrReadOnly,)\n filter_backends = (DjangoFilterBackend,)\n filterset_class = IngredientFilter\n\n\nclass TagViewSet(ReadOnlyModelViewSet):\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n permission_classes = (IsAdminOrReadOnly,)\n\n\nclass RecipeViewSet(ModelViewSet):\n queryset = Recipe.objects.all()\n permission_classes = (IsAuthorOrReadOnly,)\n pagination_class = Pagination\n filter_backends = (DjangoFilterBackend,)\n filterset_class = RecipeFilter\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n def get_serializer_class(self):\n if self.request.method in SAFE_METHODS:\n return GetRecipeSerializer\n return CreateRecipeSerializer\n\n @action(\n detail=True,\n methods=('post', 'delete'),\n permission_classes=(IsAuthenticated,)\n )\n def favorite(self, request, pk):\n if request.method == 'POST':\n recipe = get_object_or_404(Recipe, id=pk)\n obj, created = FavoriteRecipe.objects.get_or_create(\n user=request.user,\n recipe=recipe\n )\n if created:\n serializer = ShortRecipeSerializer(recipe)\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response({'Ошибка': 'Рецепт уже есть в избранном'},\n status=status.HTTP_400_BAD_REQUEST)\n\n obj = FavoriteRecipe.objects.filter(user=request.user,\n recipe__id=pk).delete()\n\n if obj[0] > 0:\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'Ошибка': 'Рецепт отсутствует в избранном'},\n status=status.HTTP_400_BAD_REQUEST)\n\n @action(\n detail=True,\n methods=('post', 'delete'),\n permission_classes=(IsAuthenticated,)\n )\n def shopping_cart(self, request, pk):\n if request.method == 'POST':\n recipe = get_object_or_404(Recipe, id=pk)\n obj, created = ShoppingList.objects.get_or_create(\n user=request.user,\n recipe=recipe\n )\n if created:\n serializer = ShortRecipeSerializer(recipe)\n return Response(serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response({'Ошибка': 'Рецепт уже есть в списке покупок'},\n status=status.HTTP_400_BAD_REQUEST)\n\n obj = 
ShoppingList.objects.filter(user=request.user,\n recipe__id=pk).delete()\n\n if obj[0] > 0:\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'Ошибка': 'Рецепт отсутствует в списке покупок'},\n status=status.HTTP_400_BAD_REQUEST)\n\n @action(\n detail=False,\n permission_classes=(IsAuthenticated,)\n )\n def download_shopping_cart(self, request):\n user = request.user\n shopping_list = user.shopping_list.all()\n\n if not shopping_list:\n return Response(\"Список покупок пуст.\",\n status=status.HTTP_400_BAD_REQUEST)\n\n ingredients = RecipeIngredient.objects.filter(\n recipe__shopping_list__user=user\n ).values(\n 'ingredient__name',\n 'ingredient__measurement_unit'\n ).annotate(amount=Sum('amount'))\n\n content = 'Список покупок:\\n\\n'\n for ingredient in ingredients:\n name = ingredient.get('ingredient__name')\n measurement_unit = ingredient.get(\n 'ingredient__measurement_unit'\n )\n amount = ingredient.get('amount')\n content += f'{name} ({measurement_unit}) - {amount}\\n'\n\n response = HttpResponse(content, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"shoplist.txt\"'\n return response\n\n\nclass UserViewSet(UserViewSet):\n queryset = User.objects.all()\n serializer_class = CustomUserSerializer\n pagination_class = Pagination\n\n @action(\n detail=True,\n methods=('post', 'delete'),\n permission_classes=(IsAuthenticated,)\n )\n def subscribe(self, request, **kwargs):\n user = request.user\n author_id = self.kwargs.get('id')\n author = get_object_or_404(User, id=author_id)\n\n if request.method == 'POST':\n serializer = SubscriptionsSerializer(\n author,\n data=request.data,\n context={'request': request}\n )\n serializer.is_valid(raise_exception=True)\n Subscribe.objects.create(user=user, author=author)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n if request.method == 'DELETE':\n if not Subscribe.objects.filter(user=user, author=author).exists():\n raise ValidationError(\n detail='Вы и так не подписаны на этого пользователя',\n code=status.HTTP_400_BAD_REQUEST\n )\n subscription = get_object_or_404(Subscribe,\n user=user,\n author=author)\n subscription.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(\n detail=False,\n permission_classes=(IsAuthenticated,)\n )\n def subscriptions(self, request):\n user = request.user\n queryset = User.objects.filter(subscribing__user=user)\n paginated_queryset = self.paginate_queryset(queryset)\n serializer = SubscriptionsSerializer(\n paginated_queryset,\n many=True,\n context={'request': request}\n )\n return self.get_paginated_response(serializer.data)\n","repo_name":"DayKotya/foodgram-project-react","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33407523331","text":"# http://stackoverflow.com/questions/17322041/visualizing-a-2d-random-walk-in-python\n\n# def original_walk():\n# walk = randomWalkb(25)\n# print(walk)\n# plt.plot(walk[0],walk[1],'b+', label= 'Random walk')\n# plt.axis([-10,10,-10,10])\n# plt.show()\n\nmass = 0.9 # number between 0 and 1 that determines how likely the particle is to keep its momentum, related to expected length of straight run (=/100?)\n\nx_max = 40\ny_max = 70\nborder = False\nif border:\n border_width = 0\n border_thick = 3\nwrap_line = False\nweird = False # still doesn't do what I wanted (see use in function random_walk); supposed to 
make the gaps between parallel lines not so quantized\n\ndef get_origin():\n    return (random.randint(-x_max,x_max),random.randint(-y_max,y_max))\n\ndef random_walk(length, origin = (0,0), repeat = True, use_momentum = False):\n    momentum = 0\n    walk_x = [origin[0]]\n    walk_y = [origin[1]]\n\n    # also keep the steps here so that if the more efficient way of checking x before y fails, then we still can check the pair without zip\n    walk_path = [origin]\n    \n    for i in range(length):\n        try_again = True # first time for this step in the length\n        stopper = 0 # allows repeating of values if an unseen one is not reached after 50 tries\n        while try_again and stopper < 50:\n            x,y = walk_x[-1],walk_y[-1]\n            x_,y_,m_ = random_step_super(x,y,momentum=momentum)\n            if use_momentum:\n                momentum = m_\n            if repeat or x_ not in walk_x or y_ not in walk_y:\n                try_again = False\n            else:\n                try_again = (x_,y_) in walk_path\n            stopper += 1\n        if weird:\n            x_ = x_ * max(random.normalvariate(1,0.5),0)\n            y_ = y_ * max(random.normalvariate(1,0.5),0)\n        walk_x.append(x_)\n        walk_y.append(y_)\n        walk_path.append((x_,y_)) # record the newly visited point so the repeat check above can see it\n        # if use_momentum:\n        #     x_f = walk_x[-1]\n        #     x_i = walk_x[-2]\n        #     y_f = walk_y[-1]\n        #     y_i = walk_y[-2]\n        #     if x_f != x_i:\n        #         momentum = iudhxueix\n        #     elif y_f != y_i:\n        #         momentum = uoedfxidb\n        #     else:\n        #         print(\"function random_walk: You are moving in both directions at once. Check the construction of walk_x and walk_y.\")\n    return walk_path\n    # fucking around: return [(i/(j+1),j/(i+1)) for i in range(math.floor(math.sqrt(length))) for j in range(math.floor(math.sqrt(length)))]\n\ndef random_step_super(x,y,momentum = 0):\n    # momentum equal to 0 means anything goes, can be set by having parameter use_momentum false on function random_walk\n    # momentum in range(1,5) means that same direction will be kept with probability mass, or a new choice will occur\n    # the chosen direction is returned as the third value so that function random_walk can keep it as the momentum\n    if momentum > 0 and random.random() < mass: # we have momentum and choose to use it\n        x_ = x\n        y_ = y\n        if momentum == 1:\n            x_ += 1\n        elif momentum == 2:\n            y_ += 1\n        elif momentum == 3:\n            x_ += -1\n        else:\n            y_ += -1\n        m_ = momentum\n    else: # if we have momentum, we don't use it\n        x_,y_,m_ = random_step(x,y)\n    x_ = wrap(x_, x_max)\n    y_ = wrap(y_, y_max)\n    return x_,y_,m_\n\ndef random_step(x,y):\n    new = random.randint(1,4) # both ends inclusive, weird i know\n    x_ = x\n    y_ = y\n    if new == 1:\n        x_ += 1\n    elif new == 2:\n        y_ += 1\n    elif new == 3:\n        x_ += -1\n    else:\n        y_ += -1\n    return x_,y_,new\n\ndef wrap(n, bound): # problem with toroidal array in that module turtle connects all the way across when a point is wrapped\n    if n > bound:\n        n -= 2*bound\n    elif n < -bound:\n        n += 2*bound\n    return n\n\ndef main():\n    import turtle\n\n    turtle.speed(\"fastest\")\n    turtle.pen(shown = False)\n    origin = get_origin()\n\n    total_length = 4000.0 # float\n    seg_length = 1000\n    seg_length = int(min(total_length,seg_length))\n    dilate = 3.0 # float\n\n    for _ in range(math.ceil(total_length/seg_length)): # macro-steps\n        walk = random_walk(seg_length, repeat = False, use_momentum = True, origin = origin)\n        origin = walk[-1]\n        for x_displace in [0]:#range(-1,2):\n            for y_displace in [0]:#range(-1,2):\n                turtle.pendown()\n                for u in range(len(walk)):\n                    x,y = walk[u][0]+(2*x_max-1)*x_displace,walk[u][1]+(2*y_max-1)*y_displace\n                    if u > 0:\n                        x_last,y_last = walk[u-1][0]+(2*x_max-1)*x_displace,walk[u-1][1]+(2*y_max-1)*y_displace\n                    else:\n                        x_last,y_last = origin\n                    lift = 
(not wrap_line) and (abs(x-x_last) > 1 or abs(y-y_last) > 1) # the pointer wrapped around to the other side\n if lift:\n turtle.penup()\n turtle.goto(x*dilate,y*dilate)\n if lift:\n turtle.pendown()\n turtle.penup()\n yf = origin[1]\n if border:\n bx = x_max-border_width/dilate\n by = y_max-border_width/dilate\n turtle.goto((x_max-border_width/dilate)*dilate,(yf)*dilate) # east side\n turtle.goto((bx*dilate,by*dilate)) # far northeast\n for j in range(border_thick):\n for x,y in [\n #(bx,by), # northeast\n (-bx,by), # northwest\n (-bx,-by), # southwest\n (bx,-by), # southeast\n (bx,by-1/dilate) # northeast to lead into next cycle by straight line rather than diagonal\n ]:\n turtle.goto(x*dilate-math.copysign(j,x),y*dilate-math.copysign(j,y))\n\n turtle.exitonclick()\n\ndef test():\n print(random_walk(3, repeat = False))\n\nif __name__ == \"__main__\":\n #import numpy as np\n #import matplotlib.pyplot as plt\n import math, random\n \n main()\n #test()","repo_name":"Kuhron/programming","sub_path":"RandomWalkModernArt.py","file_name":"RandomWalkModernArt.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"34251518757","text":"from meowbit import *\r\nfrom time import *\r\n\r\n#变量\r\nbuttons = 0 \r\n\r\n#中断\r\ndef up_pressed():\r\n global buttons\r\n buttons = 1\r\n sleep_ms(50)\r\n\r\ndef down_pressed():\r\n global buttons\r\n buttons = 2\r\n sleep_ms(50)\r\n\r\ndef left_pressed():\r\n global buttons\r\n buttons = 3\r\n sleep_ms(50)\r\n\r\ndef right_pressed():\r\n global buttons\r\n buttons = 4\r\n sleep_ms(50)\r\n\r\ndef a_pressed():\r\n global buttons\r\n buttons = 5\r\n sleep_ms(50)\r\n\r\ndef b_pressed():\r\n global buttons\r\n buttons = 6\r\n sleep_ms(50)\r\n\r\nsensor.btnTrig['up'] = up_pressed\r\nsensor.btnTrig['down'] = down_pressed\r\nsensor.btnTrig['left'] = left_pressed\r\nsensor.btnTrig['right'] = right_pressed\r\nsensor.btnTrig['a'] = a_pressed\r\nsensor.btnTrig['b'] = b_pressed\r\nsensor.startSchedule()\r\n\r\n#启动画面\r\nscreen.fill((0, 0, 0))\r\nscreen.drawRect(50,50,60,10,7)\r\nscreen.drawRect(50,50,10,60,7)\r\nscreen.drawRect(50,100,60,10,7)\r\nscreen.drawRect(90,60,10,10,7)\r\nscreen.drawRect(80,70,10,10,7)\r\nscreen.drawRect(70,80,10,10,7)\r\nscreen.drawRect(60,90,10,10,7)\r\nsleep(3)\r\n\r\n#欢迎\r\nscreen.fill((168,218,241))\r\nscreen.text(\"welcome!\",30,30)\r\nsleep(3)\r\n\r\n#主界面\r\nscreen.fill((168,218,241))\r\nscreen.text(\"Ready\",0,0)\r\n'''\r\nif buttons == 1:\r\n while True:\r\n screen.fill((168,218,241))\r\n screen.text(\"buttons test\",10,0)\r\n if buttons != 0:\r\n screen.text(\"pressed:\",10,10)\r\n screen.text(\"the {text} button is pressed\"format(text=buttons),10,20)\r\nelif buttons == 2:\r\n while True:\r\n screen.fill((168,218,241))\r\n screen.text(\"file test\",10,0)\r\n fi = read(\"file.txt\",\"r\")\r\n li = fi.readlines()\r\n screen.text(\"file:\",10,10)\r\n screen.text(li,10,20)'''","repo_name":"lukezluke/xes_hm_os","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40424802590","text":"# Author: YuYuE (1019303381@qq.com) 2018.03.16\nfrom data_admin.models import Business_structure\nfrom data_admin.models import Business_production\nfrom data_admin.models import Business_operation\nfrom data_admin.models import Business_formula\nfrom data_admin.models import Business_keywords\nfrom chatbot.settings import 
PICKLE_DIR\nimport pickle\n\n\ndef siphon_business_structure(name, bank=None, area=None, like=False, field='name'):\n    \"\"\"\n    Extract business info: business id, business name and parent business id (tzinfo raises an error)\n    :param name:\n    :param bank:\n    :param area:\n    :param like:\n    :param field:\n    :return:\n    \"\"\"\n    result = {}\n    if bank and area:\n        business_structure = Business_structure.objects.filter(name=name, bank=bank, area=area)[:10]\n    elif bank and area is None:\n        business_structure = Business_structure.objects.filter(name=name, bank=bank)[:10]\n    elif bank is None and area:\n        business_structure = Business_structure.objects.filter(name=name, area=area)[:10]\n    elif like:\n        business_structure = Business_structure.objects.filter(name__contains=name)[:10]\n    else:\n        business_structure = Business_structure.objects.filter(name=name)[:10]\n    cnt = business_structure.count()\n    if cnt == 1:\n        result['business_id'] = business_structure[0].id\n        result['business_name'] = business_structure[0].name\n        result['parent_id'] = business_structure[0].parent_id\n        return [business_structure[0].name]\n    elif cnt > 1:\n        if field:\n            fields = []\n            for business in business_structure:\n                if business.name not in fields:\n                    fields.append(business.name)\n            return fields\n        else:\n            pass\n    # print(result)\n    return result\n\n\ndef siphon_same_level_business(id=False, name=None):\n    \"\"\"\n    Fetch businesses on the same level\n    :param id:\n    :param name:\n    :return:\n    \"\"\"\n    business_sets = []\n    business_sets_ = []\n    if id and name is None:\n        business_sets_ = Business_structure.objects.filter(parent_id=id)\n    elif id is False and name:\n        business_structure = siphon_business_structure(name)\n        business_sets_ = Business_structure.objects.filter(parent_id=business_structure['parent_id'])\n    else:\n        pass\n    if business_sets_:\n        for business in business_sets_:\n            business_sets.append(business.name)\n    # print(business_sets)\n    return business_sets\n\n\ndef siphon_business_type(question, ners=False):\n    \"\"\"\n    Classify the question: consultation (production), process (operation) or calculation (formula)\n    :param question:\n    :return:\n    \"\"\"\n    type_ = ''\n    if question.find('是什么') != -1 or question.find('什么是') != -1:\n        type_ = 'production'\n    # check the calculation keywords first: '怎么计算' and '怎么算' also contain '怎么'\n    elif question.find('怎么计算') != -1 or question.find('怎么算') != -1 or question.find('是多少') != -1:\n        type_ = 'formula'\n    elif question.find('怎么') != -1 or question.find('怎样') != -1:\n        type_ = 'operation'\n    else:\n        is_operation = siphon_business_operation(False, ners)\n        if is_operation:\n            type_ = 'operation'\n        else:\n            type_ = 'production'\n    return type_\n\n\ndef siphon_business_production(id=False, product=None):\n    \"\"\"\n    Get product info related to the business (tzinfo error caused by the create_time field)\n    :param id:\n    :param product:\n    :return:\n    \"\"\"\n    business_sets_ = []\n    if id and product is None:\n        business_sets_ = Business_production.objects.filter(id=id)[:10]\n    elif id is False and product:\n        business_sets_ = Business_production.objects.filter(product=product)[:10]\n    else:\n        pass\n    return business_sets_\n\n\ndef siphon_business_operation(id=False, name=None, same=False):\n    \"\"\"\n    Get operation steps related to the business\n    :param id:\n    :param name:\n    :param same:\n    :return:\n    \"\"\"\n    business_sets = {}\n    business_sets_ = []\n    if id and name is None:\n        business_sets_ = Business_operation.objects.filter(id=id)\n    elif id is False and name:\n        business_sets_ = Business_operation.objects.filter(name=name)\n    else:\n        pass\n    # take the best match\n    if len(business_sets_) == 1:\n        business_sets['operation_id'] = business_sets_[0].id\n        business_sets['operation_name'] = business_sets_[0].name\n        business_sets['item'] = business_sets_[0].item\n        business_sets['description'] = business_sets_[0].description\n    elif len(business_sets_) > 1:\n        pass\n    
else:\n        pass\n    business_sets_ = []\n    # fetch entries on the same level\n    if business_sets and same is False:\n        return business_sets\n    elif business_sets and same:\n        same_business = Business_operation.objects.filter(item=business_sets['item'])\n        if same_business:\n            for same_ in same_business:\n                business_sets_.append(same_.name)\n        return business_sets_\n    return business_sets\n\n\ndef siphon_linked_operation(item):\n    business_sets_ = []\n    if item:\n        same_business = Business_operation.objects.filter(item=item)\n        if same_business:\n            for same_ in same_business:\n                business_sets_.append(same_.name)\n    return business_sets_\n\n\ndef siphon_reference_business_operation(id=False, name=None, num=2):\n    \"\"\"\n    Get operations adjacent in the process flow\n    :param id:\n    :param name:\n    :param num:\n    :return:\n    \"\"\"\n    business_sets = siphon_business_operation(id, name)\n    if business_sets:\n        business_sets_ = Business_operation.objects.filter(business_id=business_sets['business_id']).order_by('id')\n        if business_sets_:\n            cnt = len(business_sets_)\n            min_step = business_sets_[0].id\n            max_step = business_sets_[cnt - 1].id\n            if business_sets['operation_id'] == min_step:\n                business_sets['position'] = 'first'\n                reference = []\n                for business in business_sets_:\n                    if (business.id < min_step + num + 1) and business.id > business_sets['operation_id']:\n                        reference.append(business.name)\n                business_sets['reference'] = reference\n            elif business_sets['operation_id'] == max_step:\n                business_sets['position'] = 'last'\n                reference = []\n                for business in business_sets_:\n                    if (business.id > max_step - num - 1) and business.id < business_sets['operation_id']:\n                        reference.append(business.name)\n                business_sets['reference'] = reference\n            else:\n                business_sets['position'] = 'mid'\n                reference = []\n                min_temp = business_sets['operation_id'] - num / 2 - 1\n                max_temp = business_sets['operation_id'] + num / 2 + 1\n                for business in business_sets_:\n                    if (business.id > min_temp) and (business.id < max_temp) and \\\n                        business.id != business_sets['operation_id']:\n                        reference.append(business.name)\n                business_sets['reference'] = reference\n    # print(business_sets)\n    return business_sets\n\n\ndef siphon_business_formula(id=False, name=None, same=False):\n    \"\"\"\n    Get calculation formulas related to the business\n    :param id:\n    :param name:\n    :param same:\n    :return:\n    \"\"\"\n    business_sets = {}\n    business_sets_ = []\n    if id and name is None:\n        business_sets_ = Business_formula.objects.filter(id=id)\n    elif id is False and name:\n        business_sets_ = Business_formula.objects.filter(name=name)\n    else:\n        pass\n    # take the best match\n    if len(business_sets_) == 1:\n        business_sets['formula_id'] = business_sets_[0].id\n        business_sets['formula_name'] = business_sets_[0].name\n        business_sets['formula_params'] = business_sets_[0].params\n        business_sets['formula'] = business_sets_[0].formula\n        business_sets['business_id'] = business_sets_[0].business_id\n    elif len(business_sets_) > 1:\n        pass\n    else:\n        pass\n    business_sets_ = []\n    # fetch entries on the same level\n    if business_sets and same is False:\n        return business_sets\n    elif business_sets and same:\n        same_business = Business_formula.objects.filter(business_id=business_sets['business_id'])\n        if same_business:\n            for same_ in same_business:\n                business_sets_.append(same_.name)\n        return business_sets_\n    return business_sets\n\n\ndef detect_business_scene(inp, ners):\n    result = {}\n    business_type = siphon_business_type(inp, ners)\n    if business_type == 'production':\n        references = siphon_same_level_business(False, ners)\n    elif business_type == 'operation':\n        references = siphon_reference_business_operation(False, ners)\n    else:\n        references = 
siphon_business_formula(False, ners)\n    result['type'] = business_type\n    result['reference'] = references\n    return result\n\n\ndef siphon_business_keywords(name):\n    \"\"\"\n    Look up the business type (type id and type name) for a keyword\n    :param name:\n    :return:\n    \"\"\"\n    result = {}\n    business_keywords = []\n    if name:\n        business_keywords = Business_keywords.objects.filter(keyword=name)\n    if len(business_keywords) == 1:\n        result['type_id'] = business_keywords[0].type_id\n        result['type_name'] = business_keywords[0].type_name\n    elif len(business_keywords) > 1:\n        pass\n    else:\n        pass\n    # print(result)\n    return result\n\n\ndef encode_ontology_pickle():\n    keywords = []\n    business_keywords = Business_keywords.objects.values(\"keyword\").filter(type_id=1)\n    if business_keywords:\n        for business_keyword in business_keywords:\n            if business_keyword['keyword'] not in keywords:\n                keywords.append(business_keyword['keyword'])\n    fp = open(PICKLE_DIR / 'ontology_keywords.pickle', 'wb')\n    pickle.dump(keywords, fp, True)\n\n\ndef decode_ontology_pickle():\n    f = open(PICKLE_DIR / 'ontology_keywords.pickle', \"rb+\")\n    keywords = pickle.load(f)\n    return keywords\n\n\ndef encode_attr_pickle():\n    keywords = []\n    business_keywords = Business_keywords.objects.values(\"keyword\").filter(type_id__gt=1)\n    if business_keywords:\n        for business_keyword in business_keywords:\n            if business_keyword['keyword'] not in keywords:\n                keywords.append(business_keyword['keyword'])\n    fp = open(PICKLE_DIR / 'attr_keywords.pickle', 'wb')\n    pickle.dump(keywords, fp, True)\n\n\ndef decode_attr_pickle():\n    f = open(PICKLE_DIR / 'attr_keywords.pickle', \"rb+\")\n    keywords = pickle.load(f)\n    return keywords\n\n\nif __name__ == \"__main__\":\n    bs = siphon_business_keywords(\"信用卡\")\n    for b in bs:\n        print(b)\n","repo_name":"Innerface/innerface","sub_path":"chatbot/core/generate_business_model.py","file_name":"generate_business_model.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22950949953","text":"class EventHandler:\n    \"\"\"\n    A class to keep track of event handlers\n    \"\"\"\n\n    def __init__(self, logger, case_sensitive=False):\n        \"\"\"\n        Create a new EventHandler\n        Args:\n            logger - the logger to use\n            case_sensitive - whether event names are sensitive to case\n            (default False)\n        \"\"\"\n        self.handlers = {}\n        self.case_sensitive = case_sensitive\n        self.logger = logger\n\n    def __setitem__(self, k, handler):\n        \"\"\"\n        Set the handler for an action.\n        This is called via `handlers[k] = handler`\n        Args:\n            k - the key to use for this handler\n            handler - func, what handles this event\n        \"\"\"\n        if not self.case_sensitive and hasattr(k, 'lower'):\n            k = k.lower()\n        if k not in self.handlers:\n            self.handlers[k] = []\n        self.handlers[k].append(handler)\n        return self\n\n    def __call__(self, ev_name, *args, **kwargs):\n        \"\"\"\n        Call the handlers for a message.\n        Passes arguments after ev_name to the function being called\n        Args:\n            ev_name - the name of the event to be fired\n            fail_silently - when True don't log message if event is unhandled\n            (default False)\n        \"\"\"\n        internal_name = ev_name\n        if not self.case_sensitive and hasattr(internal_name, 'lower'):\n            internal_name = internal_name.lower()\n        fail_silently = kwargs.pop('fail_silently', False)\n        if internal_name in self.handlers:\n            for handler in self.handlers[internal_name]:\n                handler(*args, **kwargs)\n        elif not fail_silently:\n            self.logger.warning(\"Not handling message: 
{0}\".format(ev_name))\n","repo_name":"jbbe/translator-skill","sub_path":"mycroft/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10309639132","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom tkinter import filedialog\nfrom tkinter import Tk\nimport os\nimport re\n\nif __name__ == \"__main__\":\n # Hide the main tkinter window\n root = Tk()\n root.withdraw()\n\n # Open a directory dialog\n directory_path = filedialog.askdirectory(title=\"Select a Directory\")\n\n # Get all the .mat files in the chosen directory\n file_paths = [\n os.path.join(directory_path, f)\n for f in os.listdir(directory_path)\n if f.endswith(\".mat\")\n ]\n\n # Get the number of files the user wants to plot\n num_files_to_plot = int(input(\"Enter the number of files you want to plot: \"))\n\n # Ensure the user doesn't request more files than selected\n # num_files_to_plot = min(num_files_to_plot, len(file_paths))\n\n # Get the starting channel index\n start_channel = 0\n file_start_index = int(input(\"Enter the starting file index (0-based index): \"))\n\n for file_path in file_paths[\n file_start_index : num_files_to_plot + file_start_index\n ]:\n # Get the directory path\n directory_path = os.path.dirname(file_path)\n\n # Get the filename without the extension\n filename = os.path.splitext(os.path.basename(file_path))[0]\n\n # Use a regex to find the date pattern \"DD_MM_YYYY\" in the filename\n match = re.search(r\"\\d{2}_\\d{2}_\\d{4}\", filename)\n if match:\n # Split the filename into parts before and after the date\n parts = filename.split(match.group())\n participant_type = parts[0].split(\"_\")[0]\n yoga_position = \"_\".join(parts[0].split(\"_\")[1:]).rstrip(\"_\")\n else:\n print(\"Could not find a date in the filename.\")\n\n # Load the data from the .mat file\n mat = loadmat(file_path)\n data = mat[\"data\"]\n\n # Dynamically determine the number of channels\n total_channels = 64\n print(f\"Total number of channels: {total_channels}\")\n\n # Create frequency array for x-axis, considering sampling frequency\n sampling_frequency = 2000 # Hz\n frequencies = np.fft.fftfreq(data.shape[1], d=1 / sampling_frequency)\n\n # Number of channels to plot at a time\n channels_per_plot = 8\n\n save_dir = os.path.join(\n directory_path, \"data_processing_stages\", \"FFT_raw_signal\"\n )\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n for channel_group_start in range(\n start_channel, total_channels, channels_per_plot\n ):\n fig, axs = plt.subplots(\n channels_per_plot, 1, sharex=True, figsize=(10, 2 * channels_per_plot)\n )\n fig.suptitle(\n f\"{participant_type} - {yoga_position}\", fontsize=14, weight=\"bold\"\n )\n\n # Plot the magnitude of the Fourier transform in separate subplots\n for idx, channel_num in enumerate(\n range(\n channel_group_start,\n min(channel_group_start + channels_per_plot, total_channels),\n )\n ):\n spectrum = np.abs(np.fft.fft(data[channel_num, :]))\n axs[idx].plot(frequencies, spectrum)\n axs[idx].set_title(f\"Channel {channel_num + 1}\")\n axs[idx].set_ylabel(\"Magnitude\")\n axs[idx].set_xlim(\n [0, sampling_frequency / 2]\n ) # Only show positive frequencies\n\n # Adding vertical lines at 20Hz and 450Hz\n axs[idx].axvline(20, color=\"r\", linestyle=\"--\", alpha=0.6)\n axs[idx].axvline(450, color=\"r\", linestyle=\"--\", alpha=0.6)\n\n axs[-1].set_xlabel(\"Frequency (Hz)\")\n\n # Show the 
plot\n        plt.tight_layout()\n        # Save the figure to the directory\n        plt.savefig(\n            os.path.join(\n                save_dir,\n                f\"{participant_type}_{yoga_position}_channels_{channel_group_start+1}_to_{min(channel_group_start + channels_per_plot, total_channels)}.png\",\n            )\n        )\n\n        # Close the figure\n        plt.close()\n","repo_name":"lg519/Yoga_project_data_processing","sub_path":"Process_EMG_data/visualize_high_density_electrodes_data/visualize_FFT_64_channels.py","file_name":"visualize_FFT_64_channels.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41215415110","text":"from projectq.ops._qubit_operator import _PAULI_OPERATOR_PRODUCTS\nimport cmath\n\n_PRECISION = 10**-5\n\ndef permute_cnot(control_gate, rotation, rotation_info):\n\tcontributions=[]\n\tfor basis_element in rotation_info[0]:\n\t\t# does it anti-commute with control?\n\t\tif(rotation.qubits[0][basis_element[0]] in control_gate.control_qubits):\n\t\t\tif(basis_element[1] != \"Z\"):\n\t\t\t\t# add X contribution to target\n\t\t\t\tfor qubit in control_gate.qubits[0]:\n\t\t\t\t\tcontributions.append((qubit, \"X\"))\n\n\t\t# does it anti-commute with the target?\n\t\tif(rotation.qubits[0][basis_element[0]] in control_gate.qubits[0]):\n\t\t\tif(basis_element[1] != \"X\"):\n\t\t\t\tfor qubit in control_gate.control_qubits:\n\t\t\t\t\tcontributions.append((qubit, \"Z\"))\n\n\t# update the basis contributions\n\t_add_basis_contribution(rotation, rotation_info, contributions)\n\treturn\n\n# Helper functions\n\ndef _add_basis_contribution(rotation, rotation_info, contributions):\n\t# first check if qubit is already in multi qubit rotation gate\n\ttotal_factor = 1\n\tfor contrib in contributions:\n\t\tif (contrib[0] in rotation.qubits[0]):\n\t\t\tpos = rotation.qubits[0].index(contrib[0])\n\t\telse:\n\t\t\tpos = len(rotation.qubits[0])\n\t\t\trotation.qubits[0].append(contrib[0])\n\n\t\t# pos is now defined and points to the qubit that needs to be modified\n\t\tmodified = False\n\t\tfor i in range(len(rotation_info[0])):\n\t\t\tif(rotation_info[0][i][0] == pos):\n\t\t\t\tmodified = True\n\t\t\t\tf, b = _PAULI_OPERATOR_PRODUCTS[(rotation_info[0][i][1], contrib[1])]\n\t\t\t\trotation_info[0][i] = (rotation_info[0][i][0], b)\n\t\t\t\ttotal_factor *= f\n\t\tif(not modified):\n\t\t\trotation_info[0].append((pos,contrib[1]))\n\tif(abs(total_factor + 1)< _PRECISION):\n\t\trotation_info[2] = 4*cmath.pi - rotation_info[2]\n\t\treturn\n\telif(abs(total_factor - 1) < _PRECISION):\n\t\treturn\n\traise ValueError(\"Total factor is imaginary! 
This is not unitary!\")","repo_name":"quantumresource/projqube","sub_path":"projqube/projectq/cengines/_permutation_engine/_permutation_relations/_controlled_gate.py","file_name":"_controlled_gate.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19311810286","text":"#!/usr/bin/env python\n\n# author: Guillaume WALCK (2015)\n\nimport threading\nimport functools\n\nimport rospy\nfrom copy import deepcopy\nfrom control_msgs.msg import JointTrajectoryControllerState\n\nfrom controller_manager_msgs.srv import ListControllersRequest, \\\n ListControllers\n\nfrom trajectory_msgs.msg import JointTrajectory\n\nfrom posture_recorder.msg import PostureRecordErrorCodes,\\\n PostureRecordPosture, PostureRecordWaypoint\nfrom posture_recorder.srv import PostureRecordStart, PostureRecordStop,\\\n PostureRecordSave, PostureRecordAddWaypoint, PostureRecordStartResponse,\\\n PostureRecordStopResponse, PostureRecordAddWaypointResponse,\\\n PostureRecordSaveResponse, PostureRecordGetAllPosturesResponse,\\\n PostureRecordGetAllPostures\n\nfrom posture.posture import Posture\n\n\nclass PostureRecorder(object):\n \"\"\"\n Class to record postures via services\n \"\"\"\n def __init__(self, name):\n\n self._name = name\n rospy.init_node(name, anonymous=False, log_level=rospy.INFO)\n\n self._name = name\n self._prefix = \"roscontrol\"\n self._controller_list = {}\n self._state = {}\n self._sub = {}\n self.init_controller_list()\n self._current_postures = {}\n self._mp = Posture(\"mypostures\")\n self._overall_time_from_start = 0.0\n\n self.init_services()\n\n threading.Thread(None, rospy.spin)\n\n def init_controller_list(self):\n \"\"\"\n Get all the controllers running and store their name per group\n \"\"\"\n service_name = self._prefix + \\\n '/controller_manager/list_controllers'\n rospy.loginfo(\"Waiting for %s\", service_name)\n try:\n rospy.wait_for_service(service_name, 5.0)\n except rospy.ROSException:\n rospy.logerr(\"%s did not show up. 
Giving up\", service_name)\n return False\n cm_list_client = rospy.ServiceProxy(service_name,\n ListControllers)\n rospy.loginfo(\"Found %s\", service_name)\n\n # get all the controllers\n try:\n resp = cm_list_client(ListControllersRequest())\n except rospy.ServiceException:\n rospy.logerr(\"Could not call list_controllers\")\n return\n\n # loop on the controllers\n if resp:\n for controller in resp.controller:\n cname_split = controller.name.split(\"_\")\n if len(cname_split) > 1:\n if cname_split[0] in [\"torso\", \"zlift\", \"head\"]:\n self._controller_list[cname_split[0]] = controller.name\n else:\n if len(cname_split) > 2:\n if cname_split[1] in [\"arm\", \"hand\"]:\n group_name = cname_split[0] + \"_\" +\\\n cname_split[1]\n self._controller_list[group_name] =\\\n controller.name\n for group_name in self._controller_list:\n if group_name not in self._sub:\n try:\n self.set_up_subscriber(group_name)\n # give some time for the subscriber\n # to receive its first data\n rospy.sleep(0.5)\n except rospy.ROSException:\n rospy.logerr(\"Could not set up subscriber \\\n for group %s.\", group_name)\n resp.error_code.val =\\\n PostureRecordErrorCodes.NOCONTROLLER\n \n\n def init_services(self):\n \"\"\"\n Initialize the service servers\n \"\"\"\n service_prefix = rospy.get_name() + \"/\"\n\n self._start_record_serv = rospy.Service(service_prefix +\n 'start_record',\n PostureRecordStart,\n self.start_recording_cb)\n self._stop_record_serv = rospy.Service(service_prefix + 'stop_record',\n PostureRecordStop,\n self.stop_recording_cb)\n self._add_wp_serv = rospy.Service(service_prefix +\n 'add_waypoint',\n PostureRecordAddWaypoint,\n self.add_waypoint_cb)\n self._save_serv = rospy.Service(service_prefix + 'save',\n PostureRecordSave, self.save_cb)\n \n self._getall_serv = rospy.Service(service_prefix + 'get_all_postures',\n PostureRecordGetAllPostures, self.getall_cb)\n\n def set_up_subscriber(self, group_name):\n \"\"\"\n Sets up client to communicate with the trajectory controller\n \"\"\"\n\n if group_name in self._controller_list:\n self._sub[group_name] = rospy.Subscriber(\n self._prefix + \"/\" + self._controller_list[group_name] +\n \"/state\", JointTrajectoryControllerState,\n functools.partial(self.state_cb, group_name=group_name))\n else:\n rospy.logerr(\"No controller for group %s\", group_name)\n return\n\n def start_recording_cb(self, req):\n \"\"\"\n Callback for the start recording service\n \"\"\"\n resp = PostureRecordStartResponse()\n resp.error_code.val = PostureRecordErrorCodes.SUCCESS\n # check if there are ongoing recordings for the request groups\n ongoing = False\n for group in req.group_names:\n if group in self._current_postures:\n rospy.logwarn(\"Group %s is still in record mode,\\\n stop it first\", group)\n ongoing = True\n\n if ongoing:\n resp.error_code.val = PostureRecordErrorCodes.NOTSTOPPED\n else:\n for group_name in req.group_names:\n if group_name not in self._sub:\n try:\n self.set_up_subscriber(group_name)\n # give some time for the subscriber\n # to receive its first data\n rospy.sleep(0.5)\n except rospy.ROSException:\n rospy.logerr(\"Could not set up subscriber \\\n for group %s.\", group_name)\n resp.error_code.val =\\\n PostureRecordErrorCodes.NOCONTROLLER\n break\n # create a new traj for this group\n self._current_postures[group_name] = JointTrajectory()\n # set joint_names\n joint_names = self.get_joint_names(group_name)\n if joint_names is not None:\n self._current_postures[group_name].joint_names = \\\n joint_names\n continue\n else:\n 
rospy.logerr(\"No joint_names for group %s.\", group_name)\n resp.error_code.val = PostureRecordErrorCodes.NOSTATE\n break\n # clear the overall time from start\n self._overall_time_from_start = 0.0\n\n if resp.error_code.val != PostureRecordErrorCodes.SUCCESS:\n # clear current_postures as we failed\n # to start recording for some groups\n self._current_postures = {}\n\n return resp\n\n def stop_recording_cb(self, req):\n \"\"\"\n Callback for the stop recording service\n \"\"\"\n posture_name = req.posture_name\n resp = PostureRecordStopResponse()\n resp.error_code.val = PostureRecordErrorCodes.SUCCESS\n group_to_clear = []\n for group in self._current_postures:\n if not self._mp.add_posture(group, posture_name,\n self._current_postures[group]):\n rospy.logerr(\"Could not record current posture\\\n for group %s with name %s\", group, posture_name)\n resp.error_code.val = PostureRecordErrorCodes.FAILURE\n else:\n group_to_clear.append(group)\n self._overall_time_from_start = 0.0\n for group in group_to_clear:\n self._current_postures.pop(group)\n return resp\n\n def add_waypoint_cb(self, req):\n \"\"\"\n Callback for the add waypoint service\n \"\"\"\n resp = PostureRecordAddWaypointResponse()\n resp.error_code.val = PostureRecordErrorCodes.SUCCESS\n resp.waypoint_count = []\n resp.overall_time_from_start = 0.0\n wp_nb = []\n max_time_from_start = 0.0\n\n if len(req.waypoint.group_names) != len(req.waypoint.time_from_start):\n resp.error_code.val = PostureRecordErrorCodes.INVALID\n return resp\n\n # for requested groups, check existance and way point first\n # so if it fails, we don't end up in an intermediate state\n wp = {}\n for group_name in req.waypoint.group_names:\n # if exists\n if group_name not in self._current_postures:\n rospy.logerr(\"Group %s was not started, start it first\",\n group_name)\n resp.error_code.val = PostureRecordErrorCodes.NOTSTARTED\n break\n # get current state as waypoint\n wp[group_name] = self.get_current_point(group_name)\n if wp[group_name] is None:\n rospy.logerr(\"Could not get current state for group %s, \\\n not adding any waypoint\", group_name)\n resp.error_code.val = PostureRecordErrorCodes.NOSTATE\n break\n\n # if got all the way points\n if resp.error_code.val == PostureRecordErrorCodes.SUCCESS:\n for i, group_name in enumerate(req.waypoint.group_names):\n\n if max_time_from_start < req.waypoint.time_from_start[i]:\n max_time_from_start = req.waypoint.time_from_start[i]\n\n # set time_from_start\n wp[group_name].time_from_start = rospy.Duration.from_sec(self._overall_time_from_start) + rospy.Duration.from_sec(req.waypoint.time_from_start[i])\n # store it\n self._current_postures[group_name].points.append(deepcopy(wp[group_name]))\n wp_nb.append(len(self._current_postures[group_name].points))\n\n #print self._current_postures\n # add the maximum time to overall time\n self._overall_time_from_start += max_time_from_start\n resp.overall_time_from_start = self._overall_time_from_start\n resp.waypoint_count = wp_nb\n\n return resp\n\n def save_cb(self, req):\n \"\"\"\n Callback for the save recording service\n \"\"\"\n resp = PostureRecordSaveResponse()\n resp.error_code.val = PostureRecordErrorCodes.SUCCESS\n if len(self._current_postures.keys()) > 0:\n rospy.logwarn(\"There are currently unstopped recordings, \\\n did you stop all of them ?\")\n resp.error_code.val = PostureRecordErrorCodes.NOTSTOPPED\n else:\n self._mp.save_postures(req.filepath, req.strategy)\n self._mp.clear_postures()\n return resp\n\n def getall_cb(self, req):\n \"\"\"\n 
Callback for the get all postures service (included the non stored ones)\n \"\"\"\n resp = PostureRecordGetAllPosturesResponse()\n resp.error_code.val = PostureRecordErrorCodes.SUCCESS\n posture_dict = self._mp.list_postures()\n selected_groups_dict = {}\n # invert the dictionnary\n for group in posture_dict:\n for posture_name in posture_dict[group]:\n if posture_name not in selected_groups_dict:\n selected_groups_dict[posture_name] = []\n selected_groups_dict[posture_name].append(group)\n \n # process stored postures\n for posture_name in selected_groups_dict:\n overall_time_from_start = 0.0\n posture = PostureRecordPosture()\n posture.posture_name = str(posture_name)\n posture.selected_groups = selected_groups_dict[posture_name]\n \n no_more_waypoint = False\n waypoint_idx = 0\n while(not no_more_waypoint):\n no_more_waypoint = True\n wp = PostureRecordWaypoint()\n for group_name in posture.selected_groups:\n joint_traj = self._mp.get_posture(group_name, posture_name)\n # is there a waypoint at this index ?\n if waypoint_idx < len(joint_traj.points):\n no_more_waypoint = False\n wp.group_names.append(group_name)\n wp.time_from_start.append(joint_traj.points[waypoint_idx].time_from_start.to_sec())\n \n # only the last max will be used\n \n if(not no_more_waypoint):\n #print \"ovt:\", overall_time_from_start, \" max wp t:\",max(wp.time_from_start)\n if overall_time_from_start < max(wp.time_from_start):\n overall_time_from_start = max(wp.time_from_start)\n posture.waypoints.append(deepcopy(wp))\n waypoint_idx += 1\n \n resp.postures.append(posture)\n resp.overall_time_from_start.append(overall_time_from_start)\n \n # process non stored postures\n if self._current_postures:\n overall_time_from_start = 0.0\n posture = PostureRecordPosture()\n posture.posture_name = str(\"unstored\")\n posture.selected_groups = list(self._current_postures.keys())\n no_more_waypoint = False\n waypoint_idx = 0\n while(not no_more_waypoint):\n no_more_waypoint = True\n wp = PostureRecordWaypoint()\n for group_name in self._current_postures:\n joint_traj = self._current_postures[group_name]\n # is there a waypoint at this index ?\n if waypoint_idx < len(joint_traj.points):\n no_more_waypoint = False\n wp.group_names.append(group_name)\n wp.time_from_start.append(joint_traj.points[waypoint_idx].time_from_start.to_sec())\n # only the last max will be used\n if(not no_more_waypoint):\n if overall_time_from_start < max(wp.time_from_start):\n overall_time_from_start = max(wp.time_from_start)\n posture.waypoints.append(deepcopy(wp))\n waypoint_idx += 1\n resp.postures.append(posture)\n resp.overall_time_from_start.append(overall_time_from_start)\n return resp\n\n def state_cb(self, msg, group_name):\n \"\"\"\n Callback to handle new state from the running controllers\n \"\"\"\n self._state[group_name] = msg\n\n def get_joint_names(self, group_name):\n \"\"\"\n Retrieve the joint_names for the given group\n requires a running controller\n @param group_name: name of the group to retrieve the joints for\n \"\"\"\n if group_name in self._state:\n return self._state[group_name].joint_names\n else:\n return None\n\n def get_current_point(self, group_name):\n \"\"\"\n Retrieve the current traj point for the given group\n requires a running controller\n @param group_name: name of the group to retrieve the point for\n \"\"\"\n if group_name in self._state:\n point = self._state[group_name].actual\n # set velocity to empty\n point.velocities = []\n return point\n else:\n return None\n\n\ndef main():\n\n posture_recorder = 
PostureRecorder('posture_recorder')\n rospy.spin()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CentralLabFacilities/clf_posture","sub_path":"posture_recorder/src/posture_recorder/posture_recorder.py","file_name":"posture_recorder.py","file_ext":"py","file_size_in_byte":16254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41804511518","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\nimport pandas as pd\nimport torch, torchvision\nfrom torchvision import transforms\nfrom torchvision import datasets\nimport time\nimport copy\nimport sys\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision.models as models\nfrom torchinfo import summary\nfrom PIL import ImageFile, Image\nfrom tqdm import tqdm\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nargs = {\"dataset\": sys.argv[1], \"weights\": sys.argv[2], \"output_name\": sys.argv[3]}\nprint(f\"Arguments passed: dataset is {args['dataset']}, weights are {args['weights']}, output_name is : {args['output_name']}\")\n\ndata_transforms = {\n \"test\": transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n}\n\nd_size = args[\"dataset\"]\n\n\ndata_dir = f\"heirarchy_data/{d_size}/wikipaintings_\"\nimage_datasets = {\n x: datasets.ImageFolder(data_dir + x, data_transforms[x]) for x in [\"test\"]\n}\n\ndataloaders = {\n x: torch.utils.data.DataLoader(\n image_datasets[x],\n batch_size=256,\n shuffle=False,\n num_workers=48,\n pin_memory=True,\n )\n for x in [\"test\"]\n}\ndataset_sizes = {x: len(image_datasets[x]) for x in [\"test\"]}\nclasses = image_datasets[\"test\"].classes\nprint(classes)\n\nmodel = models.resnext101_32x8d(pretrained=True)\nnum_ftrs = model.fc.in_features\nmodel.fc = nn.Linear(num_ftrs, len(classes))\nweights = torch.load(args[\"weights\"], map_location=\"cpu\")\nmodel.load_state_dict(weights)\n\ny_pred = []\ny_true = []\n\nmodel.eval()\nmodel.to(device)\n\nwith torch.no_grad():\n loop = tqdm(dataloaders[\"test\"])\n for idx, (data) in enumerate(loop):\n images, labels = data\n images = images.to(device)\n outputs = model(images)\n _, predictions = torch.max(outputs, 1)\n # collect the correct predictions for each class\n y_pred.extend(predictions.cpu().numpy())\n y_true.extend(labels.cpu().numpy())\n\ncf_matrix = confusion_matrix(y_true, y_pred)\nprint(cf_matrix)\nprint(np.sum(cf_matrix, axis=1))\ndf_cm = pd.DataFrame(\n (cf_matrix.T / np.sum(cf_matrix, axis=1)).T * 100,\n index=[i for i in classes],\n columns=[i for i in classes],\n)\nprint(df_cm)\nplt.figure(figsize=(12, 7))\nsn.heatmap(df_cm, annot=True)\nplt.savefig(args[\"output_name\"])\n","repo_name":"sami-amer/art-style-classification","sub_path":"scripts/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20213275080","text":"import sqlite3\r\nimport os, os.path, sys\r\nimport configparser\r\n# import signal\r\nimport logging\r\nimport time, locale\r\nfrom pathlib import Path\r\nfrom logging.handlers import RotatingFileHandler\r\nfrom datetime import date, datetime, timedelta\r\nfrom tzlocal import get_localzone\r\nfrom bisect import 
bisect_left\r\n# from urllib.parse import urlparse\r\nfrom apscheduler.schedulers.background import BackgroundScheduler\r\nfrom apscheduler.triggers import cron\r\nfrom src import client_connect\r\n\r\nif sys.platform == \"win32\":\r\n\tfrom src import win_functions\r\n\r\n\r\nclass ManageDB:\r\n\tdef __init__(self):\r\n\t\tself.data_dir = \"\"\r\n\t\tif sys.platform == \"win32\":\r\n\t\t\tself.data_dir = os.path.join(os.path.dirname(sys.executable), \"TorrentStats\")\r\n\t\telse:\r\n\t\t\tself.data_dir = os.path.join(os.getcwd(), \"TorrentStats\")\r\n\r\n\t\tself.log_dir = os.path.join(self.data_dir, \"logs\")\r\n\r\n\t\tPath(self.log_dir).mkdir(parents=True, exist_ok=True)\r\n\t\tPath(os.path.join(self.data_dir, \"backup\")).mkdir(parents=True, exist_ok=True)\r\n\r\n\t\tself.logger = logging.getLogger(__name__)\r\n\t\tself.logger.setLevel(logging.DEBUG)\r\n\r\n\t\tformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\r\n\r\n\t\tfile_handler = RotatingFileHandler(os.path.join(self.log_dir, \"log.log\"), maxBytes=102400, backupCount=5,\r\n\t\t\t\t\t\t\t\t\t\t encoding='utf-8')\r\n\t\tfile_handler.setFormatter(formatter)\r\n\t\tfile_handler.setLevel(logging.DEBUG)\r\n\t\tself.logger.addHandler(file_handler)\r\n\r\n\t\tself.logger.info(\"Application started\")\r\n\r\n\t\ttz = str(get_localzone())\r\n\t\t# self.scheduler = BlockingScheduler()\r\n\t\tself.scheduler = BackgroundScheduler(timezone=tz)\r\n\t\t\r\n\t\tself.ts_db = os.path.join(self.data_dir, \"torrentstats.db\")\r\n\t\tself.config_file = os.path.join(self.data_dir, \"config.ini\")\r\n\t\tif os.path.isfile(self.ts_db) == False:\r\n\t\t\tself.first_start(self.ts_db, self.config_file, self.logger)\r\n\t\telse:\r\n\t\t\tself.initial_start(self.data_dir, self.ts_db, self.config_file, self.logger)\r\n\t\t\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(self.config_file)\r\n\t\t\r\n\t\tself.t_check_frequency = config['Preferences']['torrent_check_frequency']\r\n\t\tself.backup_frequency = config['Preferences']['backup_frequency']\r\n\t\tself.d_check_frequency = config['Preferences']['deleted_check_frequency']\r\n\r\n\t\ttrigger = cron.CronTrigger(hour='*', minute='*/' + self.t_check_frequency, timezone=tz)\r\n\t\tself.scheduler.add_job(self.multiple_frequent_checks, trigger=trigger, args=[self.ts_db, self.config_file,\r\n\t\t\t\t\t\t\t self.scheduler, self.logger], misfire_grace_time=30, id='1')\r\n\r\n\t\ttrigger = cron.CronTrigger(day_of_week='*/' + self.backup_frequency, hour='0', minute='1', second='45', \r\n\t\t\t\t\t\t\t\t timezone=tz)\r\n\t\tself.scheduler.add_job(self.backup_database, trigger=trigger, args=[self.data_dir, self.ts_db, self.logger], \r\n\t\t\t\t\t\t\t misfire_grace_time=30, id='2')\r\n\r\n\t\tif int(self.d_check_frequency) > 59:\r\n\t\t\ttrigger = cron.CronTrigger(hour='*', minute='0', second='30', timezone=tz)\r\n\t\t\tself.scheduler.add_job(self.multiple_update_info, trigger=trigger, args=[self.ts_db, self.config_file, \r\n\t\t\t\t\t\t\t\t self.logger], misfire_grace_time=30, id='3')\r\n\t\telse:\r\n\t\t\ttrigger = cron.CronTrigger(hour='*', minute='*/' + self.d_check_frequency, second='30', timezone=tz)\r\n\t\t\tself.scheduler.add_job(self.multiple_update_info, trigger=trigger, args=[self.ts_db, self.config_file, \r\n\t\t\t\t\t\t\t\t self.logger], misfire_grace_time=30, id='3')\r\n\r\n\t\ttrigger = cron.CronTrigger(hour='*/4', minute='0', second='15', timezone=tz)\r\n\t\tself.scheduler.add_job(self.multiple_update_client_version, trigger=trigger, args=[self.config_file, 
\r\n\t\t\t\t\t\t\t self.logger], misfire_grace_time=30, id='4')\r\n\r\n\t\tself.scheduler.start()\r\n\r\n\tdef print_to_log(self, log, logger):\r\n\t\tlogger.info(log)\r\n\r\n\tdef close_ts(self, ts_db, config_file, scheduler, logger):\r\n\t\tself.multiple_frequent_checks(ts_db, config_file, scheduler, logger)\r\n\t\tif sys.platform == \"win32\":\r\n\t\t\tconfig = configparser.ConfigParser()\r\n\t\t\tconfig.read(config_file)\r\n\t\t\tself.verify_win_options(config)\r\n\t\tscheduler.shutdown()\r\n\t\tlogger.info(\"Closing application\")\r\n\t\tos._exit(0)\r\n\r\n\t# Create database file and add tables\r\n\tdef first_start(self, ts_db, config_file, logger):\r\n\t\t# create log file\r\n\t\tlogger.info(\"No database exists. Creating...\")\r\n\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE trackers (\r\n\t\t\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\r\n\t\t\t\t\tname TEXT NOT NULL\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE clients (\r\n\t\t\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\r\n\t\t\t\t\tsection_name TEXT NOT NULL,\r\n\t\t\t\t\tdisplay_name TEXT NOT NULL\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE torrents (\r\n\t\t\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT, \r\n\t\t\t\t\tname TEXT NOT NULL,\r\n\t\t\t\t\ttracker_id INTEGER NOT NULL,\r\n\t\t\t\t\tclient_id INTEGER NOT NULL,\r\n\t\t\t\t\tadded_date INTEGER,\r\n\t\t\t\t\tstatus TEXT NOT NULL,\r\n\t\t\t\t\tdirectory TEXT,\r\n\t\t\t\t\tsize INTEGER NOT NULL,\r\n\t\t\t\t\thash TEXT NOT NULL,\r\n\t\t\t\t\thidden INTEGER NOT NULL,\r\n\t\t\t\t\tFOREIGN KEY (tracker_id) REFERENCES trackers (id),\r\n\t\t\t\t\tFOREIGN KEY (client_id) REFERENCES clients (id)\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE torrent_history (\r\n\t\t\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\r\n\t\t\t\t\ttorrent_id INTEGER NOT NULL,\r\n\t\t\t\t\tdate INTEGER,\r\n\t\t\t\t\tdownloaded INTEGER,\r\n\t\t\t\t\tuploaded INTEGER,\r\n\t\t\t\t\ttotal_downloaded INTEGER,\r\n\t\t\t\t\ttotal_uploaded INTEGER,\r\n\t\t\t\t\tprogress REAL NOT NULL,\r\n\t\t\t\t\tratio REAL NOT NULL,\r\n\t\t\t\t\tFOREIGN KEY (torrent_id) REFERENCES torrents (id)\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE lists (\r\n\t\t\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\r\n\t\t\t\t\tname TEXT NOT NULL\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tc.execute(\"\"\"CREATE TABLE torrents_lists (\r\n\t\t\t\t\ttorrents_id INTEGER NOT NULL,\r\n\t\t\t\t\tlists_id INTEGER NOT NULL,\r\n\t\t\t\t\tFOREIGN KEY (torrents_id) REFERENCES torrents (id),\r\n\t\t\t\t\tFOREIGN KEY (lists_id) REFERENCES lists (id)\r\n\t\t\t\t\t)\r\n\t\t\t\t\t\"\"\")\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tl = locale.getdefaultlocale()\r\n\t\tstart_at_login = start_menu = \"0\"\r\n\t\tif sys.platform == \"win32\":\r\n\t\t\tstart_at_login = start_menu = \"no\"\r\n\t\tport = \"5656\"\r\n\t\tconfig['Preferences'] = {'locale': l[0],\r\n\t\t\t\t\t\t\t\t 'torrent_check_frequency': '5',\r\n\t\t\t\t\t\t\t\t 'backup_frequency': '1',\r\n\t\t\t\t\t\t\t\t 'deleted_check_frequency': '30',\r\n\t\t\t\t\t\t\t\t 'start_at_login': start_at_login,\r\n\t\t\t\t\t\t\t\t 'start_menu_shortcut': start_menu,\r\n\t\t\t\t\t\t\t\t 'port': port}\r\n\r\n\t\twith open(config_file, 'w') as config_new:\r\n\t\t\tconfig.write(config_new)\r\n\r\n\t\tlogger.info(\"Database created and application locale set to '\" + l[0] + \"'\")\r\n\r\n\t# add 
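a newly configured client:\r\n\t# (hedged editor note) the method below snapshots the client's complete\r\n\t# torrent list, creating tracker rows on demand and seeding torrent_history\r\n\t# with the lifetime totals the client reports; in short, it records 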
all torrents from client when client is first added\r\n\tdef add_client_to_db(self, ts_db, client_torrents, display_name, client_name, section_name, client_type, ip,\r\n\t\t\t\t\t\t user, pw, logger):\r\n\t\tlogger.info(\"New client detected: '\" + display_name + \"' (\" + client_name + \"). Adding to DB...\")\r\n\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\r\n\t\t# fill client table\r\n\t\tc.execute(\"INSERT INTO clients VALUES (NULL,?,?)\", (section_name, display_name))\r\n\r\n\t\tselect_client_id = c.execute(\"SELECT id FROM clients WHERE section_name=?\", (section_name,))\r\n\t\tclient_id = select_client_id.fetchone()\r\n\r\n\t\ttorrents_table = []\r\n\t\thistory_table = []\r\n\r\n\t\tqbit_cookie = []\r\n\t\t# login to qbit and get cookie\r\n\t\tif client_type == 'qbittorrent':\r\n\t\t\tqbit_cookie = client_connect.get_qbit_cookie(ip, user, pw, display_name, client_name, logger)\r\n\r\n\t\t# fill tracker table\r\n\t\tfor torrent in client_torrents:\r\n\t\t\t# need to get tracker for qbittorrent\r\n\t\t\tif client_type == 'qbittorrent':\r\n\t\t\t\ttorrent['tracker'] = client_connect.get_qbit_tracker(torrent['hash'], qbit_cookie, ip, display_name,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t client_name, logger)\r\n\r\n\t\t\tget_tracker_id = c.execute(\"SELECT id FROM trackers WHERE name=?\", (torrent['tracker'],))\r\n\t\t\ttracker_id = get_tracker_id.fetchone()\r\n\r\n\t\t\t# if tracker doesn't exist, insert new entry into trackers table\r\n\t\t\tif not tracker_id:\r\n\t\t\t\tc.execute(\"INSERT INTO trackers VALUES (NULL,?)\", (torrent['tracker'],))\r\n\t\t\t\tlogger.info(\"New tracker. Added '\" + torrent['tracker'] + \"' to database\")\r\n\r\n\t\t# fill torrents table next\r\n\t\tfor torrent in client_torrents:\r\n\t\t\t# need to get the trackerID again\r\n\t\t\tselect_tracker_id = c.execute(\"SELECT id FROM trackers WHERE name=?\", (torrent['tracker'],))\r\n\t\t\ttracker_id = select_tracker_id.fetchone()\r\n\r\n\t\t\t# entry = name / tracker id / client id / added date / status / directory / selected size / hash / hidden\r\n\t\t\ttorrents_entry = (torrent['name'], tracker_id[0], client_id[0], torrent['addedDate'], torrent['state'],\r\n\t\t\t\t\t\t\t torrent['downloadDir'], torrent['size'], torrent['hash'], 1)\r\n\r\n\t\t\ttorrents_table.append(torrents_entry)\r\n\t\tc.executemany(\"INSERT INTO torrents VALUES (NULL,?,?,?,?,?,?,?,?,?)\", torrents_table)\r\n\r\n\t\t# fill torrent_history table\r\n\t\tfor torrent in client_torrents:\r\n\t\t\tget_torrent_id = c.execute(\"SELECT id FROM torrents WHERE client_id=? 
AND hash=?\", (client_id[0],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttorrent['hash']))\r\n\t\t\ttorrent_id = get_torrent_id.fetchone()\r\n\t\t\t# entry = torrents id / date / downloaded / uploaded / total downloaded / total uploaded / progress / ratio\r\n\t\t\thistory_entry = (torrent_id[0], None, None, None, torrent['downloaded'], torrent['uploaded'],\r\n\t\t\t\t\t\t\t torrent['progress'], torrent['ratio'])\r\n\t\t\thistory_table.append(history_entry)\r\n\t\tc.executemany(\"INSERT INTO torrent_history VALUES (NULL,?,?,?,?,?,?,?,?)\", history_table)\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t\tlogger.info(\"'\" + display_name + \"' and all torrents successfully added to DB\")\r\n\r\n\t# add recently changed to DB\r\n\tdef add_to_db(self, torrent, display_name, client_name, section_name, client_type, start_today, qbit_cookie, ip, c,\r\n\t\t\t\t logger):\r\n\t\t# need to get tracker for qbittorrent\r\n\t\tif client_type == 'qbittorrent':\r\n\t\t\ttorrent['tracker'] = client_connect.get_qbit_tracker(torrent['hash'], qbit_cookie, ip, display_name,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t client_name, logger)\r\n\r\n\t\tselect_client_id = c.execute(\"SELECT id FROM clients WHERE section_name=?\", (section_name,))\r\n\t\tclient_id = select_client_id.fetchone()\r\n\r\n\t\tif not client_id:\r\n\t\t\tlogger.error(\"Client not found in DB. Can't add torrent\")\r\n\t\t\treturn\r\n\r\n\t\tselect_torrent_id = c.execute(\"SELECT id FROM torrents WHERE client_id=? AND hash=? AND added_date=?\",\r\n\t\t\t\t\t\t\t\t\t (client_id[0], torrent['hash'], torrent['addedDate']))\r\n\t\ttorrent_id = select_torrent_id.fetchone()\r\n\r\n\t\t# if there's no matching entry in the torrents table, it must be a new torrent, so we'll need to add it to\r\n\t\t# the DB\r\n\t\tif not torrent_id:\r\n\t\t\t# first need to check if tracker already exists in tracker table\r\n\t\t\tget_tracker_id = c.execute(\"SELECT id FROM trackers WHERE name=?\", (torrent['tracker'],))\r\n\t\t\ttracker_id = get_tracker_id.fetchone()\r\n\r\n\t\t\t# if tracker doesn't exist, insert new entry into trackers table\r\n\t\t\tif not tracker_id:\r\n\t\t\t\tc.execute(\"INSERT INTO trackers VALUES (NULL,?)\", (torrent['tracker'],))\r\n\t\t\t\tlogger.info(\"'\" + display_name + \"': New tracker. Added '\" + torrent['tracker'] + \"' to database\")\r\n\t\t\t\tget_tracker_id = c.execute(\"SELECT id FROM trackers WHERE name=?\", (torrent['tracker'],))\r\n\t\t\t\ttracker_id = get_tracker_id.fetchone()\r\n\r\n\t\t\t# entry = name / tracker id / client id / added_date / status / directory / selected size / hash / hidden\r\n\t\t\tc.execute(\"INSERT INTO torrents VALUES (NULL,?,?,?,?,?,?,?,?,?)\", (torrent['name'], tracker_id[0],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t client_id[0], torrent['addedDate'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent['state'], torrent['downloadDir'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent['size'], torrent['hash'], 1))\r\n\r\n\t\t\tget_torrent_id = c.execute(\"SELECT id FROM torrents WHERE client_id=? AND hash=? AND added_date=?\",\r\n\t\t\t\t\t\t\t\t\t (client_id[0], torrent['hash'], torrent['addedDate']))\r\n\t\t\ttorrent_id = get_torrent_id.fetchone()\r\n\t\telse:\r\n\t\t\t# update status and size\r\n\t\t\tc.execute(\"UPDATE torrents SET status=?, size=? 
WHERE id=?\", (torrent['state'], torrent['size'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent_id[0]))\r\n\r\n\t\t# make variables for logs\r\n\t\tlog_name = \"'\" + torrent['name'] + \"'\"\r\n\t\tlog_dl = log_t_dl = str(torrent['downloaded'])\r\n\t\tlog_ul = log_t_ul = str(torrent['uploaded'])\r\n\r\n\t\tfetch_recent = c.execute(\"SELECT id, total_downloaded, total_uploaded, progress FROM torrent_history WHERE \"\r\n\t\t\t\t\t\t\t\t \"torrent_id=? AND date>=?\", (torrent_id[0], start_today))\r\n\t\trecent = fetch_recent.fetchone()\r\n\r\n\t\tfetch_history = c.execute(\"SELECT total_downloaded, total_uploaded FROM torrent_history WHERE torrent_id=? AND \"\r\n\t\t\t\t\t\t\t\t \"(date 0:\r\n\t\t\t\t\t\t# if new torrent has completed since last launch and was completed before the last activity,\r\n\t\t\t\t\t\t# we'll assume the torrent was added and completed on the same date.\r\n\t\t\t\t\t\t# Insert an entry with full downloaded and 0 upload on completed date.\r\n\t\t\t\t\t\t# Add a second entry on the activity date with 0 down and full uploaded.\r\n\t\t\t\t\t\tif self.end_of_date(datetime.fromtimestamp(torrent['doneDate'])) < torrent['activityDate']:\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['doneDate'], torrent['downloaded'], 0,\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['downloaded'], 0, torrent['progress'], torrent['ratio']))\r\n\r\n\t\t\t\t\t\t\tif torrent['downloaded']:\r\n\t\t\t\t\t\t\t\tlog_dl = \"+\" + log_dl\r\n\t\t\t\t\t\t\tlogger.info(\"'\" + display_name + \"': \" + log_name + \" | date:\" + str(torrent['doneDate']) +\r\n\t\t\t\t\t\t\t\t\t\t\" | 🡣:\" + log_dl + \"b | 🡡:0b | total🡣:\" + log_dl + \"b | \" + \"total🡡:0b\")\r\n\t\t\t\t\t\t\tlog_dl = \"0\"\r\n\r\n\t\t\t\t\t\t\tif torrent['uploaded']:\r\n\t\t\t\t\t\t\t\tlog_ul = log_t_ul = \"+\" + log_ul\r\n\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], 0, torrent['uploaded'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['downloaded'], torrent['uploaded'], torrent['progress'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['ratio']))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif torrent['downloaded']:\r\n\t\t\t\t\t\t\t\tlog_dl = log_t_dl = \"+\" + log_dl\r\n\t\t\t\t\t\t\tif torrent['uploaded']:\r\n\t\t\t\t\t\t\t\tlog_ul = log_t_ul = \"+\" + log_ul\r\n\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['uploaded'], torrent['downloaded'], torrent['uploaded'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['progress'], torrent['ratio']))\r\n\t\t\t\t\t# if new torrent hasn't been completed, add one entry on the activity date\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif torrent['downloaded']:\r\n\t\t\t\t\t\t\tlog_dl = log_t_dl = \"+\" + log_dl\r\n\t\t\t\t\t\tif torrent['uploaded']:\r\n\t\t\t\t\t\t\tlog_ul = log_t_ul = \"+\" + log_ul\r\n\r\n\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\t\ttorrent['uploaded'], torrent['downloaded'], torrent['uploaded'],\r\n\t\t\t\t\t\t\t\t\t\ttorrent['progress'], torrent['ratio']))\r\n\t\t\t\telse:\r\n\t\t\t\t\tdownloaded = torrent['downloaded'] - history[0]\r\n\t\t\t\t\tuploaded = torrent['uploaded'] - history[1]\r\n\r\n\t\t\t\t\tif uploaded == 0 and downloaded == 0:\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t\t# if existing torrent has completed since last launch and was completed before the last activity,\r\n\t\t\t\t\t# we'll assume the final progress of the download was completed on the done date.\r\n\t\t\t\t\t# Insert an entry with downloaded-historyDownloaded on completed 
date.\r\n\t\t\t\t\t# Add a second entry on the activity date with 0 down and uploaded-historyUploaded.\r\n\t\t\t\t\tif torrent['doneDate'] > 0 and downloaded:\r\n\t\t\t\t\t\tif self.end_of_date(datetime.fromtimestamp(torrent['doneDate'])) < torrent['activityDate']:\r\n\t\t\t\t\t\t\tratio_estimate = history[1] / torrent['downloaded']\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['doneDate'], downloaded, 0, torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\t\t\thistory[1], torrent['progress'], ratio_estimate))\r\n\r\n\t\t\t\t\t\t\tlog_dl = \"+\" + str(downloaded)\r\n\t\t\t\t\t\t\tlog_t_dl = \"+\" + log_t_dl\r\n\t\t\t\t\t\t\tlogger.info(\"'\" + display_name + \"': \" + log_name + \" | date:\" + str(torrent['doneDate']) +\r\n\t\t\t\t\t\t\t\t\t\t\" | 🡣:\" + log_dl + \"b | 🡡:0b | total🡣:\" + log_t_dl + \"b | \" + \"total🡡:\" + \r\n\t\t\t\t\t\t\t\t\t\tstr(history[1]) + \"b\")\r\n\t\t\t\t\t\t\tlog_dl = \"0\"\r\n\t\t\t\t\t\t\tlog_t_dl = str(torrent['downloaded'])\r\n\t\t\t\t\t\t\tif uploaded:\r\n\t\t\t\t\t\t\t\tlog_ul = \"+\" + str(uploaded)\r\n\t\t\t\t\t\t\t\tlog_t_ul = \"+\" + log_t_ul\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tlog_ul = str(uploaded)\r\n\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], 0, uploaded, torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['uploaded'], torrent['progress'], torrent['ratio']))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif downloaded:\r\n\t\t\t\t\t\t\t\tlog_dl = \"+\" + str(downloaded)\r\n\t\t\t\t\t\t\t\tlog_t_dl = \"+\" + log_t_dl\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tlog_dl = str(downloaded)\r\n\t\t\t\t\t\t\tif uploaded:\r\n\t\t\t\t\t\t\t\tlog_ul = \"+\" + str(uploaded)\r\n\t\t\t\t\t\t\t\tlog_t_ul = \"+\" + log_t_ul\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tlog_ul = str(uploaded)\r\n\r\n\t\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], downloaded, uploaded,\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['downloaded'], torrent['uploaded'], torrent['progress'],\r\n\t\t\t\t\t\t\t\t\t\t\ttorrent['ratio']))\r\n\r\n\t\t\t\t\t# if existing torrent hasn't been completed, add one entry on the activity date\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif downloaded:\r\n\t\t\t\t\t\t\tlog_dl = \"+\" + str(downloaded)\r\n\t\t\t\t\t\t\tlog_t_dl = \"+\" + log_t_dl\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tlog_dl = str(downloaded)\r\n\t\t\t\t\t\tif uploaded:\r\n\t\t\t\t\t\t\tlog_ul = \"+\" + str(uploaded)\r\n\t\t\t\t\t\t\tlog_t_ul = \"+\" + log_t_ul\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tlog_ul = str(uploaded)\r\n\r\n\t\t\t\t\t\tentries.append((torrent_id[0], torrent['activityDate'], downloaded, uploaded,\r\n\t\t\t\t\t\t\t\t\t\ttorrent['downloaded'], torrent['uploaded'], torrent['progress'],\r\n\t\t\t\t\t\t\t\t\t\ttorrent['ratio']))\r\n\r\n\t\t\t\tc.executemany(\"INSERT INTO torrent_history VALUES (NULL,?,?,?,?,?,?,?,?)\", entries)\r\n\r\n\t\t\t# if there's historical and recent history (multiple times today of old torrent), update the entry with\r\n\t\t\t# latest stats\r\n\t\t\telse:\r\n\t\t\t\trecent_down = torrent['downloaded'] - recent[1]\r\n\t\t\t\trecent_up = torrent['uploaded'] - recent[2]\r\n\r\n\t\t\t\tprogress_diff = torrent['progress'] - recent[3]\r\n\t\t\t\tif not progress_diff:\r\n\t\t\t\t\tif recent_down == 0 and recent_up == 0:\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t# if there is a recent but not a historical, it must be an update to a new entry from today.\r\n\t\t\t\tif not history:\r\n\t\t\t\t\tif recent_down:\r\n\t\t\t\t\t\tlog_dl = \"+\" + str(recent_down)\r\n\t\t\t\t\t\tlog_t_dl = \"+\" + log_t_dl\r\n\t\t\t\t\tif 
recent_up:\r\n\t\t\t\t\t\tlog_ul = \"+\" + str(recent_up)\r\n\t\t\t\t\t\tlog_t_ul = \"+\" + log_t_ul\r\n\r\n\t\t\t\t\tentries.append((torrent['activityDate'], torrent['downloaded'], torrent['uploaded'],\r\n\t\t\t\t\t\t\t\t\ttorrent['downloaded'], torrent['uploaded'], torrent['progress'],\r\n\t\t\t\t\t\t\t\t\ttorrent['ratio'], recent[0]))\r\n\t\t\t\telse:\r\n\t\t\t\t\tdownloaded = torrent['downloaded'] - history[0]\r\n\t\t\t\t\tuploaded = torrent['uploaded'] - history[1]\r\n\r\n\t\t\t\t\tif uploaded == 0 and downloaded == 0:\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t\tif downloaded:\r\n\t\t\t\t\t\tlog_dl = \"+\" + str(downloaded)\r\n\t\t\t\t\t\tlog_t_dl = \"+\" + log_t_dl\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlog_dl = \"0\"\r\n\t\t\t\t\tif uploaded:\r\n\t\t\t\t\t\tlog_ul = \"+\" + str(uploaded)\r\n\t\t\t\t\t\tlog_t_ul = \"+\" + log_t_ul\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlog_ul = \"0\"\r\n\r\n\t\t\t\t\tentries.append((torrent['activityDate'], downloaded, uploaded, torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\ttorrent['uploaded'], torrent['progress'], torrent['ratio'], recent[0]))\r\n\r\n\t\t\t\tc.executemany(\"UPDATE torrent_history SET date=?, downloaded=?, uploaded=?, total_downloaded=?, \"\r\n\t\t\t\t\t\t\t \"total_uploaded=?, progress=?, ratio=? WHERE id=?\", entries)\r\n\r\n\t\tlogger.info(\"'\" + display_name + \"': \" + log_name + \" | date:\" + str(torrent['activityDate']) + \" | 🡣:\" +\r\n\t\t\t\t\tlog_dl + \"b | 🡡:\" + log_ul + \"b | total🡣:\" + log_t_dl + \"b | \" + \"total🡡:\" + log_t_ul + \"b\")\r\n\r\n\t# Binary search, returning index if there's a match, else None\r\n\tdef index(self, a, x):\r\n\t\ti = bisect_left(a, x)\r\n\t\tif i != len(a) and a[i] == x:\r\n\t\t\treturn i\r\n\t\treturn\r\n\r\n\t# return timestamp at start(00:00) of date for checking torrent dates\r\n\tdef start_of_date(self, dt):\r\n\t\tstart = datetime.combine(dt, datetime.min.time())\r\n\t\treturn datetime.timestamp(start)\r\n\r\n\t# return timestamp at end(11:59:59.999) of date for checking torrents\r\n\tdef end_of_date(self, dt):\r\n\t\tend = datetime.combine(dt, datetime.max.time())\r\n\t\treturn datetime.timestamp(end)\r\n\r\n\t# no activity date in deluge. to find recent torrents we'll just have to check for matching hashes and\r\n\t# changes to down/up\r\n\tdef check_deluge_rtorrent(self, c, ts_db, client_torrents, client_id):\r\n\t\trecent = []\r\n\t\tfor torrent in client_torrents:\r\n\t\t\tselect_search_recent = c.execute(\"SELECT t.id FROM torrents t INNER JOIN torrent_history th ON t.id = \"\r\n\t\t\t\t\t\t\t\t\t\t\t \"th.torrent_id WHERE t.hash=? AND t.client_id=? AND t.added_date=? AND \"\r\n\t\t\t\t\t\t\t\t\t\t\t \"th.total_downloaded=? 
AND th.total_uploaded=?\", (torrent['hash'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t client_id,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent['addedDate'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent['downloaded'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t torrent['uploaded']))\r\n\t\t\tsearch_recent = select_search_recent.fetchone()\r\n\t\t\tif not search_recent:\r\n\t\t\t\trecent.append(torrent)\r\n\t\treturn recent\r\n\r\n\t# check for recent changes on program start, and add them to the DB\r\n\tdef initial_check(self, ts_db, client_torrents, display_name, client_name, section_name, client_type, ip, user,\r\n\t\t\t\t\t pw, logger):\r\n\t\tlogger.info(\"Checking for recent activity from '\" + display_name + \"' (\" + client_name + \")\")\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\r\n\t\trecent_torrents = []\r\n\t\tselect_most_recent = c.execute(\"SELECT date FROM torrent_history ORDER BY date DESC LIMIT 1\")\r\n\t\tmost_recent = select_most_recent.fetchone()[0]\r\n\r\n\t\tcurrent_time = datetime.now()\r\n\t\tif current_time.hour == 0 and current_time.minute == 0:\r\n\t\t\tcurrent_time = current_time - timedelta(seconds=90)\r\n\t\tstart_today = self.start_of_date(current_time)\r\n\r\n\t\tif client_type == 'deluge' or client_type == 'rtorrent':\r\n\t\t\tselect_client_id = c.execute(\"SELECT id FROM clients WHERE section_name=?\", (section_name,))\r\n\t\t\tclient_id = select_client_id.fetchone()[0]\r\n\t\t\trecent_torrents = self.check_deluge_rtorrent(c, ts_db, client_torrents, client_id)\r\n\t\telse:\r\n\t\t\tfor torrent in client_torrents:\r\n\t\t\t\tif torrent['activityDate'] > 0:\r\n\t\t\t\t\tif most_recent:\r\n\t\t\t\t\t\tif torrent['activityDate'] > most_recent:\r\n\t\t\t\t\t\t\trecent_torrents.append(torrent)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif torrent['activityDate'] >= start_today:\r\n\t\t\t\t\t\t\trecent_torrents.append(torrent)\r\n\r\n\t\tif recent_torrents:\r\n\t\t\tqbit_cookie = None\r\n\t\t\tif client_type == 'qbittorrent':\r\n\t\t\t\tqbit_cookie = client_connect.get_qbit_cookie(ip, user, pw, display_name, client_name, logger)\r\n\r\n\t\t\tfor torrent in recent_torrents:\r\n\t\t\t\tself.add_to_db(torrent, display_name, client_name, section_name, client_type, start_today, qbit_cookie,\r\n\t\t\t\t\t\t\t ip, c, logger)\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t\tlogger.info(\"Check complete\")\r\n\r\n\t# check for recent changes at intervals\r\n\tdef frequent_check(self, ts_db, client_torrents, display_name, client_name, section_name, client_type, ip, user,\r\n\t\t\t\t\t pw, logger):\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\r\n\t\tcurrent_time = datetime.now()\r\n\t\tif current_time.hour == 0 and current_time.minute == 0:\r\n\t\t\tcurrent_time = current_time - timedelta(seconds=90)\r\n\t\tstart_today = self.start_of_date(current_time)\r\n\r\n\t\tif client_type == 'deluge' or client_type == 'rtorrent':\r\n\t\t\tselect_client_id = c.execute(\"SELECT id FROM clients WHERE section_name=?\", (section_name,))\r\n\t\t\tclient_id = select_client_id.fetchone()[0]\r\n\t\t\trecent_torrents = self.check_deluge_rtorrent(c, ts_db, client_torrents, client_id)\r\n\t\t\tfor torrent in recent_torrents:\r\n\t\t\t\tself.add_to_db(torrent, display_name, client_name, section_name, client_type, start_today, None, None,\r\n\t\t\t\t\t\t\t c, logger)\r\n\t\telse:\r\n\t\t\tqbit_cookie = None\r\n\t\t\tif client_type == 'qbittorrent':\r\n\t\t\t\tqbit_cookie = client_connect.get_qbit_cookie(ip, user, pw, 
display_name, client_name, logger)\r\n\r\n\t\t\tfor torrent in client_torrents:\r\n\t\t\t\tif torrent['activityDate'] >= start_today:\r\n\t\t\t\t\tself.add_to_db(torrent, display_name, client_name, section_name, client_type, start_today,\r\n\t\t\t\t\t\t\t\t qbit_cookie, ip, c, logger)\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\r\n\t# when we have multiple clients, use this method to call frequent checks for each one, one after another\r\n\t# read the config file fresh every time, to account for new clients\r\n\tdef multiple_frequent_checks(self, ts_db, config_file, scheduler, logger):\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(config_file)\r\n\r\n\t\t# get all existing client names from the DB\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\t\tselect_clients = c.execute(\"SELECT section_name FROM clients\")\r\n\t\tclients_list = select_clients.fetchall()\r\n\t\texisting_clients = []\r\n\t\tfor client in clients_list:\r\n\t\t\texisting_clients.append(client[0])\r\n\r\n\t\t# do a check on each client. If it's a new client, add it\r\n\t\tfor section in config:\r\n\t\t\tif 'Client' in section:\r\n\t\t\t\tif config[section]['sync'] == 'yes':\r\n\t\t\t\t\tclient_torrents = client_connect.get_torrents(config[section]['ip'], config[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], logger)\r\n\t\t\t\t\tif client_torrents:\r\n\t\t\t\t\t\tif section not in existing_clients:\r\n\t\t\t\t\t\t\tself.add_client_to_db(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], section, \r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['ip'], config[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['pass'], logger)\r\n\t\t\t\t\t\tself.frequent_check(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\tconfig[section]['client_name'], section, config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\tconfig[section]['ip'], config[section]['user'], config[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t\tlogger)\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t\r\n\t# reschedule jobs when user updates preferences\r\n\tdef update_jobs(self, updated_jobs, scheduler, log, logger):\r\n\t\tlogger.info(log)\r\n\t\tfor job in updated_jobs:\r\n\t\t\tif job[0] == 1:\r\n\t\t\t\ttrigger = cron.CronTrigger(hour='*', minute='*/' + str(job[1]))\r\n\t\t\t\tscheduler.reschedule_job('1', trigger=trigger)\r\n\t\t\telif job[0] == 2:\r\n\t\t\t\ttrigger = cron.CronTrigger(day_of_week='*/' + str(job[1]), hour='0', minute='1', second='45')\r\n\t\t\t\tscheduler.reschedule_job('2', trigger=trigger)\r\n\t\t\t\t\r\n\t\t\telif job[0] == 3:\r\n\t\t\t\tif job[1] > 59:\r\n\t\t\t\t\ttrigger = cron.CronTrigger(hour='*', minute='0', second='30')\r\n\t\t\t\t\tscheduler.reschedule_job('3', trigger=trigger)\r\n\t\t\t\telse:\r\n\t\t\t\t\ttrigger = cron.CronTrigger(hour='*', minute='*/' + str(job[1]), second='30')\r\n\t\t\t\t\tscheduler.reschedule_job('3', trigger=trigger)\r\n\t\tfor job in scheduler.get_jobs():\r\n\t\t\tlogger.info(\"name: %s, trigger: %s, next run: %s\" % (job.name, job.trigger, job.next_run_time))\r\n\t\t\t\r\n\t# Update the version name of a client\r\n\tdef update_client_version(self, config, config_file, 
new_version, section):\r\n\t\tconfig.set(section, 'client_name', new_version)\r\n\r\n\t\twith open(config_file, 'w') as config_new:\r\n\t\t\tconfig.write(config_new)\r\n\r\n\t# Check if any clients have updated\r\n\tdef multiple_update_client_version(self, config_file, logger):\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(config_file)\r\n\r\n\t\tfor section in config:\r\n\t\t\tif 'Client' in section:\r\n\t\t\t\tif config[section]['sync'] == 'yes':\r\n\t\t\t\t\tnew_version = client_connect.compare_client_version(config[section]['ip'], config[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['client_name'], logger)\r\n\t\t\t\t\tif new_version:\r\n\t\t\t\t\t\tself.update_client_version(config, config_file, new_version, section)\r\n\t\t\t\t\t\tlogger.info(\"Updated application version of \" + config[section]['display_name'])\r\n\r\n\t# Change status to 'Deleted' for deleted torrents, update directories of torrents and add missing torrents\r\n\tdef update_torrent_info(self, ts_db, client_torrents, display_name, client_name, section_name, client_type, ip,\r\n\t\t\t\t\t\t\tuser, pw, logger):\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\t\t\r\n\t\tc.execute(\"SELECT torrents.id, torrents.status, torrents.hash, torrents.directory, torrents.name FROM torrents \"\r\n\t\t\t\t \"INNER JOIN clients ON torrents.client_id = clients.id WHERE clients.section_name=? AND \"\r\n\t\t\t\t \"torrents.status<>'Deleted' ORDER BY torrents.hash\", (section_name,))\r\n\t\tdb_hashes = c.fetchall()\r\n\r\n\t\t# update torrents status\r\n\r\n\t\t# add the client hashes and status to a list for sorting\r\n\t\tclient_hashes_status = []\r\n\t\tclient_hashes = []\r\n\t\tclient_status = []\r\n\t\tfor torrent in client_torrents:\r\n\t\t\tclient_hashes_status.append((torrent['hash'], torrent['state']))\r\n\r\n\t\tclient_hashes_status.sort()\r\n\t\t# split apart so we can search the hashes\r\n\t\tfor torrent in client_hashes_status:\r\n\t\t\tclient_hashes.append(torrent[0])\r\n\t\t\tclient_status.append(torrent[1])\r\n\t\t\t\r\n\t\t# if torrent from db not found in client hashes, change status to 'Deleted'.\r\n\t\t# if there is match, check for changed status, then pop to reduce array size for next search\r\n\t\tstatus_update = []\r\n\t\tfor db_hash in db_hashes:\r\n\t\t\tsearch = self.index(client_hashes, db_hash[2])\r\n\t\t\tif search == None:\r\n\t\t\t\tstatus_update.append((\"Deleted\", db_hash[0]))\r\n\t\t\t\tlogger.info(\"'\" + display_name + \"': '\" + db_hash[4] + \"' Status: \" + db_hash[1] + \" -> Deleted\")\r\n\t\t\telse:\r\n\t\t\t\tif client_status[search] != db_hash[1]:\r\n\t\t\t\t\tstatus_update.append((client_status[search], db_hash[0]))\r\n\t\t\t\t\tlogger.info(\"'\" + display_name + \"': '\" + db_hash[4] + \"' Status: \" + db_hash[1] + \" -> \" +\r\n\t\t\t\t\t\t\t\tclient_status[search])\r\n\t\t\t\tclient_hashes.pop(search)\r\n\t\t\t\tclient_status.pop(search)\r\n\r\n\t\tc.executemany(\"UPDATE torrents SET status=? 
WHERE id=?\", status_update)\r\n\r\n\t\t# update directories of torrents, checking for any missing torrents in the process\r\n\r\n\t\t# add the sorted hashes to a list for searching\r\n\t\texisting_hashes = []\r\n\t\tfor torrent in db_hashes:\r\n\t\t\texisting_hashes.append(torrent[2])\r\n\r\n\t\tdirectory_update = []\r\n\t\tname_update = []\r\n\t\tqbit_cookie = None\r\n\t\tif client_type == 'qbittorrent':\r\n\t\t\tqbit_cookie = client_connect.get_qbit_cookie(ip, user, pw, display_name, client_name, logger)\r\n\r\n\t\tfor torrent in client_torrents:\r\n\t\t\ti = self.index(existing_hashes, torrent['hash'])\r\n\t\t\t# if the torrent isn't found in the database, it must have been missed\r\n\t\t\tif i == None:\r\n\t\t\t\tstart_today = self.start_of_date(datetime.now())\r\n\t\t\t\tlogger.info(\"'\" + display_name + \"': Missed torrent. Adding '\" + torrent['name'] + \"' to database...\")\r\n\t\t\t\tself.add_to_db(torrent, display_name, client_name, section_name, client_type, start_today, qbit_cookie,\r\n\t\t\t\t\t\t\t ip, c, logger)\r\n\t\t\telse:\r\n\t\t\t\tif torrent['downloadDir'] != db_hashes[i][3]:\r\n\t\t\t\t\tdirectory_update.append((torrent['downloadDir'], db_hashes[i][0]))\r\n\t\t\t\t\tlogger.info(\"'\" + display_name + \"': '\" + db_hashes[i][4] + \"' Directory: '\" +\r\n\t\t\t\t\t\t\t\tdb_hashes[i][3] + \"' -> '\" + torrent['downloadDir'] + \"'\")\r\n\t\t\t\tif torrent['name'] != db_hashes[i][4]:\r\n\t\t\t\t\tname_update.append((torrent['name'], db_hashes[i][0]))\r\n\t\t\t\t\tlogger.info(\"'\" + display_name + \"': '\" + db_hashes[i][4] + \"' renamed to '\" + torrent['name'] +\r\n\t\t\t\t\t\t\t\t\"'\")\r\n\r\n\t\tc.executemany(\"UPDATE torrents SET directory=? WHERE id=?\", directory_update)\r\n\t\tc.executemany(\"UPDATE torrents SET name=? WHERE id=?\", name_update)\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\r\n\t# for each client, check for deleted torrents and modified directories\r\n\tdef multiple_update_info(self, ts_db, config_file, logger):\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(config_file)\r\n\r\n\t\tfor section in config:\r\n\t\t\tif 'Client' in section:\r\n\t\t\t\tif config[section]['sync'] == 'yes':\r\n\t\t\t\t\tclient_torrents = client_connect.get_torrents(config[section]['ip'], config[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], logger)\r\n\t\t\t\t\tif client_torrents:\r\n\t\t\t\t\t\tself.update_torrent_info(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], section,\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'], config[section]['ip'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['user'], config[section]['pass'], logger)\r\n\r\n\t# Backup database\r\n\tdef backup_database(self, data_dir, ts_db, logger):\r\n\t\tlogger.info(\"Backing up database...\")\r\n\t\tbackup_dir = os.path.join(data_dir, \"backup\")\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tbackup_conn = sqlite3.connect(os.path.join(backup_dir, (\"torrentstats-backup-\" + str(date.today()) + \".db\")))\r\n\r\n\t\twith backup_conn:\r\n\t\t\tconn.backup(backup_conn)\r\n\t\tbackup_conn.close()\r\n\t\tconn.close()\r\n\r\n\t\t# keep 3 DB backups. 
If we have more, delete the oldest one\r\n\t\tfiles = {}\r\n\t\tfor filename in os.scandir(backup_dir):\r\n\t\t\tfiles[filename.name] = os.path.getmtime(os.path.join(backup_dir, filename.name))\r\n\r\n\t\tif len(files) > 4:\r\n\t\t\tos.remove(os.path.join(backup_dir, min(files, key=files.get)))\r\n\r\n\t\tlogger.info(\"Database backup completed\")\r\n\r\n\t# check modified time of backed up files. If they're all older than 4 days, we're overdue a backup\r\n\tdef check_backups(self, data_dir, ts_db, logger):\r\n\t\tbackup_dir = os.path.join(data_dir, \"backup\")\r\n\t\tfor filename in os.scandir(backup_dir):\r\n\t\t\tif os.path.getmtime(os.path.join(backup_dir, filename.name)) < (time.time() - 345600):\r\n\t\t\t\tself.backup_database(data_dir, ts_db, logger)\r\n\t\t\t\treturn\r\n\t\t \r\n\t# verify windows options are correct \r\n\tdef verify_win_options(self, config):\r\n\t\tif config['Preferences']['start_at_login'] == '1':\r\n\t\t\twin_functions.add_to_startup()\r\n\t\telif config['Preferences']['start_at_login'] == '2':\r\n\t\t\twin_functions.remove_startup()\r\n\t\t\r\n\t\tif config['Preferences']['start_menu_shortcut'] == '1':\r\n\t\t\twin_functions.add_to_start_menu()\r\n\t\telif config['Preferences']['start_menu_shortcut'] == '2':\r\n\t\t\twin_functions.remove_start_menu()\r\n\r\n\t# on program start, let's do a check on all frequent tasks to see if they're overdue, and execute them if needed\r\n\tdef initial_start(self, data_dir, ts_db, config_file, logger):\r\n\t\tlogger.info(\"Performing initial database check...\")\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(config_file)\r\n\r\n\t\tconn = sqlite3.connect(ts_db)\r\n\t\tc = conn.cursor()\r\n\t\t# get all existing client names from the DB\r\n\t\tselect_clients = c.execute(\"SELECT section_name FROM clients\")\r\n\t\tclients_list = select_clients.fetchall()\r\n\t\tclients = []\r\n\t\tfor client in clients_list:\r\n\t\t\tclients.append(client[0])\r\n\r\n\t\t# do a check on each client\r\n\t\tfor section in config:\r\n\t\t\tif 'Client' in section:\r\n\t\t\t\tif config[section]['sync'] == 'yes':\r\n\t\t\t\t\tclient_torrents = client_connect.get_torrents(config[section]['ip'], config[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['pass'], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], logger)\r\n\t\t\t\t\tif client_torrents:\r\n\t\t\t\t\t\t# if it's a new client, add it to the DB\r\n\t\t\t\t\t\tif section not in clients:\r\n\t\t\t\t\t\t\tself.add_client_to_db(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], section, \r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'], config[section]['ip'], \r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['user'], config[section]['pass'], logger)\r\n\t\t\t\t\t\t# check for activity since last run\r\n\t\t\t\t\t\tself.initial_check(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t config[section]['client_name'], section, config[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t config[section]['ip'], config[section]['user'], config[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t logger)\r\n\t\t\t\t\t\tlogger.info(\"'\" + config[section]['display_name'] + \"': Updating all torrent info...\")\r\n\t\t\t\t\t\tself.update_torrent_info(ts_db, client_torrents, config[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t 
config[section]['client_name'], section,\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['client_type'], config[section]['ip'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t config[section]['user'], config[section]['pass'], logger)\r\n\r\n\t\t\t\t\t\tnew_version = client_connect.compare_client_version(config[section]['ip'], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['user'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['pass'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['client_type'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['display_name'],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig[section]['client_name'], logger)\r\n\t\t\t\t\t\tif new_version:\r\n\t\t\t\t\t\t\tself.update_client_version(config, config_file, new_version, section)\r\n\t\t\t\t\t\t\tlogger.info(\"'\" + config[section]['display_name'] + \"': Updated application version\")\r\n\t\t\t\t\t\tlogger.info(\"Update complete\")\r\n\r\n\t\tself.check_backups(data_dir, ts_db, logger)\r\n\r\n\t\tif sys.platform == \"win32\":\r\n\t\t\tself.verify_win_options(config)\r\n\r\n\t\tlogger.info(\"Check complete\")\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t \r\n\t# return port number\r\n\tdef get_port(self, config_file):\r\n\t\tconfig = configparser.ConfigParser()\r\n\t\tconfig.read(config_file)\r\n\t\t\r\n\t\treturn config['Preferences']['port']","repo_name":"csteven1/TorrentStats","sub_path":"src/manage_db.py","file_name":"manage_db.py","file_ext":"py","file_size_in_byte":40313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"37859796064","text":"import sys\nimport mcb185\nimport argparse\n\nparser = argparse.ArgumentParser(description='Translation of RNA to aa')\n# required arguments\nparser.add_argument('--file', required=True, type=str,\n\tmetavar='', help='required string argument')\n#finalization\narg = parser.parse_args()\n\nfor name, seq in mcb185.read_fasta(arg.file):\n\tprint(f'>{name}')\n\tprint(mcb185.translate(seq))\n\n# You have been given the code above\n# An example command line is\n#\tpython3 translate.py ATGCGCCCGAACTAG ATGAAACCCGGGTTT\n\n# Your task is to write a new program with the following features\n# 1. Proper command line (argparse)\n# 2. Reads sequences in from fasta format rather than sys.argv\n# 3. Outputs sequences as fasta format\n# 4. Accepts both uppercase and lowercase letters\n# 5. Translates ambiguous amino acids as X (e.g. for weird codons)\n\n# Hints\n# 1. add functions and put them in your library\n# 2. 
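normalize each sequence once before translating; a hedged one-line\n#    sketch: seq = seq.upper()  (this assumes mcb185.translate expects uppercase);\n#    in other words, 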
use string.upper() to normalize case\n","repo_name":"annguyen642/learning_python","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16009337700","text":"import ast\nimport argparse\nimport logging\nimport json\nimport os\nimport io\nimport glob\n\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nfrom torchvision import transforms\n\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.docs import *\n\n# setup the logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# set the constants for the content types\nJSON_CONTENT_TYPE = 'application/json'\nJPEG_CONTENT_TYPE = 'image/jpeg'\n\n# get the image size from an environment variable for inference\nIMG_SIZE = int(os.environ.get('IMAGE_SIZE', '224'))\n\n# define the classification classes\nclasses = ('cats', 'dogs')\n\n# By default split models between first and second layer\ndef _default_split(m:Model): return (m[1],)\n# Split a resnet style model\ndef _resnet_split(m:Model): return (m[0][6],m[1])\n\n_default_meta = {'cut':-1, 'split':_default_split}\n_resnet_meta = {'cut':-2, 'split':_resnet_split }\n\n_model_meta = {\n tvm.resnet18 :{**_resnet_meta}, tvm.resnet34: {**_resnet_meta},\n tvm.resnet50 :{**_resnet_meta}, tvm.resnet101:{**_resnet_meta},\n tvm.resnet152:{**_resnet_meta}}\n\n# define the image preprocess steps for inference\n_preprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\n# The train method\ndef _train(args):\n print(f'Called _train method with model arch: {args.model_arch}, batch size: {args.batch_size}, image size: {args.image_size}, epochs: {args.epochs}')\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Device Type: {}\".format(device))\n print(f'Getting training data from dir: {args.data_dir}')\n data = image_data_from_folder(args.data_dir, ds_tfms=get_transforms(), tfms=imagenet_norm, size=args.image_size, bs=args.batch_size)\n print(f'Model architecture is {args.model_arch}')\n arch = getattr(tvm, args.model_arch)\n print(\"Creating pretrained conv net\")\n learn = ConvLearner(data, arch, metrics=accuracy)\n print(\"Fit one cycle\")\n learn.fit_one_cycle(1)\n print(f'Unfreeze and run {args.epochs} more cycles')\n learn.fit_one_cycle(args.epochs, slice(1e-5,3e-4), pct_start=0.05)\n return _save_model(args.model_arch, learn.model, args.model_dir)\n\n# save the model\ndef _save_model(name, model, model_dir):\n print(\"Saving the model.\")\n path = os.path.join(model_dir, f'{name}.pth')\n # recommended way from http://pytorch.org/docs/master/notes/serialization.html\n torch.save(model.state_dict(), path)\n print('Saved model')\n\n# create the model similar to source code here: https://github.com/fastai/fastai/blob/master/fastai/vision/learner.py\ndef _create_model(arch, device):\n print(\"Creating new model\")\n meta = _model_meta.get(arch, _default_meta)\n if device == 'cuda' : torch.backends.cudnn.benchmark = True\n body = create_body(arch(False), meta['cut'])\n nf = num_features(body) * 2\n head = create_head(nf, len(classes))\n model = nn.Sequential(body, head)\n print(\"Model created\")\n return model\n \n# Return the Convolutional Neural Network model\ndef model_fn(model_dir):\n 
logger.debug('model_fn')\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Device Type: {}\".format(device))\n # get the model architecture from name of saved model weights\n arch_name = os.path.splitext(os.path.split(glob.glob(f'{model_dir}/resnet*.pth')[0])[1])[0]\n print(f'Model architecture is: {arch_name}')\n arch = getattr(tvm, arch_name)\n model = _create_model(arch, device)\n print(\"Loading model weights\")\n with open(os.path.join(model_dir, f'{arch_name}.pth'), 'rb') as f:\n model.load_state_dict(torch.load(f, map_location=lambda storage, loc: storage))\n print(\"Model weights loaded\")\n model.to(device)\n model.eval()\n return model\n\n# Deserialize the Invoke request body into an object we can perform prediction on\ndef input_fn(request_body, content_type=JPEG_CONTENT_TYPE):\n logger.info('Deserializing the input data.')\n if content_type == JPEG_CONTENT_TYPE:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Device Type: {}\".format(device)) \n logger.info('Processing jpeg image.')\n img_pil = PIL.Image.open(io.BytesIO(request_body)).convert('RGB')\n img_tensor = _preprocess(img_pil)\n img_tensor.unsqueeze_(0)\n img_variable = Variable(img_tensor.to(device))\n logger.info(\"Returning image as PyTorch Variable.\")\n return img_variable\n raise Exception('Requested unsupported ContentType in content_type: {}'.format(content_type))\n\n# Perform prediction on the deserialized object, with the loaded model\ndef predict_fn(input_object, model):\n logger.info(\"Calling model\")\n output = model(input_object)\n print(\"Raw output\")\n print(output.data) \n preds = F.softmax(output, dim=1)\n print(\"Softmax output\")\n print(preds)\n logger.info(\"Getting class and confidence score\")\n conf_score, indx = torch.max(preds, 1)\n print(f'conf score {conf_score.item()}, index: {indx.item()}')\n response = {}\n response['class'] = classes[indx.item()]\n response['confidence'] = conf_score.item() \n logger.info(response)\n return response\n\n# Serialize the prediction result into the desired response content type\ndef output_fn(prediction, accept=JSON_CONTENT_TYPE): \n logger.info('Serializing the generated output.')\n if accept == JSON_CONTENT_TYPE:\n return json.dumps(prediction), accept\n raise Exception('Requested unsupported ContentType in Accept: {}'.format(accept)) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--workers', type=int, default=2, metavar='W',\n help='number of data loading workers (default: 2)')\n parser.add_argument('--epochs', type=int, default=2, metavar='E',\n help='number of total epochs to run (default: 2)')\n parser.add_argument('--batch-size', type=int, default=64, metavar='BS',\n help='batch size (default: 64)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='initial learning rate (default: 0.001)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--dist-backend', type=str, default='gloo', help='distributed backend (default: gloo)')\n\n # fast.ai specific parameters\n parser.add_argument('--image-size', type=int, default=224, metavar='IS',\n help='image size (default: 224)')\n parser.add_argument('--model-arch', type=str, default='resnet34', metavar='MA',\n help='model arch (default: resnet34)')\n \n # The parameters below retrieve their default values from SageMaker environment variables, which are\n # instantiated by the SageMaker containers framework.\n # 
https://github.com/aws/sagemaker-containers#how-a-script-is-executed-inside-the-container\n parser.add_argument('--hosts', type=str, default=ast.literal_eval(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n _train(parser.parse_args())\n","repo_name":"mattmcclean/sagemaker-reinvent-chalktalk-2018","sub_path":"src/dogscats/dogscats.py","file_name":"dogscats.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"36166758552","text":"import glob\nimport os\nimport personality\nimport re\nimport sys\nimport pdb\n\nfrom .. import freddebugger\nfrom .. import debugger\nfrom .. import fredio\nfrom .. import fredutil\n\ngn_user_code_min = 0\ngn_user_code_max = 0\ngs_inferior_name = \"\"\n\nclass PersonalityPerl(personality.Personality):\n def __init__(self):\n personality.Personality.__init__(self)\n self.s_name = \"perl\"\n self.GS_NEXT = \"n\"\n self.GS_STEP = \"s\"\n self.GS_CONTINUE = \"c\"\n self.GS_BREAKPOINT = \"b\"\n self.GS_WHERE = \"T\"\n self.GS_INFO_BREAKPOINTS = \"L b\"\n self.GS_PRINT = \"p\"\n self.GS_FINISH = \"r\"\n self.GS_CURRENT_POS = \".\"\n\n self.gs_next_re = fredutil.getRE(self.GS_NEXT)\n self.gs_step_re = fredutil.getRE(self.GS_STEP)\n self.gs_continue_re = fredutil.getRE(self.GS_CONTINUE)\n self.gs_breakpoint_re = fredutil.getRE(self.GS_BREAKPOINT)\n self.gs_where_re = fredutil.getRE(self.GS_WHERE)\n self.gs_info_breakpoints_re = fredutil.getRE(self.GS_INFO_BREAKPOINTS)\n self.gs_print_re = fredutil.getRE(self.GS_PRINT)\n self.gs_program_not_running_re = \\\n \"\"\"Debugged program terminated. Use q to quit or R to restart,\n use o inhibit_exit to avoid stopping after program termination,\n h q, h R or h o to get additional info.\"\"\"\n\n self.GS_PROMPT = \"DB\"\n self.gre_prompt = self.GS_PROMPT + \"<\\d+?>\"\n self.gre_backtrace_frame = \". = (\\w+::\\w+)\\(?.*?\\)? called from file `(.+)\\' line (\\d+)\"\n self.gre_breakpoint = \"\\s*(\\d+):\\s+.+$\"\n # List of regexes that match debugger prompts for user input\n self.ls_needs_user_input = []\n # Things like 'next 5' are allowed:\n self.b_has_count_commands = False \n self.b_coalesce_support = False\n self.n_top_backtrace_frame = 0\n\n def prompt_string(self):\n \"\"\"Return the debugger's prompt string.\"\"\"\n return self.GS_PROMPT\n\n def prompt(self):\n \"\"\"Bring user back to debugger prompt.\"\"\"\n sys.stdout.write(self.GS_PROMPT)\n sys.stdout.flush()\n\n def sanitize_print_result(self, s_printed):\n \"\"\"Sanitize the result of a debugger 'print' command.\n This is to normalize out things like gdb's print result:\n $XX = 16\n Where $XX changes with each command executed.\"\"\"\n # Strip the control characters and \\r\n s_printed = re.sub(\"\\\\x1b\\[\\d+?m\", \"\", s_printed)\n s_printed = s_printed.replace(\"\\r\", \"\")\n # Perl gives us no print result formatting. 
We use the\n        # following heuristic: find the first instance of the prompt\n        # string, and take the output to be the text between the\n        # prompt string and the previous newline char.\n        # Ex: 'p $i < 3\\n1\\n  DB<4>' should return '1'.\n        lines = s_printed.split(\"\\n\")\n        prevline = lines[0]\n        for line in lines[1:]:\n            if re.search(self.gre_prompt, line):\n                return prevline\n            prevline = line\n\n    def _parse_backtrace_frame(self, match_obj):\n        \"\"\"Return a BacktraceFrame from the given re Match object.\n        The Match object should be a tuple (result of gre_backtrace_frame.)\"\"\"\n        frame = debugger.BacktraceFrame()\n        frame.s_function = match_obj[0]\n        frame.s_file = match_obj[1]\n        frame.n_line = int(match_obj[2])\n        return frame\n\n    def _parse_backtrace_internal(self, backtrace):\n        result = re.findall(self.gre_backtrace_frame, backtrace)\n        # Perl backtraces are empty at the top-level. The reverse-*\n        # algorithms assume that a top-level backtrace has depth\n        # 1. Therefore, we always prepend a dummy top-level backtrace\n        # frame, although it is not displayed to the user.\n        result = [(\"FReD::dummy\", \"fred-nofile\", 0)] + result\n        return result\n\n    def _parse_one_breakpoint(self, match_obj):\n        \"\"\"Return a Breakpoint from the given re Match object.\n        The Match object should be a tuple (the result of gre_breakpoint).\"\"\"\n        breakpoint = debugger.Breakpoint()\n        breakpoint.s_file = match_obj[0]\n        breakpoint.n_line = int(match_obj[1])\n        breakpoint.n_number = int(match_obj[1])\n        return breakpoint\n\n    def _parse_breakpoints(self, info_str):\n        \"\"\"Return a list of Breakpoint objects parsed from output of 'info\n        breakpoints' cmd.\"\"\"\n        perlFileRE = \"(.+):\"\n        file = re.search(perlFileRE, info_str)\n        l_breakpoints = []\n        l_matches = re.findall(self.gre_breakpoint, info_str, re.MULTILINE)\n        for m in l_matches:\n            ff = file.group(1)\n            ff = ff.replace(\"\\x1b[m\", \"\")\n            l_breakpoints.append(self._parse_one_breakpoint((ff, m)))\n        return l_breakpoints\n","repo_name":"fred-dbg/fred","sub_path":"fred/personality/personalityPerl.py","file_name":"personalityPerl.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"67"} +{"seq_id":"73413551893","text":"import argparse\nimport time\nimport torch,os\nimport torch.backends.cudnn as cudnn\nfrom data.utils import psnr,ssim,get_hr_lr_bicubic,preprocess_to_tesnor,preprocess_to_pil,preprocess_to_y,merge\nimport matplotlib.pyplot as plt\nimport config\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--weights-file', type=str, default='./network/best{}.pth'.format(config.scale))\n    parser.add_argument('--image-file', type=str, default='./data/test/Set5/baby.png')\n    parser.add_argument('--mode', type=str, help='y: evaluate on the Y channel of YCbCr', default=config.mode)\n    parser.add_argument('--scale', type=int, default=config.scale)\n    args = parser.parse_args()\n\n    cudnn.benchmark = True\n    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n    model = torch.load(args.weights_file, map_location=lambda storage, loc: storage).to(device)\n    model.eval()\n    hr,lr,bicubic=get_hr_lr_bicubic(args.image_file,args.scale)  ### each of these is an RGB image\n    basename = args.image_file.split('/')[-1]\n    basename=basename.split('.')[0]  ### e.g. baby\n    ## save the reference images\n    hr.save(os.path.join('./fig',basename+'__hr{}.png'.format(args.scale)))\n    bicubic.save(os.path.join('./fig', basename + '__bicubic{}.png'.format(args.scale)))\n    if args.mode=='y':\n        print('evaluating on the Y channel')\n        
lr,_=preprocess_to_y(lr,device=device)\n        hr1, ycbcr = preprocess_to_y(hr, device=device)\n        bicubic1,_=preprocess_to_y(bicubic,device=device)\n    else:\n        print('evaluating on RGB channels')\n        lr=preprocess_to_tesnor(lr,device)\n        hr1=preprocess_to_tesnor(hr,device)\n        bicubic1=preprocess_to_tesnor(bicubic,device)\n    with torch.no_grad():\n        end=time.time()\n        preds = model(lr).clamp(0.0, 1.0)\n        print('inference time:',time.time()-end,'s')\n    ## print the metrics\n    print('bicubic and hr psnr:{}'.format(psnr(hr1,bicubic1)))\n    print('pred and hr psnr:{}'.format(psnr(hr1,preds)))\n    print('bicubic and hr ssim:{}'.format(ssim(bicubic1,hr1)))\n    print('pred and hr ssim:{}'.format(ssim(preds,hr1)))\n    if args.mode=='y':\n        output=merge(preds,ycbcr)\n    else:\n        output=preprocess_to_pil(preds)\n    output.save(os.path.join('./fig', basename + '__pred{}.png'.format(args.scale)))\n    ## display the results\n    plt.figure()\n    plt.subplot(131)\n    plt.imshow(hr)\n    plt.xticks([])  # remove x-axis ticks\n    plt.yticks([])\n    plt.title(\"hr\")\n    plt.subplot(132)\n    plt.imshow(bicubic)\n    plt.xticks([])  # remove x-axis ticks\n    plt.yticks([])\n    plt.title(\"bicubic\")\n    plt.subplot(133)\n    plt.imshow(output)\n    plt.xticks([])  # remove x-axis ticks\n    plt.yticks([])\n    plt.title('pred')\n    plt.show()","repo_name":"laity-sir/sr","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43037419214","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport random\r\na=random.randint(1,9)\r\nb=random.randint(1,9)\r\nc=random.randint(1,9)\r\nwhile a==b:\r\n    b=random.randint(1,9)\r\nwhile c==a or c==b:\r\n    c=random.randint(1,9)\r\nl=[]\r\nl.append(a)\r\nl.append(b)\r\nl.append(c)\r\nt=[]\r\nwhile t!=l:\r\n    t=[]\r\n    try:\r\n        s1=((input(\"Give Your Guess: \")))\r\n        s1=(eval(s1))\r\n    except:\r\n        s1=str(0)\r\n    if type(s1)==float:\r\n        print('Give an integer')\r\n    else:\r\n        s1=str(s1)\r\n        for item in (s1):\r\n            try:\r\n                t.append(int(item))\r\n            except:\r\n                print('Give a proper integer')\r\n        \r\n        if len(t)==3:\r\n            if t[0]==0 or t[1]==0 or t[2]==0:\r\n                print('0 is not allowed')\r\n            elif t[0]==t[1] or t[0]==t[2]:\r\n                print('Repetition is not allowed')\r\n            elif t[1]==t[2]:\r\n                print('Repetition is not allowed')\r\n            else:\r\n                countc=0\r\n                countb=0\r\n                if t[0]==l[0]:\r\n                    countb+=1\r\n                if (t[0])==l[1] or (t[0])==l[2]:\r\n                    countc+=1\r\n                if (t[1])==l[1]:\r\n                    countb+=1\r\n                if (t[1])==l[0] or (t[1])==l[2]:\r\n                    countc+=1\r\n                if (t[2])==l[2]:\r\n                    countb+=1\r\n                if (t[2])==l[1] or (t[2])==l[0]:\r\n                    countc+=1\r\n                if t[0]!=l[0] and t[0]!=l[1] and t[0]!=l[2] and t[1]!=l[0] and t[1]!=l[1] and t[1]!=l[2] and t[2]!=l[0] and t[2]!=l[1] and t[2]!=l[2]:\r\n                    print(\"Bad luck, nothing matched, try again\")\r\n                print (countc,'cows')\r\n                print (countb,'bulls')\r\n                if countb==3:\r\n                    print ('Bravo!! 
You are awesome')\r\n        else:\r\n            print('Give a three digit number')","repo_name":"PradeepNalluri/Cows-and-Bulls","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23932735394","text":"import os\ndef checkIfArmstrong(num):\n\tc = int(num)\n\ttotal = 0\n\tstrnum = str(num)\n\tlength = len(strnum)\n\t# sum each digit raised to the number of digits\n\twhile c > 0:\n\t\td = c % 10\n\t\ttotal += d ** length\n\t\tc = c // 10\n\tif num == total:\n\t\tprint(num, \" is an Armstrong number.\")\n\telse:\n\t\tprint(num, \" is not an Armstrong number.\")\n\treturn\nnumber = int(input(\"Number to check if Armstrong number: \"))\ncheckIfArmstrong(number)\nos.system(\"pause\")","repo_name":"dev-arthur-g20r/how-to-code-together-using-python","sub_path":"How to Code Together using Python/EVENT DRIVEN PYTHON/armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"23979974416","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nsensorList = [4, 8, 16, 32, 64, 128, 256, 512]\r\n# sensorList = [4, 8, 16, 32]\r\n\r\nfig, ax = plt.subplots(1, 1)\r\n\r\nmaxVals = []\r\navgVals = []\r\nminVals = []\r\n\r\nfileA = \"send_time.csv\"\r\nfileB = \"recv_time.csv\"\r\n\r\noutDir = \"/mnt/c/Users/PaulA/Desktop/Journal-2/submission_02-01-18/data/OUT_600sec_error256/out-\"\r\n\r\nfor s in sensorList:\r\n\r\n    # dataDir = \"../Testing/out-\" + str(s) + \"/\"\r\n    # dataDir = \"../../../out-60SEC-test/out-\" + str(s) + \"/\"\r\n\r\n    dataDir = outDir + str(s) + \"/\"\r\n    print(dataDir)\r\n\r\n    A = np.loadtxt(dataDir + fileA, delimiter=\",\")\r\n    B = np.loadtxt(dataDir + fileB, delimiter=\",\")\r\n\r\n    x = B[:,0] - A[:,0]\r\n    y = B[:,1] - A[:,1]\r\n    z = B[:,2] - A[:,2]\r\n\r\n    N = len(x)\r\n    t = np.arange(1, N+1, 1)\r\n\r\n    #=====\r\n    # compute the time difference in seconds\r\n    # columns are HH:MM:SS.ssssss, so hours and minutes are scaled to seconds\r\n    #=====\r\n    e = np.zeros(N)\r\n    for i in range(0, N):\r\n    \tx[i] = float( x[i] * 3600.0 )\r\n    \ty[i] = float( y[i] * 60.0 )\r\n    \te[i] = float( x[i] + y[i] + z[i] )\r\n\r\n    a = np.mean(e, dtype=np.float64)\r\n    f = a * np.ones(N)\r\n\r\n    b = np.max(e)\r\n    c = np.min(e)\r\n\r\n    avgVals.append(a * 1000.0)  # convert to ms\r\n    maxVals.append(b * 1000.0)  # convert to ms\r\n    minVals.append(c * 1000.0)  # convert to ms\r\n\r\n    # the error \"e\" is in [sec]\r\n    e_ms = 1000.0 * e\r\n    f_ms = 1000.0 * f\r\n    \r\n    testName = str(s) + \"-sensors\"\r\n    testNameAvg = testName + \"-avg\"\r\n    ax.semilogy(t, e_ms, 'o-', label=testName)\r\n    ax.semilogy(t, f_ms, '--', label=testNameAvg)\r\n\r\nfig.legend(loc=2)\r\n\r\n\r\nf2, ax2 = plt.subplots(1,1)\r\n\r\nax2.loglog(sensorList, avgVals, 'k-')\r\nax2.loglog(sensorList, maxVals, 'bx--')\r\nax2.loglog(sensorList, minVals, 'bo--')\r\n\r\n# ax2.semilogx(sensorList, avgVals, 'k-')\r\n# ax2.semilogx(sensorList, maxVals, 'bx--')\r\n# ax2.semilogx(sensorList, minVals, 'bo--')\r\n\r\nax2.set_xlim([10**0, 10**3])\r\nax2.set_ylim([10**-1, 10**1])\r\n\r\nax2.set_xlabel(\"Number of Sensors\")\r\nax2.set_ylabel(\"Computing Time per Measurement [ms]\")\r\n\r\nplt.show()\r\n","repo_name":"UMcompute/RTFM","sub_path":"PostProcess/OLD_compare_timing.py","file_name":"OLD_compare_timing.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"31745380257","text":"from __future__ import annotations\n\nfrom zero_agent import ZeroAgent\nimport pommerman\nimport settings\nimport numpy as np\n\n\nclass Evaluator:\n    def __init__(self, best_net, new_net, num_games, num_simulations):\n        \"\"\"\n        Initialize the evaluation component\n        :param best_net: the current best network\n        :param new_net: the newly trained challenger network\n        :param num_games: Number of games played in each evaluation\n        :param num_simulations: Number of MCTS simulations to select each move\n        \"\"\"\n        self._net_1 = best_net\n        self._net_2 = new_net\n        self._num_games = num_games\n        self._env = pommerman.make(\n            settings.game_config_id,\n            [\n                ZeroAgent(best_net, num_simulations=num_simulations, is_self_play=False, num_exploration_steps=0),\n                ZeroAgent(new_net, num_simulations=num_simulations, is_self_play=False, num_exploration_steps=0),\n            ]\n        )\n\n    def start(self):\n        \"\"\"Start evaluation and return the win counts of the two players\"\"\"\n        win_count = np.zeros(2)\n        for i in range(self._num_games):\n\n            state = self._env.reset()\n            done = False\n            reward = None\n            while not done:\n                # print('[Evaluation] Step %d' % self._env._step_count)\n                actions = self._env.act(state)\n                state, reward, done, info = self._env.step([a.value for a in actions])\n            if reward[0] == settings.win_reward and reward[1] == settings.lose_reward:\n                win_count[0] += 1\n            elif reward[1] == settings.win_reward and reward[0] == settings.lose_reward:\n                win_count[1] += 1\n\n        return win_count\n\n","repo_name":"terryzhao127/pommerman-implementations","sub_path":"AlphaGoZero/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"38268377471","text":"import sys\r\nfrom PyQt5 import QtWidgets, uic\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel\r\nfrom PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo\r\nfrom PyQt5.QtCore import QIODevice\r\nfrom pyqtgraph import PlotWidget\r\nimport pyqtgraph as pg\r\n\r\nclass Window(QMainWindow):\r\n    def __init__(self):\r\n        super(Window, self).__init__()\r\n\r\n        uic.loadUi(\"terminal.ui\", self)\r\n        self.setWindowTitle('GUI')\r\n        \r\n        self.serial = QSerialPort()\r\n        self.serial.setBaudRate(115200)\r\n        portList = []\r\n        portType = []\r\n        ports = QSerialPortInfo().availablePorts()\r\n        for port in ports:\r\n            portList.append(port.portName())\r\n            portType.append(port.description())\r\n\r\n        print(portList)\r\n        print(portType)\r\n        self.comL.addItems(portList)\r\n        self.OpenB.clicked.connect(self.onOpen)\r\n        self.CloseB.clicked.connect(self.onClose)\r\n        self.serial.readyRead.connect(self.onRead)\r\n        self.LED_blue.stateChanged.connect(self.ledControl_blue)\r\n        self.LED_red.stateChanged.connect(self.ledControl_red)\r\n        self.LED_green.stateChanged.connect(self.ledControl_green)\r\n        self.listX = []\r\n        self.listY = []\r\n        for i in range(100): self.listX.append(i)\r\n        for i in range(100): self.listY.append(0)\r\n\r\n    def onOpen(self):\r\n        self.serial.setPortName(self.comL.currentText())\r\n        self.serial.open(QIODevice.ReadWrite)\r\n        print('Serial port ' + self.serial.portName() + ' opened')\r\n\r\n    def onClose(self):\r\n        self.serial.close()\r\n        print('Serial port ' + self.serial.portName() + ' closed')\r\n\r\n    def onRead(self):\r\n        rx = self.serial.readLine()\r\n        rxs = str(rx, 'utf-8').strip()\r\n        data = rxs.split('.')\r\n        self.listY = self.listY[1:]\r\n        self.listY.append(int(data[0]))\r\n        self.graph.clear()\r\n        self.graph.plot(self.listX, self.listY)\r\n\r\n\r\n    def serialSend(self, data):\r\n        txs = data\r\n        
self.serial.write(txs)\r\n\r\n    def ledControl_blue(self, val):\r\n        message = b'\\xAB\\xA1\\x01'\r\n        self.serialSend(message)\r\n\r\n    def ledControl_red(self, val):\r\n        message = b'\\xAB\\xA2\\x01'\r\n        self.serialSend(message)\r\n\r\n    def ledControl_green(self, val):\r\n        message = b'\\xAB\\xA3\\x01'\r\n        self.serialSend(message)\r\n\r\ndef application():\r\n    app = QApplication(sys.argv)\r\n    window = Window()\r\n\r\n    window.show()\r\n    sys.exit(app.exec())\r\n\r\napplication()","repo_name":"igor234515/STM32","sub_path":"Terminal/Terminal.py","file_name":"Terminal.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71765591254","text":"import serial\nimport time\nimport json\nfrom datetime import date\nfrom datetime import datetime\nimport paho.mqtt.client as paho\nbroker=\"192.168.5.12\"\nport=1883\ntopic = str(\"solar/wr/power\")\n\nser = serial.Serial()\nser.baudrate = 2400\nser.port = '/dev/ttyUSB0'\nser.bytesize = 8\nser.parity = 'N'\nser.stopbits = 1\nser.open()\n\ndata = {}\n\ndef on_publish(client,userdata,result):          #create function for callback\n    print(\"mqtt published\")\n    pass\n\n\nclient1= paho.Client(\"wr0\")                           #create client object\nclient1.on_publish = on_publish                          #assign function to callback\nclient1.connect(broker,port)                                 #establish connect\n\nsensor_topic_template = 'homeassistant/sensor/wr/*/config'\nsensor_value_template = \\\n    '{\\\n    \"name\"          :\"WR *\",\\\n    \"unique_id\"     :\"wr_#_*\",\\\n    \"state_topic\"   :\"solar/wr/#\",\\\n    \"value_template\":\"{{ value_json.*}}\",\\\n    \"device\"        :{\"name\":\"wr\",\"ids\":\"wr\",\"cu\":\"http://solaranzeige.fritz.box\",\"mf\":\"FSP\",\"mdl\":\"PowerManager-Hybrid 10kW\",\"sw\":\"00000001\"},\\\n    \"expire_after\":45,\\\n    \"state_class\":\"measurement\"\\\n    &\\\n    }'\n    \nparameters = ',\\\n    \"device_class\": \"power\",\\\n    \"unit_of_measurement\": \"W\",\\\n    \"icon\": \"mdi:speedometer\",\\\n    \"min\": -10000, \"max\": 10000'\n\nret = client1.publish( str(sensor_topic_template).replace(\"*\",\"DefFeedInPow\"), \\\n                      (str(sensor_value_template).replace(\"*\",\"DefFeedInPow\")).replace(\"#\",\"power\").replace(\"&\",parameters))\n\nret = client1.publish( str(sensor_topic_template).replace(\"*\",\"ActPvPow\"), \\\n                      (str(sensor_value_template).replace(\"*\",\"ActPvPow\")).replace(\"#\",\"power\").replace(\"&\",parameters))\n \nret = client1.publish( str(sensor_topic_template).replace(\"*\",\"ReservPow\"), \\\n                      (str(sensor_value_template).replace(\"*\",\"ReservPow\")).replace(\"#\",\"power\").replace(\"&\",parameters)) \n\nret = client1.publish( str(sensor_topic_template).replace(\"*\",\"ActFeedPow\"), \\\n                      (str(sensor_value_template).replace(\"*\",\"ActFeedPow\")).replace(\"#\",\"power\").replace(\"&\",parameters)) \n\nparameters = ',\\\n    \"unit_of_measurement\": \"%\",\\\n    \"icon\": \"mdi:speedometer\",\\\n    \"min\": 0, \"max\": 150'\n\nret = client1.publish( str(sensor_topic_template).replace(\"*\",\"LoadFactor\"), \\\n                      (str(sensor_value_template).replace(\"*\",\"LoadFactor\")).replace(\"#\",\"power\").replace(\"&\",parameters))\n\nprevious = datetime.now()\nnow = datetime.now()\n\nwhile True:\n\tser_bytes = ser.read_until(b'\\r')\n\tnow = datetime.now()\t\n\tstring = str(ser_bytes)\n\ttxt = string.split(\",\")\n#\tprint (len(txt),txt[0]) \n\tif(len(txt) == 6):\n\t\tvalue = '{'+\\\n\t\t\t'\"Time\": '+'\"'+str(now).replace(\" \", 
\"T\")+'\"'+','+\\\n\t\t\t'\"DefFeedInPow\": ' +str(int(txt[1]))+','+\\\n\t '\"ActPvPow\": '+str(int(txt[2]))+','+\\\n\t '\"ActFeedPow\": ' +str(int(txt[3]))+','+\\\n\t '\"LoadFactor\": '+str(float(int(txt[2])) /float(int(txt[4])))+','+\\\n\t '\"ReservPow\": ' +str(int(txt[4]))+\\\n\t '}'\n\t \n\t \t\t\t \n\t\tif ((now-previous).total_seconds() >= 5): # WR offers values after 1..7s - stabilize a bit bei rejecting \n\n\t\t\tret = client1.publish(topic, value)\n#\t\t\tprint (txt[0], value, txt[5]) \n\t\t\tprevious = now\nser.close()\n\n","repo_name":"plewka/PylontechUSX00C_CAN2MQTT","sub_path":"fsp_pip_p17.py","file_name":"fsp_pip_p17.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"972006357","text":"from django.conf.urls import url, include\nfrom django.views.generic.base import RedirectView\nfrom . import views\nfrom . import nginx\nfrom . import httpdns\n\nurlpatterns = [\n url('^getDns$', httpdns.GetDns, name='GetDns'),\n url('^updaterecord$', views.UpdateRecord, name='UpdateRecord'),\n \n #cloudflare\n url('^cloudflare/index$', views.Index, name='Index'),\n url('^cloudflare/get_product_records$', views.GetProductRecords, name='GetProductRecords'),\n url('^cloudflare/get_zone_records$', views.GetZoneRecords, name='GetZoneRecords'),\n url('^cloudflare/create_records$', views.CreateRecords, name='CreateRecords'),\n url('^cloudflare/update_records$', views.UpdateRecords, name='UpdateRecords'),\n url('^cloudflare/delete_records$', views.DeleteRecords, name='DeleteRecords'),\n url('^cloudflare/update_api_route$', views.UpdateApiRoute, name='UpdateApiRoute'),\n url('^cloudflare/get_api_route$', views.GetApiRoute, name='GetApiRoute'),\n\n #dnspod\n url('^dnspod/index$', views.DndpodIndex, name='DndpodIndex'),\n url('^dnspod/get_product_records$', views.GetDnspodProductRecords, name='GetDnspodProductRecords'),\n url('^dnspod/get_zone_records$', views.GetDnspodZoneRecords, name='GetDnspodZoneRecords'),\n url('^dnspod/create_records$', views.CreateDnspodRecords, name='CreateDnspodRecords'),\n url('^dnspod/update_records$', views.UpdateDnspodRecords, name='UpdateDnspodRecords'),\n url('^dnspod/delete_records$', views.DeleteDnspodRecords, name='DeleteDnspodRecords'),\n \n #config nginx\n url('^nginx$', nginx.Nginx, name='Nginx'),\n\n]","repo_name":"sadwebing/phx_web_python37","sub_path":"dns/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16658980625","text":"from math import floor\r\n'''\r\nProblem 9:\r\n\r\nA Pythagorean triplet is a set of three natural numbers, a b c, for which,\r\n\r\na**2 + b**2 = c**2\r\n\r\nFor example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.\r\n\r\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\r\nFind the product abc.\r\n\r\n'''\r\n\r\ns = 1000\r\na = 3\r\n\r\nfor a in range(3, floor((s-3)/3)):\r\n for b in range(a+1, floor((s-1-a)/2)):\r\n c = s-a-b\r\n if c*c == a*a + b*b:\r\n print(a, b, c)\r\n print ('Product: ' + str(a*b*c)) # 31875000\r\n ","repo_name":"klbinns/project-euler-solutions","sub_path":"Solutions/Problem09.py","file_name":"Problem09.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6791597948","text":"from os import get_terminal_size\r\nimport string\r\nfrom icecream import ic\r\n\r\n### DO NOT 
MODIFY THIS FUNCTION ###\r\ndef load_words(file_name):\r\n print('Loading word list from file...')\r\n in_file = open(file_name, 'r')\r\n line = in_file.readline()\r\n word_list = line.split()\r\n print(' ', len(word_list), 'words loaded.')\r\n in_file.close()\r\n return word_list\r\n\r\n### DO NOT MODIFY THIS FUNCTION ###\r\ndef is_word(word_list, word):\r\n word = word.lower()\r\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\r\n return word in word_list\r\n\r\n### DO NOT MODIFY THIS FUNCTION ###\r\ndef get_story_string():\r\n f = open(\"story.txt\", \"r\")\r\n story = str(f.read())\r\n f.close()\r\n return story\r\n\r\nWORDLIST_FILENAME = 'words.txt'\r\n\r\nclass Message(object):\r\n ### DO NOT MODIFY THIS METHOD ###\r\n def __init__(self, text):\r\n self.message_text = text\r\n self.valid_words = load_words(WORDLIST_FILENAME)\r\n\r\n ### DO NOT MODIFY THIS METHOD ###\r\n def get_message_text(self):\r\n return self.message_text\r\n\r\n ### DO NOT MODIFY THIS METHOD ###\r\n def get_valid_words(self):\r\n return self.valid_words[:]\r\n \r\n def build_shift_dict(self, shift):\r\n\r\n self.shift_dict = {}\r\n sets_to_shift = (string.ascii_lowercase, string.ascii_uppercase)\r\n for alpha_set in sets_to_shift:\r\n for letter in alpha_set:\r\n try:\r\n shifted_letter = alpha_set[alpha_set.index(letter) + shift]\r\n except ValueError:\r\n pass\r\n except IndexError:\r\n offset = alpha_set.index(letter) + shift - len(alpha_set)\r\n shifted_letter = alpha_set[offset]\r\n self.shift_dict[letter] = shifted_letter\r\n return self.shift_dict\r\n \r\n def get_shift_dict(self):\r\n return self.shift_dict\r\n\r\n def apply_shift(self, shift):\r\n self.build_shift_dict(shift)\r\n message = list(self.get_message_text())\r\n shifted_message = []\r\n for letter in message:\r\n try:\r\n shifted_message.append(self.get_shift_dict()[letter])\r\n except KeyError:\r\n shifted_message.append(letter)\r\n shifted_message = \"\".join(shifted_message)\r\n return shifted_message\r\n\r\nclass PlaintextMessage(Message):\r\n def __init__(self, text, shift):\r\n Message.__init__(self,text)\r\n self.shift = int(shift)\r\n self.encrypting_dict = Message.build_shift_dict(self,shift)\r\n self.message_text_encrypted = Message.apply_shift(self,shift)\r\n\r\n def get_shift(self):\r\n return self.shift\r\n\r\n def get_encrypting_dict(self):\r\n encrypted_dict_copy = self.encrypting_dict.copy()\r\n return encrypted_dict_copy\r\n\r\n def get_message_text_encrypted(self):\r\n return self.message_text_encrypted\r\n\r\n def change_shift(self, shift):\r\n self.shift = int(shift)\r\n self.encrypting_dict = Message.build_shift_dict(self,shift)\r\n self.message_text_encrypted = Message.apply_shift(self,shift)\r\n\r\nclass CiphertextMessage(Message):\r\n def __init__(self, text):\r\n Message.__init__(self,text)\r\n\r\n def decrypt_message(self):\r\n best_shift = None\r\n best_message = \"\"\r\n highest_real_words = 0\r\n for s in range(26):\r\n message = self.get_message_text()\r\n decrypted_message = self.apply_shift(s)\r\n decrypted_message_list = decrypted_message.split()\r\n real_words = 0\r\n for word in decrypted_message_list:\r\n if is_word(self.get_valid_words(), word):\r\n real_words += 1\r\n if real_words > highest_real_words:\r\n highest_real_words = real_words\r\n best_shift = s\r\n best_message = decrypted_message\r\n return best_shift, best_message\r\n\r\ndef decrypt_story():\r\n\r\n story = CiphertextMessage(get_story_string())\r\n return 
story.decrypt_message()\r\n\r\nic(decrypt_story())\r\n","repo_name":"ozervesh/MIT-6.001x","sub_path":"0. Problem Sets/pset5/ps6 without comments.py","file_name":"ps6 without comments.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27705627169","text":"import logging\nlogger = logging.getLogger(\"geolife.user\")\nimport os\nfrom raw import pltfile\nfrom collections import defaultdict\nfrom raw.record import RawRecord\nfrom raw.record import GeoLifeUser\nfrom utils import datetimerange\nfrom sqlalchemy import update\n\n\ndef from_directory(directory):\n  for d in os.listdir(directory):\n    if os.path.isdir(os.path.join(directory, d)):\n      logger.debug(\"Yielding user {0}\".format(d))\n      yield GeoLifeUserFromFile(user_id=d, directory=directory)\n\n\nclass BaseGeoLifeUser:\n  def __init__(self):\n    self.id = None\n\n  def __str__(self):\n    return \"<{0}(id={1}, size={2})>\".format(\n      self.__class__.__name__,\n      self.id,\n      len(self)\n    )\n  __repr__=__str__\n  \n  def __iter__(self):\n    raise NotImplementedError(\n      \"Class {0} must implement the __iter__() method.\".format(\n      self.__class__.__name__\n    ))\n\n  def __len__(self):\n    raise NotImplementedError(\n      \"Class {0} must implement the __len__() method.\".format(\n      self.__class__.__name__\n    ))\n\n  def __bool__(self):\n    raise NotImplementedError(\n      \"Class {0} must implement the __bool__() method.\".format(\n      self.__class__.__name__\n    ))\n\n\nclass GeoLifeUserFromFile(BaseGeoLifeUser):\n  def __init__(self, user_id, directory):\n    self.id = int(user_id)\n    self.files = pltfile.load_from_directory(\n      directory=os.path.join(directory, user_id),\n      user=self\n    )\n    self.num_records = 0\n\n  def to_DB(self):\n    return GeoLifeUser(id=self.id, count=self.num_records)\n\n\n\"\"\"\nclass GeoLifeUserFromDB(BaseGeoLifeUser):\n  def __init__(self):\n    BaseGeoLifeUser.__init__(self)\n    self.records = []\n    self.linked_list = None\n    self.record_ptr = None\n    self.synthesized_records = []\n    self.modified_records = []\n\n  def add(self, record):\n    if self.id is None:\n      self.id = record.user\n    self.records.append(record)\n\n  def sort(self):\n    self.records.sort(key=lambda r: r.datetime)\n\n  def __len__(self):\n    return len(self.records)\n\n  def __iter__(self):\n    for i in self.records:\n      yield i\n\n  def __bool__(self):\n    return bool(self.records)\n\n  def __getitem__(self, key):\n    return self.records[key]\n\n  def homogenizeTimeDeltas(self, start, end, delta, session):\n    current = self.linked_list\n    reference_record = current.record\n    c_datetime = current.record.datetime\n    for d in datetimerange(start, end+delta, delta):\n      logger.debug(\"=\"*80)\n      logger.debug(\"Current record ptr: {0}\".format(current.record))\n      logger.debug(\"Homogenized date time: {0}\".format(d))\n      lower_bound = d-delta\n      upper_bound = d+delta\n      logger.debug(\"Looking between window of {0} to {1}\".format(lower_bound, upper_bound))\n      # Three possible states could be encountered.\n      #   1. The current record is within a window\n      #      surrounding d\n      #      e.g. d-delta <= c.datetime < d+delta\n      if lower_bound < c_datetime < upper_bound:\n        logger.debug(\"Current record falls within window! Modifying...\")\n        if lower_bound < c_datetime <= d:\n          logger.debug(\"Current record occurs within previous window. 
We must\"\n \" delete all nodes except the one less than or equal to\"\n \" {0}\".format(d))\n # Find the most recent record that is closest to the target date d\n start = current\n while current.next is not None and current.record.datetime <= d:\n logger.debug(\"Delete {0}\".format(current.record))\n current = current.next\n \n if current.next is None:\n pass\n\n elif current.record.datetime == d:\n pass\n\n else:\n logger.debug(\"Don't delete {0}!!!\".format(current.record))\n current = current.prev\n\n # Remove all nodes between current (inclusive) and searcher (exclusive)\n if start != current:\n start.removeSegmentEndingAt(current)\n start = None\n logger.debug(\"Searching for record less than or equal to {0} found {1}\".format(\n d, current\n ))\n\n else:\n logger.debug(\"Current record occurs within next window.\")\n\n # Adjust the current element's datetime.\n logger.debug(current.record)\n logger.debug(\"... to ...\")\n current.record.datetime = d\n current.record.date = d.date()\n current.record.time = d.time()\n logger.debug(current.record)\n #self.modified_records.append(current.record)\n\n # Move forward if possible\n reference_record = current.record\n if current.next is not None:\n logger.debug(\"Moving forward to {0}\".format(current.next.record))\n current = current.next\n\n else:\n logger.debug(\"Cannot move forward! current.next points to None.\")\n c_datetime = current.record.datetime\n\n else:\n logger.debug(\"Current record falls outside window.\")\n logger.debug(\"Generating a new record with current timestamp.\")\n logger.debug(\"Base record: {0}\".format(reference_record))\n modified_record = RawRecord(\n user=reference_record.user,\n latitude=reference_record.latitude,\n longitude=reference_record.longitude,\n datetime=d,\n weekday=d.weekday(),\n )\n logger.debug(\"Modified record: {0}\".format(modified_record))\n self.synthesized_records.append(modified_record)\n\n # 2. The current record is before this window\n # e.g. c.datetime < d-delta\n if c_datetime <= lower_bound:\n logger.debug(\"Current record is behind!\")\n logger.debug(\"Modified record will be added after current\")\n logger.debug(\"Current record: {0}\".format(current.record))\n logger.debug(\" .next := {0}\".format(current.next))\n logger.debug(\" .prev := {0}\".format(current.prev))\n current.insertAfter(modified_record)\n current = current.next\n c_datetime = current.record.datetime\n\n # 3. The current record is after this window\n # e.g d+delta <= c.datetime\n elif upper_bound <= c_datetime:\n logger.debug(\"Current record is ahead!\")\n logger.debug(\"Modified record will be added before current\")\n logger.debug(\"Current record: {0}\".format(current.record))\n logger.debug(\" .next := {0}\".format(current.next))\n logger.debug(\" .prev := {0}\".format(current.prev))\n current.insertBefore(modified_record)\n\n\n else:\n logger.error(\"Uh... 
something went wrong.\")\n\n # Verify previous node is ahead of current node by delta\n if current.prev.prev is not None:\n assert (current.prev.record.datetime - current.prev.prev.record.datetime) == delta, \"Previous record {0} is not {1} ahead of {2}\".format(\n current.prev.prev.record,\n delta,\n current.prev.record,\n )\n\n\n if len(self.synthesized_records) > 10000:\n self.__commit_synthesized_records(session)\n #if len(self.modified_records) > 10000:\n # self.__commit_modified_records(session)\n\n self.__commit_synthesized_records(session)\n #self.__commit_modified_records(session)\n\n def __commit_synthesized_records(self, session):\n if self.synthesized_records:\n logger.info(\"Adding synthesized records for {0}\".format(self))\n #session.add_all(self.synthesized_records)\n #session.commit()\n del self.synthesized_records[:]\n\n def is_time_homogenized(self):\n logger.info(\"Verifying time homogenization for {0}\".format(self))\n current = self.linked_list\n expected_delta = current.getTimeDeltaWithNextNode()\n\n while current.next.next is not None:\n current = current.next\n actual_delta = current.getTimeDeltaWithNextNode()\n\n if expected_delta != actual_delta:\n c = current\n for i in range(5):\n logger.debug(\" \"*80 + str(c.record))\n logger.debug(\" \"*90 + \"| next\")\n logger.debug(\" \"*90 + \"v\")\n c = c.next\n if c is None:\n break\n logger.debug(\"#\"*100)\n c = current\n for i in range(5):\n logger.debug(\" \"*80 + str(c.record))\n logger.debug(\" \"*90 + \"| prev\")\n logger.debug(\" \"*90 + \"v\")\n c = c.prev\n if c is None:\n break\n logger.error(\"Following records do not have expected time delta of\"\n \" {0}\\n\\t{1}\\n\\t{2}\".format(\n expected_delta,\n current.record,\n current.next.record\n ))\n \n return False\n\n return True\n\n def getExtent(self):\n return self.linked_list.extent\n\n def getRecordOn(self, timestamp):\n #Assume the timestamps are incrementing upward at a regular interval\n assert self.record_ptr.record.datetime == timestamp, (\n \"Record {0} does not have expected timestamp of {1}\".format(\n self.record_ptr.record, timestamp\n ))\n r = self.record_ptr.record\n self.record_ptr = self.record_ptr.next\n r.node_address = self.address\n return r\n\n def verifyLinkListPointsToTrueHead(self):\n head = self.linked_list\n while head.prev is not None:\n head = head.prev\n self.linked_list = head\n self.record_ptr = head\n\n def setAddress(self, addr):\n self.address = addr\n\n\ndef from_Query(query, normalize_ids):\n # The query provided may have user IDs that are not consecutive. For\n # instance, user IDs 3, 4, 9, 21, etc. 
If normalize_ids is True,\n  # then these IDs will be mapped to consecutive IDs: 0, 1, 2, ...\n\n  # Split up the results by user\n  users = defaultdict(GeoLifeUserFromDB)\n  for record in query:\n    users[record.user].add(record)\n\n  users_list = users.values()\n  i = 0\n  for u in users_list:\n    u.sort()\n    u.link_listify_records()\n\n    if normalize_ids:\n      logger.info(\"{0} will have address {1}\".format(u, i))\n      u.setAddress(i)\n      i += 1\n\n  return users_list\n\"\"\"\n","repo_name":"dogmgeen/GeoLifeReader","sub_path":"raw/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"42730422972","text":"import argparse\n\nimport skorch\nimport torch\nfrom torch.autograd import Variable\n\nimport data\nimport model\nimport net\n\nparser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')\nparser.add_argument('--data', type=str, default='./data/penn',\n                    help='location of the data corpus')\nparser.add_argument('--bptt', type=int, default=35,\n                    help='sequence length')\nparser.add_argument('--seed', type=int, default=1111,\n                    help='random seed')\nparser.add_argument('--no-cuda', dest='cuda', action='store_false',\n                    help='use CUDA')\nparser.add_argument('--checkpoint', type=str, default='./model.pt',\n                    help='model checkpoint to use')\nparser.add_argument('--outf', type=str, default='generated.txt',\n                    help='output file for generated text')\nparser.add_argument('--temperature', type=float, default=1.0,\n                    help='temperature - higher will increase diversity')\nparser.add_argument('--words', type=int, default='1000',\n                    help='number of words to generate')\nparser.add_argument('--log-interval', type=int, default=100,\n                    help='reporting interval')\n\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\n\ncorpus = data.Corpus(args.data)\nntokens = len(corpus.dictionary)\ndevice = 'cuda' if args.cuda else 'cpu'\n\nnet = net.Net(\n    module=model.RNNModel,\n    batch_size=1,\n    device=device,\n    module__rnn_type='LSTM',\n    module__ntoken=ntokens,\n    module__ninp=200,\n    module__nhid=200,\n    module__nlayers=2)\nnet.initialize()\nnet.load_params(args.checkpoint)\n\nhidden = None\ninput = skorch.utils.to_tensor(torch.rand(1, 1).mul(ntokens).long(),\n                               device=device)\n\nwith open(args.outf, 'w') as outf:\n    for i in range(args.words):\n        word_idx, hidden = net.sample(\n            input=input,\n            temperature=args.temperature,\n            hidden=hidden)\n        input = skorch.utils.to_tensor(\n            torch.LongTensor([[word_idx]]),\n            device=device)\n\n        word = corpus.dictionary.idx2word[word_idx]\n        outf.write(word + ('\\n' if i % 20 == 19 else ' '))\n\n        if i % args.log_interval == 0:\n            print('| Generated {}/{} words'.format(i, args.words))\n","repo_name":"skorch-dev/skorch","sub_path":"examples/word_language_model/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":5403,"dataset":"github-code","pt":"67"} +{"seq_id":"20272500665","text":"import sqlite3\nimport pandas as pd\nimport numpy as np\nimport math\n\n\"\"\"\nThis module builds up the vacancy data and saves it to the database\n\"\"\"\n\ndef none_to_nan(num):\n    \"\"\"\n    Converts None to NaN\n    Args:\n        num (float or None): the original number\n\n    Returns:\n        (float): either NaN or the number\n    \"\"\"\n    if num is None:\n        return np.NAN\n    else:\n        return num\n\ndef to_int(num):\n    \"\"\"\n    Converts NaN to None, or takes the integer part of the number\n    
Args:\n        num (NaN or float): the original number\n\n    Returns:\n        (None or int): either None or an integer\n    \"\"\"\n    if math.isnan(num):\n        return None\n    else:\n        return int(num)\n\ndf = pd.read_csv('vacancies_dif_currencies.csv')\nprint('Vacancies file loaded')\ndf.salary_from = df[['salary_from', 'salary_to']].mean(axis=1)\ndf['published_at'] = df.published_at.apply(lambda z: z[:10])\ndf['date'] = df.published_at.apply(lambda z: z[:7])\ncurrency_to_num = {\n    'BYR': 2, 'EUR': 3, 'KZT': 4, 'UAH': 5, 'USD': 6, 'UZS': 7, 'KGS': 8, 'AZN': 9, 'GEL': 10\n}\n\nwith sqlite3.connect('Chaganov.db') as con:\n    print('Database loaded')\n    cursor = con.cursor()\n    cursor.execute('DROP TABLE IF EXISTS salary')\n    cursor.execute('''CREATE TABLE salary (\n    name TEXT,\n    salary INTEGER,\n    area_name TEXT,\n    published_at TEXT\n    )''')\n    df['salary_from'] = df.apply(\n        lambda x: to_int(float(x['salary_from'] * none_to_nan(cursor.execute(f'SELECT * FROM currency WHERE date = \"{x[\"date\"]}\"').fetchone()[currency_to_num[x['salary_currency']]])))\n        if (x['salary_currency'] != 'RUR' and not np.isnan(x['salary_from']))\n        else x['salary_from'], axis=1\n    )\n    df = df.drop(['salary_to', 'date', 'salary_currency'], axis=1).rename(columns={'salary_from': 'salary'})\n    df.to_sql(name='salary', con=con, if_exists='append', index=False, index_label=False)\n\n    # cursor.execute('ALTER TABLE salary ADD COLUMN salary INTEGER')\n    # cursor.execute('UPDATE salary SET salary = CAST(salary_from as INTEGER)')\n    # cursor.execute('ALTER TABLE salary DROP COLUMN salary_from')","repo_name":"RomanChaganov/UrFU_Python_Elearn","sub_path":"task3.5.2.py","file_name":"task3.5.2.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8418125276","text":"# -*- coding: utf-8 -*-\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.apps import apps\n\nfrom contrib.ipware import AccessIPAddress\nfrom exceptions import DobermanImproperlyConfigured\nfrom doberman import configuration\n\ndef get_doberman_model():\n    try:\n        return apps.get_model(configuration.doberman_model)\n    except ValueError:\n        raise DobermanImproperlyConfigured(\"DOBERMAN-MODEL must be of the form 'app_label.model_name'\")\n    except LookupError:\n        raise DobermanImproperlyConfigured(\n            \"DOBERMAN-MODEL refers to model '%s' that has not been installed: \" % configuration.doberman_model\n        )\n\n\nclass AccessAttempt(AccessIPAddress):\n    \"\"\"\n    Failed Access Attempt class\n    \"\"\"\n    max_failed_attempts = configuration.behavior.max_failed_attempts\n    block_login_seconds = configuration.behavior.lockout_time\n    template_name = configuration.iplockout_template\n\n    def __init__(self, request, response):\n        super(AccessAttempt, self).__init__()\n\n        if isinstance(request, WSGIRequest):\n            self.request = request\n        else:\n            self.request = request.request  # class-based view\n\n        self.response = response\n\n        self.ip = self.get_client_ip_address(self.request)\n\n        self.last_attempt_instance = None\n        self.username = self.request.POST.get(configuration.username_form_field, None)\n\n        self._FailedAccessAttemptModel = get_doberman_model()  # doberman supports custom models, see documentation\n\n    def get_last_failed_access_attempt(self, **kwargs):\n        \"\"\"\n        Return the last failed access attempt or None.\n        The model can be swapped out, but it must implement the method \"get_last_failed_access_attempt\".\n        \"\"\"\n\n        last_failed_access = 
self._FailedAccessAttemptModel.get_last_failed_access_attempt(\n            **kwargs\n        )\n\n        return last_failed_access\n\n    def check_failed_login(self):\n        \"\"\"\n        'Private' method: checks failed logins; used by the watch_login decorator\n        \"\"\"\n        last_attempt = self.get_last_failed_access_attempt()\n\n        if not last_attempt:\n            # create a new entry\n            user_access = self._FailedAccessAttemptModel(ip_address=self.ip)\n        elif last_attempt:\n            user_access = last_attempt\n\n        if self.request.method == 'POST':\n            if self.username is None:\n                raise DobermanImproperlyConfigured(\n                    \"Bad username form field, if you are using a custom field please configure: \"\n                    \"DOBERMAN_USERNAME_FORM_FIELD via settings.\"\n                )\n\n            if self.response.status_code != 302:\n\n                user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '')[:255]\n                user_access.username = self.username\n                user_access.failed_attempts += 1\n                user_access.params_get = self.request.GET\n                user_access.params_post = self.request.POST\n\n                if user_access.failed_attempts >= self.max_failed_attempts:\n                    user_access.is_locked = True\n\n                user_access.save()\n\n            elif self.response.status_code == 302 and not user_access.is_locked:\n                user_access.is_expired = True\n                user_access.save()\n\n        return user_access\n\n    def inspect(self):\n        \"\"\"\n        Inspect access attempt, used for the captcha flow\n        :return:\n        \"\"\"\n        last_attempt = self.get_last_failed_access_attempt(\n            ip_address=self.ip,\n            captcha_enabled=True,\n            captcha_passed=False,\n            is_expired=False\n        )\n\n        if last_attempt is None and not self.request.user.is_authenticated():\n            # create a new entry\n            user_access = self._FailedAccessAttemptModel(\n                ip_address=self.ip,\n                username=self.username,\n                captcha_enabled=True,\n                captcha_passed=False,\n                is_expired=False\n            )\n        elif last_attempt:\n            user_access = last_attempt\n\n        if self.request.method == 'POST':\n\n            if not self.request.user.is_authenticated():\n\n                user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '')[:255]\n                user_access.username = self.username\n                user_access.failed_attempts += 1\n                user_access.params_get = self.request.GET\n                user_access.params_post = self.request.POST\n\n                if user_access.failed_attempts >= self.max_failed_attempts:\n                    user_access.is_locked = True\n                user_access.save()\n\n        elif self.request.user.is_authenticated() and last_attempt:\n            last_attempt.is_expired = True\n            last_attempt.save()\n\n    def get_lockout_response(self):\n        \"\"\"\n        :return: the rendered lockout template response\n        \"\"\"\n\n        return render_to_response(\n            self.template_name,\n            {'user_attempts': self.last_attempt_instance,\n             'lockout_time': self.block_login_seconds,\n             'ip_address': self.ip\n             }, context_instance=RequestContext(self.request)\n        )","repo_name":"django-py/django-doberman","sub_path":"doberman/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"8124700155","text":"import sys\nimport os\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\nimport pygame\nimport random\nfrom time import sleep\nimport threading\nimport copy\n\n# Engine imports\nfrom engine.game import Game\nfrom engine.players import Player\nfrom engine.structures import State\n\n# Local UI imports\nfrom UI.graphics import CardGraphics, CardBackGraphics\nfrom UI.button import Button\nfrom UI.card import UICard\n\nSCREEN_BACKGROUND_COLOR = (0, 100, 0)\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 800\n\nCARD_WIDTH = 80\nCARD_HEIGHT = 120\n\nPLAYER_POSITION_1 = (375, 50)\nPLAYER_POSITION_2 = (725, 
200)\nPLAYER_POSITION_3 = (725, 450)\nPLAYER_POSITION_4 = (375, 600)\nPLAYER_POSITION_5 = (25, 450)\nPLAYER_POSITION_6 = (25, 200)\n\nDECK_POSITION = (412, 350)\nTRUMP_POSITION = (512, 350)\n\nSPACE_BETWEEN_CARDS = 15\n\n\nclass BiscaGameUI:\n def __init__(self):\n # Initialize the game\n pygame.init()\n pygame.display.set_caption(\"BASIS Platform\")\n icon = pygame.image.load(\"UI/deck-gui/card-game.png\")\n pygame.display.set_icon(icon)\n\n # Set up the screen\n self.size = self.width, self.height = SCREEN_WIDTH, SCREEN_HEIGHT\n self.screen = pygame.display.set_mode(self.size)\n self.screen.fill(SCREEN_BACKGROUND_COLOR) # Use dark green color\n\n self.last_clicked = pygame.time.get_ticks()\n\n # Initial screen selection of agents for game\n self.agent_count = dict()\n\n # Keep active buttons\n self.buttons = []\n self.play_again_button = Button(400, 500, 200, 50, \"Play Again\", self.playAgain)\n self.player_takes_hand_text = \"\"\n\n # -----------------------------------------------------\n # ---------------------- Game -------------------------\n # -----------------------------------------------------\n\n self.game = Game()\n\n self.card_representations = dict()\n for card in self.game.deck.cards:\n uicard = UICard(\n name=card.__repr__(),\n rank=card.rank,\n suit=card.suit,\n filename=card.get_filename(),\n )\n uicard.graphics = CardGraphics(uicard)\n uicard.back_graphics = CardBackGraphics(uicard)\n self.card_representations[card] = uicard\n\n # Get all available agents\n agent_types = dict()\n agent_names = []\n for subclass in Player.__subclasses__():\n agent_types[subclass.__name__] = subclass\n agent_names.append(subclass.__name__)\n\n # Let the user select the players\n agent_count = self.showInitialScreen(agent_names)\n\n # Register the players\n player_count = 0\n self.human_game = False\n for agent in agent_count.keys():\n for playernr in range(agent_count[agent]):\n player = agent_types[agent](f\"Player {str(player_count + 1)} ({agent})\")\n if agent == \"Human\":\n self.human_game = True\n player.register_input_handler(self.getUserSelectedCard)\n self.game.add_player(player)\n player_count += 1\n\n # -----------------------------------------------------\n # ------------------- Positioning ---------------------\n # -----------------------------------------------------\n\n # Define the number of players and their positions\n self.num_players = player_count\n if self.num_players == 2:\n self.player_positions = [PLAYER_POSITION_1, PLAYER_POSITION_4]\n if self.num_players == 3:\n self.player_positions = [\n PLAYER_POSITION_1,\n PLAYER_POSITION_3,\n PLAYER_POSITION_5,\n ]\n if self.num_players == 4:\n self.player_positions = [\n PLAYER_POSITION_1,\n PLAYER_POSITION_2,\n PLAYER_POSITION_4,\n PLAYER_POSITION_5,\n ]\n if self.num_players == 5:\n self.player_positions = [\n PLAYER_POSITION_1,\n PLAYER_POSITION_2,\n PLAYER_POSITION_3,\n PLAYER_POSITION_4,\n PLAYER_POSITION_5,\n ]\n if self.num_players == 6:\n self.player_positions = [\n PLAYER_POSITION_1,\n PLAYER_POSITION_2,\n PLAYER_POSITION_3,\n PLAYER_POSITION_4,\n PLAYER_POSITION_5,\n PLAYER_POSITION_6,\n ]\n\n # Set up card positions for each player\n self.hand_positions = [\n [\n (position[0] + i * (CARD_WIDTH + SPACE_BETWEEN_CARDS), position[1])\n for i in range(3)\n ]\n for position in self.player_positions\n ]\n\n # Set up deck and trump positions\n self.deck_position = DECK_POSITION\n self.trump_position = TRUMP_POSITION\n\n # Keep track of available card buttons and the hands at the start of the trick\n self.cardbuttons = 
[]\n self.trick_hands = []\n\n self.game.player_pool.register_callback(self.drawCurrentStatus)\n self.game.start_match()\n self.startGame()\n\n # Displays the initial screen that allows the user to select\n # which and how many agents will play\n def showInitialScreen(self, agents):\n while self.game.state == State.INIT:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Handle button clicks\n for button in self.buttons:\n button.handle_event(event)\n\n # Draw background\n self.screen.fill(SCREEN_BACKGROUND_COLOR)\n\n self.buttons = [] # Initialize the list of buttons\n\n for agentnr in range(len(agents)):\n agent = copy.deepcopy(agents[agentnr])\n\n if agent not in self.agent_count.keys():\n self.agent_count[agent] = 0\n\n # Render and display the agent text\n self.drawText(agent, 500, 200 + agentnr * 40, alignment=\"right\")\n\n # Create and display the minus button\n minus_button = Button(\n 520 - 10,\n 200 + agentnr * 40 - 10,\n 20,\n 20,\n \"-\",\n lambda agent=agent: self.removeAgent(agent),\n backgroundcolor=SCREEN_BACKGROUND_COLOR,\n )\n minus_button.draw(self.screen)\n self.buttons.append(minus_button)\n\n # Render and display the agent count text\n font = pygame.font.Font(None, 24)\n agent_count_label = font.render(\n f\"{self.agent_count[agent]}\", True, pygame.Color(\"white\")\n )\n agent_count_rect = agent_count_label.get_rect(\n center=(535, 200 + agentnr * 40)\n )\n self.screen.blit(agent_count_label, agent_count_rect)\n\n # Create and display the plus button\n plus_button = Button(\n 542,\n 200 + agentnr * 40 - 10,\n 20,\n 20,\n \"+\",\n lambda agent=agent: self.addAgent(agent),\n backgroundcolor=SCREEN_BACKGROUND_COLOR,\n )\n plus_button.draw(self.screen)\n self.buttons.append(plus_button)\n\n # Render and display start game button\n start_game_button = Button(\n 425, 200 + len(agents) * 40 + 5, 150, 50, \"Start Game\", self.endInit\n )\n start_game_button.draw(self.screen)\n self.buttons.append(start_game_button)\n\n # Render and display informative max player text\n self.drawText(\n \"Maximum number of Players is 6\", 500, 200 + len(agents) * 40 + 75\n )\n\n pygame.display.flip()\n\n # Limit the frame rate\n sleep(0.1)\n\n self.buttons = []\n return self.agent_count\n\n # Starts a game and enters the program into a loop for each trick\n def startGame(self):\n self.trick_hands = [\n copy.deepcopy(player.get_hand()) for player in self.game.player_pool.players\n ]\n\n # Main game loop\n while not self.game.is_over():\n self.screen.fill(SCREEN_BACKGROUND_COLOR)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n # Draw most recent display status of the game\n self.drawCurrentStatus()\n\n if len(self.game.tricks) > 0:\n self.showPlayerTakesHand(self.game.tricks[-1].get_winner())\n\n # Advance to next round and keep track of players hands at the beginning of the round\n self.game.next_round()\n self.trick_hands = [\n copy.deepcopy(player.get_hand())\n for player in self.game.player_pool.players\n ]\n\n self.drawEndScreen()\n\n def getUserSelectedCard(self):\n # This function is called by the Human agent when it needs to select a card\n while True:\n # Checks for all events so as not to block the UI\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Having each button check if it was clicked. 
If it was, return its own index to the game engine\n                for button in self.cardbuttons:\n                    cardIndex = button.handle_event(event)\n                    if cardIndex is not None:\n                        return cardIndex\n\n            # Updates the display status\n            self.drawCurrentStatus()\n\n            pygame.display.flip()\n\n            # Limit the frame rate\n            sleep(0.1)\n\n    # Displays the current status of the game\n    def drawCurrentStatus(self, new_player=None):\n        self.screen.fill(SCREEN_BACKGROUND_COLOR)\n        played_cards = []\n        self.cardbuttons = []\n        self.buttons = []\n\n        # Get already played cards to display them above the rest of the hand\n        if self.game.current_trick is not None:\n            played_cards = self.game.current_trick.get_cards()\n\n        for playernr in range(len(self.game.player_pool.get_players())):\n            player = self.game.player_pool.get_players()[playernr]\n            for cardnr in range(len(self.trick_hands[playernr])):\n                card = self.trick_hands[playernr][cardnr]\n                cardUI = self.card_representations[card]\n\n                # Check if the card has been played and display it accordingly\n                if card not in played_cards:\n                    cardUI.graphics.position = self.hand_positions[playernr][cardnr]\n                else:\n                    cardUI.graphics.position = (\n                        self.hand_positions[playernr][cardnr][0],\n                        self.hand_positions[playernr][cardnr][1] - 25,\n                    )  # Move the card up by 25 pixels\n                cardUI.button = Button(\n                    cardUI.graphics.position[0],\n                    cardUI.graphics.position[1],\n                    cardUI.graphics.size[0],\n                    cardUI.graphics.size[1],\n                    \"\",\n                    lambda c=card: self.handleCardClick(c),\n                )\n                # Keep track of active buttons\n                self.cardbuttons.append(cardUI.button)\n\n                if not self.human_game:\n                    # Display card on screen\n                    self.screen.blit(cardUI.graphics.surface, cardUI.graphics.position)\n                else:\n                    if type(player).__name__ == \"Human\":\n                        self.screen.blit(\n                            cardUI.graphics.surface, cardUI.graphics.position\n                        )\n                    else:\n                        if card not in played_cards:\n                            self.screen.blit(\n                                cardUI.back_graphics.surface, cardUI.graphics.position\n                            )\n                        else:\n                            self.screen.blit(\n                                cardUI.graphics.surface, cardUI.graphics.position\n                            )\n\n            # Draw player label\n            font = pygame.font.Font(None, 22)\n            label = font.render(player.name, True, pygame.Color(\"white\"))\n            label_rect = label.get_rect(\n                center=(\n                    self.player_positions[playernr][0] + 125,\n                    self.player_positions[playernr][1] + 140,\n                )\n            )\n            self.screen.blit(label, label_rect)\n\n            # Draw player score\n            score_label = font.render(\n                f\"Score: {sum([card.points for card in player.pile])} points\",\n                True,\n                pygame.Color(\"white\"),\n            )\n            score_rect = score_label.get_rect(\n                center=(\n                    self.player_positions[playernr][0] + 125,\n                    self.player_positions[playernr][1] + 160,\n                )\n            )\n            self.screen.blit(score_label, score_rect)\n\n        # Draw deck\n        if len(self.game.deck.cards) > 0:\n            top_deck_card = self.game.deck.cards[-1]\n            top_deck_card_graphics = self.card_representations[top_deck_card]\n            self.screen.blit(\n                top_deck_card_graphics.back_graphics.surface, self.deck_position\n            )\n\n        # Draw trump card\n        if self.game.trump_card:\n            trump_card_graphics = self.card_representations[self.game.trump_card]\n            self.screen.blit(trump_card_graphics.graphics.surface, self.trump_position)\n\n        # Draw current player text\n        current_player_label = font.render(\n            f\"{self.game.player_pool.get_current_player()} is playing\",\n            True,\n            pygame.Color(\"white\"),\n        )\n        current_player_rect = current_player_label.get_rect(\n            center=(self.width // 2, self.height - 300)\n        )\n        self.screen.blit(current_player_label, current_player_rect)\n\n        # Draw player takes hand text\n        player_takes_hand_label = font.render(\n            self.player_takes_hand_text, True, 
pygame.Color(\"white\")\n )\n player_takes_hand_rect = player_takes_hand_label.get_rect(\n center=(self.width // 2, self.height - 275)\n )\n self.screen.blit(player_takes_hand_label, player_takes_hand_rect)\n\n pygame.display.update()\n\n # Limit the frame rate\n sleep(0.1)\n\n # Displays the end screen for the user with each players points and the winner\n def drawEndScreen(self):\n self.buttons = [self.play_again_button]\n\n while self.game.state == State.OVER:\n self.screen.fill(SCREEN_BACKGROUND_COLOR)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n self.play_again_button.handle_event(event)\n\n # Render and display end of the game information\n self.drawText(f\"Game Over\", 500, 200, size=45)\n self.drawText(\n f\"Winner is {self.game.winner.name} with {sum([card.points for card in self.game.winner.pile])} points\",\n 500,\n 250,\n )\n\n self.drawText(f\"Points per player\", 500, 300)\n\n for playernr in range(len(self.game.player_pool.players)):\n player = self.game.player_pool.players[playernr]\n self.drawText(\n f\"{player.name}: {sum([card.points for card in player.pile])}\",\n 500,\n 330 + 30 * playernr,\n )\n\n self.play_again_button.draw(self.screen)\n\n pygame.display.flip()\n\n # Limit the frame rate\n sleep(0.1)\n\n # Checks if the agent count is valid and if so starts a game\n def endInit(self):\n if sum(self.agent_count.values()) > 1:\n self.buttons = []\n self.game.state = State.RUNNING\n\n # Remove an agent in the initial screen\n def removeAgent(self, agent):\n if self.agent_count[agent] > 0:\n self.agent_count[agent] -= 1\n\n # Add an agent in the initial screen\n def addAgent(self, agent):\n # Only one human allowed per game\n if agent == \"Human\":\n if sum(self.agent_count.values()) < 6 and self.agent_count[\"Human\"] == 0:\n self.agent_count[agent] += 1\n else:\n if sum(self.agent_count.values()) < 6:\n self.agent_count[agent] += 1\n\n # Utils function to make it easier to draw text\n def drawText(\n self, text, x, y, alignment=\"center\", size=24, color=pygame.Color(\"white\")\n ):\n font = pygame.font.Font(None, size)\n label = font.render(text, True, color)\n if alignment == \"center\":\n label_rect = label.get_rect(center=(x, y))\n elif alignment == \"right\":\n label_rect = label.get_rect(right=x, centery=y)\n elif alignment == \"left\":\n label_rect = label.get_rect(left=x, centery=y)\n self.screen.blit(label, label_rect)\n return label\n\n # Play Again Button Action\n def playAgain(self):\n BiscaGameUI()\n\n # Handles the click on a displayed card\n def handleCardClick(self, card):\n if pygame.time.get_ticks() - self.last_clicked >= 500:\n # This condition checks the card against the player's hand\n if card in self.game.player_pool.get_current_player().hand:\n self.last_clicked = pygame.time.get_ticks()\n\n self.drawCurrentStatus()\n\n return self.trick_hands[\n self.game.player_pool.current_player_index\n ].index(card)\n\n # Displays informative text about which player won the last trick\n def showPlayerTakesHand(self, player):\n self.player_takes_hand_text = player.name + \" takes trick\"\n timer = threading.Timer(2, self.resetPlayerTakesHand)\n timer.start()\n\n # Hides informative text about which player won the last trick\n def resetPlayerTakesHand(self):\n self.player_takes_hand_text = \"\"\n","repo_name":"zev4l/basis","sub_path":"UI/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":18592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"24822711058","text":"#######################################################################################################################\n\n####################################\n# Funny Money A: Database analysis #\n# Created by Aniket N Prabhu #\n# References: ML_datanalysis2.py #\n####################################\n\nimport numpy as np # needed for arrays and math\nimport pandas as pd # needed to read the data\nimport matplotlib.pyplot as plt # needed for plotting\nfrom matplotlib import cm as cm # needed for the color map\nimport seaborn as sns # needed data visualization (pairs plot)\n\n# Defining a function for correlation #################################################################################\n# Dataframe is a dictionary-like container and is a pandas feature\n\n\ndef highcorr(datfram, repnum):\n cormat = datfram.corr() # correlation matrix\n print(cormat)\n\n # \".corr()\" computes pairwise correlation of columns, excluding any null/NA values. This finds the correlations\n\n cormat *= np.tri(*cormat.values.shape, k=-1).T\n\n # \"x*=y\" corresponds to \"x=x*y\". \"np.tri\" creates lower triangular matrix with 1s on &/or below the diagonal(diag).\n # It also unpacks the tuple into rows and columns. k=-1 means that the diag of 0s and 1s below the diag.\n\n # print(cormat) # For debugging\n cormat = cormat.stack() # Reorganizes columns into rows\n # print(cormat) # For debugging\n\n cormat = cormat.reindex(cormat.abs().sort_values(ascending=False).index).reset_index()\n\n # \".abs()\" returns absolute numeric values of each element in the dataframe.\n # \".sort_values\" will sort values in descending order since \"ascending=False\".\n # \".index\" returns the index/row labels of the array.\n # \".reset_index()\" resets the indices.\n # \".reindex()\" conform DataFrame to new index with optional filling logic.\n # print(cormat) # For debugging\n\n # Assigning column names.\n cormat.columns = [\"FirstVariable\", \"SecondVariable\", \"Correlation\"]\n print(\"\\nMost Highly Correlated\")\n print(cormat.head(repnum)) # prints the top values\n\n\n#######################################################################################################################\n\n# Heat map ############################################################################################################\n\n\ndef heatmap(df): # Although not explicitly asked, I don't intend to make compromises.\n # creating a figure that's 7x7 units with 100 dots per inch\n fig = plt.figure(figsize=(7, 7), dpi=100)\n\n # add a subplot that has 1 row, 1 column, and is the first subplot\n ax1 = fig.add_subplot(111)\n\n # get the 'jet' color map\n cmap = cm.get_cmap('jet', 30)\n\n # Perform the correlation and take the absolute value of it. 
Then map\n # the values to the color map using the \"nearest\" value\n cax = ax1.imshow(np.abs(df.corr()), interpolation='nearest', cmap=cmap)\n\n # now set up the axes\n major_ticks = np.arange(0, len(df.columns), 1)\n ax1.set_xticks(major_ticks)\n ax1.set_yticks(major_ticks)\n ax1.grid(True, which='both', axis='both')\n plt.title('Correlation Matrix')\n ax1.set_xticklabels(df.columns, fontsize=9)\n ax1.set_yticklabels(df.columns, fontsize=12)\n\n # add the legend and show the plot\n fig.colorbar(cax, ticks=[-0.4, -0.25, -.1, 0, 0.1, .25, .5, .75, 1])\n plt.show()\n\n#######################################################################################################################\n\n# Function to create pairs plot #######################################################################################\n\n\ndef prplt(dfram):\n sns.set(style='whitegrid', context='notebook') # Setting appearance.\n # color = ['amber', 'violet']\n sns.pairplot(dfram, hue='Class', height=2.5) # creates pairs plot.\n plt.show() # Shows the plot.\n\n\n# Using above defined functions to achieve our goals #\n\nfunmon = pd.read_csv('data_banknote_authentication.txt', names=['Variance', 'Skewness', 'Curtosis', 'Entropy', 'Class'])\nfunmon.to_csv('data_banknote_authentication.csv') # converts to \".csv\" format so that we may add hue in the pairs plot\nhighcorr(funmon, 10) # No. of ways of choosing 2 objects from 5 objects is 5C2=10\nheatmap(funmon)\nprplt(funmon)\n\n#######################################################################################################################\n# End #################################################################################################################\n#######################################################################################################################\n","repo_name":"whizbuzzer/Funny-Money","sub_path":"project1_A.py","file_name":"project1_A.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37938690994","text":"import graphene \nfrom graphene_django import DjangoObjectType\nfrom graphql import GraphQLError\nfrom django.db.models import Q, Avg, Count, Sum, Max, F\nfrom django.db.models.functions import Greatest\nfrom datetime import datetime\n\nfrom .models import Trail, Hike, Hiker, Buddy, EquipmentUsed, EquipmentType, SuggestedEquipment, Tag, Friend, Message \n\nclass MessageType(DjangoObjectType):\n mostRecentSent = graphene.DateTime()\n mostRecentReceived = graphene.DateTime()\n mostRecentThreadActivity = graphene.DateTime()\n class Meta:\n model = Message\n\nclass FriendType(DjangoObjectType):\n class Meta:\n model = Friend\n\nclass TagType(DjangoObjectType):\n class Meta:\n model = Tag\n\nclass SuggestedEquipmentType(DjangoObjectType):\n class Meta:\n model = SuggestedEquipment\n\nclass EquipmentTypeType(DjangoObjectType):\n class Meta:\n model = EquipmentType\n\nclass EquipmentUsedType(DjangoObjectType):\n class Meta:\n model = EquipmentUsed\n\nclass TrailType(DjangoObjectType):\n avgDifficulty = graphene.Float() # place aggregate (derived) values here\n avgEnjoyability = graphene.Float() # place aggregate (derived) values here\n numHikes = graphene.Int()\n class Meta:\n model = Trail \n\nclass HikeType(DjangoObjectType):\n totalHikerDistance = graphene.Int()\n date = graphene.Date()\n class Meta:\n model = Hike \n\nclass BuddyType(DjangoObjectType):\n class Meta:\n model = Buddy\n\nclass Query(graphene.ObjectType):\n trails = 
graphene.List(TrailType, search=graphene.String())\n hikes = graphene.List(HikeType)\n beginner_trails = graphene.List(TrailType)\n all_equ_types = graphene.List(EquipmentTypeType)\n popular_trails = graphene.List(TrailType)\n trail_details = graphene.List(TrailType, trailID=graphene.Int(required=True))\n expert_reviews = graphene.List(HikeType, trailID=graphene.Int(required=True))\n recent_hikers = graphene.List(HikeType, trailID=graphene.Int(required=True))\n hike_detail = graphene.Field(HikeType, hikeID=graphene.Int(required=True))\n conversation_threads = graphene.List(MessageType, hikerID=graphene.Int(required=True))\n thread_detail = graphene.List(MessageType, hikerID=graphene.Int(required=True), recipientID=graphene.Int(required=True))\n hiker_most_recent_hike_on_trail = graphene.List(HikeType, trailID=graphene.Int(required=True), hikerID=graphene.Int())\n\n def resolve_trails(self, info, search=None):\n if search:\n filter = (\n Q(name__icontains=search) |\n Q(prop__icontains=search) |\n Q(city__icontains=search) | \n Q(state__icontains=search)\n )\n return Trail.objects.filter(filter)\n else:\n return Trail.objects.all() \n\n def resolve_hikes(self, info):\n return Hike.objects.all()\n\n def resolve_beginner_trails(self, info):\n return Trail.objects.annotate(avgDifficulty=Avg('hikes__difficulty')). \\\n annotate(avgEnjoyability=Avg('hikes__enjoyability')).order_by('avgDifficulty')[:15]\n\n def resolve_all_equ_types(self, info):\n return EquipmentType.objects.all()\n\n def resolve_popular_trails(self, info):\n return Trail.objects.annotate(numHikes=Count('hikes')).annotate(avgDifficulty=Avg('hikes__difficulty')). \\\n annotate(avgEnjoyability=Avg('hikes__enjoyability')).order_by('-numHikes')[:15]\n\n def resolve_trail_details(self, info, trailID):\n # return Trail.objects.get(id=trailID)\n return Trail.objects.filter(id=trailID).annotate(numHikes=Count('hikes')).annotate(avgDifficulty=Avg('hikes__difficulty')). \\\n annotate(avgEnjoyability=Avg('hikes__enjoyability'))\n\n def resolve_expert_reviews(self, info, trailID):\n return Hike.objects.filter(trail__id=trailID).exclude(review=None).annotate(totalHikerDistance=Sum('hiker__hikes__trail__distance')). \\\n order_by('-totalHikerDistance')[:5].annotate(date=F('checkInDate__date'))\n\n def resolve_recent_hikers(self, info, trailID):\n return Hike.objects.filter(trail__id=trailID).annotate(latestDateForHiker=Max('hiker__hikes__checkInDate', \\\n filter=Q(hiker__hikes__trail__id=trailID))).filter(checkInDate=F('latestDateForHiker')).order_by('-checkInDate')\n\n def resolve_hike_detail(self, info, hikeID):\n return Hike.objects.get(id=hikeID)\n\n def resolve_conversation_threads(self, info, hikerID):\n return Message.objects.filter(Q(hikerID__id=hikerID) | Q(recipientID__id=hikerID)). \\\n annotate(mostRecentSent=Max('hikerID__messagesSent__timeSent', \\\n filter=Q(hikerID__messagesSent__recipientID=F('recipientID')))). \\\n annotate(mostRecentReceived=Max('hikerID__messagesReceived__mostRecentSent', \\\n filter=Q(hikerID__messagesReceived__hikerID=F('recipientID')))). \\\n annotate(mostRecentThreadActivity=Greatest('mostRecentSent', 'mostRecentReceived')). 
\\\n            filter(timeSent=F('mostRecentThreadActivity')).order_by('-timeSent')\n\n    def resolve_thread_detail(self, info, hikerID, recipientID):\n        return Message.objects.filter((Q(hikerID__id=hikerID) & Q(recipientID__id=recipientID)) | \\\n            (Q(hikerID__id=recipientID) & Q(recipientID__id=hikerID))).order_by('-timeSent')\n\n    def resolve_hiker_most_recent_hike_on_trail(self, info, trailID, hikerID=None):\n        trail = Trail.objects.get(id=trailID)\n        if hikerID:\n            hiker = Hiker.objects.get(id=hikerID)\n        else:\n            user = info.context.user \n            if user.is_anonymous:\n                raise Exception(\"Not logged in\")\n            hiker = Hiker.objects.get(user=user)\n        hike = Hike.objects.filter(hiker=hiker, trail=trail).order_by('-checkInDate')[:1].annotate(date=F('checkInDate__date'))\n        return hike\n\n\nclass CreateTrail(graphene.Mutation):\n    trail = graphene.Field(TrailType) \n\n    class Arguments:\n        name = graphene.String(required=True) \n        prop = graphene.String(required=True) \n        city = graphene.String(required=True)\n        state = graphene.String(required=True) \n        description = graphene.String(required=True) \n        isOpen = graphene.Boolean(required=True) \n        altitudeChange = graphene.Int(required=True) \n        distance = graphene.Int(required=True) \n        fee = graphene.Float(required=True)\n\n    def mutate(self, info, name, prop, city, state, description, isOpen, altitudeChange, distance, fee):\n        trail = Trail(name=name, prop=prop, city=city, state=state, description=description, isOpen=isOpen, altitudeChange=altitudeChange, distance=distance, fee=fee)\n        trail.save()\n        return CreateTrail(trail=trail)\n\nclass CheckIn(graphene.Mutation):\n    hike = graphene.Field(HikeType)\n    date = graphene.Date()\n\n    class Arguments:\n        trailID = graphene.Int(required=True)\n        hikerID = graphene.Int()\n\n    # def mutate(self, info, trailID, hikerID):\n    #     trail = Trail.objects.get(id=trailID)\n    #     hiker = Hiker.objects.get(id=hikerID)\n    #     hike = Hike(trail=trail, hiker=hiker)\n    #     hike.save()\n    #     return CheckIn(hike=hike)\n\n    def mutate(self, info, trailID, **kwargs):\n        trail = Trail.objects.get(id=trailID)\n        hikerID = kwargs.get('hikerID', None)\n        if hikerID:\n            hiker = Hiker.objects.get(id=hikerID)\n        else:\n            user = info.context.user \n            if user.is_anonymous:\n                raise Exception(\"Not logged in.\")\n            hiker = Hiker.objects.get(user=user)\n        hike = Hike(trail=trail, hiker=hiker)\n        hike.save()\n        return CheckIn(hike=hike, date=hike.checkInDate.date())\n\n\nclass LeaveReview(graphene.Mutation):\n    hike = graphene.Field(HikeType)\n\n    class Arguments:\n        hikeID = graphene.Int(required=True)\n        review = graphene.String()\n        difficulty = graphene.Int()\n        enjoyability = graphene.Int()\n\n    def mutate(self, info, hikeID, review, difficulty, enjoyability):\n        hike = Hike.objects.get(id=hikeID)\n        hike.review = review\n        hike.difficulty = difficulty \n        hike.enjoyability = enjoyability\n        hike.save()\n        return LeaveReview(hike=hike)\n\nclass CheckOut(graphene.Mutation):\n    hike = graphene.Field(HikeType)\n\n    class Arguments:\n        hikeID = graphene.Int()\n        trailID = graphene.Int(required=True)\n\n    def mutate(self, info, trailID, **kwargs):\n        hikeID = kwargs.get('hikeID', None)\n        hike = None\n        if hikeID:\n            hike = Hike.objects.get(id=hikeID)\n        else: \n            user = info.context.user\n            if user.is_anonymous:\n                raise Exception('Not logged in')\n            hiker = Hiker.objects.get(user=user)\n            trail = Trail.objects.get(id=trailID)\n            hike = Hike.objects.filter(hiker=hiker, trail=trail).order_by('-checkInDate')[0]\n        hike.checkOutDate = datetime.now()\n        hike.save()\n        return CheckOut(hike=hike)\n\nclass 
AddBuddy(graphene.Mutation):\n buddy = graphene.Field(BuddyType)\n\n class Arguments:\n hikeID = graphene.Int(required=True)\n friendID = graphene.Int(required=True)\n\n def mutate(self, info, hikeID, friendID):\n hike = Hike.objects.get(id=hikeID)\n friend = Hiker.objects.get(id=friendID)\n buddy = Buddy(friendID=friend, hikeID=hike)\n buddy.save()\n return AddBuddy(buddy=buddy)\n\nclass CreateSuggestedEquipment(graphene.Mutation): \n suggestedEquipment = graphene.Field(SuggestedEquipmentType)\n\n class Arguments:\n trailID = graphene.Int(required=True) \n equTypeID = graphene.Int(required=True) \n\n def mutate(self, info, trailID, equTypeID):\n trail = Trail.objects.get(id=trailID)\n equType = EquipmentType.objects.get(id=equTypeID)\n suggestedEquipment = SuggestedEquipment(trailID=trail, equipmentTypeID=equType)\n suggestedEquipment.save()\n return CreateSuggestedEquipment(suggestedEquipment=suggestedEquipment)\n\nclass AddEquipmentUsed(graphene.Mutation):\n equipmentUsed = graphene.Field(EquipmentUsedType)\n\n class Arguments:\n hikeID = graphene.Int(required=True)\n equTypeID = graphene.Int(required=True)\n\n def mutate(self, info, hikeID, equTypeID):\n hike = Hike.objects.get(id=hikeID)\n equType = EquipmentType.objects.get(id=equTypeID)\n equipmentUsed = EquipmentUsed(hikeID=hike, equipmentID=equType)\n equipmentUsed.save()\n return AddEquipmentUsed(equipmentUsed=equipmentUsed)\n\nclass CreateTag(graphene.Mutation):\n tag = graphene.Field(TagType)\n\n class Arguments:\n trailID = graphene.Int(required=True) \n tag = graphene.String(required=True) \n\n def mutate(self, info, trailID, tag):\n trail = Trail.objects.get(id=trailID)\n tag = Tag(trailID=trail, tag=tag)\n tag.save()\n return CreateTag(tag=tag)\n\nclass AddFriend(graphene.Mutation):\n friend = graphene.Field(FriendType)\n\n class Arguments:\n hikerID = graphene.Int(required=True)\n friendID = graphene.Int(required=True)\n\n def mutate(self, info, hikerID, friendID):\n hiker = Hiker.objects.get(id=hikerID)\n friend = Hiker.objects.get(id=friendID)\n friendListing = Friend(hikerID=hiker, friendID=friend, friendedBack=False)\n friendListing.save()\n return AddFriend(friend=friendListing)\n\nclass SendMessage(graphene.Mutation):\n message = graphene.Field(MessageType)\n\n class Arguments:\n hikerID = graphene.Int(required=True)\n recipientID = graphene.Int(required=True)\n content = graphene.String(required=True)\n\n def mutate(self, info, hikerID, recipientID, content):\n hikerID = Hiker.objects.get(id=hikerID)\n recipientID = Hiker.objects.get(id=recipientID)\n message = Message(hikerID=hikerID, recipientID=recipientID, content=content)\n message.save()\n return SendMessage(message=message)\n\nclass CreateEquipmentType(graphene.Mutation):\n equipmentType = graphene.Field(EquipmentTypeType)\n\n class Arguments:\n equType = graphene.String(required=True) \n\n def mutate(self, info, equType):\n equipmentType = EquipmentType(equType=equType)\n equipmentType.save()\n return CreateEquipmentType(equipmentType=equipmentType)\n\n################################ vvvvvvvvvvvvvv TESTING: POPULATE TABLES vvvvvvvvvvvvvvv ##############################################\nclass PopTrail(graphene.Mutation):\n trail = graphene.Field(TrailType) \n\n class Arguments:\n name = graphene.String() \n prop = graphene.String() \n city = graphene.String()\n state = graphene.String() \n description = graphene.String() \n isOpen = graphene.Boolean() \n altitudeChange = graphene.Int() \n distance = graphene.Int() \n fee = graphene.Float()\n\n def 
mutate(self, info):\n        count = Trail.objects.all().count()\n        name = \"name\" + str(count+1)\n        prop = \"prop\" + str(count+1)\n        city = \"city\" + str(count+1)\n        state = \"state\" + str(count+1)\n        description = \"description\" + str(count+1)\n        isOpen = True \n        altitudeChange = 1000\n        distance = 5000 \n        fee = 1.5\n        trail = Trail(name=name, prop=prop, city=city, state=state, description=description, isOpen=isOpen, altitudeChange=altitudeChange, distance=distance, fee=fee)\n        trail.save()\n        return PopTrail(trail=trail)\n\n\n################################ ^^^^^^^^^^^^^^^ TESTING: POPULATE TABLES ^^^^^^^^^^^^^^^^ ##############################################\n\n\nclass Mutation(graphene.ObjectType):\n    create_trail = CreateTrail.Field()\n    check_in = CheckIn.Field()\n    check_out = CheckOut.Field()\n    leave_review = LeaveReview.Field()\n    create_suggested_equipment = CreateSuggestedEquipment.Field()\n    create_tag = CreateTag.Field()\n    add_friend = AddFriend.Field()\n    send_message = SendMessage.Field()\n    create_equipment_type = CreateEquipmentType.Field()\n    add_buddy = AddBuddy.Field()\n    add_equipment_used = AddEquipmentUsed.Field()\n\n    ############ vvvvvvv TESTING: POPULATE TABLES vvvvvv ###########\n    pop_trail = PopTrail.Field()","repo_name":"CookUpThatCode/TheUltimateHikersGuide","sub_path":"a0_django/uhg/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":13749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71036499415","text":"_DATA_FILE = \"2022/data/02_rock_paper_scissors.txt\"\n\n_ROCK_LETTER_THEM = 'A'\n_ROCK_LETTER_ME = 'X'\n_ROCK_SCORE = 1\n_LOSE_SCORE = 0\n_DRAW_SCORE = 3\n_WIN_SCORE = 6\n\n_LOSE_OUTCOME_LETTER = 'X'\n_LOSE_OUTCOME_MOD_DIFF = 2\n\ndef me_throw_score(letter: str) -> int:\n    return ord(letter) - ord(_ROCK_LETTER_ME) + _ROCK_SCORE\n\ndef them_throw_score(letter: str) -> int:\n    return ord(letter) - ord(_ROCK_LETTER_THEM) + _ROCK_SCORE\n\ndef rps_score(them: str, me: str) -> int:\n    # Rock-paper-scissors:\n    # You win if you're 1 greater than your opponent mod 3 (1 mod 3).\n    # You lose if you're 1 less than your opponent mod 3 (2 mod 3).\n    # You draw if you're equal (0 mod 3).\n    mod_diff = (me_throw_score(me) - them_throw_score(them)) % 3\n    if mod_diff == 2:\n        return _LOSE_SCORE + me_throw_score(me)\n    elif mod_diff == 1:\n        return _WIN_SCORE + me_throw_score(me)\n    elif mod_diff == 0:\n        return _DRAW_SCORE + me_throw_score(me)\n\n# Determine the desired mod-diff based on the prescribed outcome.\ndef outcome_mod_diff(letter: str) -> int:\n    return (ord(letter) - ord(_LOSE_OUTCOME_LETTER) + _LOSE_OUTCOME_MOD_DIFF) % 3\n\ndef needed_throw(them: str, outcome: str) -> str:\n    # me - them = outcome, mod 3\n    # me = outcome + them, mod 3\n    me_score = (outcome_mod_diff(outcome) + them_throw_score(them)) % 3\n    return chr((me_score - _ROCK_SCORE) % 3 + ord(_ROCK_LETTER_ME))
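\n\n# Illustrative sanity checks (added commentary, not in the original script), using the\n# letter conventions above: against Rock ('A'), a prescribed win ('Z') needs Paper ('Y'),\n# and that round scores 6 (win) + 2 (Paper's throw score) = 8.\nassert needed_throw('A', 'Z') == 'Y'\nassert rps_score('A', 'Y') == _WIN_SCORE + 2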
\n\ngames = []\n\nwith open(_DATA_FILE, \"r\") as input:\n    games = [tuple(c.strip() for c in line.split(' ')) for line in input]\n\ntotal_score = 0\n\nfor game in games:\n    them = game[0]\n    outcome = game[1]\n    me = needed_throw(them, outcome)\n    score = rps_score(them, me)\n    print(\"Them: {them:s} Me: {me:s} Outcome: {outcome_mod_diff:d} Throw Score: {me_throw_score:d} RPS Score: {rps_score:d}\".format(\n        them = them,\n        me = me,\n        outcome_mod_diff = outcome_mod_diff(outcome),\n        me_throw_score = me_throw_score(me),\n        rps_score = rps_score(them, me)\n    ))\n    total_score += score\n\nprint(\"Total: {t:d}\".format(t = total_score))","repo_name":"iandimayuga/adventofcode","sub_path":"2022/02_rock_paper_scissors.py","file_name":"02_rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42955602872","text":"def check_values(A1, A2, A3, A4):\n    '''\n    By the property of its diagonals, quadrilateral ABCD is a parallelogram\n    if the coordinates of the midpoints of segments AC and BD coincide\n    '''\n    x1 = (A1['x'] + A3['x']) / 2\n    x2 = (A2['x'] + A4['x']) / 2\n    y1 = (A1['y'] + A3['y']) / 2\n    y2 = (A2['y'] + A4['y']) / 2\n\n    return x1 == x2 and y1 == y2
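\n\n# Quick illustrative check (added): the unit square's diagonals share the midpoint\n# (0.5, 0.5), so it is reported as a parallelogram; skewing one corner breaks it.\nassert check_values({'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}, {'x': 0, 'y': 1})\nassert not check_values({'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}, {'x': 0, 'y': 2})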
print(\"Name:\",name,30*\"\",\"Date:\",datetime.now())\n print(75*\"-\")\n print(\"sno\",8*\" \",\"items\",8*\" \",\"Quantity\",3*\" \",\"price\")\n\nfor i in range(len(pricelist)):\n print(i,8*\" \",8*\" \",ilist[i],3*\" \",qlist[i],plist[i])\n print(75*\"-\")\n print(50*\" \",\"TotalAmount:\",'Rs',totalprice)\n print(\"gst Amount\",50*\" \",'Rs',gst)\n print(75*\"-\")\n print(50*\" \",\"finalamount\",'Rs',finalamount)\n print(75*\"-\")\n print(20*\" \",\"Thank for Visiting\")\n print(75*\"-\")\n","repo_name":"Praveenmittakadapala8794/Super-Market-Bill-Generating-Project","sub_path":"Super_Market_Bill.py","file_name":"Super_Market_Bill.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4527992684","text":"## @ingroup Visualization-Performance-Noise\n# RCAIDE/Visualization/Noise/plot_noise_hemisphere.py\n# \n# \n# Created: Jul 2023, M. Clarke\n\n# ----------------------------------------------------------------------------------------------------------------------\n# IMPORT\n# ---------------------------------------------------------------------------------------------------------------------- \nfrom RCAIDE.Visualization import *\nimport plotly.graph_objects as go \n\n# ----------------------------------------------------------------------------------------------------------------------\n# PLOTS\n# ---------------------------------------------------------------------------------------------------------------------- \n## @ingroup Visualization-Performance-Noise\ndef plot_noise_hemisphere(noise_data,\n noise_level = False,\n min_noise_level = 35, \n max_noise_level = 90, \n noise_scale_label = None,\n save_figure = False,\n show_figure = True,\n vehicle = None,\n save_filename = \"Noise_Hemisphere\", \n colormap = 'jet',\n file_type = \".png\",\n background_color = 'white',\n grid_color = 'white',\n width = 1400, \n height = 800,\n *args, **kwargs):\n \n \"\"\"This plots a noise hemisphere of an aircraft \n\n Assumptions:\n None\n\n Source:\n None\n\n Inputs: \n noise_data - noise data structure \n noise_level - noise level \n min_noise_level - minimal noise level \n max_noise_level - maximum noise level \n save_figure - save figure \n show_figure - show figure \n save_filename - save file flag \n\n Outputs:\n Plots\n\n Properties Used:\n N/A\n \"\"\" \n \n plot_data = [] \n if vehicle != None:\n plot_data,_,_,_,_,_,_, = generate_3d_vehicle_geometry_data(plot_data,vehicle)\n \n X = noise_data.ground_microphone_locations[:,:,0] \n Y = noise_data.ground_microphone_locations[:,:,1] \n Z = noise_data.ground_microphone_locations[:,:,2] \n \n # ---------------------------------------------------------------------------\n # TRHEE DIMENSIONAL NOISE CONTOUR\n # --------------------------------------------------------------------------- \n # TERRAIN CONTOUR \n ground_contour = contour_surface_slice(-X,Y,Z,noise_level,color_scale=colormap, showscale= True, colorbar_title = noise_scale_label, colorbar_location = 'right', colorbar_orientation = 'v' )\n plot_data.append(ground_contour) \n\n # Define Colorbar Bounds \n fig_3d = go.Figure(data=plot_data) \n \n \n fig_3d.update_layout(\n title_text = save_filename, \n title_x = 0.5,\n width = width,\n height = height, \n font_size = 12,\n scene_aspectmode = 'auto', \n scene = dict(xaxis = dict(visible=False),\n yaxis = dict(visible=False),\n zaxis =dict(visible=False)), \n scene_camera=dict(up = dict(x=0, y=0, z=1),\n center= dict(x=-0.05, y=0, z=-0.0),\n 
eye = dict(x=-1.0, y=-1.0, z=.4)) \n ) \n \n if save_figure:\n fig_3d.write_image(save_filename + \".png\")\n \n if show_figure:\n fig_3d.write_html( save_filename + '.html', auto_open=True) \n\n return fig_3d \n\ndef colorax(vmin, vmax):\n return dict(cmin=vmin, cmax=vmax)\n","repo_name":"leadsgroup/RCAIDE_UIUC","sub_path":"RCAIDE/Visualization/Noise/plot_noise_hemisphere.py","file_name":"plot_noise_hemisphere.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"17076439043","text":"__title__ = 'simulator'\n__version__ = '1.1.0'\n__author__ = 'Dajun Luo'\n\nfrom heapq import heappush, heappop\nfrom collections import defaultdict\nimport time as systime\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nsns.set(rc={'figure.figsize': (20, 10)})\nfrom scipy import stats\n\n\nclass Transaction(object):\n \"\"\"\n Represents a single transaction.\n \"\"\"\n\n def __init__(self, amount, fee, create_time):\n \"\"\"\n :param amount: transaction amount\n :param fee: transaction fee\n :param create_time: create time\n \"\"\"\n self.amount = amount\n self.fee = fee\n self.create_time = create_time\n self.complete_time = None\n\n def complete(self, complete_time):\n \"\"\"\n Mark the transaction is completed\n :param complete_time: complete time\n :return:\n \"\"\"\n self.complete_time = complete_time\n\n def is_complete(self):\n \"\"\"\n Returns a boolean of whether the transaction is finished\n :return:\n \"\"\"\n return self.complete_time is not None\n\n\nclass Transcation_Pool(object):\n \"\"\"\n A pool of all transaction in the simulation system. We use a heap structure to retrieve the transaction with\n highest fee.\n \"\"\"\n\n def __init__(self, K):\n self.pool = []\n self.history = []\n self.K = K\n self.pending_transaction = 0\n self.finished_transaction = 0\n self.pending_amount = 0\n self.finished_amount = 0\n self.pending_fee = 0\n self.finished_fee = 0\n self.delay = 0\n self.block = 0\n\n def add_transaction(self, transaction):\n \"\"\"\n Add one transaction into the poll\n :param transaction: transaction object\n :return: None\n \"\"\"\n self.pending_transaction += 1\n self.pending_amount += transaction.amount\n self.pending_fee += transaction.fee\n heappush(self.pool, (-transaction.fee, id(transaction), transaction))\n self.history.append(transaction)\n\n def generate_block(self, time):\n \"\"\"\n Generate a block\n :param time: the time of generation\n :return: None\n \"\"\"\n self.block += 1\n for i in range(self.K):\n if len(self.pool) == 0:\n return\n fee, id, transaction = heappop(self.pool)\n transaction.complete(time)\n self.pending_transaction -= 1\n self.pending_amount -= transaction.amount\n self.pending_fee -= transaction.fee\n self.finished_transaction += 1\n self.finished_amount += transaction.amount\n self.finished_fee += transaction.fee\n self.delay += time - transaction.create_time\n\n def summary(self):\n \"\"\"\n Summary report of transaction\n :return: A pandas data frame of transaction information.\n \"\"\"\n stats = defaultdict(list)\n for transaction in self.history:\n stats['amount'].append(transaction.amount)\n stats['fee'].append(transaction.fee)\n stats['create_time'].append(transaction.create_time)\n stats['complete_time'].append(transaction.complete_time)\n stats['finished'].append(transaction.complete_time != None)\n stats = pd.DataFrame(stats)\n stats['pending_time'] = stats['complete_time'] - stats['create_time']\n return stats\n\n\nclass 
BRC_log(object):\n \"\"\"\n Records stats information.\n \"\"\"\n\n def __init__(self):\n self.time_line = defaultdict(list)\n\n def snapshot(self, simulator):\n \"\"\"\n Take a snapshot of the current status in the simulation system\n :param simulator: simulator object\n :return: None\n \"\"\"\n self.time_line['time(minutes)'].append(simulator.time)\n pool = simulator.pool\n self.time_line['all_transaction_count'].append(\n pool.pending_transaction + pool.finished_transaction)\n self.time_line['pending_transaction_count'].append(\n pool.pending_transaction)\n self.time_line['finished_transaction_count'].append(\n pool.finished_transaction)\n self.time_line['all_transaction_amount'].append(\n pool.pending_amount + pool.finished_amount)\n self.time_line['pending_transaction_amount'].append(\n pool.pending_amount)\n self.time_line['finished_transaction_amount'].append(\n pool.finished_amount)\n self.time_line['all_transaction_fee'].append(\n pool.pending_fee + pool.finished_fee)\n self.time_line['pending_transaction_fee'].append(pool.pending_fee)\n self.time_line['finished_transaction_fee'].append(pool.finished_fee)\n self.time_line['delay'].append(pool.delay)\n self.time_line['block'].append(pool.block)\n\n def generate_stats(self):\n \"\"\"\n Generate overall stats information, such as average, from time line.\n :return: A pandas data frame of stats information.\n \"\"\"\n data = pd.DataFrame(self.time_line)\n data['average_delay'] = data['delay'] / \\\n data['finished_transaction_count']\n data['average_transaction_amount'] = data[\n 'all_transaction_amount'] / data['all_transaction_count']\n data['average_transaction_fee'] = data[\n 'all_transaction_fee'] / data['all_transaction_count']\n data['average_pending_transaction_amount'] = data['pending_transaction_amount'] / data[\n 'pending_transaction_count']\n data['average_pending_transaction_fee'] = data[\n 'pending_transaction_fee'] / data['pending_transaction_count']\n data['average_finished_transaction_amount'] = data['finished_transaction_amount'] / data[\n 'finished_transaction_count']\n data['average_finished_transaction_fee'] = data[\n 'finished_transaction_fee'] / data['finished_transaction_count']\n data['average_block_size'] = data[\n 'finished_transaction_count'] / data['block']\n data['average_block_amount'] = data[\n 'finished_transaction_amount'] / data['block']\n data['average_block_fee'] = data[\n 'finished_transaction_fee'] / data['block']\n return data\n\n\nclass Simulator(object):\n \"\"\"\n Simulator of the dynamic in the DES system. 
We use a min-heap keyed by time so the next event can be retrieved quickly.\n    \"\"\"\n\n    def __init__(self, transaction_generator, mu_generator, K):\n        \"\"\"\n        Configure the simulator\n        :param transaction_generator: a function that returns a random transaction (amount, fee, inter-arrival time).\n        :param mu_generator: a function that returns a random time interval for block generation.\n        :param K: maximum number of transactions in a block\n        \"\"\"\n        self.transaction_generator = transaction_generator\n        self.mu_generator = mu_generator\n        self.K = K\n        self.reset()\n\n    def add_transaction_event(self):\n        # Generate a transaction\n        amount, fee, inter_arrival_time = self.transaction_generator()\n        # Add it into the transaction pool\n        self.pool.add_transaction(Transaction(amount, fee, self.time))\n        # Add the next transaction arrival into the event pool\n        heappush(self.event, (self.time + inter_arrival_time,\n                              self.add_transaction_event))\n\n    def generate_block_event(self):\n        # Generate a block\n        self.pool.generate_block(self.time)\n        # Add the next block generation event into the event pool\n        heappush(self.event, (self.time + self.mu_generator(),\n                              self.generate_block_event))
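\n\n    # Added aside (annotation, not part of the original source): self.event is a\n    # min-heap of (timestamp, callback) pairs, so heappop always yields the soonest\n    # pending event, e.g.\n    #   q = []\n    #   heappush(q, (5.0, 'block')); heappush(q, (1.5, 'tx'))\n    #   heappop(q)  # -> (1.5, 'tx')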
\n\n    def run(self, duration, warm_up=0, verbose=False):\n        \"\"\"\n        Run simulation.\n        :param duration: length of the simulation\n        :param warm_up: warm up time\n        :return: None\n        \"\"\"\n        # Loop until there is no event\n        counter = 0\n        start_time = systime.time()\n        while len(self.event) != 0:\n            counter += 1\n            if counter % 10000 == 0:\n                print('Simulated %s events at time %s...' % (counter, time))\n            time, event = heappop(self.event)\n            if time > duration:\n                # Stop the simulation once the deadline is reached\n                break\n            # Update the time stamp\n            self.time = time\n            event()\n            if self.time > warm_up:\n                # Log after warmed up\n                self.log.snapshot(self)\n        # Generate statistics\n        self.stats = self.log.generate_stats()\n        self.transaction_stats = self.pool.summary()\n        elapse = systime.time() - start_time\n        print('Simulation finished')\n        print('Simulated %s events, lasted %.1lf seconds, average speed is %.3lf events/s' %\n              (counter, elapse, counter / elapse))\n\n    def reset(self):\n        \"\"\"\n        Reset the simulation system.\n        :return: None\n        \"\"\"\n        self.event = [(0, self.add_transaction_event),\n                      (0, self.generate_block_event)]\n        self.pool = Transcation_Pool(self.K)\n        self.time = 0\n        self.log = BRC_log()\n        self.stats = None\n\n    def plot(self, response):\n        \"\"\"\n        Plot response versus time.\n        :param response: available response\n        :return: seaborn plot\n        \"\"\"\n        if response not in self.stats.columns:\n            raise ValueError(\n                'Only the following variables can be plotted. %s' % str(self.stats.columns))\n        column = self.stats[response]\n        sns.lineplot(x='time(minutes)', y=response, data=self.stats)\n\n    def transaction_plot(self, x=None, y=None):\n        \"\"\"\n        Plot transaction related variables.\n        :param x, y: if only x is specified, plot the distribution of x; otherwise plot a scatterplot\n        :return: seaborn plot\n        \"\"\"\n        if x not in self.transaction_stats.columns:\n            raise ValueError('Only the following variables can be plotted. %s' % str(\n                self.transaction_stats.columns))\n\n        if y is None:\n            sns.distplot(a=self.transaction_stats[x])\n        elif y not in self.transaction_stats.columns:\n            raise ValueError('Only the following variables can be plotted. %s' % str(\n                self.transaction_stats.columns))\n        else:\n            sns.scatterplot(x=x, y=y, hue='finished',\n                            data=self.transaction_stats)\n\nif __name__ == '__main__':\n    def transaction_generator():\n        \"\"\"\n        Define a transaction generator; it should return 3 values: the transaction amount, fee,\n        and an inter-arrival time. Notice that the unit for time is the minute.\n        \"\"\"\n        # Uniform transaction amount between 5, 25.\n        amount = stats.uniform.rvs() * 20 + 5\n        # Binomial transaction fee of 1% * amount or 2% * amount\n        # fee = amount * (stats.binom.rvs(1, 0.5) / 100 + 0.01)\n        fee = 0.1\n        # Exponential inter-arrival time of 2 transactions / minute\n        inter_arrival_time = stats.expon.rvs(scale=0.5)\n        return amount, fee, inter_arrival_time\n\n    def mu_generator():\n        \"\"\"\n        Define a block event generator; it should return a random inter-arrival time for block\n        generation.\n        \"\"\"\n        # Exponential inter-arrival time of 0.5 blocks / minute\n        return stats.expon.rvs(scale=2)\n\n    simulator = Simulator(transaction_generator, mu_generator, K=10)\n    simulator.run(duration=5000, warm_up=1000)\n","repo_name":"KingofCatfish/ORIE5580_project","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":11510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13155269075","text":"import os\nimport subprocess\nimport telebot\nfrom telebot.types import Message, Document\nfrom telebot.types import ReplyKeyboardMarkup, KeyboardButton, Message\n\n\nkeyboard = ReplyKeyboardMarkup(resize_keyboard=True)\nbutton = KeyboardButton('Run script')\nkeyboard.add(button)\n\n\nBOT_TOKEN = \"5964401295:AAFknoLLBy-DOXCDmZ5dfUs9DwRuBFMH6i4\" # Replace with your bot token\nPERL_SCRIPT = \"moss.pl\" # Replace with the path to your Perl script\nFILES_FOLDER = \"storage\" # Replace with the path to the folder where you want to save the files\n\nbot = telebot.TeleBot(BOT_TOKEN)\n\n@bot.message_handler(content_types=['document'])\ndef handle_files(message: Message):\n    try:\n        file_paths = []\n        file = message.document\n        file_path = os.path.join(FILES_FOLDER, file.file_name)\n        file_paths.append(file_path)\n        file_info = bot.get_file(file.file_id)\n        downloaded_file = bot.download_file(file_info.file_path)\n        with open(file_path, 'wb') as f:\n            f.write(downloaded_file)\n    except Exception as e:\n        bot.reply_to(message, f\"Error: {str(e)}\")\n\nkeyboard = ReplyKeyboardMarkup(resize_keyboard=True)\nrun_button = KeyboardButton('Run script')\ndelete_button = KeyboardButton('Delete files')\nshow_button = KeyboardButton('Show files')\nkeyboard.add(run_button, delete_button, show_button)\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message: Message):\n    bot.reply_to(message, \"Welcome to the file processing bot!\", reply_markup=keyboard)\n\n@bot.message_handler(func=lambda message: message.text == 'Run script')\ndef run_script(message: Message):\n    try:\n        # here you can add your own code to process the file and send a message\n        \n        files = os.listdir(FILES_FOLDER)\n        \n        # run the Perl script, passing every stored file as an argument\n        script_path = 'moss.pl'\n        script_args = [os.path.join(FILES_FOLDER, file) for file in files]\n        output = subprocess.check_output(['perl', script_path] + script_args)\n        \n        # send the script's output to the user\n        bot.send_message(message.chat.id, output.decode('utf-8'))\n    except Exception as e:\n        # send an error message\n        bot.send_message(message.chat.id, f\"Error running script: {e}\")
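\n\n# Added aside (an assumption about intent, not the repository's code): check_output\n# captures only stdout, so any Perl diagnostics written to stderr never reach the\n# user; one option is to fold stderr into the captured output:\n#   output = subprocess.check_output(['perl', script_path] + script_args,\n#                                    stderr=subprocess.STDOUT)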
{e}\")\n\n\n@bot.message_handler(func=lambda message: message.text == 'Delete files')\ndef delete_files(message: Message):\n try:\n # удаляем все файлы в указанной директории\n files = os.listdir(FILES_FOLDER)\n for file in files:\n os.remove(os.path.join(FILES_FOLDER, file))\n \n # отправляем сообщение об успешном удалении файлов\n bot.send_message(message.chat.id, \"All files have been deleted!\")\n except Exception as e:\n # отправляем сообщение об ошибке\n bot.send_message(message.chat.id, f\"Error deleting files: {e}\")\n\n@bot.message_handler(func=lambda message: message.text == 'Show files')\ndef show_files(message: Message):\n try:\n # получаем список файлов в указанной директории\n files = os.listdir(FILES_FOLDER)\n \n # формируем сообщение с перечислением файлов\n files_list = \"\\n\".join(files)\n message_text = f\"Files in {FILES_FOLDER}:\\n{files_list}\"\n \n # отправляем сообщение пользователю\n bot.send_message(message.chat.id, message_text)\n except Exception as e:\n # отправляем сообщение об ошибке\n bot.send_message(message.chat.id, f\"Error showing files: {e}\")\n\nbot.polling()","repo_name":"diaskabdualiev1/PlagarismTestBotTelegram","sub_path":"perl.py","file_name":"perl.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34305345654","text":"import pytorch_lightning as pl\nimport torchmetrics\nfrom torch import nn\nimport torch\nimport torchvision\nfrom pl_bolts.models.autoencoders.components import (\n resnet18_decoder,\n resnet18_encoder,\n)\nimport numpy as np\n\ndef compute_conv(input_vol, stack, kernel_size, stride, padding):\n vol = input_vol\n for i in range(len(stack)):\n vol = ((vol - kernel_size + 2 * padding) / stride) + 1\n return int(vol * vol * stack[-1])\n\nclass classifier_head(pl.LightningModule):\n def __init__(self, encoder, linear_stack, n_class=10, **kwargs):\n super().__init__(**kwargs)\n layers = []\n in_dim = 512\n\n self.encoder = encoder\n\n layers.append(nn.Flatten())\n\n for size in linear_stack:\n layers.append(\n nn.Sequential(\n nn.Linear(in_dim, size),\n nn.Softmax()\n )\n )\n in_dim = size\n \n layers.append(\n nn.Sequential(\n nn.Linear(linear_stack[-1], n_class),\n nn.Softmax()\n )\n )\n self.classifier = nn.Sequential(*layers)\n def forward(self, x):\n x = self.encoder(x)\n return self.classifier(x) \n\nclass VAEclassifier(pl.LightningModule):\n def __init__(self, \n head,\n enc_out_dim=512, \n latent_dim=10, \n categorical_dim=10,\n input_height=32, \n in_channels=3,\n temperature: float = 0.5,\n min_temperature: float = 0.2,\n anneal_rate: float = 3e-5,\n anneal_interval: int = 100, # every 100 batches\n alpha: float = 1.,\n kl_coeff = 1.):\n super().__init__()\n \n gen_param = lambda x : nn.Parameter(torch.Tensor([x]))\n\n self.prepare_data_per_node = False\n self.save_hyperparameters(ignore='head')\n\n self.l_dim = latent_dim\n self.c_dim = categorical_dim\n\n self.t = gen_param(temperature)\n self.min_t = gen_param(min_temperature)\n self.rate = gen_param(anneal_rate)\n self.interval = gen_param(anneal_interval)\n self.alpha = gen_param(alpha)\n self.kl_coeff = gen_param(kl_coeff)\n\n self.accuracy = torchmetrics.Accuracy(task='multiclass', num_classes=categorical_dim)\n\n # encoder, decoder\n self.encoder = resnet18_encoder(False, False)\n self.decoder = resnet18_decoder(\n latent_dim=latent_dim * categorical_dim,\n input_height=input_height,\n first_conv=False,\n maxpool1=False\n )\n self.encoder.conv1 = 
nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.head = head\n\n # distribution parameters\n self.fc_z = nn.Linear(enc_out_dim, latent_dim * categorical_dim)\n \n # for the gaussian likelihood\n self.log_scale = nn.Parameter(torch.Tensor([0.0]))\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=1e-4)\n\n def gaussian_likelihood(self, x_hat, logscale, x):\n scale = torch.exp(logscale)\n mean = x_hat\n dist = torch.distributions.Normal(mean, scale)\n\n # measure prob of seeing image under p(x|z)\n log_pxz = dist.log_prob(x)\n return log_pxz.sum(dim=(1, 2, 3))\n\n def kl_divergence(self, q, eps=1e-20):\n q_p = nn.functional.softmax(q, dim=-1)\n e = q_p * torch.log(q_p + eps)\n ce = q_p * np.log(1. / self.c_dim + eps)\n\n kl = torch.mean(torch.sum(e - ce, dim =(1,2)), dim=0)\n return kl\n \n def reparameterize(self, z, eps=1e-20):\n u = torch.rand_like(z)\n g = - torch.log(- torch.log(u + eps) + eps)\n\n # Gumbel-Softmax Trick\n s = nn.functional.softmax((z + g) / self.t, dim=-1)\n s = s.view(-1, self.l_dim * self.c_dim)\n return s\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n\n # encode x to get the mu and variance parameters\n x_encoded = self.encoder(x)\n \n q = self.fc_z(x_encoded)\n q = q.view(-1, self.l_dim, self.c_dim)\n z = self.reparameterize(q)\n\n # decoded\n x_hat = self.decoder(z)\n y_pred = self.head(x_hat)\n\n if batch_idx % self.interval == 0:\n self.t = torch.nn.Parameter(torch.max(self.t * torch.exp(- self.rate * batch_idx),\n self.min_t))\n\n # reconstruction loss\n recons_loss = self.gaussian_likelihood(x_hat, self.log_scale, x) \n\n # kl\n kl = self.kl_divergence(q)\n\n label_error = nn.functional.cross_entropy(y_pred, y.long())\n\n # elbo\n elbo = (self.kl_coeff)*kl - self.alpha * recons_loss\n elbo = elbo.mean()\n\n self.accuracy(y_pred, y.long())\n\n self.log_dict({\n 'elbo': elbo,\n 'kl': -kl.mean(),\n 'recon_loss': recons_loss.mean(),\n 'cce' : label_error,\n 'train_acc_step' : self.accuracy\n })\n\n return elbo + label_error\n \n def training_epoch_end(self, outs):\n # log epoch metric\n self.log('train_acc_epoch', self.accuracy)\n \n def validation_step(self, batch, batch_idx):\n x, y = batch\n x_encoded = self.encoder(x)\n q = self.fc_z(x_encoded)\n x_hat = self.decoder(q)\n y_hat = self.head(x_hat)\n \n loss = nn.functional.cross_entropy(y_hat, y.long())\n self.log(\"val_loss\", loss)\n return loss\n \n def test_step(self, batch, batch_idx):\n x, y = batch\n x_encoded = self.encoder(x)\n q = self.fc_z(x_encoded)\n x_hat = self.decoder(q)\n y_hat = self.head(x_hat)\n \n loss = nn.functional.cross_entropy(y_hat, y.long())\n self.log(\"test_loss\", loss)\n return loss","repo_name":"Parry-Parry/AggregateVAE","sub_path":"old/image/vae/seq.py","file_name":"seq.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34305294544","text":"import faiss\nimport torch\nimport numpy as np\n\nclass Kmeans:\n def __init__(self, \n spherical : bool = True, \n verbose : bool = False, \n seed : int = 42, \n gpu : bool = False) -> None:\n self.spherical = spherical\n self.verbose = verbose\n self.seed = seed\n self.gpu = False if not gpu else torch.cuda.device_count()\n\n self.centroids = None\n self.index = None\n\n def fit(self, X : np.ndarray, K : int = 50, iters : int = 20) -> None:\n kmeans = faiss.Kmeans(X.shape[1], \n K, \n niter=iters, \n spherical=self.spherical, \n verbose=self.verbose, \n 
seed=self.seed, \n gpu=self.gpu, \n min_points_per_centroid=X.shape[0] // K)\n kmeans.train(X)\n self.centroids = kmeans.centroids\n self.index = kmeans.index\n\n return None\n\n def fit_transform(self, X : np.ndarray, K : int = 50, iters : int = 20) -> np.ndarray:\n self.fit(X, K, iters)\n _, I = self.index.search(X, 1)\n return I.reshape(-1)","repo_name":"Parry-Parry/AggregateVAE","sub_path":"aggrVAE/cluster/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29132139554","text":"from linker import *\nfrom datetime import date\nfrom openpyxl import *\nfrom openpyxl.styles import PatternFill\n\nNON_CONFORME = []\n\ntoday = date.today()\nfile_name = today.strftime(\"%b-%d-%Y\")\n\n\nclass Computer:\n name = \"\"\n location = \"\"\n type = \"\"\n av = \"\"\n crit = \"\"\n wsus = \"\"\n sccm = \"\"\n ou = \"\"\n alive = \"\"\n os = \"\"\n update = \"\"\n bulle = \"\"\n remediation = \"\"\n\n\nPARC = []\n\n\ndef typedef(name):\n name = name[0]\n if name == 'L':\n return \"Laptop\"\n if name == 'W':\n return \"Desktop - VM\"\n print(\"UNABLE TO DETERMINE TYPE OF \" + name)\n return \"UNDEFINED\"\n\n\ndef locationdef(name):\n identity = name[1] + name[2]\n if identity.isnumeric():\n number = identity\n if number == \"00\":\n return \"Wallis\"\n if number == \"10\":\n return \"Guadeloupe\"\n if number == \"20\":\n return \"Guyane\"\n if number == \"30\":\n return \"Martinique\"\n if number == \"40\":\n return \"Mayotte\"\n if number == \"50\":\n return \"Nouvelle Caledonie\"\n if number == \"60\":\n return \"Polynesie\"\n if number == \"70\":\n return \"Reunion\"\n if number == \"80\":\n return \"SPM\"\n if number == \"90\":\n return \"Paris\"\n else:\n return \"UNDEFINED\"\n else:\n print(\"Unable to find location of computer: \" + name)\n return \"UNDEFINED\"\n\n\ndef avdef(name):\n if name in BITDEFENDER_STATUS:\n return \"WARN\"\n else:\n return \"OK\"\n\n\ndef critdef(name):\n if name in PC_CRITIQUE:\n return \"CRITIQUE\"\n else:\n return \"BUREAUTIQUE\"\n\n\ndef wsusdef(name):\n if name in WSUS_STATUS:\n return \"WARN\"\n\n else:\n return \"OK\"\n\n\ndef sccmdef(name):\n if name in SCCM_STATUS:\n return \"WARN\"\n else:\n return \"OK\"\n\n\ndef oudef(name):\n if name in OU_STATUS:\n return \"WARN\"\n else:\n return \"OK\"\n\n\ndef updatedef(name):\n if name in UPDATE_STATUS:\n return \"WARN\"\n else:\n return \"OK\"\n\n\ndef bulledef(name):\n if name in BULLE:\n return \"POSTE ISOLE\"\n else:\n return \"NON ISOLE\"\n\n\ndef remediationdef(name):\n if name in REMEDIATION:\n return \"EN REMEDIATION\"\n else:\n return \"NON\"\n\n\ndef generate_data():\n for el in LANSWEEPER_STATUS:\n pc = Computer()\n\n pc.name = el[0]\n pc.os = el[1]\n pc.alive = el[2]\n\n pc.location = locationdef(el[0])\n pc.type = typedef(el[0])\n pc.av = avdef(el[0])\n pc.crit = critdef(el[0])\n pc.wsus = wsusdef(el[0])\n pc.sccm = sccmdef(el[0])\n pc.ou = oudef(el[0])\n pc.update = updatedef(el[0])\n pc.bulle = bulledef(el[0])\n pc.remediation = remediationdef(el[0])\n\n PARC.append(pc)\n\n\ndef check_issue(pc):\n if pc.os != \"WIN 7\":\n if pc.remediation == \"NON\":\n if \"WARN\" in pc.av:\n return True\n if \"WARN\" in pc.wsus:\n return True\n if \"WARN\" in pc.sccm:\n return True\n if \"WARN\" in pc.ou:\n return True\n if \"WARN\" in pc.update:\n return True\n return 0\n\n\ndef write_data():\n file = Workbook(write_only=True)\n\n overview = file.create_sheet()\n overview.title = 
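A hedged usage sketch for the faiss `Kmeans` wrapper in the record above (assumes `faiss` is installed; faiss requires `float32` input, and spherical k-means typically expects L2-normalized rows):

```python
import numpy as np

X = np.random.rand(1000, 64).astype(np.float32)   # faiss expects float32
X /= np.linalg.norm(X, axis=1, keepdims=True)     # normalize rows for spherical mode

km = Kmeans(spherical=True, verbose=False, seed=42, gpu=False)  # the wrapper above
labels = km.fit_transform(X, K=50, iters=20)
print(labels.shape, labels.min(), labels.max())   # (1000,) 0 49
```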
\"OVERVIEW\"\n overview.append(\n [\"NAME\", \"TYPE\", \"LOCATION\", \"AV\", \"WSUS\", \"SCCM\", \"OU\", \"UPDATE\", \"CRITICITE\", \"OS\", \"BULLE\", \"REMEDIATION\",\n \"ALIVE\"])\n for el in PARC:\n overview.append(\n [el.name, el.type, el.location, el.av, el.wsus, el.sccm, el.ou, el.update, el.crit, el.os, el.bulle,\n el.remediation, el.alive])\n\n issues = file.create_sheet()\n issues.title = \"ISSUES\"\n issues.append(\n [\"NAME\", \"TYPE\", \"LOCATION\", \"AV\", \"WSUS\", \"SCCM\", \"OU\", \"UPDATE\", \"CRITICITE\", \"OS\", \"BULLE\", \"REMEDIATION\",\n \"ALIVE\"])\n for el in PARC:\n if check_issue(el):\n NON_CONFORME.append(\n [el.name, el.type, el.location, el.av, el.wsus, el.sccm, el.ou, el.update, el.crit, el.os, el.bulle,\n el.remediation, el.alive])\n issues.append(\n [el.name, el.type, el.location, el.av, el.wsus, el.sccm, el.ou, el.update, el.crit, el.os, el.bulle,\n el.remediation, el.alive])\n\n file.save(filename=\"../conformity-report/\" + file_name + \".xlsx\")\n file.close()\n\n\ndef color():\n data = load_workbook(\"../conformity-report/\" + file_name + \".xlsx\")\n for sheet_data in data.worksheets:\n for row in sheet_data.rows:\n for el in row:\n if el.value == \"WARN\":\n el.fill = PatternFill('solid', fgColor='FFFF0000')\n elif el.value == \"CRITIQUE\":\n el.fill = PatternFill('solid', fgColor='FF7B00')\n elif el.value == \"WIN 10\" or el.value == \"BUREAUTIQUE\" or el.value == \"NON\":\n el.fill = PatternFill('solid', fgColor='ADD8E6')\n elif el.value == \"WIN 7\":\n el.fill = PatternFill('solid', fgColor='FFFFFF00')\n elif el.value == \"OK\":\n el.fill = PatternFill('solid', fgColor='FF00FF00')\n elif el.value == \"NON ISOLE\":\n el.fill = PatternFill('solid', fgColor='ADD8E6')\n elif el.value == \"POSTE ISOLE\" or el.value == \"EN REMEDIATION\":\n el.fill = PatternFill('solid', fgColor='FF7B00')\n\n data.save(filename=\"../conformity-report/\" + file_name + \".xlsx\")\n data.close()\n\n\ndef class_default():\n data = load_workbook(\"../conformity-report/\" + file_name + \".xlsx\")\n bureautique_issues = data.create_sheet()\n bureautique_issues.title = \"BUREAUTIQUE\"\n critique_issues = data.create_sheet()\n critique_issues.title = \"CRITIQUE\"\n bureautique_issues.append(\n [\"NAME\", \"TYPE\", \"LOCATION\", \"AV\", \"WSUS\", \"SCCM\", \"OU\", \"UPDATE\", \"CRITICITE\", \"OS\", \"BULLE\", \"REMEDIATION\",\n \"ALIVE\"])\n critique_issues.append(\n [\"NAME\", \"TYPE\", \"LOCATION\", \"AV\", \"WSUS\", \"SCCM\", \"OU\", \"UPDATE\", \"CRITICITE\", \"OS\", \"BULLE\", \"REMEDIATION\",\n \"ALIVE\"])\n for el in NON_CONFORME:\n if \"BU\" in el[8]:\n bureautique_issues.append(el)\n else:\n critique_issues.append(el)\n\n data.save(filename=\"../conformity-report/\" + file_name + \".xlsx\")\n data.close()\n\n\ndef generate_rapport():\n init()\n status()\n generate_data()\n write_data()\n class_default()\n color()\n\n\ngenerate_rapport()\n","repo_name":"aurelienizl/CRG","sub_path":"report-generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73214787413","text":"from pyspark.sql import SparkSession\n\nspark = SparkSession.builder.getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nimport os\nimport sys\nfrom pyspark.sql.types import *\nfrom model import pipeline\n\ndataset_path = sys.argv[1]\nmodel_path = sys.argv[2]\n\nschema = StructType([\n StructField(\"asin\", StringType()),\n StructField(\"id\", LongType()),\n 
StructField(\"overall\", DoubleType()),\n StructField(\"reviewText\", StringType()),\n StructField(\"reviewTime\", DateType()),\n StructField(\"reviewerID\", StringType()),\n StructField(\"reviewerName\", StringType()),\n StructField(\"vote\", IntegerType()),\n StructField(\"summary\", StringType()),\n StructField(\"unixReviewTime\", TimestampType()),\n StructField(\"verified\", BooleanType())\n])\n\ndataset = spark.read.json(dataset_path, schema=schema,dateFormat='MM dd, yyyy').cache()\npipeline_model = pipeline.fit(dataset)\npipeline_model.write().overwrite().save(model_path)","repo_name":"alex-hse-repository/ozon-masters-bigdata","sub_path":"projects/4/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1426815420","text":"# 36. How to find the correlation between two columns of a numpy array?\n# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d\nimport numpy as np\n# Input\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\niris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])\nx = iris_2d[:,:1]\ny = iris_2d[:,2:3]\nmx = np.mean(x)\nmy = np.mean(y)\nx_minus_mx = x - mx\ny_minus_my = y - my\nmultiple = x_minus_mx * y_minus_my\nsqt_x_minus_mx = x_minus_mx**2\nsqt_y_minus_my = y_minus_my**2\nsigma = np.sum(multiple)\nsigmasqtx = np.sum(sqt_x_minus_mx)\nsigmasqty = np.sum(sqt_y_minus_my)\nr= sigma / (sigmasqtx * sigmasqty)**(1/2)\nprint(r)\n\n","repo_name":"ElModelo/101-NumPy-Exercises-for-Data-Analysis-Python-","sub_path":"36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24851015241","text":"\"\"\"\n给定一个整数数组A,坡是元组(i, j),其中i < j且A[i] <= A[j]。这样的坡的宽度为j - i。\n找出A中的坡的最大宽度,如果不存在,返回 0 。\n\n示例 1:\n输入:[6,0,8,2,1,5]\n输出:4\n解释:\n最大宽度的坡为 (i, j) = (1, 5): A[1] = 0 且 A[5] = 5.\n示例 2:\n输入:[9,8,1,0,1,9,4,0,4,1]\n输出:7\n解释:\n最大宽度的坡为 (i, j) = (2, 9): A[2] = 1 且 A[9] = 1.\n提示:\n\n2 <= A.length <= 50000\n0 <= A[i] <= 50000\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/maximum-width-ramp\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n # 超时\n def maxWidthRamp(self, nums: List[int]) -> int:\n arr = [] # 排序数组\n import bisect\n n = len(nums)\n result = 0\n for idx in range(n - 1, -1, -1):\n if not arr:\n arr.append((nums[idx], idx))\n else:\n idx_new = bisect.bisect_left(arr, (nums[idx], idx))\n arr_new = arr[idx_new:]\n if arr_new:\n temp = max(arr[idx_new:], key=lambda x: x[1])\n result = max(result, temp[1] - idx)\n bisect.insort(arr, (nums[idx], idx))\n return result\n\n def maxWidthRamp(self, nums: List[int]) -> int:\n \"\"\"\n 1 维护一个单调递减栈,其中第一个元素是A中第一个元素,最后一个元素是A的最小值。由于需要计算长度,所以栈中存储A的索引。\n\n 2 从后向前遍历A,当元素大于栈顶元素时,计算一次最大宽度坡,并弹出(因为再往前面遍历宽度肯定会减少),由于当栈顶索引等于当前遍历到的元素的索引时,肯定会被弹出,所以没有必要判断栈顶索引是否小于等于当前遍历到的索引。\n\n 作者:Elmer\n 链接:https://leetcode-cn.com/problems/maximum-width-ramp/solution/dan-diao-zhan-python-yi-kan-jiu-dong-by-elmer/\n 来源:力扣(LeetCode)\n 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n \"\"\"\n stack = []\n n = len(nums)\n for i in range(n):\n if not stack or nums[stack[-1]] > nums[i]:\n stack.append(i)\n\n res = 0\n i = n - 1\n while i > res: # 当res大于等于i时没必要继续遍历了\n while stack and nums[stack[-1]] <= nums[i]:\n res = max(res, i - stack[-1])\n stack.pop()\n i -= 1\n\n return res\n\n\nclass Solution:\n def 
{"seq_id":"24851015241","text":"\"\"\"\nGiven an integer array A, a ramp is a tuple (i, j) with i < j and A[i] <= A[j].\nThe width of such a ramp is j - i.\nFind the maximum width of a ramp in A; if none exists, return 0.\n\nExample 1:\nInput: [6,0,8,2,1,5]\nOutput: 4\nExplanation:\nThe widest ramp is (i, j) = (1, 5): A[1] = 0 and A[5] = 5.\nExample 2:\nInput: [9,8,1,0,1,9,4,0,4,1]\nOutput: 7\nExplanation:\nThe widest ramp is (i, j) = (2, 9): A[2] = 1 and A[9] = 1.\nConstraints:\n\n2 <= A.length <= 50000\n0 <= A[i] <= 50000\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/maximum-width-ramp\nCopyright belongs to LeetCode; please credit the source for non-commercial reuse.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    # times out (TLE)\n    def maxWidthRamp(self, nums: List[int]) -> int:\n        arr = []  # kept sorted\n        import bisect\n        n = len(nums)\n        result = 0\n        for idx in range(n - 1, -1, -1):\n            if not arr:\n                arr.append((nums[idx], idx))\n            else:\n                idx_new = bisect.bisect_left(arr, (nums[idx], idx))\n                arr_new = arr[idx_new:]\n                if arr_new:\n                    temp = max(arr[idx_new:], key=lambda x: x[1])\n                    result = max(result, temp[1] - idx)\n                bisect.insort(arr, (nums[idx], idx))\n        return result\n\n    def maxWidthRamp(self, nums: List[int]) -> int:\n        \"\"\"\n        1. Maintain a monotonically decreasing stack whose first element is the first\n        element of A and whose last element is the minimum of A. Since widths are needed,\n        the stack stores indices into A.\n\n        2. Traverse A from back to front; whenever the current element is >= the stack\n        top, record a candidate width and pop (scanning further forward can only shrink\n        the width). When the top index equals the current index it is always popped, so\n        there is no need to check that the top index is <= the current index.\n\n        Author: Elmer\n        Link: https://leetcode-cn.com/problems/maximum-width-ramp/solution/dan-diao-zhan-python-yi-kan-jiu-dong-by-elmer/\n        Source: LeetCode (力扣)\n        Copyright belongs to the author; please credit the source for non-commercial reuse.\n        \"\"\"\n        stack = []\n        n = len(nums)\n        for i in range(n):\n            if not stack or nums[stack[-1]] > nums[i]:\n                stack.append(i)\n\n        res = 0\n        i = n - 1\n        while i > res:  # once res >= i, no wider ramp is possible\n            while stack and nums[stack[-1]] <= nums[i]:\n                res = max(res, i - stack[-1])\n                stack.pop()\n            i -= 1\n\n        return res\n\n\nclass Solution:\n    def maxWidthRamp(self, nums: List[int]) -> int:\n        stack = []\n        for idx, num in enumerate(nums):\n            if not stack or nums[stack[-1]] > num:\n                stack.append(idx)\n        n = len(nums)\n        ans = float('-inf')\n        for idx in range(n - 1, -1, -1):\n            while stack and nums[idx] >= nums[stack[-1]]:\n                ans = max(ans, idx - stack[-1])\n                stack.pop()\n        return ans\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.maxWidthRamp([6, 0, 8, 2, 1, 5]))\n    # print(s.maxWidthRamp([9,8,1,0,1,9,4,0,4,1]))\n","repo_name":"wanzhouyi/leetcode","sub_path":"1.数组和字符串/栈和队列/单调栈/962. 最大宽度坡.py","file_name":"962. 最大宽度坡.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"32623092292","text":"import sys,zlib,re,warnings\nwarnings.filterwarnings(\"ignore\")\n\n#python .\\dc.py /path/to/file.pdf\npath = sys.argv[1]\nfile = open(path,'rb')\npdf = file.read()\nstream = re.compile(rb'.*?FlateDecode.*?stream(.*?)endstream', re.S)\nobjects = stream.findall(pdf)\n#Most times, the object was stored as the last object.\ncount = 0 \nwhile len(objects) > 0:\n\tlast = objects.pop()\n\tmydata = zlib.decompress(last.strip(b'\\r\\n'))\n\tprint(str(mydata, errors='replace'))\n\n\tsys.exit(0) #Remove this if the object isn't the last one.","repo_name":"caryhooper/scripts","sub_path":"dc.py","file_name":"dc.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"38400174092","text":"from typing import List\n\n\ndef solution(n: int, stairs: List[int]) -> int:\n\n    answer: List[int] = [0 for _ in range(n + 1)]\n\n    def recursion(m: int):\n        if m == 0:\n            return stairs[0]\n        if m == 1:\n            return stairs[1]\n        if m == 2:\n            return stairs[1] + stairs[2]\n        if answer[m]:\n            return answer[m]\n\n        answer[m] = stairs[m] + max(recursion(m - 2), stairs[m - 1] + recursion(m - 3))\n        return answer[m]\n\n    return recursion(n)\n\n\nif __name__ == \"__main__\":\n    N: int = int(input())\n    arr: List[int] = [0]\n\n    for _ in range(N):\n        arr.append(int(input()))\n\n    print(solution(N, arr))\n","repo_name":"rxdcxdrnine/problem-solving","sub_path":"python/boj/BOJ_2579.py","file_name":"BOJ_2579.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"28067679647","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def append(self, data):\n        if self.head is None:\n            self.head = Node(data)\n            return\n\n        current = self.head\n        while not current.next is None:\n            current = current.next\n\n        current.next = Node(data)\n\n    def prepend(self,data):\n        newHead = Node(data)\n        newHead.next = self.head\n        self.head = newHead\n\n    def deleteWithValue(self, data):\n        if self.head is None:\n            return\n\n        if self.head.data == data:\n            self.head = self.head.next\n            return\n\n        current = self.head\n        while not current.next is None:\n            if current.next.data == data:\n                current.next = current.next.next\n                return\n\n            current = current.next\n\n    def remove(self):\n        head = self.head.data\n        self.head = self.head.next\n        return head\n\n\n    def print(self):\n        print(\"Linked List\")\n        print(\"Head : \", self.head.data)\n        current = self.head\n        while not current.next is None:\n            print(\"--------\")\n            print(current.next.data)\n            current = current.next\n\n\n#linkedList = 
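A brute-force O(n^2) reference makes the stack-based ramp solutions above easy to sanity-check on random inputs (`Solution` here is the last class defined in that record):

```python
import random

def max_width_ramp_bruteforce(nums):
    n = len(nums)
    return max((j - i for i in range(n) for j in range(i, n) if nums[i] <= nums[j]),
               default=0)

s = Solution()
for _ in range(100):
    nums = [random.randint(0, 50) for _ in range(random.randint(2, 30))]
    assert s.maxWidthRamp(nums) == max_width_ramp_bruteforce(nums), nums
```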
LinkedList()\n#linkedList.append(67)\n#linkedList.append(7)\n#linkedList.append(3)\n#linkedList.append(90)\n#linkedList.print()\n#linkedList.deleteWithValue(7)\n#linkedList.print()\n#linkedList.prepend(80)\n#linkedList.print()","repo_name":"codexcod/AlgorithmsAndDataStructures","sub_path":"DataStructures/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6157322846","text":"import json\nimport urllib.request\nimport os\nimport boto3\nfrom boto3.dynamodb.conditions import Key\n\n\ndef handler(e, context):\n\n print(e)\n\n body = json.loads(e['body'])\n\n print(body)\n\n out = {}\n\n if 'challenge' in body:\n out['statusCode'] = 200\n out['body'] = body['challenge']\n return out\n\n event = body['event']\n\n post_channel = e['queryStringParameters'].get('channel')\n\n if 'subtype' in event:\n out['statusCode'] = 200\n out['body'] = 'bot'\n return out\n\n dynamo = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])\n\n slack_token = os.environ[\"SLACK_TOKEN\"]\n\n slack_webhook = os.environ[\"SLACK_WEBHOOK\"]\n\n user_id = event['user']\n channel_id = event['channel']\n\n user = get_from_dynamo(dynamo, user_id)\n\n if user is None:\n user = get_user(user_id, slack_token)\n dynamo.put_item(Item=user)\n\n channel = get_from_dynamo(dynamo, channel_id)\n\n if channel is None:\n channel = get_channel(channel_id, slack_token)\n dynamo.put_item(Item=channel)\n\n payload = create_payload(event, user, channel, post_channel)\n\n post_slack(payload, slack_webhook)\n\n out['statusCode'] = 200\n out['body'] = \"ok\"\n\n return out\n\n\ndef post_slack(payload, slack_webhook):\n headers = {\n \"X-Slack-No-Retry\": 1\n }\n json_data = json.dumps(payload).encode('utf-8')\n request = urllib.request.Request(\n slack_webhook, headers=headers, data=json_data, method='POST')\n with urllib.request.urlopen(request) as response:\n return response.getcode(), response.read().decode('utf-8')\n\n\ndef get_user(user_id, token):\n url = 'https://slack.com/api/users.info?token=' + \\\n token + \\\n '&user=' + user_id\n\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n user = json.loads(response.read().decode('utf-8'))['user']\n return {\n 'id': user_id,\n 'name': user['name'],\n 'icon': user['profile']['image_72']\n }\n\n\ndef get_channel(channel_id, token):\n url = 'https://slack.com/api/channels.info?token=' + \\\n token + \\\n '&channel=' + channel_id\n\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n channel = json.loads(response.read().decode('utf-8'))['channel']\n return {\n 'id': channel_id,\n 'name': channel['name']\n }\n\n\ndef create_payload(event, user, channel, post_channel):\n payload = {\n 'icon_url': user['icon'],\n 'username': user['name'],\n 'unfurl_links': True,\n 'unfurl_media': True,\n 'link_names': 1,\n 'text': event['text'].replace('\\\\', ''),\n 'attachments': [\n {\n 'footer': '#' + channel['name'],\n 'ts': event['event_ts']\n }\n ]\n }\n if post_channel is not None:\n payload['channel'] = post_channel\n return payload\n\n\ndef get_from_dynamo(dynamo, id):\n items = dynamo.query(KeyConditionExpression=Key('id').eq(id))\n\n if items['Count'] == 0:\n return None\n\n return 
items['Items'][0]\n","repo_name":"cohalz/slagg","sub_path":"src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"31171630231","text":"def snail(array):\n    a = []\n    while array:\n        a.extend(list(array.pop(0)))\n        array = list(zip(*array))\n        array.reverse()\n    return a\n\n\n\nmat = [\n[1, 2, 3],\n[4, 5, 6],\n[7, 8, 9]\n]\n# [1, 2, 3, 6, 9, 8, 7, 4, 5]\nprint(snail(mat))\n\n\n\"\"\"\na.extend(list(array.pop(0))) - appends the contents of the sequence to the list.\na = [1, 2, 3]\narray = [[4, 5, 6], [7, 8, 9]]\narray = list(zip(*array))\narray = [(4, 7), (5, 8), (6, 9)]\narray.reverse()\narray = [(6, 9), (5, 8), (4, 7)]\na.extend(list(array.pop(0)))\na = [1, 2, 3, 6, 9]\narray = [(5, 8), (4, 7)]\narray = list(zip(*array))\narray = [(5, 4), (8, 7)]\narray.reverse()\narray = [(8, 7), (5, 4)]\n\"\"\"\n\n\n","repo_name":"RinatStar420/programming_training","sub_path":"lesson/snail.py","file_name":"snail.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
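The rotate-and-peel trick in the `snail` record above (take the top row, then `zip(*array)` plus `reverse` rotates the remainder counter-clockwise) also handles non-square matrices; restated here for a self-contained check:

```python
def snail(array):
    out = []
    while array:
        out.extend(list(array.pop(0)))   # consume the top row
        array = list(zip(*array))        # transpose the rest...
        array.reverse()                  # ...and reverse: rotate counter-clockwise
    return out

print(snail([[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12]]))
# [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]
```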
{"seq_id":"17663187219","text":"class HashOpenAddr:\n\tdef __init__(self, size=10):\n\t\t\tself.size = size\n\t\t\tself.keys = [None]*self.size\n\t\t\tself.values = [None]*self.size\n\tdef __str__(self):\n\t\t\ts = \"\"\n\t\t\tfor k in self:\n\t\t\t\t\tif k == None:\n\t\t\t\t\t\t\tt = \"{0:5s}|\".format(\"\")\n\t\t\t\t\telse:\n\t\t\t\t\t\t\tt = \"{0:-5d}|\".format(k)\n\t\t\t\t\ts = s + t\n\t\t\treturn s\n\tdef __iter__(self):\n\t\t\tfor i in range(self.size):\n\t\t\t\t\tyield self.keys[i]\n\n\n\tdef __getitem__(self, key):\n\t\t\treturn self.keys[key]\n\n\n\tdef __setitem__(self, key, value):\n\t\t\tself.set(key, value)\n\n\tdef find_slot(self, key):\n\t\t\ti = self.hash_function(key)\n\t\t\tstart = i\n\t\t\t# probe while H[i] holds some other key\n\t\t\twhile (self.keys[i] != None ) and (self.keys[i]!= key):\n\t\t\t\t\ti = (i + 1) % self.size\n\t\t\t\t\tif (i == start): # wrapped all the way around: the table is full\n\t\t\t\t\t\t\treturn None\n\t\t\treturn i\n\n\tdef set(self, key, value=None):\n\t\t\ti = self.find_slot(key)\n\t\t\tif (i == None) : return None\n\t\t\tif self.keys[i] != None: # an item with this key already exists in H\n\t\t\t\tself.values[i] = value # update the stored value, then return\n\t\t #### TODO: grow the storage when the table fills up?\n\t\t\telse :\n\t\t\t\t\tself.keys[i] = key\n\t\t\t\t\tself.values[i] = value\n\t\t\treturn key # value defaults to None\n\n\n\tdef hash_function(self, key):\n\t\t\treturn key % self.size\n\n\t# if the key exists, erase it and return it\n\tdef remove(self, key):\n\t\t\ti = self.find_slot(key)\n\t\t\tif i == None or self.keys[i] == None :\n\t\t\t\t\treturn None\n\t\t\tj = i\n\t\t\twhile True:\n\t\t\t\t\tself.keys[i] = None\n\t\t\t\t\tself.values[i] = None\n\t\t\t\t\twhile True:\n\t\t\t\t\t\t\tj = (j + 1) % self.size\n\t\t\t\t\t\t\tif self.keys[j] == None: return key\n\t\t\t\t\t\t\t# can H[j] be shifted back into the freed slot H[i]?\n\t\t\t\t\t\t\tk =self.find_slot(self.keys[j])\n\t\t\t\t\t\t\tif not (i < k <= j or j < i < k or k <= j < i): # H[j] --> H[i]\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tself.keys[i] = self.keys[j]\n\t\t\t\t\tself.values[i] = self.values[j]\n\t\t\t\t\ti = j\n\n\n\tdef search(self, key):\n\t\t\ti = self.find_slot(key)\n\t\t\tif i == None :\n\t\t\t\treturn None\n\t\t\t\n\t\t\tif self.keys[i] == key:\n\t\t\t\t\treturn key\n\t\t\telse : \n\t\t\t\treturn None\n\nH = HashOpenAddr()\nwhile True:\n\tcmd = input().split()\n\tif cmd[0] == 'set':\n\t\t\tkey = H.set(int(cmd[1]))\n\t\t\tif key == None: print(\"* H is full!\")\n\t\t\telse: print(\"+ {0} is set into H\".format(cmd[1]))\n\telif cmd[0] == 'search':\n\t\t\tkey = H.search(int(cmd[1]))\n\t\t\tif key == None: print(\"* {0} is not found!\".format(cmd[1]))\n\t\t\telse: print(\" * {0} is found!\".format(cmd[1]))\n\telif cmd[0] == 'remove':\n\t\t\tkey = H.remove(int(cmd[1]))\n\t\t\tif key == None:\n\t\t\t\t\tprint(\"- {0} is not found, so nothing happens\".format(cmd[1]))\n\t\t\telse:\n\t\t\t\t\tprint(\"- {0} is removed\".format(cmd[1]))\n\telif cmd[0] == 'print':\n\t\t\tprint(H)\n\telif cmd[0] == 'exit':\n\t\t\tbreak\n\telse:\n\t\t\tprint(\"* not allowed command. enter a proper command!\")","repo_name":"chosunghyun18/Problem_Solving","sub_path":"DataStructure/hashTable/Hashwithadd.py","file_name":"Hashwithadd.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"11014817319","text":"import zipfile\r\nimport os\r\nimport xml.etree.ElementTree as ET\r\nfrom PIL import Image\r\nimport io\r\nimport csv\r\nimport shutil\r\n\r\n# File path\r\nfile_path = '/mnt/data/Chem 6 Unit 3 (2) 2.key'\r\n\r\n# Function to extract and analyze the content of a Keynote file\r\ndef analyze_keynote(file_path):\r\n    # Check if the file is a valid zipfile (Keynote files are essentially zip files)\r\n    if not zipfile.is_zipfile(file_path):\r\n        return \"The file does not appear to be a valid Keynote (zip) file.\"\r\n    \r\n    extracted_data = {\r\n        'metadata': [],\r\n        'comments': [],\r\n        'slide_content': [],\r\n        'diagrams': []\r\n    }\r\n\r\n    with zipfile.ZipFile(file_path, 'r') as zip_ref:\r\n        # Extracting the zipfile contents to a temporary directory\r\n        temp_dir = '/mnt/data/temp_keynote_extraction'\r\n        zip_ref.extractall(temp_dir)\r\n\r\n        # Parse metadata\r\n        metadata_file = os.path.join(temp_dir, 'docProps/app.xml')\r\n        if os.path.exists(metadata_file):\r\n            tree = ET.parse(metadata_file)\r\n            root = tree.getroot()\r\n            for child in root:\r\n                extracted_data['metadata'].append((child.tag, child.text))\r\n\r\n        # Parse comments (this is not straightforward in Keynote files and may not be accurate)\r\n        # ...\r\n\r\n        # Parse slide content and diagrams\r\n        slides_dir = os.path.join(temp_dir, 'slides')\r\n        if os.path.exists(slides_dir):\r\n            for slide_file in os.listdir(slides_dir):\r\n                slide_path = os.path.join(slides_dir, slide_file)\r\n                if slide_file.endswith('.xml'):\r\n                    # Parsing slide content\r\n                    tree = ET.parse(slide_path)\r\n                    root = tree.getroot()\r\n                    slide_text = []\r\n                    for elem in tree.iter():\r\n                        if 'text' in elem.tag.lower():\r\n                            slide_text.append(elem.text)\r\n                    extracted_data['slide_content'].append(' '.join(slide_text))\r\n\r\n                elif slide_file.endswith(('.png', '.jpg', '.jpeg')):\r\n                    # Extracting diagram images\r\n                    with open(slide_path, 'rb') as img_file:\r\n                        img_data = img_file.read()\r\n                    extracted_data['diagrams'].append(img_data)\r\n\r\n    # Cleanup temporary extraction directory\r\n    shutil.rmtree(temp_dir)  # os.rmdir would fail here: the directory is not empty\r\n\r\n    return extracted_data\r\n\r\n# Extract and analyze the Keynote file\r\nkeynote_data = analyze_keynote(file_path)\r\nkeynote_data\r\n\n","repo_name":"clockcoinG1/keynote_extractor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"41755107053","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n# Q1 Create a class called BankAccount that has four attributes: bankname, firstname, lastname, and balance.\r\n\r\nclass BankAccount:\r\n    def __init__(self, bankname, firstname, lastname, balance = 0): # attributes are initialized and balance is set 
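A related pattern for the temporary-directory extraction in the Keynote record above: `tempfile.TemporaryDirectory` creates a unique directory and removes it recursively on exit, so no manual cleanup call is needed. A minimal sketch, independent of the Keynote specifics:

```python
import tempfile
import zipfile

def list_archive_contents(file_path):
    # the context manager deletes temp_dir (recursively) even on exceptions
    with tempfile.TemporaryDirectory() as temp_dir:
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
            return zip_ref.namelist()
```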
to 0\n self.bankname = bankname\n self.firstname = firstname\n self.lastname = lastname\n self.balance = balance\n def deposit(self, amount): # method deposit will accept user input amount to add to the balance attribute\n self.balance = self.balance + amount\n return print('Your current balance is ', self.balance) \n def withdrawal(self, amount): # method withdrawal accepts user input of amount to withdraw\n if self.balance >= amount: # checks that there is enough in the balance for withdrawal\n self.balance = self.balance - amount # if enough in balance, balance is reduced by amount\n print('Your current balance is ', self.balance)\n else:\n print('Withdrawal request exceeds the available balance.') # message is returned if not enough is in balance for withdrawal\n def __str__(self): # this dunder method allows user to access attribute values as strings, user can enter print() to access object details\n return 'Bank Name: {self.bankname} \\nOwner Name: {self.firstname} {self.lastname} \\nCurrent Balance: ${self.balance}'.format(self = self)\n\n# test 1 \naccount1 = BankAccount('chase','cat','cho',2000) \naccount1.deposit(200) # deposit 200\nprint(account1) # balance should be 2200\naccount1.withdrawal(500) # withdraw 500\nprint(account1) # balance should be 1700\n\n# test 2\naccount2 = BankAccount('wells fargo','Tim','Smith',10000) \naccount2.deposit(1000) # deposit 1000\nprint(account2) # balance should be 11000\naccount2.withdrawal(2000) # withdraw 2000\nprint(account2) # balance should be 9000\n\n# Q2: Create a class Box that has attributes length and width that takes values for length and width upon construction (instantiation via the constructor).\nimport math\n\nclass Box:\n def __init__(self, length, width): #initialized with attributes length and width of a box/rectangle\n self.length = length\n self.width = width\n def render(self):\n for l in range(1,self.length+1): #for every row the following indented code is performed\n for w in range(1,self.width+1): #for every row, the for loop will cycle through the number of columns to print an '*' until the end of width. 
\n print('*', end = \" \")\n print('\\n') # a new row begins for the next row of asterisks\n def invert(self):\n length_orig = self.length #original attributes are stored in new variable to make a proper swap of values\n width_orig = self.width\n self.width = length_orig \n self.length = width_orig\n return 'The new width is {self.width} and the new length is {self.length}'.format(self = self)\n def get_area(self):\n return 'The area of this box is {}'.format(self.width * self.length)\n def get_perimeter(self):\n return 'The perimeter of this box is {}'.format(self.width * 2 + self.length * 2)\n def double(self):\n self.length = 2 * self.length\n return 'Doubling the size of the box results in an area of {}'.format(self.length * self.width) # the length is doubled to double the area of the box \n def __eq__(self, other):\n if isinstance(other, Box): #checks that the objects being compared are of the class Box\n if other.width == self.width and other.length == self.length: # checks that width and length are equal between two boxes\n return True\n return False\n def print_dim(self):\n return 'The width of the box is {self.width} and the length of the box is {self.length}'.format(self = self)\n def combine(self, other):\n self.length = self.length + other.length\n self.width = self.width + other.width\n return 'The new width is {self.width} and new length is {self.length}'.format(self = self)\n def get_hypot(self):\n d = math.sqrt(float(self.length**2) + float(self.width**2))\n return 'the diagonal length of the box is {:.2f}'.format(d)\n\n# Instantiate 3 boxes of dimensions 5,10 , 3,4 and 5,10 and assign to variables box1, box2 and box3 respectively \nbox1 = Box(5,10)\nbox2 = Box(3,4)\nbox3 = Box(5,10)\n\n# Print dimension info for each using print_dim()\nbox1.print_dim()\nbox2.print_dim()\nbox3.print_dim()\n\n# Evaluate if box1 == box2, and also evaluate if box1 == box3, print True or False to the screen accordingly\nbox1 == box2\nbox1 == box3\n\n# Combine box3 into box1 \nbox1.combine(box3)\n\n# Double the size of box2\nbox2.double()\n\n# Combine box2 into box1\nbox1.combine(box2)\n\n\n\n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n\n \n \n \n \n\n\n \n \n\n \n\n\n ","repo_name":"catcho1632/DATA-602","sub_path":"assignment_04.py","file_name":"assignment_04.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5078041047","text":"import pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport glob\nimport random\n\nclass CustomCSVDataset(Dataset):\n def __init__(self, filenames, batch_size):\n self.filenames = filenames \n self.batch_size = batch_size # `batch_size` number of files make a batch\n\n def __len__(self):\n return int(np.ceil(len(self.filenames) / float(self.batch_size)))\n\n def __getitem__(self, index):\n batch = self.filenames[index * self.batch_size:(index+1) * self.batch_size]\n v, x, y = [], [], []\n for file in batch:\n df = pd.read_csv(open(file, 'r'), skiprows=1)\n v.append(df.values[:, 5:11].astype(np.float32))\n x.append(df.values[:, 12:14].astype(np.float32))\n y.append(df.values[:, 4].astype(np.float32))\n v = np.concatenate(v)\n x = np.concatenate(x)\n y = np.concatenate(y)\n y = y.reshape(len(y), 1)\n\n if index == self.__len__(): \n raise IndexError\n\n return v, x, y\n\nif __name__ == \"__main__\":\n files = glob.glob(\"data/*.csv\")\n print(files[:5])\n dataset = 
CustomCSVDataset(files, 4)\n    print(\"Length of Dataset: {}\".format(len(dataset)))\n    print(\"\\nData preview:\")\n    v, x, y = next(iter(dataset))  # __getitem__ returns a (v, x, y) tuple, not a dict\n    print(\"\\nv:\\t\")\n    print(v)\n    print(\"\\nx:\\t\")\n    print(x)\n    print(\"\\ny:\\t\")\n    print(y)","repo_name":"yiranlll/auto-iv","sub_path":"csv_dataset.py","file_name":"csv_dataset.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"19821377047","text":"import pandas as pd\n\ndiqu_file = r'E:\\ZCXX\\广日物流\\1. RPA\\3. 实施\\地区信息.xlsx'\ndf_file = r'E:\\TEMP\\02GR\\kefahuo\\可发货通知书(代运)2020——导入2.0______1.xlsx'\ndiqu_1 = pd.read_excel(diqu_file, sheet_name=\"省内区级\")\ndiqu_2 = pd.read_excel(diqu_file, sheet_name=\"省内市级\")\ndiqu_3 = pd.read_excel(diqu_file, sheet_name=\"省外\")\ndf = pd.read_excel(df_file, keep_default_na=False)  # added 2020-12-04\n\n# diqu = diqu_1.append(diqu_2, diqu_3, ignore_index=False)\ndiqu = pd.concat([diqu_1, diqu_2, diqu_3])\ndiqu.sort_values(\"编号\", inplace=True)\n# aa = diqu.名称.tolist()\n# bb = set(aa)\n# print(len(aa),len(bb))\n\n# df[\"安装地址\"] = df[\"安装地址\"].astype(\"string\")\n# diqu[\"名称\"] = diqu[\"名称\"].astype(\"string\")\n# df = df.applymap(str)\n# diqu = diqu.applymap(str)\n\n\nno_list = []\ncount = len(diqu)\nfor i in range(len(df)):\n    for n in range(len(diqu)):\n\n        if (df.iloc[i].安装地址) != '':  # added 2020-12-04\n            if (diqu.iloc[n].名称) in df.iloc[i].安装地址:\n                # print(diqu.iloc[n].编号, diqu.iloc[n].名称, df.iloc[i].安装地址)\n\n                # [:5]: the province name appears within the first 5 characters of the address\n                if diqu.iloc[n].编号 >= 300 and (diqu.iloc[n].名称) in df.iloc[i].安装地址[:5]:\n                    no_list = [i, n, diqu.iloc[n].编号, diqu.iloc[n].名称]\n                elif diqu.iloc[n].编号 < 300:\n                    no_list = [i, n, diqu.iloc[n].编号, diqu.iloc[n].名称]\n                # added 2020-12-04: some addresses omit the province and only give a district or city\n                elif df.iloc[i].安装地址.split(\"区\")[0].split(\"市\")[0] in str(diqu.iloc[n].包含城市):\n                    no_list = [i, n, diqu.iloc[n].编号, diqu.iloc[n].名称]\n\n\n        if count == n + 1:\n            # print(no_list)\n            if no_list == []:\n                df.loc[i, \"地区\"] = ''\n            else:\n                df.loc[i, \"地区\"] = [no_list[-1]]\n            no_list = []\n\n    # print(df.iloc[no_list[0]].安装地址 , diqu.iloc[[no_list[1]], [1]])\n\n    #\n    # break\ndf.to_excel(df_file, index=False)\n","repo_name":"xdpbydl/untitled111111","sub_path":"Gr/地区信息.py","file_name":"地区信息.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
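Since `CustomCSVDataset` above already packs `batch_size` CSV files into one batch per index, it pairs with a `DataLoader` whose automatic batching is disabled; a hedged usage sketch (assumes the same `data/*.csv` layout):

```python
import glob
from torch.utils.data import DataLoader

files = glob.glob("data/*.csv")
dataset = CustomCSVDataset(files, batch_size=4)

# batch_size=None disables collation: each yielded item is already a full batch
loader = DataLoader(dataset, batch_size=None, shuffle=False)
for v, x, y in loader:
    print(v.shape, x.shape, y.shape)
    break
```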
{"seq_id":"16483867086","text":"from rest_framework import serializers\nfrom django.db.models import Avg\n\nfrom .models import Curso,Avaliacao\n\nclass AvaliacaoSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        # the email field is write-only: it is not returned when someone\n        # reads a review, only supplied when one is created\n        extra_kwargs = {\n            'email': {'write_only':True}\n        }\n        model = Avaliacao\n        fields = (\n            'id',\n            'curso',\n            'nome',\n            'email',\n            'comentario',\n            'avaliacao',\n            'criacao',\n            'ativo',\n        )\n    \n    def validate_avaliacao(self,valor):\n        # valid only if the rating value is between 1 and 5\n        if valor in range(1, 6):\n            return valor\n        raise serializers.ValidationError(\"A avaliação precisa ser um inteiro entre 1 e 5\")\n\n\nclass CursoSerializer(serializers.ModelSerializer):\n    # Nested relationship\n    # recommended for one-to-one relationships:\n    # avaliacoes = AvaliacaoSerializer(many=True, read_only=True)\n\n    # HyperLinked Related Field\n    # offers links to fetch the extra data, should the client want it\n    # avaliacoes = serializers.HyperlinkedRelatedField(\n    #     many=True,\n    #     read_only=True,\n    #     view_name='avaliacao-detail'\n    # )\n    \n    # primary key related field\n    avaliacoes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n    media_avaliacoes = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Curso\n        fields = (\n            'id',\n            'titulo',\n            'url',\n            'criacao',\n            'ativo',\n            'avaliacoes',\n            'media_avaliacoes'\n        )\n    \n    def get_media_avaliacoes(self,obj):\n        media = obj.avaliacoes.aggregate(Avg('avaliacao')).get('avaliacao__avg')\n\n        if media is None:\n            return 0\n        \n        return round(media * 2 ) / 2","repo_name":"albertojr/escola-django-rest-api","sub_path":"cursos/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
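The `round(media * 2) / 2` step in `get_media_avaliacoes` above rounds the average rating to the nearest half star; a tiny standalone check (sample values are hypothetical):

```python
def round_to_half(x):
    # scale to half-star units, round, scale back
    return round(x * 2) / 2

for v in (3.2, 3.26, 3.74, 4.1):
    print(v, '->', round_to_half(v))   # 3.0, 3.5, 3.5, 4.0
```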
{"seq_id":"38573463752","text":"\"\"\"\nCLI to detect and analyse cell assemblies\n * `assemblyfire assemblies config_path`: detect assemblies in spiking data\n * `assemblyfire consensus config_path`: create consensus assemblies from assemblies across seeds\n * `assemblyfire conn-mat config_path`: gets connectivity matrix\n * `assemblyfire syn-clust config_path`: finds clusters of synapses in assembly neurons\n * `assemblyfire syn-nnd config_path assembly_group_name`: gets synapses nearest neighbour distances\n * `assemblyfire rerun config_path seed gid`: reruns single gid in BGLibPy (with extra reporting and modifications)\n * `assemblyfire single-cell config_path`: gets single cell features from simulations\nlast modified: Thomas Delemontex, András Ecker 01.2023\n\"\"\"\n\nimport click\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nL = logging.getLogger(\"assemblyfire\")\n\n\ndef set_verbose(logger, verbose):\n    \"\"\"Set the verbose level for the CLI\"\"\"\n    logger.setLevel((logging.WARNING, logging.INFO, logging.DEBUG)[min(verbose, 2)])\n\n\n@click.group()\n@click.option('-v', '--verbose', count=True)\ndef cli(verbose):\n    \"\"\"CLI entry point.\"\"\"\n    set_verbose(L, verbose)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\ndef assemblies(config_path):\n    \"\"\"CLI for `find_assemblies.py/run()`\"\"\"\n    from assemblyfire.find_assemblies import run\n    run(config_path)\n    \n\n@cli.command()\n@click.argument(\"config_path\", required=True)\n@click.argument(\"average\", required=False, default=False)\ndef consensus(config_path, average):\n    \"\"\"CLI for `find_consensus_assemblies.py/run()`\"\"\"\n    from assemblyfire.find_consensus_assemblies import run\n    run(config_path, average)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\ndef conn_mat(config_path):\n    \"\"\"CLI for `get_connectivity_matrix.py/run()`\"\"\"\n    from assemblyfire.get_connectivity_matrix import run\n    run(config_path)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\n@click.argument(\"debug\", required=False, default=False)\ndef syn_clust(config_path, debug):\n    \"\"\"CLI for `find_synapse_clusters.py/run()`\"\"\"\n    from assemblyfire.find_synapse_clusters import run\n    run(config_path, debug)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\n@click.argument(\"assembly_grp_name\", required=True)\n@click.argument(\"buf_size\", required=False, default=100)\n@click.argument(\"seed\", required=False, default=100)\ndef syn_nnd(config_path, assembly_grp_name, buf_size, seed):\n    \"\"\"CLI for `get_synapse_nnds.py/run()`\"\"\"\n    from assemblyfire.get_synapse_nnds import run\n    run(config_path, assembly_grp_name, buf_size, seed)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\n@click.argument(\"seed\", required=True)\n@click.argument(\"gid\", required=True)\ndef rerun(config_path, seed, gid):\n    \"\"\"CLI for `rerun_single_cell.py/run()`\"\"\"\n    from assemblyfire.rerun_single_cell import run\n    run(config_path, seed, gid)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\ndef single_cell(config_path):\n    \"\"\"CLI for `get_single_cell_features.py/run()`\"\"\"\n    from assemblyfire.get_single_cell_features import run\n    run(config_path)\n\n\n@cli.command()\n@click.argument(\"config_path\", required=True)\n@click.argument(\"prefix\", required=True)\ndef clean_h5(config_path, prefix):\n    \"\"\"Removes data under the given `prefix` in the HDF5 file\"\"\"\n    import h5py\n    from assemblyfire.config import Config\n    config = Config(config_path)\n    with h5py.File(config.h5f_name, \"a\") as h5f:\n        assert prefix in list(h5f.keys())\n        del h5f[prefix]\n","repo_name":"BlueBrain/assemblyfire","sub_path":"assemblyfire/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"15062255881","text":"from sys import stdin\ninput = stdin.readline\n\nN = int(input())\nl = [int(input().rstrip()) for _ in range(N)]\n\ndef solution(l):\n    l.sort(reverse=True)\n\n    for i in range(len(l)-2):\n        if l[i] < l[i+1] + l[i+2]:\n            ans = l[i] + l[i+1] + l[i+2]\n            return ans\n    \n    return -1\n\nprint(solution(l))","repo_name":"chldppwls12/StudyTocoteAllsolve","sub_path":"BOJ/1447_삼각형_만들기/chldppwls12.py","file_name":"chldppwls12.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
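Why the descending sort in the triangle record above works: with sides a >= b >= c, the only triangle inequality that can fail is a < b + c, so scanning consecutive triples from the largest side down hits the maximum perimeter first. A quick cross-check against a brute force over all triples:

```python
from itertools import combinations

def max_perimeter(sides):
    s = sorted(sides, reverse=True)
    for a, b, c in zip(s, s[1:], s[2:]):
        if a < b + c:                    # only inequality that can fail here
            return a + b + c
    return -1

def max_perimeter_bruteforce(sides):
    valid = [a + b + c for a, b, c in combinations(sides, 3)
             if a < b + c and b < a + c and c < a + b]
    return max(valid, default=-1)

assert max_perimeter([3, 4, 5, 10]) == max_perimeter_bruteforce([3, 4, 5, 10]) == 12
```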
{"seq_id":"39821758392","text":"\"\"\"\nA well-known puzzle for schoolchildren: three turtles crawl along a road. One turtle\nsays: \"There are two turtles ahead of me.\" Another turtle says: \"There are two\nturtles behind me.\" A third turtle says: \"There are two turtles ahead of me and two\nturtles behind me.\" How can that be? Answer: the third turtle is lying!\nN turtles move along a road one after another. Each turtle states a phrase of the\nform: \"There are ai turtles ahead of me and bi turtles behind me.\" Your task is to\ndetermine the largest number of turtles that can be telling the truth.\nExample:\n    N = 3\n    a = [0, 2, 2]\n    b = [2, 0, 2]\nAnswer:\n    answer = 2\n\"\"\"\n\n\ndef right_turtles(f, s):\n    used_before = set()\n    n = len(f)\n    for i in range(n):\n        a = f[i]\n        b = s[i]\n        if a + b == n - 1 and a >= 0 and b >= 0:\n            used_before.add(a)\n    return len(used_before)\n\n\n# Raw Example\nn = int(input())\nused_before = set()\nfor i in range(n):\n    a, b = map(int, input().split())\n    if a + b == n - 1 and a >= 0 and b >= 0:\n        used_before.add(a)\nprint(len(used_before))\n\n\n# Function Example\na = [0, 2, 2]\nb = [2, 0, 2]\nprint(right_turtles(a, b))\n","repo_name":"EgorCry/Deep_Learning_Playground","sub_path":"Решение задач с собеседований/Turtles.py","file_name":"Turtles.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"41861746651","text":"#!/usr/bin/env python3\n\n\"\"\"\nBuild helper script for Botan's Sublime Text integration\n\n(C) 2022 Jack Lloyd\n(C) 2022 René Meusel (neXenio GmbH)\n\nBotan is released under the Simplified BSD License (see license.txt)\n\"\"\"\n\nimport argparse\nimport multiprocessing\nimport subprocess\nimport sys\nimport os\nimport re\n\n\nclass BuildError(Exception):\n    pass\n\n\ndef run_cmd(cmd):\n    if isinstance(cmd, str):\n        print('> running: ' + cmd)\n        shell = True\n    else:\n        print('> running: ' + ' '.join(cmd))\n        shell = False\n    sys.stdout.flush()\n\n    try:\n        subprocess.run(cmd, shell=shell, check=True)\n    except subprocess.CalledProcessError as ex:\n        raise BuildError('Command failed, aborting...') from ex\n\n\ndef _find_regex_in_makefile(regex):\n    if not os.path.exists('Makefile'):\n        raise BuildError(\n            'No Makefile found. Initial ./configure.py invocation must be performed manually.')\n\n    with open('Makefile', 'r', encoding='utf8') as f:\n        return re.search(regex, f.read())\n\n\ndef _retrieve_test_binary_name():\n    match = _find_regex_in_makefile(r'TEST\\s*=\\s*([^\\n]+)\\n')\n    if not match:\n        raise BuildError('Test binary name not found in Makefile')\n    test_file = os.path.split(match.group(1))[1]\n    if not test_file:\n        raise BuildError(\n            'Cannot make sense of test binary name: ' + match.group(0))\n\n    return test_file\n\n\ndef _retrieve_configure_command():\n    match = _find_regex_in_makefile(r'\\'(configure\\.py.+)\\'\\n')\n    if not match:\n        raise BuildError('configure.py command not found in Makefile')\n    return match.group(1)\n\n\ndef reconfigure():\n    run_cmd(\"./\" + _retrieve_configure_command())\n\n\ndef build(target=''):\n    reconfigure()\n    cmd = ['make', '-j', str(multiprocessing.cpu_count())]\n    if target:\n        cmd.append(target)\n    run_cmd(cmd)\n\n\ndef _parse_test_file(test_file):\n    if not re.search(r'.+/tests/.+\\.cpp', test_file):\n        raise BuildError(\n            'Given file path is not a Botan unit test: ' + test_file)\n\n    with open(test_file, 'r', encoding='utf8') as f:\n        find_test_registration = \\\n            re.compile(\n                r'BOTAN_REGISTER_TEST(_FN)?\\s*\\(\\s*\\\"(.+)\\\",\\s*\\\"(.+)\\\",[^)]+\\)')\n\n        matches = find_test_registration.findall(f.read())\n        tests = [match[-1] for match in matches]\n\n    if not tests:\n        raise BuildError(\n            'Failed to find a BOTAN_REGISTER_TEST in the given test file: ' + test_file)\n\n    return tests\n\n\ndef unittests(test_file):\n    tests = _parse_test_file(test_file) if test_file else []\n\n    build('tests')\n    run_cmd(['./' + _retrieve_test_binary_name()] + tests)\n\n\ndef apply_astyle_format(format_file):\n    ext = os.path.splitext(format_file)[1]\n    if ext 
not in ['.cpp', '.h']:\n raise BuildError(\n \"Refuse to format source files that appear to be non-C++\")\n\n try:\n run_cmd(['astyle',\n '--suffix=none', # do not create a backup copy of the unformatted file\n '--project=src/configs/astyle.rc',\n format_file])\n except FileNotFoundError as ex:\n raise BuildError(\n \"astyle utility not installed, cannot apply formatting\") from ex\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Sublime build helper')\n parser.add_argument('job', type=str)\n parser.add_argument('--project-root', type=str, required=True)\n parser.add_argument('--test-file', type=str, default='')\n parser.add_argument('--format-file', type=str, default='')\n\n opts = parser.parse_args()\n\n os.chdir(opts.project_root)\n\n if opts.job == 'all':\n build()\n elif opts.job == 'test':\n unittests(opts.test_file)\n elif opts.job == 'format':\n apply_astyle_format(opts.format_file)\n else:\n raise RuntimeError('Unknown build job: ' + opts.job)\n\n\nif __name__ == '__main__':\n try:\n main()\n except BuildError as msg:\n print(msg, file=sys.stderr)\n sys.exit(1)\n","repo_name":"OgurtsovAndrei/Messenger-project","sub_path":"Crypto-libs/botan-master/src/editors/sublime/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"9887048270","text":"# -*- coding: utf-8 -*-\nfrom storage.storage import Storage\n\nimport json\nimport os\n\n\nclass JsonStorage(Storage):\n\tdef __init__(self, storage_url=None):\n\t\tif storage_url is None:\n\t\t\tself.storage_url = \"\"\n\t\telse:\n\t\t\tself.storage_url = storage_url\n\t\n\tdef read(self, filename):\n\t\tif filename is None:\n\t\t\traise ValueError(\"Filename must be not None\")\n\t\t\n\t\tif len(self.storage_url) == 0:\n\t\t\tactual_filename = filename\n\t\telse:\n\t\t\tactual_filename = self.storage_url + \"/\" + filename\n\t\t\n\t\tprint(\"Actual filename \" + actual_filename)\n\t\t\n\t\t# Check if file exists\n\t\tif not os.path.exists(actual_filename):\n\t\t\traise FileNotFoundError(\"file \" + actual_filename + \" not found\")\n\t\t\n\t\ttry:\n\t\t\tjson_data = open(actual_filename).read()\n\t\t\tif json_data is not None:\n\t\t\t\tdata = json.loads(json_data)\n\t\t\t\treturn data\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn None\n\t\t\t\t\n\t\texcept IOError:\n\t\t\traise IOError(\"Something happened during reading the json file %s\", actual_filename)\n\t\t\n\t\texcept Exception:\n\t\t\traise Exception(\"Something unexpected happened\")\n\n\tdef store(self, filename, content):\n\t\tif filename is None or content is None or len(filename) == 0 or len(content) == 0:\n\t\t\traise ValueError(\"Either invalid filename or content\")\n\t\t\n\t\tif len(self.storage_url) == 0:\n\t\t\tactual_filename = filename\n\t\telse:\n\t\t\tactual_filename = self.storage_url + \"/\" + filename\n\t\t\n\t\twith open(actual_filename, \"w\", encoding=\"utf-8\") as f:\n\t\t\tjson.dump(content, f, ensure_ascii=False)\n","repo_name":"minhlongdo/json-url-opener","sub_path":"storage/jsonstorage.py","file_name":"jsonstorage.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35702007112","text":"# coding=utf-8\n\nimport logging\nfrom threading import Lock\n\nfrom telegram import ChatAction\n\nfrom googlesheets import goglemogle\nfrom utils.commands import TASK_DELETE_COMMAND, handle_error, get_operands\nfrom utils.user_check import 
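A quick demonstration of the `BOTAN_REGISTER_TEST` regex from `_parse_test_file` in the build script above, run against an in-memory snippet instead of a real source file (the test names here are made up):

```python
import re

find_test_registration = re.compile(
    r'BOTAN_REGISTER_TEST(_FN)?\s*\(\s*\"(.+)\",\s*\"(.+)\",[^)]+\)')

snippet = '''
BOTAN_REGISTER_TEST("block", "aes", AES_Tests);
BOTAN_REGISTER_TEST_FN("hash", "sha2_32", test_sha2_32);
'''
matches = find_test_registration.findall(snippet)
print([m[-1] for m in matches])   # ['aes', 'sha2_32'] -- the test names
```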
get_user_group\n\nlock = Lock()\n\nlog = logging.getLogger(__name__)\n\n\ndef task(bot, update):\n log.info(msg=\"Adding a task \" + str(update.message))\n\n user = update.message.from_user.username\n user_group = get_user_group(user)\n if user_group is None:\n handle_error(bot, update, None, \"Access denied\")\n return\n\n bot.sendChatAction(chat_id=update.message.chat_id,\n action=ChatAction.TYPING)\n with lock:\n task_str = update.message.text \\\n .replace('/task', '') \\\n .replace('@DnJTodoBot', '') \\\n .strip() \\\n .split(\";\")\n if task_str[0] == '':\n log.info(\"Input task is empty\" + str(update.message.text))\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Ф��рмат: /task Имя задачи [; категория; дата; ссылка]\")\n return\n\n task_name = task_str[0].strip()\n category = task_str[1].strip() if len(task_str) > 1 else \"\"\n due_date = task_str[2].strip() if len(task_str) > 2 else \"\"\n link = task_str[3].strip() if len(task_str) > 3 else \"\"\n\n try:\n result = goglemogle.add_task(user_group, task_name, due_date, category, link)\n except Exception as e:\n log.error(e)\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Sorry,\\n\" + str(e))\n raise e\n\n log.info(result)\n reply_msg = update.message.from_user.first_name + \", я добавил задачу: \" + task_name\n bot.sendMessage(chat_id=update.message.chat_id, text=reply_msg)\n\n\ndef task_list(bot, update):\n log.info(msg=\"Listing tasks \")\n\n user = update.message.from_user.username\n user_group = get_user_group(user)\n if user_group is None:\n handle_error(bot, update, None, \"Access denied\")\n return\n\n bot.sendMessage(chat_id=update.message.chat_id, text=\"I have something for you:\")\n\n bot.sendChatAction(chat_id=update.message.chat_id,\n action=ChatAction.TYPING)\n try:\n values = goglemogle.task_list(user_group)\n except Exception as e:\n log.error(e)\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Sorry,\\n\" + str(e))\n raise e\n\n if not values:\n log.error(msg=\"empty response from google sheet\")\n bot.sendMessage(chat_id=update.message.chat_id,\n text=\"Oops. I cannot find anything. \\nHave you finished everything? 
Amazing!\")\n else:\n try:\n print_task_list(bot, update, values)\n\n except Exception as e:\n log.error(e)\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Sorry,\\n\" + str(e))\n raise e\n\n\ndef print_task_list(bot, update, values):\n # get rid of the title rows\n values = values[3:]\n todo_str = \"\"\n task_number = 0\n # append only actual task names\n for row in values:\n if row[0] == \"\":\n task_number += 1\n todo_str += \"[\" + row[5] + \"] \" + row[4] + \"\\n\"\n # print by chunks of 10 tasks\n if task_number >= 10:\n bot.sendMessage(chat_id=update.message.chat_id, text=todo_str)\n log.info(msg=\"list of tasks \" + todo_str)\n task_number = 0\n todo_str = \"\"\n\n # print the rest part\n if todo_str != \"\":\n bot.sendMessage(chat_id=update.message.chat_id, text=todo_str)\n log.info(msg=\"list of tasks \" + todo_str)\n\n\ndef done_task(bot, update):\n log.info(msg=\"Finishing a task \" + str(update.message))\n\n user = update.message.from_user.username\n user_group = get_user_group(user)\n if user_group is None:\n handle_error(bot, update, None, \"Access denied\")\n return\n\n bot.sendChatAction(chat_id=update.message.chat_id,\n action=ChatAction.TYPING)\n with lock:\n task_str = update.message.text \\\n .replace('/done', '') \\\n .replace('@DnJTodoBot', '') \\\n .strip()\n if task_str == '':\n log.info(\"String with task id is empty\" + str(update.message.text))\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Формат: /done id\")\n return\n\n # TODO check what if it is not int\n\n try:\n task_id = int(task_str)\n result = goglemogle.finish_task(user_group, task_id)\n except Exception as e:\n log.error(e)\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Sorry,\\n\" + str(e))\n raise e\n\n if result is False:\n log.info(result)\n reply_msg = update.message.from_user.first_name + \", эта задача уже была завершена\"\n bot.sendMessage(chat_id=update.message.chat_id, text=reply_msg)\n else:\n log.info(result)\n reply_msg = update.message.from_user.first_name + \", я завершил задачу \" + str(task_id)\n bot.sendMessage(chat_id=update.message.chat_id, text=reply_msg)\n\n\ndef task_delete_handler(bot, update):\n log.info(msg=\"Deleting a task \" + str(update.message))\n\n user = update.message.from_user.username\n user_group = get_user_group(user)\n if user_group is None:\n handle_error(bot, update, None, \"Access denied\")\n return\n\n bot.sendChatAction(chat_id=update.message.chat_id,\n action=ChatAction.TYPING)\n\n try:\n operands = get_operands(TASK_DELETE_COMMAND, update.message.text)\n except Exception as e:\n handle_error(bot, update, TASK_DELETE_COMMAND, str(e))\n raise e\n\n if operands[0] is not None:\n task_id = operands[0]\n else:\n handle_error(bot, update, TASK_DELETE_COMMAND, \"No task id provided\")\n return\n\n try:\n task_id = int(task_id)\n with lock:\n result = goglemogle.delete_task(user_group, task_id)\n except Exception as e:\n handle_error(bot, update, TASK_DELETE_COMMAND, str(e))\n raise e\n\n log.info(result)\n reply_msg = update.message.from_user.first_name + \", я удалил задачу \" + str(task_id)\n bot.sendMessage(chat_id=update.message.chat_id, text=reply_msg)\n","repo_name":"denolia/swissobot","sub_path":"core/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"40004102666","text":"import book\nimport calculator\nimport icons\nimport json\nimport logic\nimport mega\nimport mega2\nimport sys\nimport 
serde\nfrom collections import defaultdict\n\ndef main():\n serde.load('rail_and_signal')\n f3()\n\n\ndef f3():\n prints = serde.load('s12')[\"blueprint_book\"][\"blueprints\"]\n new_prints = mega2.go(prints)\n new_book = book.make(\n \"Mega Base\",\n icons.mkIcons('satellite'),\n new_prints)\n serde.dump(\"megabase\", new_book)\n\n\ndef f2():\n t_print = serde.load(\"colortrain\")[\"blueprint\"]\n t_out = logic.mkColorTrains(t_print)\n new_book = book.make(\n \"Train colors\",\n icons.mkIcons('locomotive'),\n t_out)\n serde.dump(\"colortrain\", new_book)\n\n\ndef f1():\n # Load the prints\n l_book = serde.load(\"l\")\n l_print = l_book[\"blueprint_book\"][\"blueprints\"][0][\"blueprint\"]\n factory_book = serde.load(\"factory\")\n factory_print = factory_book[\"blueprint_book\"][\"blueprints\"][0][\"blueprint\"]\n fluid_factory_print = factory_book[\"blueprint_book\"][\"blueprints\"][1][\"blueprint\"]\n inoutline_book = serde.load(\"inoutlines\")\n in4 = inoutline_book[\"blueprint_book\"][\"blueprints\"][0][\"blueprint\"]\n in8 = inoutline_book[\"blueprint_book\"][\"blueprints\"][1][\"blueprint\"]\n out4 = inoutline_book[\"blueprint_book\"][\"blueprints\"][2][\"blueprint\"]\n out8 = inoutline_book[\"blueprint_book\"][\"blueprints\"][3][\"blueprint\"]\n jump4 = inoutline_book[\"blueprint_book\"][\"blueprints\"][4][\"blueprint\"]\n jump8 = inoutline_book[\"blueprint_book\"][\"blueprints\"][5][\"blueprint\"]\n\n elderaxe_book = serde.load(\"elderaxe\")\n logic.augmentElderaxe(elderaxe_book)\n serde.dump(\"elderaxe_augmented\", elderaxe_book)\n\n # Load crafting info.\n # 0=assemble, 2=chem, 4=centrifuge, 6=oil, 8=rocket\n recipes = {e[\"id\"]: e for e in json.loads(serde.read(\"recipes.json\"))}\n crafting_types = json.loads(serde.read(\"craft_info.txt\"))\n\n '''\n need = calculator.calculate(recipes, crafting_types, {\n \"rocket-part\": 100,\n \"satellite\": 1,\n \"science-pack-1\": 1000,\n \"science-pack-2\": 1000,\n \"science-pack-3\": 1000,\n \"high-tech-science-pack\": 1000,\n \"production-science-pack\": 1000,\n })\n revd = defaultdict(list)\n for item, amt in need.items():\n revd[amt].append(item)\n for amt in sorted(revd.keys()):\n for item in revd[amt]:\n print(f\"{amt}\\t{item}\")\n\n return \n '''\n\n # Process the blueprint. 
So begins super-custom code.\n new_prints = logic.process(\n recipes,\n l_print,\n factory_print, fluid_factory_print,\n in4, in8, out4, out8, jump4, jump8)\n\n # Put all the blueprints into a new book\n new_book = book.make(\n \"Factory prints\",\n icons.mkIcons('assembling-machine-1', 'rail'),\n new_prints)\n\n # Output\n serde.dump(\"all\", new_book)\n\n\nmain()\n\n# Notes\n#\n# Direction: N=0, E=2, S=4, W=6\n# Orientation (for trains): N=0, E=0.25, S=0.5, W=0.75\n#\n","repo_name":"Gownta/factorio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28237146964","text":"#!/usr/bin/env python\n\n'''Select a paper at random.'''\n\nimport argparse\nimport bibtexparser\nimport random\nimport re\nimport sys\n\nimport util\n\n\nSORT_KEY = re.compile(r'^(.+)(\\d{4})(.*?)$')\n\n\ndef main():\n options = get_options()\n entries = util.get_entries(options.strings, options.input)\n if options.year:\n entries = [e for e in entries if options.year in e['ID']]\n if options.random:\n entries = [random.choice(entries)]\n entries.sort(key=sortKey)\n for e in entries:\n print(e['ID'], e['title'])\n\n\ndef sortKey(entry):\n '''Create a sorting key for an entry.'''\n match = SORT_KEY.match(entry['ID'])\n suffix = f'+{match.group(3)}' if match.group(3) else ''\n return (match.group(2), match.group(1) + suffix)\n\n\ndef get_options():\n '''Turn arguments into configuration object.'''\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', help='specify input file')\n parser.add_argument('--random', action='store_true', help='select a single random entry')\n parser.add_argument('--strings', help='string definitions file (optional)')\n parser.add_argument('--year', nargs='?', help='specify a year')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AmPaschal/neverworkintheory.github.io","sub_path":"bin/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"16381372705","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n \n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n q = [root]\n \n while q:\n temp = []\n level_sum = 0\n for node in q:\n level_sum += node.val\n if node.left:\n temp.append(node.left)\n if node.right:\n temp.append(node.right)\n \n q = temp\n return level_sum","repo_name":"sravanneeli/LeetCode","sub_path":"1302-deepest-leaves-sum/1302-deepest-leaves-sum.py","file_name":"1302-deepest-leaves-sum.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24183454005","text":"\"\"\"\nToy1 dataset is composed of a root node with single child XML node. 
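A small driver for the level-order `deepestLeavesSum` above; LeetCode normally supplies `TreeNode` and the typing import, so both are restated here (in a real script they must precede the class definition for the `Optional[TreeNode]` annotation to resolve; the tree is a made-up example):

```python
from typing import Optional   # needed by the Optional[TreeNode] annotation

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

#       1
#      / \
#     2   3
#    / \
#   4   5      -> deepest level sum = 4 + 5 = 9
root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
print(Solution().deepestLeavesSum(root))   # 9
```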
No attributes and no text\nproperty. The output for each tree is the same tree with the tags of parent and child swapped.\nFor example\n\n<a><b/></a> becomes <b><a/></b>\n\"\"\"\n\nappConfigDefaults = {\n    # AppConfig defaults\n    \"checkpoint_every\": 10,\n}\nmodelArgsDefaults = {\n    \"attrib_value_vec_len\": 32,\n    \"node_info_propagator_stack_depth\": 3,\n    \"propagated_info_len\": 128,\n    \"output_decoder_stack_depth\": 1,\n    \"output_decoder_state_width\": 128,\n}\n","repo_name":"nishantsharma/xml.ai","sub_path":"domains/toy1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"67"} +{"seq_id":"40129058470","text":"# Create a program that makes the computer play Jokenpo (rock-paper-scissors) with you. V2!\r\n\r\nfrom random import randint\r\n\r\nprint(\"\"\"Bem vindo ao jogo do Jokenpo! Você vai jogar contra a maquina. Escolha uma das opções abaixo\r\n[ 1 ] Jó ( PEDRA )\r\n[ 2 ] Ken ( PAPEL )\r\n[ 3 ] Po ( TESOURA )\"\"\")\r\njokenpo = ['0', 'PEDRA', 'PAPEL', 'TESOURA']\r\nvoce = str(input('Deseja jogar o modo infinito ? digite SIM ou NAO\\n: '))\r\n\r\n\r\ndef jkp():\r\n    voce = input('OPÇÃO: ')\r\n    maquina = int(randint(1, 3))\r\n    if voce != 'PARAR':\r\n        voce = int(voce)\r\n        print('Você escolheu: \"{}\"\\nA maquina escolheu \"{}\"'.format(jokenpo[voce], jokenpo[maquina]))\r\n        if voce == maquina:\r\n            print('\\033[7;49;37m EMPATE! \\033[m')\r\n            return 1\r\n        elif (voce == 1 and maquina == 3) or (voce == 2 and maquina == 1) or (voce == 3 and maquina == 2):\r\n            print('\\033[7;30;42m VOCÊ GANHOU! \\033[m')\r\n            return 2\r\n        else:\r\n            print('\\033[7;30;41m A MAQUINA GANHOU \\033[m')\r\n            return 3\r\n    else:\r\n        exit()\r\n\r\n\r\nif voce == 'SIM':\r\n    win = int(0)\r\n    lost = int(0)\r\n    draw = int(0)\r\n    print('Se desejar parar, digite \"PARAR\" no lugar da escolha.')\r\n    while voce == 'SIM':\r\n        state = (jkp())\r\n        if state == 1:\r\n            draw = draw + 1\r\n        elif state == 2:\r\n            win = win + 1\r\n        elif state == 3:\r\n            lost = lost + 1\r\n        print('PLACAR: VITORIAS = {} / DERROTAS = {} / EMPATES = {}'.format(win, lost, draw))\r\nelse:\r\n    jkp()\r\n","repo_name":"AleCJunior/Python_CeV_Ex","sub_path":"ex045v2.py","file_name":"ex045v2.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25683454571","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\nimport time\nimport socket\nfrom datetime import datetime\nimport argparse\n\nimport warnings\nimport sys\nsys.path.append('./src')\nwarnings.filterwarnings(action='ignore')\n\ntry:\n    from tag_recognition.watcher import Watcher, EKF\n    from planning.planner import Planner\n    from control.controller import Controller\n    from communication.server import Client, Client2\nexcept ImportError:\n    from src.tag_recognition.watcher import Watcher, EKF\n    from src.planning.planner import Planner\n    from src.control.controller import Controller\n    from src.communication.server import Client\n\n\nwatcher, planner, controller = None, None, None\nclient = None\n\nekf = None\n\n\ndef init_modules(map2planning_ratio=0.08):\n    global watcher, planner, controller\n    # draw map\n    watcher = Watcher(img_size=(1920, 1080), tag_size=0.16)\n    map_color = watcher.draw_map(color_full=(\n        255, 255, 255), color_empty=(0, 0, 0))\n    map_planning = cv2.resize(map_color, dsize=(\n        0, 0), fx=map2planning_ratio, fy=map2planning_ratio)\n    map_planning[np.where(map_planning != 255)] = 0\n    # planning (m)\n    
planner = Planner(map_planning, meter_scale=0.01/map2planning_ratio)\n # control\n map_shape = map_color.shape\n controller = Controller(path=None, map_color=map_color,\n map_size=(map_shape[1]/100, map_shape[0]/100))\n print(\"modules initialized\")\n\n\ndef static_vars(**kwargs):\n def decorate(func):\n for k in kwargs:\n setattr(func, k, kwargs[k])\n return func\n return decorate\n\n\n@static_vars(past_pose=None, past_time=None)\ndef estimate_vel(pose_in: np.ndarray):\n if estimate_vel.past_pose is None:\n estimate_vel.past_pose = pose_in.copy()\n estimate_vel.past_time = time.time()\n return 0.\n else:\n diff = pose_in - estimate_vel.past_pose\n diff_norm = np.linalg.norm(diff)\n dt = time.time() - estimate_vel.past_time\n estimate_vel.past_pose = pose_in.copy()\n estimate_vel.past_time = time.time()\n return diff_norm / dt if diff_norm * dt > 0 else 0\n\n\n@static_vars(results_dir='results', img_idx=0)\ndef save_results(initial=False):\n if initial:\n now = datetime.now()\n time_now = '{}_{}_{}_{}_{}_{}'.\\\n format(now.year, now.month, now.day,\n now.hour, now.minute, now.second)\n save_results.results_dir = os.path.join(\n save_results.results_dir, time_now)\n os.mkdir(save_results.results_dir)\n else:\n plt.savefig(\n '{}/{}.jpg'.format(save_results.results_dir, save_results.img_idx))\n save_results.img_idx += 1\n\n\ndef find_parking_goal():\n global watcher, client\n empty_spots = {}\n while len(empty_spots) == 0:\n img_color = client.receive()\n _ = watcher.watch(img_color)\n empty_spots = watcher.find_empty_spots()\n cv2.imshow('tags', watcher.draw_tags())\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n time.sleep(0.1)\n cv2.destroyAllWindows()\n for spot_id in [2, 7, 5, 6, 7, 8, 9, 1, 2, 3, 4]:\n if spot_id in empty_spots:\n return empty_spots[spot_id]\n\n\n@static_vars(im_idx=0)\ndef find_car_pose(car_id=0):\n global watcher, client, ekf\n while True:\n img_color = client.receive()\n cv2.imwrite(\n 'results/imgs/{}.jpg'.format(find_car_pose.im_idx), img_color)\n find_car_pose.im_idx += 1\n tag_poses = watcher.watch(img_color)\n if tag_poses is None:\n continue\n elif car_id in tag_poses:\n trans, yaw = tag_poses[car_id]['trans'], tag_poses[car_id]['rot']\n # pose = ekf.apply(np.array([trans[0], trans[1], yaw]))\n pose = [trans[0], trans[1], yaw]\n return np.array(pose)\n\n\ndef planning_path(goal, car_id=0):\n global watcher, planner, client\n path = None\n while path is None:\n pose = find_car_pose(car_id=car_id)\n path = planner.plan_path(pose, goal)\n return path\n\n\ndef get_control(pose):\n global controller\n state = controller.update_pose(pose=pose, v=estimate_vel(pose[:2]))\n steer, accel = controller.estimate_control(state=state)\n return np.rad2deg(steer), accel\n\n\ndef jammed():\n global controller\n return len(controller.v) > 10 and np.mean(np.abs(controller.v[-10:])) < 0.1\n\n\ndef main(host='127.0.0.1', port=9999, modem='usbmodem', visualize=True, car_id=0, use_bt=False, save=True):\n global watcher, planner, controller, client, ekf\n\n # save results or not\n save_results(initial=True) if save else None\n\n # init modules\n init_modules(map2planning_ratio=0.08)\n print(\"modules initialized\")\n\n # start communication\n client = Client(host=host, port=port, use_bt=use_bt,\n basename=modem, vis=visualize)\n print(\"Host connected\")\n\n # find empty spots\n print('searching tags...')\n goal = find_parking_goal()\n print(\"empty spot found: {}\".format(goal))\n if visualize:\n plt.subplot(1, 2, 1)\n plt.imshow(cv2.flip(watcher.draw_map(color_full=(\n 100, 100, 
100), color_empty=(0, 0, 0)), 0))\n plt.scatter([goal[0]*100], [watcher.map_h - goal[1]*100], s=50, c='c')\n plt.pause(1)\n plt.figure(figsize=(10, 5))\n\n # EKF setup\n ekf = EKF(initial_pose=goal.copy(),\n xy_obs_noise_std=1.0,\n initial_yaw_std=np.pi,\n forward_velocity_noise_std=0.5,\n yaw_rate_noise_std=0.5)\n\n # find path & control\n while True:\n # planning\n path = planning_path(goal)\n # reset controller\n controller.map_color = watcher.draw_map(\n color_full=(100, 100, 100), color_empty=(0, 0, 0))\n controller.init_path(path)\n\n while not controller.check_goal() and not jammed():\n # calculate control\n pose = find_car_pose(car_id=car_id)\n steer, accel = get_control(pose)\n\n # send msg\n handle = 'L' if steer > 0 else 'R'\n gear = 'F' if accel > 0 else 'B'\n msg = str('Q{}{}{:.2f},{:.2f}'.format(\n handle, gear, abs(steer), abs(accel)))\n client.send(msg)\n\n if visualize or save:\n plt.subplot(1, 2, 1)\n controller.show(ax=plt.gca())\n plt.subplot(1, 2, 2)\n ekf.show_results(ax=plt.gca())\n\n if save:\n save_results()\n\n plt.pause(0.01)\n plt.subplot(1, 2, 1)\n plt.gca().clear()\n plt.subplot(1, 2, 2)\n plt.gca().clear()\n\n if controller.check_goal():\n break\n\n # parking ended\n msg = str('QLF0.00,0.00'.format(0.00, 0.00))\n client.send(msg)\n client.send(msg)\n client.send(msg)\n client.send(msg)\n client.send(msg)\n print('parking finished')\n client.close()\n\n\ndef main2(host='127.0.0.1', port=9999, modem='usbmodem', visualize=True, car_id=0, use_bt=False, save=True):\n global watcher, planner, controller, client, ekf\n\n from src.communication.server import Client2\n\n # init modules\n init_modules(map2planning_ratio=0.08)\n print(\"modules initialized\")\n\n # start communication\n client = Client2(host=host, port=port, vis=visualize)\n print(\"Host connected\")\n\n # find empty spots\n print('searching tags...')\n goal = find_parking_goal()\n print(\"empty spot found: {}\".format(goal))\n if visualize:\n plt.subplot(1, 2, 1)\n plt.imshow(cv2.flip(watcher.draw_map(color_full=(\n 100, 100, 100), color_empty=(0, 0, 0)), 0))\n plt.scatter([goal[0]*100], [watcher.map_h - goal[1]*100], s=50, c='c')\n plt.pause(1)\n plt.figure(figsize=(10, 5))\n\n # EKF setup\n ekf = EKF(initial_pose=goal.copy(),\n xy_obs_noise_std=1.0,\n initial_yaw_std=np.pi,\n forward_velocity_noise_std=0.5,\n yaw_rate_noise_std=0.5)\n\n # find path & control\n while True:\n # planning\n path = planning_path(goal)\n # reset controller\n controller.map_color = watcher.draw_map(\n color_full=(100, 100, 100), color_empty=(0, 0, 0))\n controller.init_path(path)\n\n while not controller.check_goal() and not jammed():\n # calculate control\n pose = find_car_pose(car_id=car_id)\n steer, accel = get_control(pose)\n\n # send msg\n handle = 'L' if steer > 0 else 'R'\n gear = 'F' if accel > 0 else 'B'\n msg = str('Q{}{}{:.2f},{:.2f}'.format(\n handle, gear, abs(steer), abs(accel)))\n client.send(msg)\n\n if visualize or save:\n plt.subplot(1, 2, 1)\n controller.show(ax=plt.gca())\n plt.subplot(1, 2, 2)\n ekf.show_results(ax=plt.gca())\n\n if save:\n save_results()\n\n plt.pause(0.01)\n plt.subplot(1, 2, 1)\n plt.gca().clear()\n plt.subplot(1, 2, 2)\n plt.gca().clear()\n\n if controller.check_goal():\n break\n\n # parking ended\n msg = str('QLF0.00,0.00'.format(0.00, 0.00))\n client.send(msg)\n client.send(msg)\n client.send(msg)\n client.send(msg)\n client.send(msg)\n print('parking finished')\n client.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='main (vehicle 
parking)')\n    parser.add_argument('--hostname', type=str,\n                        default=None, help='name of host')\n    parser.add_argument('--modem', type=str,\n                        default='usbmodem', help='name of modem')\n    parser.add_argument('--ip', type=str,\n                        default='127.0.0.1', help='host ip address')\n    args = parser.parse_args()\n\n    # HOST = '127.0.0.1'\n    HOST = args.ip\n    PORT = 9999\n    if args.hostname is not None:\n        HOST = socket.gethostbyname(args.hostname)\n\n    main2(host=HOST, port=PORT, modem=args.modem,\n          visualize=True, car_id=0, use_bt=False, save=True)\n","repo_name":"KaistParking/parking-pkg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23483050639","text":"def pressurize(inside, outside):\n    \"\"\"700 1000 30% -300*100/1000\"\"\"\n    percent = (outside-inside)*100/inside\n    if percent < -30:\n        print(\"Underpressure\", end=\" \")\n    elif percent > 30:\n        print(\"Overpressure\", end=\" \")\n    else:\n        print(\"Safe\", end=\" \")\n    print(\"%.4f%%\"%abs(percent))\npressurize(float(input()), float(input()))\n","repo_name":"ntsd/my-solutions","sub_path":"PSIT/PSIT2016 Midterm EXAM Experiment/Pressurize.py","file_name":"Pressurize.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"2252997811","text":"#\n# MODULES.py\n#\n# Part of the Factory. Contains all classes representing modules for factories\n# to purchase so they can make additional or more stuff.\n\nimport random\nimport sys\n\nimport APIs.calculator as calculator\nimport Tools\nfrom Tools import Date\nfrom Tools import Position\n\nclass Storage ():\n    class Rule ():\n        In = \"__IN\"\n        Out = \"__OUT\"\n        InOut = \"__IN_OUT\"\n        def __init__(self, item, target_stored = \"[max]\", flow = InOut, target_modules = [ \"*\" ], anti_target_modules = [ \"depot\" ]):\n            self.item = item\n            # Allow the module to enter a variable target (e.g. 
[max]/2)\n            self.form = calculator.parse(target_stored, verbose=False)\n            self.flow = flow\n            self.target_modules = target_modules\n            self.anti_target_modules = anti_target_modules\n\n    def __init__(self, max = 5000):\n        self.__storage = {}\n        self.rules = {}\n        self.max = max\n        self.total = 0\n\n    def add_rule (self, id, rule):\n        self.rules[id] = rule\n\n    def get_rule (self, id):\n        if id not in self.rules: return None\n        return self.rules[id]\n\n    def remove_rule (self, id):\n        self.rules.pop(id, None)\n\n    def __getitem__(self, key):\n        if key not in self.__storage:\n            return 0\n        return self.__storage[key]\n\n    def store (self, item, amount):\n        if item not in self.__storage:\n            # If we're already at maximum, return the amount as the overflow\n            # (nothing fits)\n            if self.stored() == self.max:\n                return amount\n            # Init new item\n            self.__storage[item] = 0\n        # Compute the overflow\n        overflow = (self.stored() + amount) - self.max\n        if overflow < 0: overflow = 0\n        # Store the amount\n        self.__storage[item] += amount - overflow\n        self.total += amount - overflow\n        return overflow\n\n    def retrieve (self, item, amount):\n        # If the item doesn't exist, we cannot retrieve anything\n        if item not in self.__storage: return 0\n        # Compute the underflow\n        underflow = amount - self.__storage[item]\n        if underflow < 0: underflow = 0\n        # Get the amount\n        self.__storage[item] -= amount - underflow\n        self.total -= amount - underflow\n        return amount - underflow\n\n    # Return total amount stored\n    def stored (self):\n        return self.total\n\n    # Prints the storage contents in a pretty way\n    def print (self):\n        # First, print some lines\n        print(\"-\" * 50)\n        print(\"Storage contents:\")\n        for item in self.__storage:\n            print(\" - {}: {}\".format(item, self.__storage[item]))\n        if self.stored() == 0:\n            print(\" (None)\")\n        print(\"Total: {}/{} units stored\".format(self.stored(), self.max))\n        print(\"-\" * 50)\n\nclass Module ():\n    def __init__(self, name, cost, positions, factory_name, modules, time, construction_time):\n        if name == self.type:\n            print(\"Name of module cannot equal its type\")\n            return\n        self.name = name\n        self.cost = cost\n        self.modules = modules\n        self.time = time\n        self.construction_time = construction_time\n        self.positions = positions\n        self.work_done = 0\n        self.founded = self.time.now()\n        self.factory_name = factory_name\n\n    def do_work (self, workers):\n        for w in workers:\n            # Get the workload\n            self.work_done += w.work()\n    \n    def log (self, text, end=\"\\n\"):\n        if type(text) != str:\n            text = str(text)\n        Tools.CONSOLE.print(\"[\" + self.factory_name + \"] \" + text, end=end)\n\n# ADMINISTRATIVE MODULES\n\n# Basic to any factory: manages money, houses the boss\nclass Office (Module):\n    type = \"office\"\n\n    def __init__(self, budget, market, factory_name, modules, time):\n        super().__init__(\"Headquarters (HQ)\", 0, [Position(\n            name=\"CEO\",\n            workload=2,\n            salary=100,\n            schedule=[ 9, 17 ],\n            education_level = 3\n        )], factory_name, modules, time, Date(0,0,0,0))\n\n        self.budget = budget\n        self.market = market\n        # Prepare the production chains\n        self.production_chains = []\n        # Do the archive\n        self.modules.archive.add_cabinet(\"General\")\n        self.modules.archive.set(\"General\", \"Founded\", self.time.now())\n\n        self.modules.archive.add_cabinet(\"Finance\")\n        self.modules.archive.set(\"Finance\", \"Total Sold\", 0)\n        self.modules.archive.set(\"Finance\", \"Daily Balances\", [])\n        self.modules.archive.set(\"Finance\", \"Yearly Balances\", [])\n\n        self.modules.archive.add_tick(self.manage_archive)\n\n    def check_balance (self):\n        return 
self.budget\n\n    def deposit (self, amount):\n        self.budget += amount\n\n    def pay (self, amount):\n        if self.budget >= amount:\n            self.budget -= amount\n            return amount\n        return 0\n\n    # Buy resources\n    def buy_resources(self, item, amount):\n        # Pay until we get to an amount we can buy\n        if item not in self.market.buy_list:\n            # Cannot buy\n            Tools.CONSOLE.print(\"Attempted to buy non-existing item from market: {}\".format(item))\n            return 0\n        price = self.market.buy_list[item]\n        while self.budget < price * amount:\n            amount -= 1\n        # Now get the money from the budget and use that to buy the items\n        return self.market.buy(item, amount, self.pay(price * amount))\n\n    # Sell resources\n    def sell_resources(self, item, amount):\n        # Sell it to the market\n        self.deposit(self.market.sell(item, amount))\n        # Manage the counter for the archive\n        self.modules.archive.update('Finance', 'Total Sold', 1)\n\n    # Evaluate if we need more production chains\n    def evaluate (self):\n        if len(self.production_chains) == 0:\n            pass\n\n    # Archive certain stats\n    @staticmethod\n    def manage_archive (modules, ticked):\n        if 'days' in ticked:\n            # New day\n            modules.archive.update('Finance', 'Daily Balances', modules.office.check_balance())\n        if 'years' in ticked:\n            # New year\n            modules.archive.update('Finance', 'Yearly Balances', modules.office.check_balance())\n\n# Can be purchased to research new recipes and modules.\nclass Research (Module):\n    type = \"research\"\n\n    def __init__(self):\n        super().__init__()\n\n# A requirement for any factory. Manages staff and salaries.\nclass HumanResources (Module):\n    type = \"human_resources\"\n\n    def __init__(self, factory_name, modules, time):\n        worker_position = Position(\n            name=\"Travel Agent\",\n            workload=1,\n            salary=50,\n            schedule=[ 9, 17 ],\n            education_level = 1\n        )\n        super().__init__(\"Human Resources (HR)\", 0, [worker_position for i in range(5)], factory_name, modules, time, Date(0,0,0,0))\n        self.workers = []\n\n        # Initialise the archive\n        self.modules.archive.add_cabinet(\"Workers\")\n        self.modules.archive.set(\"Workers\", \"History\", [])\n        self.modules.archive.set(\"Workers\", \"Hired\", 0)\n        self.modules.archive.set(\"Workers\", \"Fired\", 0)\n\n    # Counts position in a module\n    def count_positions(self, positions, position):\n        c = 0\n        for p in positions:\n            c += (1 if p.name == position else 0)\n        return c\n\n    def manage_workers (self, available_workers):\n        # First, check if any should be fired for some reason\n        modules_count = {}\n        for w in self.workers:\n            # Pay him salary (is departure bonus if he's fired)\n            if self.modules.office.pay(w.salary) == 0:\n                # Oh no! 
Didn't have enough money to pay worker (kill his energy)\n                w.no_salary += 1\n            else:\n                w.no_salary = 0\n\n            if w.age > 67:\n                # Fire due to pension\n                self.fire(w, \"pension age\")\n            elif w.no_salary == 4:\n                # He quits due to not enough salary\n                self.fire(w, \"not paid enough\")\n            elif w.no_salary == 0 and w.perfect / (self.time - w.started).todays() < 0.5:\n                # Fire due to too low performance\n                self.fire(w, \"too low performance\")\n            else:\n                # Count the workers per module and position\n                if w.module in modules_count:\n                    if w.position.name in modules_count[w.module]:\n                        modules_count[w.module][w.position.name] += 1\n                    else:\n                        modules_count[w.module][w.position.name] = 1\n                else:\n                    modules_count[w.module] = {}\n                    modules_count[w.module][w.position.name] = 1\n        # Hire additional workers if required (once per worker)\n        for m in self.modules:\n            if m.name not in modules_count:\n                # It has no workers, init the module holder\n                modules_count[m.name] = {}\n            for p in m.positions:\n                if p.name not in modules_count[m.name]:\n                    # The position isn't filled, init it\n                    modules_count[m.name][p.name] = 0\n                if modules_count[m.name][p.name] < self.count_positions(m.positions, p.name):\n                    # Hire a possible additional worker (1 per module)\n                    w = available_workers[random.randint(0,len(available_workers)-1)]\n                    if self.hire(w, m, p) == True:\n                        modules_count[m.name][p.name] += 1\n                        if m != self: self.work_done -= 1\n                        available_workers.remove(w)\n                        if len(available_workers) == 0:\n                            # Stop because there are no more workers left\n                            break\n                        self.log(\"Hired \" + w.name + \" to work in \" + m.name + \" as \" + p.name)\n                if m != self and self.work_done - 1 <= 0:\n                    # Stop because all the work that can be done is done (HumanResources is free)\n                    break\n            if len(available_workers) == 0:\n                break\n\n        # Fire any excess workers (e.g., more workers for a position in a module than there are positions)\n        for w in self.workers:\n            if modules_count[w.module][w.position.name] > self.count_positions(self.modules[w.module].positions, w.position.name):\n                # We gotta let him go :(\n                self.fire(w, \"too many workers\")\n\n    def manage_shifts (self):\n        for w in self.workers:\n            if self.time.hour == w.position.schedule[0] - 1:\n                # Put the worker on duty\n                w.on_duty = True\n            elif self.time.hour == w.position.schedule[1] - 1:\n                # Put the worker off-duty\n                w.on_duty = False\n                # Make him sleep\n                w.sleep()\n\n    # Called at the end of a day to evaluate workers\n    def evaluate (self):\n        for w in self.workers:\n            if w.energy > 0:\n                w.perfect += 1\n            # Also level up the worker\n            w.level_up()\n\n    def hire (self, worker, module, position):\n        if worker.stats.education_level >= position.education_level:\n            # Hire it\n            worker.module = module.name\n            worker.position = position\n            worker.started = self.time.now()\n            worker.perfect = 0\n            worker.salary = position.salary\n            self.workers.append(worker)\n            self.modules.archive.update(\"Workers\", \"Hired\", 1)\n            return True\n        return False\n\n    def fire (self, worker, reason):\n        # Remove the worker\n        self.workers.remove(worker)\n        # Now add his name and reason for firing\n        self.modules.archive.update(\"Workers\", \"History\", {\n            'name' : worker.name,\n            'module' : worker.module,\n            'position' : worker.position.name,\n            'days_employed' : (self.time - worker.started).todays(),\n            'reason' : reason\n        })\n        self.modules.archive.update(\"Workers\", \"Fired\", 1)\n\n    # Returns worker with given name\n    def get_worker(self, name):\n        # Loop through the workers\n        for w in self.workers:\n            if w.name == name:\n                return w\n        return None\n\n    # Returns all workers 
working in a specific module and / or position\n    def get_workers(self, module=\"*\", position=\"*\"):\n        # Assemble all workers with the module\n        to_return = []\n        for w in self.workers:\n            if (w.module == module or module == \"*\") and (w.position == position or position == \"*\"):\n                to_return.append(w)\n        return to_return\n\n# Manages resource flow\nclass Logistics (Module):\n    type = \"logistics\"\n\n    def __init__(self, factory_name, modules, time):\n        super().__init__(\"Logistics\", 0, [ Position(\n            name = \"Manager\",\n            workload = 2,\n            salary = 300,\n            schedule = [ 9, 17 ],\n            education_level = 1\n        ) ], factory_name, modules, time, Date(0,0,0,0))\n        # Add some additional positions\n        for _ in range(10):\n            self.positions.append(Position(\n                name = \"Forklift Operator\",\n                workload = 1,\n                salary = 200,\n                schedule = [ 9, 17 ],\n                education_level = 1\n            ))\n        # Set the amount that can be hauled per forktruck\n        self.max_haul = 50\n\n    # Override to do work\n    def do_work (self, workers):\n        if len(workers) == 0: return\n        # For every rule in every valid storage, do...\n        requests = []\n        offers = []\n        for m in self.modules:\n            if hasattr(m, 'storage') and m.type != \"depot\":\n                for r in m.storage.rules:\n                    rule = m.storage.rules[r]\n                    # Get the target value\n                    target = rule.form.calc({'max':m.storage.max})\n                    if (rule.flow == Storage.Rule.InOut or rule.flow == Storage.Rule.In) and m.storage[rule.item] < target:\n                        # Add it\n                        requests.append({\n                            'module':m,\n                            'targets':rule.target_modules,\n                            'antitargets':rule.anti_target_modules,\n                            'item':rule.item,\n                            'amount':target - m.storage[rule.item]\n                        })\n                    if (rule.flow == Storage.Rule.InOut or rule.flow == Storage.Rule.Out) and m.storage[rule.item] > target:\n                        # Add it\n                        offers.append({\n                            'module':m,\n                            'targets':rule.target_modules,\n                            'antitargets':rule.anti_target_modules,\n                            'item':rule.item,\n                            'amount':m.storage[rule.item] - target\n                        })\n\n        # Now drive (not to the depot, do that afterwards)\n        worker = 0\n        requests_overflow = []\n        while len(requests) > 0:\n            r = requests[0]\n            requests = requests[1:]\n            # Find a possible matching offer\n            check = True\n            for o in offers:\n                if self.isTarget(r, o) and self.isTarget(o, r):\n                    # Found a match, so this request must not go to the overflow list\n                    check = False\n                    # Transport!\n                    hauled = self.transport(o['module'].storage, r['module'].storage, r['item'], r['amount'], self.max_haul * workers[worker].work())\n                    r['amount'] -= hauled\n                    o['amount'] -= hauled\n                    if r['amount'] > 0:\n                        requests.append(r)\n                    if o['amount'] == 0:\n                        offers.remove(o)\n                    if hauled > 0: worker += 1\n                    if worker >= len(workers):\n                        # No more workers :(\n                        return\n                    break\n            if check:\n                # Put it in the overflow list instead\n                requests_overflow.append(r)\n\n        # Handle the depot (first buys...)\n        while len(requests_overflow) > 0:\n            r = requests_overflow[0]\n            requests_overflow = requests_overflow[1:]\n            # Buy it from the depot if it's in their rule\n            if 'depot' in r['targets'] or ('*' in r['targets'] and\n                'depot' not in r['antitargets']):\n                # Transport!\n                hauled = self.transport(self.modules.depot, r['module'].storage, r['item'], r['amount'], self.max_haul * workers[worker].work())\n                r['amount'] -= hauled\n                if r['amount'] > 0:\n                    requests_overflow.append(r)\n                worker += 1\n                if worker >= len(workers):\n                    # No more workers :(\n                    return\n            else:\n                # The depot is not a valid target; drop the request (re-appending it would loop forever)\n                pass\n        # (...then sales)\n        while len(offers) > 0:\n            o = offers[0]\n            offers = offers[1:]\n            # Sell it to the depot if it's in their rule\n            if 'depot' in o['targets'] or ('*' in o['targets'] and\n                'depot' not in o['antitargets']):\n                # Transport!\n                hauled = self.transport(o['module'].storage, 
self.modules.depot, o['item'], o['amount'], self.max_haul * workers[worker].work())\n                o['amount'] -= hauled\n                if o['amount'] > 0:\n                    offers.append(o)\n                worker += 1\n                if worker >= len(workers):\n                    # No more workers :(\n                    return\n            else:\n                # The depot is not a valid target; drop the offer (re-appending it would loop forever)\n                pass\n\n\n    # Checks to see whether offer is in the target range of request\n    def isTarget (self, request, offer):\n        return (\n            (request['item'] == offer['item'] or offer['item'] == \"*\") and\n            (offer['module'].name in request['targets'] or\n             offer['module'].type in request['targets'] or\n             \"*\" in request['targets']) and\n            (offer['module'].name not in request['antitargets'] and\n             offer['module'].type not in request['antitargets'] and\n             \"*\" not in request['antitargets']) and\n            request['module'].name != offer['module'].name\n        )\n\n    # Does the physical transport\n    def transport (self, storage_from, storage_to, item, amount, max):\n        # It's a match, transport\n        to_transport = amount if amount < max else max\n        got = storage_from.retrieve(item, to_transport)\n        # Store it\n        overflow = storage_to.store(item, got)\n        # Return the overflow\n        storage_from.store(item, overflow)\n        #print(\"Transported {} from item {}\".format(got - overflow, item))\n        # Return that which we have successfully stored\n        return got - overflow\n\n# Serves as the connection between the factory and the (global) market.\nclass Depot (Module):\n    type = \"depot\"\n\n    def __init__(self, factory_name, modules, time):\n        super().__init__(\"Depot\", 0, [ Position(\n            name=\"Truck Driver\",\n            workload = 2,\n            salary = 300,\n            schedule = [ 9, 17 ],\n            education_level = 1\n        ) for i in range(10)], factory_name, modules, time, Date(0,0,0,0))\n\n        # Init the storage\n        self.storage = Storage(max=float('inf'))\n\n        # Do the trucks\n        self.trucks = []\n\n        # Set the truck max\n        self.truck_max = 500\n\n    # Override do_work\n    def do_work (self, workers):\n        for w in workers:\n            work = w.work()\n            if work > 0:\n                self.trucks.append(int(self.truck_max * 2 * work))\n\n    # The store / retrieve functions\n    def store (self, item, amount):\n        # Sell these items\n        for truck in self.trucks:\n            shipped = truck\n            if shipped > amount:\n                shipped = amount\n            self.modules.office.sell_resources(item, shipped)\n            amount -= shipped\n        # Return that which we could not transport\n        return amount\n    def retrieve (self, item, amount):\n        bought = 0\n        for truck in self.trucks:\n            # Attempt to buy the resources\n            shipped = truck\n            if shipped > amount:\n                shipped = amount\n            amount -= shipped\n            bought += self.modules.office.buy_resources(item, shipped)\n        # Return the items bought instead\n        return bought\n\n# Saves all sorts of stats about the factory\nclass Archive (Module):\n    type = \"archive\"\n\n    def __init__(self, factory_name, modules, time):\n        super().__init__(\"Archive\", 0, [Position(\n            name=\"Clerk\",\n            workload=0,\n            salary=500,\n            schedule=[9,17],\n            education_level=2\n        ) for _ in range(5)], factory_name, modules, time, Date(0,0,0,0))\n\n        # Init the __cabinets list\n        self.__cabinets = {}\n        # Init the __ticks list\n        self.__ticks = []\n    \n    # Done\n    \n    def add_cabinet(self, name):\n        if name in self.__cabinets:\n            Tools.CONSOLE.print(\"[Archive] Attempting to add cabinet that already exists: {}\".format(name))\n            return False\n\n        self.__cabinets[name] = {}\n        return True\n\n    def remove_cabinet (self, name):\n        if name not in self.__cabinets:\n            Tools.CONSOLE.print(\"[Archive] Attempting to remove cabinet that does not exist: {}\".format(name))\n            return False\n\n        del self.__cabinets[name]\n        return True\n    \n    def add_tick 
(self, tick_handler):\n self.__ticks.append(tick_handler)\n\n def remove_tick (self, tick_handler):\n self.__ticks.remove(tick_handler)\n\n def manage (self, ticked):\n # For each clerk, manage it\n for tick_handler in self.__ticks:\n tick_handler(self.modules, ticked)\n \n def set (self, cabinet, shelf, value):\n if cabinet not in self.__cabinets:\n Tools.CONSOLE.print(\"[Archive] Attempting to log in non-existing cabinet '{}'\".format(cabinet))\n return False\n self.__cabinets[cabinet][shelf] = value\n return True\n \n def get (self, cabinet, shelf):\n if cabinet not in self.__cabinets:\n Tools.CONSOLE.print(\"[Archive] Attempting to retrieve from non-existing cabinet '{}'\".format(cabinet))\n return None\n if shelf not in self.__cabinets[cabinet]:\n Tools.CONSOLE.print(\"[Archive] Attempting to retrieve from non-existing shelf '{}' in cabinet '{}'\".format(shelf, cabinet))\n return None\n return self.__cabinets[cabinet][shelf]\n \n # Updates in a clever way\n def update (self, cabinet, shelf, d_value):\n if cabinet not in self.__cabinets:\n Tools.CONSOLE.print(\"[Archive] Attempting to update non-existing cabinet '{}'\".format(cabinet))\n return False\n if shelf not in self.__cabinets[cabinet]:\n Tools.CONSOLE.print(\"[Archive] Attempting to update non-existing shelf '{}' in cabinet '{}'\".format(shelf, cabinet))\n return False\n # Check that shelf type\n shelf_type = type(self.__cabinets[cabinet][shelf])\n if shelf_type == str:\n if type(d_value) != str:\n d_value = str(d_value)\n self.__cabinets[cabinet][shelf] += d_value\n elif shelf_type == int:\n if type(d_value) != int:\n Tools.CONSOLE.print(\"[Archive] Attempting to update shelf '{}' (integer) in cabinet '{}' with '{}' ({})\".format(shelf, cabinet, d_value, type(d_value)))\n return False\n self.__cabinets[cabinet][shelf] += d_value\n elif shelf_type == float:\n if type(d_value) != int and type(d_value) != float:\n Tools.CONSOLE.print(\"[Archive] Attempting to update shelf '{}' (float) in cabinet '{}' with '{}' ({})\".format(shelf, cabinet, d_value, type(d_value)))\n return False\n self.__cabinets[cabinet][shelf] += d_value\n elif shelf_type == list:\n if type(d_value) == list:\n self.__cabinets[cabinet][shelf] += d_value\n else:\n self.__cabinets[cabinet][shelf].append(d_value)\n elif shelf_type == dict:\n if type(d_value) != tuple or len(d_value) != 2:\n Tools.CONSOLE.print(\"[Archive] Attempting to update shelf '{}' (dictionary) in cabinet '{}', but not given key / value tuple\".format(shelf, cabinet))\n return False\n self.__cabinets[cabinet][shelf][d_value[0]] = d_value[1]\n else:\n Tools.CONSOLE.print(\"[Archive] Attempting to update shelf '{}' in cabinet '{}', but that value isn't updatable\".format(shelf, cabinet))\n return False\n return True\n\n def empty (self, cabinet, shelf):\n if cabinet not in self.__cabinets:\n Tools.CONSOLE.print(\"[Archive] Attempting to clean non-existing cabinet '{}'\".format(cabinet))\n return False\n if shelf not in self.__cabinets[cabinet]:\n Tools.CONSOLE.print(\"[Archive] Attempting to clean non-existing shelf '{}' in cabinet '{}'\".format(shelf, cabinet))\n return False\n del self.__cabinets[cabinet][shelf]\n return True\n\n# *** OPTIONAL ***\n# If the factory researched enough, they can unlock robots: cheaper, faster and\n# mostly less-complaining workers. 
They are managed in this module.\nclass RobotResources (Module):\n    type = \"robot_resources\"\n\n    def __init__(self):\n        super().__init__()\n\n# COOKIE MODULES\n\n# Stores stuff\nclass StoreRoom (Module):\n    type = \"store_room\"\n\n    def __init__(self):\n        super().__init__()\n\n# A Mixer, which can mix stuff.\nclass Mixer (Module):\n    type = \"mixer\"\n    recipe_fields = [('Inputs', dict), ('Outputs', dict)]\n\n    def __init__(self):\n        super().__init__()\n\n# An oven, which can heat stuff\nclass Oven (Module):\n    type = \"oven\"\n    recipe_fields = [('Inputs', dict), ('Outputs', dict), ('BakeTemp', int), ('BakeDuration', int)]\n\n    def __init__(self):\n        super().__init__()\n# OTHER MODULES\n\n# Test module\nclass SimpleProcessingUnit (Module):\n    type = \"simple_processing_unit\"\n\n    def __init__(self, name):\n        super().__init__(name, 0, [ Position(\n            name=\"Slave\",\n            workload = 1,\n            salary = 1,\n            schedule = [ 6, 18 ],\n            education_level = 1\n        ) for i in range(10)], \"\", [], Date(0, 0, 0, 1970), Date(0,0,0,0))\n\n        self.storage = Storage(max=2500)\n        self.storage.add_rule(\"wheat\", Storage.Rule('Wheat', target_stored = \"[max]\", flow = Storage.Rule.In, target_modules = [ \"depot\" ], anti_target_modules = []))\n        self.storage.add_rule(\"flour\", Storage.Rule('Flour', target_stored = \"0\", flow = Storage.Rule.Out, target_modules = [ \"depot\" ], anti_target_modules = []))\n\n    def do_work (self, workers):\n        # Convert resources\n        for worker in workers:\n            if worker.work() > 0:\n                amount = self.storage.retrieve('Wheat', 10)\n                overflow = self.storage.store('Flour', amount)\n                print(\"Processed {} wheat\".format(amount - overflow))\n","repo_name":"Lut99/CookieFactory","sub_path":"Modules.py","file_name":"Modules.py","file_ext":"py","file_size_in_byte":27501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5218975601","text":"#!/usr/bin/python\r\n\r\nimport cv2\r\nimport easyocr\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\ndef filter(result, umbral=0.3):\r\n    resp = []\r\n    for res in result:\r\n        if res[-1] >= umbral:\r\n            resp.append(res)\r\n    return resp\r\n\r\n\r\ndef text_extraction(img, rot=[90, 180, 270],gpu=False):\r\n    reader = easyocr.Reader([\"en\", \"es\"], gpu=gpu)\r\n    result = reader.readtext(img, paragraph=False, rotation_info=rot, text_threshold=0.7)\r\n    return filter(result)\r\n\r\n\r\ndef rotations(img):\r\n    return [img, cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE), cv2.rotate(img, cv2.ROTATE_180),\r\n            cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)]\r\n\r\n\r\ndef r_pred(images, predictions):\r\n    n, m = 2, 2\r\n    angles = [0, 90, 180, 270]\r\n    for i in range(len(images)):\r\n        plt.subplot(n, m, i + 1)\r\n        plt.imshow(visual_pred(images[i], predictions[i]))\r\n        plt.axis(\"off\")\r\n        # plt.title(angles[f\"Prediction\\nRot:{angles[i]}\"])\r\n    plt.tight_layout()\r\n\r\n\r\ndef visual_pred(img, result):\r\n    for res in result:\r\n        try:\r\n            pt0 = res[0][0]\r\n            pt1 = res[0][1]\r\n            pt2 = res[0][2]\r\n            cv2.rectangle(img, pt0, (pt1[0], pt1[1] - 23), (255, 0, 0), -1)\r\n            cv2.putText(img, res[1] + F\" P:{round(res[-1], 2)}\", (pt0[0], pt0[1] - 3), 2, 0.8, (255, 255, 255), 1)\r\n            cv2.rectangle(img, pt0, pt2, (255, 0, 0), 2)\r\n        except:\r\n            pass\r\n    return img\r\n\r\n\r\ndef final_pred(original, pred):\r\n    n, m = 1, 2\r\n    plt.subplot(n, m, 1)\r\n    plt.imshow(original)\r\n    plt.axis(\"off\")\r\n    plt.title(\"Original\")\r\n    plt.subplot(n, m, 2)\r\n    plt.imshow(pred)\r\n    plt.axis(\"off\")\r\n    
plt.title(\"Prediction\")\r\n\r\n\r\ndef run(name, gpu=False, rot=[90,180,270]):\r\n image = plt.imread(name)\r\n original = copy.deepcopy(image)\r\n text = text_extraction(image, gpu=gpu, rot=rot)\r\n prediction = visual_pred(image, text)\r\n #final_pred(original, prediction)\r\n #plt.show()\r\n\r\n\r\ndef camara():\r\n cap = cv2.VideoCapture(0)\r\n acceso, frame = cap.read()\r\n if acceso:\r\n print(text_extraction(frame))\r\n else:\r\n print(\"Error al acceder a la cámara\")\r\n\r\n cap.release()\r\n\r\n\r\ndef pruebas_rot(name):\r\n r = rotations(plt.imread(name))\r\n p = []\r\n for i in r:\r\n p.append(text_extraction(i))\r\n r_pred(r, p)\r\n\r\nt0 = time.time()\r\n\r\nrun(\"libros.jpg\", gpu=False)\r\n\r\nt1 = time.time()\r\n\r\ntotal = t1-t0\r\nprint(\"F\",total)\r\n\r\nt0 = time.time()\r\n\r\nrun(\"libros.jpg\", gpu=True)\r\n\r\nt1 = time.time()\r\n\r\ntotal = t1-t0\r\nprint(\"T\",total)\r\nprint(\"Terminado\")\r\n\r\n\r\n","repo_name":"Ersebreck/Sinfonia","sub_path":"lectura/lectura.py","file_name":"lectura.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40449192816","text":"from owner_model import Owner\nfrom payment import Payment\nclass View:\n\n def show_title(self): #to display PRIME EVENTS TITLE\n print(\"\\n\")\n print(\"*\"*20 + \" WELCOME TO PRIME EVENTS \"+ \"*\"*20)\n print(\"\\n\")\n\n\n def display(self,output):\n print(output)\n \n def login_view(self): #to display login choices\n self.show_title()\n print(\"1 : CUSTOMER LOGIN\")\n print(\"2 : OWNER LOGIN\")\n print(\"3 : ADMIN LOGIN\")\n print(\"\\n4: BACK TO PREVIOUS MENU\")\n option = int(input(\"\\nSelect your option : \"))\n if option in [1,2,3]:\n return option\n elif(option==4):\n self.main_menu()\n else:\n print(\"Option is invalid. Please select the correct option\")\n self.login_view()\n return option\n\n def register_view(self):\n self.show_title()\n print (\"1 : REGISTER AS CUSTOMER\")\n print (\"2 : REGISTER AS OWNER\")\n print(\"\\n3: BACK TO PREVIOUS MENU\")\n option = int(input(\"\\nSelect your option : \"))\n if option in [1,2]:\n return option\n elif (option == 3):\n self.main_menu()\n else:\n print(\"Option is invalid. Please select the correct option\")\n self.login_view()\n return option\n\n def display_user_info(self,uid,uname):\n print(\"\\n\")\n print(\"- \"*10+\"PRIME EVENTS\"+\" -\"*10)\n print(\"\\nUser ID : \",uid,\"\\t\\t\\t\\tUser name : \",uname,\"\\n\")\n\n\n def login(self):\n username=input(\"\\nEnter username : \")\n password=input(\"\\nEnter password : \")\n return (username,password)\n\n def register(self):\n name = input(\"\\nEnter name : \")\n username = input(\"Enter username : \")\n password = input(\"Enter password : \")\n phone_number = int(input(\"Enter phone Number: \"))\n email_id = input(\"Enter email Id: \")\n return (name,username,password,phone_number,email_id)\n\n def owner_menu(self, owner_obj):\n print (\"Hi \"+ owner_obj.name)\n print (\"\\n\\t\\t\\t OWNER MENU \\n\")\n print (\"1 : CREATE HALL\")\n print (\"2 : MANAGE BOOKINGS\")\n print (\"3 : MANAGE HALL\")\n print (\"4 : MANAGE PAYMENTS\")\n print (\"5 : MANAGE DISCOUNTS\")\n print (\"6 : REPLY TO QUOTATION\")\n print (\"7 : LOGOUT\")\n option = int(input(\"Select your option : \"))\n if option in [1,2,3,4,5,6]:\n return option\n elif option == 7:\n print(\"\\nLOGGED OUT SUCCESSFULLY...\")\n self.main_menu()\n else:\n print(\"Option is invalid. 
Please select the correct option\")\n self.owner_menu()\n\n def owner_payment_menu(self): \n self.show_title()\n print(\"1 : ADD PAYMENT\")\n print(\"2 : UPDATE PAYMENT\")\n print(\"3 : DELETE PAYMENT\")\n print(\"4 : LOGOUT\")\n option = int(input(\"Select your option : \"))\n return option\n\n def owner_add_payment_view(self):\n self.show_title()\n payment_id = int(input(\"Enter Payment ID : \"))\n booking_id = int(input(\"Enter Booking ID : \"))\n deposit_amount = int(input(\"Enter Deposit Amount : \"))\n payment_obj = Payment(payment_id,booking_id,deposit_amount)\n return payment_obj\n\n def owner_update_payment_view(self):\n\n payment_id = int(input(\"Enter Payment ID to be updated ? : \"))\n booking_id =None\n deposit_amount = int(input(\"Enter new Deposit Amount : \"))\n payment_obj = Payment(payment_id,booking_id,deposit_amount)\n return payment_obj\n\n\n\n def main_menu(self): #displaying the main menu for prime events\n self.show_title()\n print(\"1 : LOGIN\")\n print(\"2 : REGISTER\")\n print(\"3 : EXIT\")\n option = int(input(\"Select your option : \"))\n return option\n\n\n\n ","repo_name":"ankit1411/hall_reservation","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11551437353","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nimport nibabel as nib\nfrom truenet.true_net import (truenet_model, truenet_evaluate, truenet_data_postprocessing)\nfrom truenet.utils import truenet_utils\n\n#=========================================================================================\n# Truenet main test function\n# Vaanathi Sundaresan\n# 09-03-2021, Oxford\n#=========================================================================================\n\ndef main(sub_name_dicts, eval_params, intermediate=False, model_dir=None,\n load_case='last', output_dir=None, verbose=False):\n '''\n The main function for testing Truenet\n :param sub_name_dicts: list of dictionaries containing subject filepaths\n :param eval_params: dictionary of evaluation parameters\n :param intermediate: bool, whether to save intermediate results\n :param model_dir: str, filepath containing the test model\n :param load_case: str, condition for loading the checkpoint\n :param output_dir: str, filepath for saving the output predictions\n :param verbose: bool, display debug messages\n '''\n assert len(sub_name_dicts) > 0, \"There must be at least 1 subject for testing.\"\n use_cpu = eval_params['Use_CPU']\n if use_cpu is True:\n device = torch.device(\"cpu\")\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n nclass = eval_params['Nclass']\n num_channels = eval_params['Numchannels']\n\n model_axial = truenet_model.TrUENet(n_channels=num_channels, n_classes=nclass, init_channels=64, plane='axial')\n model_sagittal = truenet_model.TrUENet(n_channels=num_channels, n_classes=nclass, init_channels=64,\n plane='sagittal')\n model_coronal = truenet_model.TrUENet(n_channels=num_channels, n_classes=nclass, init_channels=64, plane='coronal')\n\n model_axial.to(device=device)\n model_sagittal.to(device=device)\n model_coronal.to(device=device)\n model_axial = nn.DataParallel(model_axial)\n model_sagittal = nn.DataParallel(model_sagittal)\n model_coronal = nn.DataParallel(model_coronal)\n\n model_name = 
eval_params['Modelname']\n\n    try:\n        model_path = os.path.join(model_dir, model_name + '_axial.pth')\n        model_axial = truenet_utils.loading_model(model_path, model_axial)\n\n        model_path = os.path.join(model_dir, model_name + '_sagittal.pth')\n        model_sagittal = truenet_utils.loading_model(model_path, model_sagittal)\n\n        model_path = os.path.join(model_dir, model_name + '_coronal.pth')\n        model_coronal = truenet_utils.loading_model(model_path, model_coronal)\n    except:\n        try:\n            model_path = os.path.join(model_dir, model_name + '_axial.pth')\n            model_axial = truenet_utils.loading_model(model_path, model_axial, mode='full_model')\n\n            model_path = os.path.join(model_dir, model_name + '_sagittal.pth')\n            model_sagittal = truenet_utils.loading_model(model_path, model_sagittal, mode='full_model')\n\n            model_path = os.path.join(model_dir, model_name + '_coronal.pth')\n            model_coronal = truenet_utils.loading_model(model_path, model_coronal, mode='full_model')\n        except ImportError:\n            raise ImportError('In directory ' + model_dir + ', ' + model_name + '_axial.pth or ' +\n                              model_name + '_sagittal.pth or ' + model_name + '_coronal.pth ' +\n                              'does not appear to be a valid model file')\n\n    if verbose:\n        print('Found ' + str(len(sub_name_dicts)) + ' subjects', flush=True)\n    for sub in range(len(sub_name_dicts)):\n        if verbose:\n            print('Predicting output for subject ' + str(sub+1) + '...', flush=True)\n        \n        test_sub_dict = [sub_name_dicts[sub]]\n        basename = test_sub_dict[0]['basename']\n        \n        probs_combined = []\n        flair_path = test_sub_dict[0]['flair_path']\n        flair_hdr = nib.load(flair_path).header\n        probs_axial = truenet_evaluate.evaluate_truenet(test_sub_dict, model_axial, eval_params, device, \n                                                        mode='axial', verbose=verbose)\n        probs_axial = truenet_data_postprocessing.resize_to_original_size(probs_axial, test_sub_dict, \n                                                                          plane='axial')\n        probs_combined.append(probs_axial)\n        \n        if intermediate:\n            save_path = os.path.join(output_dir,'Predicted_probmap_truenet_' + basename + '_axial.nii.gz')\n            preds_axial = truenet_data_postprocessing.get_final_3dvolumes(probs_axial, test_sub_dict)\n            if verbose:\n                print('Saving the intermediate Axial prediction ...', flush=True)\n            \n            newhdr = flair_hdr.copy()\n            newobj = nib.nifti1.Nifti1Image(preds_axial, None, header=newhdr)\n            nib.save(newobj, save_path) \n        \n        probs_sagittal = truenet_evaluate.evaluate_truenet(test_sub_dict, model_sagittal, eval_params, device, \n                                                           mode='sagittal', verbose=verbose)\n        probs_sagittal = truenet_data_postprocessing.resize_to_original_size(probs_sagittal, test_sub_dict, \n                                                                             plane='sagittal')\n        probs_combined.append(probs_sagittal)\n        \n        if intermediate:\n            save_path = os.path.join(output_dir,'Predicted_probmap_truenet_' + basename + '_sagittal.nii.gz')\n            preds_sagittal = truenet_data_postprocessing.get_final_3dvolumes(probs_sagittal, test_sub_dict)\n            if verbose:\n                print('Saving the intermediate Sagittal prediction ...', flush=True)\n            \n            newhdr = flair_hdr.copy()\n            newobj = nib.nifti1.Nifti1Image(preds_sagittal, None, header=newhdr)\n            nib.save(newobj, save_path) \n        \n        probs_coronal = truenet_evaluate.evaluate_truenet(test_sub_dict, model_coronal, eval_params, device, \n                                                          mode='coronal', verbose=verbose) \n        probs_coronal = truenet_data_postprocessing.resize_to_original_size(probs_coronal, test_sub_dict, \n                                                                            plane='coronal')\n        probs_combined.append(probs_coronal)\n        \n        if intermediate:\n            save_path = os.path.join(output_dir,'Predicted_probmap_truenet_' + basename + '_coronal.nii.gz')\n            preds_coronal = truenet_data_postprocessing.get_final_3dvolumes(probs_coronal, 
test_sub_dict)\n if verbose:\n print('Saving the intermediate Coronal prediction ...', flush=True)\n \n newhdr = flair_hdr.copy()\n newobj = nib.nifti1.Nifti1Image(preds_coronal, None, header=newhdr)\n nib.save(newobj, save_path) \n \n probs_combined = np.array(probs_combined)\n prob_mean = np.mean(probs_combined,axis=0)\n \n save_path = os.path.join(output_dir,'Predicted_probmap_truenet_' + basename + '.nii.gz')\n pred_mean = truenet_data_postprocessing.get_final_3dvolumes(prob_mean, test_sub_dict)\n if verbose:\n print('Saving the final prediction ...', flush=True)\n\n newhdr = flair_hdr.copy()\n newobj = nib.nifti1.Nifti1Image(pred_mean, None, header=newhdr)\n nib.save(newobj, save_path) \n \n if verbose:\n print('Testing complete for all subjects!', flush=True)\n","repo_name":"v-sundaresan/truenet","sub_path":"truenet/true_net/truenet_test_function.py","file_name":"truenet_test_function.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"41444605731","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom dataclasses import dataclass\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder,StandardScaler\nfrom src.exception import CustomException\nfrom src.logger import logging\nimport sys\nfrom src.utils import save_object,subtract_time,add_time,calculate_distance\n\n\n@dataclass\nclass Datatransformation_config:\n preprocessor_obj_file_path=os.path.join(\"Artifacts\",\"preprocessor.pkl\")\n\nclass Datatransformation:\n def __init__(self):\n self.data_transformation_config=Datatransformation_config()\n \n def get_data_transformation_obj(self):\n try:\n \n logging.info(\"data transformation initiated\")\n # numerical and categorical columns were separated\n numerical_columns=['Delivery_person_Age','distance']\n categorical_numerical=['Delivery_person_Ratings','Order_Year','Order_Month','Order_Day','orderd_hour','orderd_minute','orderd_picked_hour','orderd_picked_minute']\n categorical_columns=['Weather_conditions', 'Road_traffic_density', 'Type_of_order', 'Type_of_vehicle', 'Festival', 'City','Vehicle_condition','multiple_deliveries']\n\n \n\n logging.info(\"pipeline initiated\")\n # numerical pipeline\n num_pipeline1=Pipeline(\n steps=[\n (\"imputer\",SimpleImputer(strategy='mean')),\n (\"scaler\",StandardScaler())\n ]\n )\n num_pipeline2=Pipeline(\n steps=[\n (\"imputer\",SimpleImputer(strategy='most_frequent')),\n (\"scaler\",StandardScaler())\n ]\n )\n\n\n # categorical pipeline\n cat_pipeline=Pipeline(\n steps=[\n (\"imputer\",SimpleImputer(strategy=\"most_frequent\")),\n ('OneHotEncoder',OneHotEncoder(sparse=False)),\n (\"scaler\",StandardScaler(with_mean=False))\n ]\n )\n\n # joining numerical and categorical pipeline\n preprocessor=ColumnTransformer([\n (\"numerical_pipeline\",num_pipeline1,numerical_columns),\n (\"cat_numerical_pipeline\",num_pipeline2,categorical_numerical),\n (\"categorical_pipeline\",cat_pipeline,categorical_columns)\n ])\n logging.info(\"pipeline completed\")\n return preprocessor\n \n\n\n \n except Exception as e:\n logging.info(\"Exception occured at data transformation\")\n raise CustomException(e,sys)\n \n def initiate_data_transformation(self,train_path,test_path):\n try:\n logging.info(\"Reading train and test data started\")\n train_df=pd.read_csv(train_path)\n test_df=pd.read_csv(test_path)\n logging.info(\"Read train 
and test completed\")\n logging.info(f\"Train DataFrame head :\\n{train_df.head().to_string()}\")\n logging.info(f\"Test DataFrame head :\\n{test_df.head().to_string()}\")\n logging.info(\" Getting preprocessor object\")\n\n train_df['distance']=train_df.apply(lambda row: calculate_distance(row['Restaurant_latitude'],\n row['Restaurant_longitude'],\n row['Delivery_location_latitude'],\n row['Delivery_location_longitude']), axis=1)\n\n\n test_df['distance']=test_df.apply(lambda row: calculate_distance(row['Restaurant_latitude'],\n row['Restaurant_longitude'],\n row['Delivery_location_latitude'],\n row['Delivery_location_longitude']), axis=1)\n \n\n logging.info(\"Convert Order_Date column to datetime\")\n train_df[\"Order_Date\"] = pd.to_datetime(train_df[\"Order_Date\"],dayfirst=True)\n test_df[\"Order_Date\"] = pd.to_datetime(test_df[\"Order_Date\"],dayfirst=True)\n\n logging.info(\"Extract year, month, and day into separate columns\")\n train_df[\"Order_Year\"] = train_df[\"Order_Date\"].dt.year\n train_df[\"Order_Month\"] = train_df[\"Order_Date\"].dt.month\n train_df[\"Order_Day\"] = train_df[\"Order_Date\"].dt.day\n\n test_df[\"Order_Year\"] = test_df[\"Order_Date\"].dt.year\n test_df[\"Order_Month\"] = test_df[\"Order_Date\"].dt.month\n test_df[\"Order_Day\"] = test_df[\"Order_Date\"].dt.day\n\n logging.info(\"Extract hour and minute from time ordered and time picked order\")\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].astype('str')\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].apply(lambda x : x[0] if \".\" in x else x)\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].apply(lambda x :'0'+x+\":00\" if x=='1' else x)\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].apply(lambda x :'01'+x[2:5] if x[0:2]=='24' else x)\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].apply(lambda x :'0' if x=='nan'else x)\n train_df[\"Time_Orderd\"]=train_df[\"Time_Orderd\"].apply(lambda x :'00:00' if x=='0'else x)\n\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].astype('str')\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].apply(lambda x : x[0] if \".\" in x else x)\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].apply(lambda x :'0'+x+\":00\" if x=='1' else x)\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].apply(lambda x :'01'+ x[2:5] if x[0:2]=='24' else x)\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].apply(lambda x :'0' if x=='nan'else x)\n train_df[\"Time_Order_picked\"]=train_df[\"Time_Order_picked\"].apply(lambda x :'00:00' if x=='0'else x)\n\n train_df['Time_Orderd'] = np.where(train_df['Time_Orderd'] == '00:00', train_df.apply(subtract_time, axis=1), train_df['Time_Orderd'])\n train_df['Time_Order_picked'] = np.where(train_df['Time_Order_picked'] == '00:00', train_df.apply(add_time, axis=1), train_df['Time_Order_picked'])\n\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].astype('str')\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].apply(lambda x : x[0] if \".\" in x else x)\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].apply(lambda x :'0'+x+\":00\" if x=='1' else x)\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].apply(lambda x :'01'+x[2:5] if x[0:2]=='24' else x)\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].apply(lambda x :'0' if x=='nan'else x)\n test_df[\"Time_Orderd\"]=test_df[\"Time_Orderd\"].apply(lambda x :'00:00' if x=='0'else x)\n\n test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].astype('str')\n 
test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].apply(lambda x : x[0] if \".\" in x else x)\n test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].apply(lambda x :'0'+x+\":00\" if x=='1' else x)\n test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].apply(lambda x :'01'+ x[2:5] if x[0:2]=='24' else x)\n test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].apply(lambda x :'0' if x=='nan'else x)\n test_df[\"Time_Order_picked\"]=test_df[\"Time_Order_picked\"].apply(lambda x :'00:00' if x=='0'else x)\n\n test_df['Time_Orderd'] = np.where(test_df['Time_Orderd'] == '00:00', test_df.apply(subtract_time, axis=1), test_df['Time_Orderd'])\n test_df['Time_Order_picked'] = np.where(test_df['Time_Order_picked'] == '00:00', test_df.apply(add_time, axis=1), test_df['Time_Order_picked'])\n\n train_df[\"orderd_hour\"]=train_df[\"Time_Orderd\"].apply(lambda x : x[0:2])\n train_df[\"orderd_minute\"]=train_df[\"Time_Orderd\"].apply(lambda x : x[3:])\n\n test_df[\"orderd_hour\"]=test_df[\"Time_Orderd\"].apply(lambda x : x[0:2])\n test_df[\"orderd_minute\"]=test_df[\"Time_Orderd\"].apply(lambda x : x[3:])\n\n train_df[\"orderd_picked_hour\"]=train_df[\"Time_Order_picked\"].apply(lambda x : x[0:2])\n train_df[\"orderd_picked_minute\"]=train_df[\"Time_Order_picked\"].apply(lambda x : x[3:])\n\n test_df[\"orderd_picked_hour\"]=test_df[\"Time_Order_picked\"].apply(lambda x : x[0:2])\n test_df[\"orderd_picked_minute\"]=test_df[\"Time_Order_picked\"].apply(lambda x : x[3:])\n\n\n logging.info(\"converting the new columns into int datatype\")\n train_df[\"orderd_hour\"]=train_df[\"orderd_hour\"].astype(int)\n train_df[\"orderd_minute\"]=train_df[\"orderd_minute\"].astype(int)\n train_df[\"orderd_picked_hour\"]=train_df[\"orderd_picked_hour\"].astype(int)\n train_df[\"orderd_picked_minute\"]=train_df[\"orderd_picked_minute\"].astype(int)\n\n\n test_df[\"orderd_hour\"]=test_df[\"orderd_hour\"].astype(int)\n test_df[\"orderd_minute\"]=test_df[\"orderd_minute\"].astype(int)\n test_df[\"orderd_picked_hour\"]=test_df[\"orderd_picked_hour\"].astype(int)\n test_df[\"orderd_picked_minute\"]=test_df[\"orderd_picked_minute\"].astype(int)\n\n\n\n\n preprocessor_obj=self.get_data_transformation_obj()\n\n target_column_name='Time_taken (min)'\n drop_columns=[target_column_name,'ID', 'Delivery_person_ID','Restaurant_latitude','Restaurant_longitude','Delivery_location_latitude','Delivery_location_longitude','Order_Date','Time_Orderd','Time_Order_picked']\n\n input_feature_train_df=train_df.drop(columns=drop_columns,axis=1)\n logging.info(f\"Train DataFrame head :\\n{input_feature_train_df.head().to_string()}\")\n target_feature_train_df=train_df[target_column_name]\n \n input_feature_test_df=test_df.drop(columns=drop_columns,axis=1)\n logging.info(f\"Train DataFrame head :\\n{input_feature_test_df.head().to_string()}\")\n target_feature_test_df=test_df[target_column_name]\n \n\n input_feature_train_arr=preprocessor_obj.fit_transform(input_feature_train_df)\n input_feature_test_arr=preprocessor_obj.transform(input_feature_test_df)\n logging.info(\"Applying preprocessing on train and test data\")\n\n train_arr=np.c_[input_feature_train_arr,np.array(target_feature_train_df)]\n test_arr=np.c_[input_feature_test_arr,np.array(target_feature_test_df)]\n\n \n\n save_object(\n file_path=Datatransformation_config.preprocessor_obj_file_path,\n obj=preprocessor_obj\n )\n logging.info(\"preprocessor pickle file saved\")\n\n return(\n train_arr,test_arr,\n 
self.data_transformation_config.preprocessor_obj_file_path\n )\n\n\n except Exception as e:\n logging.info(\"Exception occured at initiate_data_transformation\")\n raise CustomException(e,sys)","repo_name":"shahabas9/delivery_time_prediction","sub_path":"src/components/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10365271020","text":"import _flowunit as modelbox\nimport cv2\nimport json\nimport numpy as np\n\n\nclass draw_pose_lightFlowUnit(modelbox.FlowUnit):\n # Derived from modelbox.FlowUnit\n def __init__(self):\n super().__init__()\n\n def open(self, config):\n # Open the flowunit to obtain configuration information\n self.colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], \n [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85], \n [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], \n [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255], \n [255, 0, 170], [255, 0, 85], [85, 85, 255], [170, 170, 255], [170, 255, 170]]\n self.cnt_colors = len(self.colors)\n\n self.BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],\n [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]\n self.BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],\n [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27])\n\n return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n def process(self, data_context):\n # Process the data\n # modelbox.info('draw_pose_light')\n in_image = data_context.input(\"in_image\")\n in_pose = data_context.input(\"in_pose\")\n out_image = data_context.output(\"out_image\")\n\n for buffer_img, buffer_pose in zip(in_image, in_pose):\n width = buffer_img.get('width')\n height = buffer_img.get('height')\n channel = buffer_img.get('channel')\n\n img_data = np.array(buffer_img.as_object(), copy=False)\n img_data = img_data.reshape((height, width, channel))\n\n pose_str = buffer_pose.as_object()\n pose_data = self.decode_pose(pose_str, (height, width))\n\n img_out = img_data.copy()\n self.draw_pose(img_out, pose_data)\n\n out_buffer = modelbox.Buffer(self.get_bind_device(), img_out)\n out_buffer.copy_meta(buffer_img)\n out_image.push_back(out_buffer)\n\n return modelbox.Status.StatusCode.STATUS_SUCCESS\n \n def decode_pose(self, pose_str, input_shape):\n try:\n result_json = json.loads(pose_str)\n pose = json.loads(result_json['pose_result'])\n except Exception as ex:\n modelbox.error(str(ex))\n return []\n else:\n pose = np.array(pose)\n pose[..., 0] = pose[..., 0] * input_shape[1]\n pose[..., 1] = pose[..., 1] * input_shape[0]\n return pose\n\n def draw_pose(self, img_data, all_poses):\n for pose in all_poses:\n for part_id in range(len(self.BODY_PARTS_PAF_IDS) - 2):\n kpt_a_id = self.BODY_PARTS_KPT_IDS[part_id][0]\n global_kpt_a_id = pose[kpt_a_id, 0]\n color_a = self.colors[kpt_a_id % self.cnt_colors]\n if global_kpt_a_id > 0:\n x_a, y_a = pose[kpt_a_id]\n cv2.circle(img_data, (int(x_a), int(y_a)), 3, color_a, -1)\n kpt_b_id = self.BODY_PARTS_KPT_IDS[part_id][1]\n global_kpt_b_id = pose[kpt_b_id, 0]\n color_b = self.colors[kpt_b_id % self.cnt_colors]\n if global_kpt_b_id > 0:\n x_b, y_b = pose[kpt_b_id]\n cv2.circle(img_data, (int(x_b), int(y_b)), 3, color_b, -1)\n if global_kpt_a_id > 0 and global_kpt_b_id > 0:\n 
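                    # Editor's note: the scaled x-coordinate stored in pose[k, 0]
                    # appears to double as a presence flag here (0 meaning the
                    # keypoint was not detected), so the limb is only drawn when
                    # both endpoint keypoints exist.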
cv2.line(img_data, (int(x_a), int(y_a)), (int(x_b), int(y_b)), color_b, 2)\n\n def close(self):\n # Close the flowunit\n return modelbox.Status()\n\n def data_pre(self, data_context):\n # Before streaming data starts\n return modelbox.Status()\n\n def data_post(self, data_context):\n # After streaming data ends\n return modelbox.Status()\n\n def data_group_pre(self, data_context):\n # Before all streaming data starts\n return modelbox.Status()\n\n def data_group_post(self, data_context):\n # After all streaming data ends\n return modelbox.Status()\n","repo_name":"sunxiaobei/modelbox_gallery","sub_path":"workspace/multi_person_pose_lightweight_openpose/etc/flowunit/draw_pose_light/draw_pose_light.py","file_name":"draw_pose_light.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"19092919604","text":"#!/usr/bin/python\n\n# Define TreeNode\nclass TreeNode:\n\n # Two ways to declare a tree node:\n # 1. N = TreeNode(1)\n # 2. N = TreeNode(1, 2, 3), which is equivalent to N = Node(left=2, right=3, val=1)\n # In other words, if only want root and right, issue N = Node(1, right=3).\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = TreeNode(left) if left else left\n self.right = TreeNode(right) if right else right\n\n # Define the string representation of a tree rooted at this node.\n # Reference: http://stevekrenzel.com/articles/printing-trees\n # Thank Steve Krenzel for coming up with this amazingly clear representation.\n def __repr__(self, depth=0):\n ret = str()\n if self.right:\n ret += self.right.__repr__(depth + 1)\n ret += ' ' * 4 * depth + str(self.val) + '\\n'\n if self.left:\n ret += self.left.__repr__(depth + 1)\n return ret if depth else ret.rstrip()\n\n# Define Tree\nclass Tree:\n\n # Three ways to declare a tree:\n # 1. T = Tree(1), which creates a tree with root of 1.\n # 2. T = Tree([1, 2, 3, '#', 5]), confused? Check out LeetCode OJ's binary tree serialization.\n # 3. 
T = Tree(), which creates an empty tree.\n def __init__(self, root=None):\n if type(root) == int:\n self.root = TreeNode(root)\n elif type(root) == list:\n if not root or root[0] == '#':\n self.root = None\n return\n self.root = TreeNode(root.pop(0))\n temp, queue = self.root, list()\n while temp:\n # If list has more than 1 element, then construct left and right child.\n if len(root) > 1:\n left, right = root[0], root[1]\n if left != '#':\n temp.left = TreeNode(left)\n queue.append(temp.left)\n if right != '#':\n temp.right = TreeNode(right)\n queue.append(temp.right)\n root = root[2:]\n # If list has only one element, then add left child and exit.\n elif len(root) == 1:\n left = root[0]\n if left != '#':\n temp.left = TreeNode(left)\n break\n # If empty, then exit directly.\n else:\n break\n temp = queue.pop(0)\n else:\n self.root = None\n\n # Define the string representation of a tree.\n def __repr__(self):\n return str(self.root)","repo_name":"yi-guo/coding-interview","sub_path":"leetcode/python/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11121007689","text":"#!/usr/bin/env python3\n\"\"\"\n@Filename: overlay.py\n@Author: dulanj\n@Time: 02/10/2021 19:22\n-------------------------------------\nThis is also taken from Keras team repo - https://keras.io/examples/vision/deeplabv3_plus/\n\"\"\"\nimport sys\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom deeplab.dataset_voc import read_image\nfrom deeplab.inference import inference\nfrom deeplab.pascal_voc import VOC_COLORMAP\n\n# Loading the Colormap\ncolormap = np.array(VOC_COLORMAP).astype(np.uint8)\n\n\ndef decode_segmentation_masks(mask, colormap, n_classes):\n r = np.zeros_like(mask).astype(np.uint8)\n g = np.zeros_like(mask).astype(np.uint8)\n b = np.zeros_like(mask).astype(np.uint8)\n for l in range(0, n_classes):\n idx = mask == l\n r[idx] = colormap[l, 0]\n g[idx] = colormap[l, 1]\n b[idx] = colormap[l, 2]\n rgb = np.stack([r, g, b], axis=2)\n return rgb\n\n\ndef get_overlay(image, colored_mask):\n image = tf.keras.preprocessing.image.array_to_img(image)\n image = np.array(image).astype(np.uint8)\n is_bg = np.tile(np.all(colored_mask == (0, 0, 0), axis=-1), (3, 1, 1)).transpose(1, 2, 0)\n colored_mask = np.where(is_bg, image, colored_mask)\n overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)\n return overlay\n\n\ndef press(event):\n print('press', event.key)\n if event.key == \"escape\":\n plt.close()\n sys.exit(0)\n\n\ndef plot_samples_matplotlib(display_list, figsize=(5, 3)):\n fig, axes = plt.subplots(nrows=1, ncols=len(display_list), figsize=figsize)\n fig.canvas.mpl_connect('key_press_event', press)\n for i in range(len(display_list)):\n if display_list[i].shape[-1] == 3:\n axes[i].imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))\n else:\n axes[i].imshow(display_list[i])\n plt.show()\n\n\ndef plot_predictions(images_list, model):\n pred_list = []\n for image_file in images_list:\n image_tensor = read_image(image_file)\n prediction_mask = inference(image_tensor=image_tensor, model=model)\n prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)\n overlay = get_overlay(image_tensor, prediction_colormap)\n predict_image_list = [image_tensor, overlay, prediction_colormap]\n plot_samples_matplotlib(\n predict_image_list, figsize=(18, 14)\n )\n pred_list.append(predict_image_list)\n return 
pred_list\n\n\ndef save_cv_image(save_path, image):\n cv2.imwrite(save_path, cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR))\n","repo_name":"CodeProcessor/DeepLab-Training-Pipeline","sub_path":"deeplab/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13610417223","text":"\nimport os, glob\n\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment\n\n\nclass IlmBaseConan(ConanFile):\n name = \"IlmBase\"\n description = \"IlmBase is a component of OpenEXR. OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & Magic for use in computer imaging applications.\"\n version = \"2.2.0\"\n license = \"BSD\"\n url = \"https://github.com/Mikayex/conan-ilmbase.git\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\", \"cppstd\"\n generators = \"cmake\"\n exports = \"FindIlmBase.cmake\", \"*.tar.gz\"\n\n def source(self):\n base = \"ilmbase-{version}.tar.gz\".format(version=self.version)\n if os.path.exists(base):\n self.output.info(\"Found local source tarball {}\".format(base))\n tools.unzip(base)\n else:\n url = \"http://download.savannah.nongnu.org/releases/openexr/\" + base\n self.output.warn(\"Downloading source tarball {}\".format(url))\n tools.get(url)\n\n def build(self):\n args = [\"--enable-shared\",\n \"--enable-namespaceversioning\",\n ]\n\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(configure_dir='ilmbase-{}'.format(self.version), args=args)\n autotools.make()\n tools.replace_prefix_in_pc_file(\"IlmBase.pc\", \"${package_root_path_ilmbase}\")\n\n def package(self):\n autotools = AutoToolsBuildEnvironment(self)\n autotools.install()\n self.copy(\"FindIlmBase.cmake\", src=\".\", dst=\".\")\n self.copy(\"license*\", dst=\"licenses\", src=\"ilmbase-%s\" % self.version, ignore_case=True, keep_path=False)\n\n for f in glob.glob(os.path.join(self.package_folder, 'lib', '*.la')):\n os.remove(f)\n\n def package_info(self):\n self.cpp_info.includedirs = ['include', os.path.join('include', 'OpenEXR')]\n self.cpp_info.libs = ['Half', 'Iex', 'IexMath', 'IlmThread', 'Imath']\n\n if self.settings.os == \"Windows\":\n self.cpp_info.defines.append(\"OPENEXR_DLL\")\n\n if not self.settings.os == \"Windows\":\n self.cpp_info.cppflags = [\"-pthread\"]\n","repo_name":"aloysbaillet/aswf-ci-experiment","sub_path":"conan/IlmBase/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36288505256","text":"# -*- coding: utf-8 -*-\n#\n# @author: Five\n# Created on 2013-5-14\n#\nfrom flask_mongoengine.wtf.orm import model_form\nfrom guildconnections.choices import Gender, Boolean\nfrom guildconnections.common.orm import BaseModel\nfrom guildconnections.constants import DEFAULT_FORM_EXCLUDE\nfrom mongoengine.document import EmbeddedDocument\nfrom mongoengine.fields import (StringField, DateTimeField, IntField,\n ReferenceField, ListField, EmbeddedDocumentField, EmailField, URLField)\nimport datetime\n\nclass GuideType(BaseModel):\n \"\"\" use mongo DB to store the guild type,\n then we can add it dynamic without changing code\"\"\"\n name = StringField()\n meta = {\n 'allow_inheritance' : False\n }\n\n def __unicode__(self):\n return ''.format(self.guide_type)\n\n\nclass Game(BaseModel):\n \"\"\" game mongo model \"\"\"\n name = StringField(max_length=128)\n 
description = StringField(max_length=512)\n\n meta = {\n 'allow_inheritance' : False\n }\n\n def __unicode__(self):\n return ''.format(self.name)\n\n\nclass Recruitment(EmbeddedDocument):\n\n find_guide = StringField(max_length=3, choices=Boolean.choices)\n find_for_game = ReferenceField(Game, dbref=False)\n find_for_type = ReferenceField(GuideType, dbref=False)\n# find_for_game = ReferenceField(Game, default=Game)\n# find_for_type = ReferenceField(GuideType, default=GuideType)\n\n current_realm = StringField()\n transfer_realm = StringField(max_length=3, choices=Boolean.choices)\n preffered_role = StringField()\n\n meta = {\n 'allow_inheritance' : False\n }\n\n\nclass Gamer(EmbeddedDocument):\n\n avatar = StringField(max_length=512)\n\n # in the psd-gamer setup page, it's age.?\n dob = DateTimeField()\n gender = StringField(max_length=1, choices=Gender.choices,\n default=Gender.U)\n\n bio = StringField(max_length=512)\n\n # maybe this is a choice too\n avg_play_time = IntField()\n timezone = IntField()\n\n # or we use string field instead?\n games = ListField(ReferenceField(Game, dbref=False))\n\n forum_signature = StringField(max_length=512)\n\n # Recruitment properties\n # TODO, dont know whether it's required.\n find_guide = StringField(max_length=3,\n choices=Boolean.choices,\n default=Boolean.NO)\n find_for_game = ReferenceField(Game, dbref=False)\n find_for_type = ReferenceField(GuideType, dbref=False)\n\n current_realm = StringField()\n transfer_realm = StringField(max_length=3,\n choices=Boolean.choices,\n default=Boolean.NO)\n preffered_role = StringField()\n\n\n meta = {\n 'allow_inheritance' : False\n }\n\nclass User(BaseModel):\n \"\"\" user mongo model \"\"\"\n\n email = EmailField(required=True, unique=True)\n\n # where is the user account and password?\n # we use this name field for user nick now.\n # if we need a new field for gamer profile setup, add it later\n name = StringField(max_length=128, required=True)\n password = StringField(max_length=16, required=True)\n\n # used when validate email\n verify_code = StringField(max_length=6)\n\n gamer = EmbeddedDocumentField(Gamer, default=Gamer)\n\n last_login_on = DateTimeField(default=datetime.datetime.now)\n\n meta = {\n 'allow_inheritance' : False\n }\n\n\n\nclass Guide(BaseModel):\n \"\"\" guide mongo model \"\"\"\n\n name = StringField(required=True)\n logo = StringField()\n url = URLField()\n description = StringField()\n founded_on = DateTimeField()\n\n min_age_required = IntField()\n gender_required = StringField(max_length=1, choices=Gender.choices,\n default=Gender.U)\n\n play_time_required = StringField()\n play_type = StringField()\n\n weekly_play_time = IntField()\n timezone = IntField()\n\n prime_time_from = IntField()\n prime_time_to = IntField()\n\n previous_games = StringField()\n voice = StringField(max_length=3, choices=Boolean.choices,\n default=Boolean.NO)\n\n meta = {\n 'allow_inheritance' : False\n }\n\n\nuser_form_exclude = list(DEFAULT_FORM_EXCLUDE)\nuser_form_exclude.extend(('last_login_on', 'verify_code'))\nguid_form_exclude = DEFAULT_FORM_EXCLUDE + ('logo',)\n\nUserForm = model_form(User, exclude=user_form_exclude)\nGamerForm = model_form(Gamer, exclude=DEFAULT_FORM_EXCLUDE)\nGuideForm = model_form(Guide, exclude=guid_form_exclude)\n","repo_name":"IamFive/gc","sub_path":"guildconnections/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23243734864","text":"from 
datetime import datetime, timedelta\nimport json\nimport os\nfrom tools.uploadGCS import save_file, upload_multiple_folders\nfrom tools.cec_data import request_cec_by_type\nfrom tools.conn import get_sht_data\nfrom configs import default_special_municipality, default_tv\nimport googleapiclient\nwith open('mapping/mapping_county_town.json', encoding='utf-8') as f:\n mapping_county_town = json.loads(f.read())\nwith open('mapping/mapping_county_town_vill.json', encoding='utf-8') as f:\n mapping_county_town_vill = json.loads(f.read())\nwith open('mapping/mayor_candidate_2022.json', encoding='utf-8') as f:\n candidate_info = json.loads(f.read())\nENV_FOLDER = os.environ['ENV_FOLDER']\nIS_TV = os.environ['PROJECT'] == 'tv'\nIS_STARTED = os.environ['IS_STARTED'] == 'true'\nPOLITICS_URL = os.environ['POLITICS_URL']\n\ndef parse_cec_mayor(data):\n organized_data = {}\n for district in data:\n deptCode = district['deptCode'] if district['deptCode'] else '000'\n region_code = f\"{district['prvCode']}_{district['cityCode']}_{deptCode}\"\n region = organized_data.setdefault(region_code, {'profRate': district['profRate']})\n for c in district['candTksInfo']:\n candNo = region.setdefault(c['candNo'], c)\n candNo = c\n return organized_data\n\n\ndef parse_tv_sht():\n sht_data = {}\n source = {}\n sht_data_raw = get_sht_data(url=os.environ['SHT_URL'], shtID=os.environ['WKS_ID'])\n for row in sht_data_raw:\n if row[0] == '城市名' or not row[0]:\n continue\n county_name = row[0].replace('台', '臺')\n candNo = int(row[2])\n name = row[1]\n party = row[3]\n try:\n tks = row[5].replace(',', '')\n tks = tks.replace('%', '')\n tks = int(tks) if tks else 0\n except ValueError:\n tks = 0\n try:\n tksRate = float(row[4].replace('%', '')) if row[4] else 0\n except ValueError:\n tksRate = 0\n candVictor = False\n county_source = source.setdefault(county_name, row[6])\n if county_source == '自行計票 + 候選人計票' or county_source == 'tv':\n county_source = 'tv'\n else:\n county_source = 'cec'\n source[county_name] = county_source\n # candidates\n county = sht_data.setdefault(county_name, {candNo: {}})\n county[candNo] = {\n \"candNo\": candNo,\n \"name\": name,\n \"party\": party,\n \"tks\": tks,\n \"tksRate\": tksRate,\n \"candVictor\": candVictor\n }\n return sht_data, source\n\n\ndef gen_tv_mayor(updatedAt = '', source = '', sht_data = '', polling_data = '', is_running=False):\n updatedAt = updatedAt if updatedAt else (datetime.utcnow() + timedelta(hours = 8)).strftime('%Y-%m-%d %H:%M:%S')\n result = []\n if source:\n for county_name, candNos in sht_data.items():\n candidates = []\n if source[county_name] == 'tv':\n county_source = '自行計票 + 候選人計票'\n cand_infos = sht_data[county_name]\n else:\n county_source = '中選會'\n county_code = [k for k in mapping_county_town.keys()][[v for v in mapping_county_town.values()].index(county_name)]\n cand_infos = polling_data[county_code]\n for candNo in candNos.keys():\n party = '無黨籍' if sht_data[county_name][candNo]['party'] == '無' else sht_data[county_name][candNo]['party']\n try:\n candidate = {\n \"candNo\": str(candNo).zfill(2),\n \"name\": sht_data[county_name][candNo]['name'],\n \"party\": party,\n \"tks\": cand_infos[candNo]['tks'],\n \"tksRate\": cand_infos[candNo]['tksRate'],\n \"candVictor\": True if cand_infos[candNo]['candVictor'] == \"*\" else False\n }\n except KeyError:\n candidate = {\n \"candNo\": str(candNo).zfill(2),\n \"name\": sht_data[county_name][candNo]['name'],\n \"party\": party,\n \"tks\": 0,\n \"tksRate\": 0,\n \"candVictor\": False\n }\n\n 
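                # Editor's note: the KeyError branch above zero-fills a candidate
                # that appears in the spreadsheet but is missing from the polling
                # feed, so every registered candidate still shows up in the TV
                # output with tks/tksRate of 0.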
candidates.append(candidate)\n candidates.sort(key=lambda x: (-x[\"tks\"], x[\"candNo\"]), reverse=False)\n result.append(\n {\"city\": county_name, \"candidates\": candidates, \"source\": county_source})\n else:\n for county_code, default_candidates in default_tv.items():\n candidates = []\n county_source = \"中選會\"\n county_name = mapping_county_town[county_code]\n for candNo in default_candidates:\n c_info = candidate_info[county_code][str(candNo)]\n party = '無黨籍' if c_info['party'] == '無' else c_info['party']\n candTks = {\n \"candNo\": str(candNo).zfill(2),\n \"name\": c_info['name'],\n \"party\": party,\n \"tks\": 0,\n \"tksRate\": 0,\n \"candVictor\": False,\n }\n candidates.append(candTks)\n candidates.sort(key=lambda x: (-x[\"tks\"], x[\"candNo\"]), reverse=False)\n result.append(\n {\"city\": mapping_county_town[county_code], \"candidates\": candidates[:3], \"source\": county_source})\n year = datetime.now().year\n destination_file = f'{ENV_FOLDER}/{year}/mayor/tv.json'\n data = {\"updatedAt\": updatedAt,\n \"is_running\": is_running,\n \"polling\": result}\n save_file(destination_file, data, year)\n return\n\n\ndef gen_special_municipality(updatedAt, polling_data, is_running=False):\n result = []\n for county_code, default_candidates in default_special_municipality.items():\n candidates = []\n if polling_data:\n for candNo, c_info in candidate_info[county_code].items():\n try:\n tksRate = polling_data[county_code][int(\n candNo)]['tksRate'] if polling_data[county_code][int(candNo)]['tksRate'] else 0\n tks = polling_data[county_code][int(\n candNo)]['tks'] if polling_data[county_code][int(candNo)]['tks'] else 0\n candVictor = True if polling_data[county_code][int(candNo)]['candVictor'] == '*' else False\n except:\n tksRate = 0\n tks = 0\n candVictor = False\n candTks = {\n \"candNo\": candNo.zfill(2),\n \"name\": c_info['name'],\n \"party\": c_info['party'],\n \"tks\": tks,\n \"tksRate\": tksRate,\n \"candVictor\": candVictor,\n }\n candidates.append(candTks)\n else:\n for candNo in default_candidates:\n c_info = candidate_info[county_code][str(candNo)]\n candTks = {\n \"candNo\": str(candNo).zfill(2),\n \"name\": c_info['name'],\n \"party\": c_info['party'],\n \"tks\": 0,\n \"tksRate\": 0,\n \"candVictor\": False,\n }\n candidates.append(candTks)\n candidates.sort(key=lambda x: (-x[\"tks\"], x[\"candNo\"]), reverse=False)\n result.append(\n {\"city\": mapping_county_town[county_code], \"candidates\": candidates[:3]})\n year = datetime.now().year\n destination_file = f'{ENV_FOLDER}/{year}/mayor/special_municipality.json'\n data = {\"updatedAt\": updatedAt,\n \"is_running\": is_running,\n \"polling\": result}\n save_file(destination_file, data, year)\n return\n\n\ndef gen_vote(updatedAt, polling_data='', candidate_info=candidate_info, year=datetime.now().year):\n result = []\n for region_code, region_candidates in candidate_info.items():\n candidates = []\n for candNo, c_info in region_candidates.items():\n candTks = {\n 'candNo': candNo,\n 'name': {\n 'label': c_info['name'],\n 'href': f\"{POLITICS_URL}/person/{c_info['name_id']}\",\n 'imgSrc': c_info['name_img'] if c_info['name_img'] else ''\n },\n 'party': {\n 'label': c_info['party'] if c_info['party'] != '無' else '無黨籍',\n 'href': '',\n 'imgSrc': c_info['party_img'] if c_info['party_img'] else ''\n },\n 'tks': 0,\n 'tksRate': 0,\n 'candVictor': False\n }\n if polling_data:\n try:\n candTks['tks'] = polling_data[region_code][int(\n candNo)]['tks'] if polling_data[region_code][int(candNo)]['tks'] else 0\n candTks['tksRate'] = 
polling_data[region_code][int(\n candNo)]['tksRate'] if polling_data[region_code][int(candNo)]['tksRate'] else 0\n candTks['candVictor'] = True if polling_data[region_code][int(\n candNo)]['candVictor'] == '*' or polling_data[region_code][int(\n candNo)]['candVictor'] == True else False\n except KeyError:\n pass\n candidates.append(candTks)\n try:\n districtName = mapping_county_town[region_code]\n except KeyError:\n districtName = region_code\n\n result.append(\n {\"districtName\": districtName, \"candidates\": candidates})\n\n VERSION = os.environ['VERSION']\n data = {\"updatedAt\": updatedAt,\n \"year\": str(year),\n \"type\": 'mayor',\n \"title\": \"縣市長選舉\",\n \"version\": VERSION,\n \"districts\": result}\n destination_file = f'{ENV_FOLDER}/{VERSION}/{year}/mayor/all.json'\n\n save_file(destination_file, data, year)\n return\n\n\ndef map_candidate(region_candidates, polling_data, region_code):\n candidates = []\n for candNo, c_info in region_candidates.items():\n\n candTks = {\n \"candNo\": candNo,\n \"name\": c_info['name'],\n \"party\": c_info['party'] if c_info['party'] != '無' else '無黨籍',\n \"tksRate\": 0,\n \"candVictor\": \" \"\n }\n if polling_data:\n try:\n can_polling_data = polling_data[region_code][int(candNo)]\n candTks['tks'] = can_polling_data['tks'] if can_polling_data['tks'] else 0\n candTks['tksRate'] = can_polling_data['tksRate'] if can_polling_data['tksRate'] else 0\n candTks['candVictor'] = can_polling_data['candVictor'] if can_polling_data['candVictor']else ' '\n except KeyError:\n pass\n candidates.append(candTks)\n\n return candidates\n\n\ndef gen_map(updatedAt, scope, polling_data, scope_code='', sub_region='', is_running = False):\n result = []\n for region_code in sub_region.keys():\n if scope == 'country':\n vill_Code = '000'\n range = mapping_county_town[region_code]\n elif scope == 'county':\n region_code = scope_code[:-3] + region_code # county code '09_007_010'\n vill_Code = '000'\n range = f'{mapping_county_town[scope_code]} {mapping_county_town[region_code]}'\n else:\n vill_Code = region_code\n region_code = scope_code + '_' + region_code # vill code '09_007_010_010'\n range = sub_region[vill_Code].replace(\"_\", \" \")\n\n region_code_split = region_code.split('_')\n county_code = region_code_split[0] + '_' + region_code_split[1]\n town_code = region_code_split[2]\n\n candidates = map_candidate(candidate_info[f\"{county_code}_000\"], polling_data, region_code)\n if polling_data:\n profRate = polling_data[region_code]['profRate'] if polling_data[region_code]['profRate'] else 0\n else:\n profRate = 0\n if scope == 'town':\n profRate = None\n candidates = None\n\n result.append({\n \"range\": range,\n \"county\": county_code.replace('_', ''),\n \"town\": None if town_code == '000' else town_code,\n \"vill\": None if vill_Code == '000' else vill_Code,\n \"profRate\": profRate,\n \"candidates\": candidates})\n year = datetime.now().year\n data = {\"updatedAt\": updatedAt,\n \"is_running\": is_running,\n \"is_started\": IS_STARTED,\n \"districts\": result}\n if scope == 'country':\n destination_file = f'{ENV_FOLDER}/{year}/mayor/map/{scope}.json'\n elif scope == 'county':\n destination_file = f'{ENV_FOLDER}/{year}/mayor/map/{scope}/{scope_code[:-3].replace(\"_\", \"\")}.json'\n else:\n destination_file = f'{ENV_FOLDER}/{year}/mayor/map/{scope}/{scope_code.replace(\"_\", \"\")}.json'\n\n save_file(destination_file, dict(sorted(data.items(), reverse=True)), year)\n return\n\n\ndef gen_mayor(update = '', data = '', is_running = False):\n updatedAt = update if 
update else (datetime.utcnow() + timedelta(hours = 8)).strftime('%Y-%m-%d %H:%M:%S')\n gen_vote(updatedAt, data)\n if IS_TV:\n return\n gen_special_municipality(updatedAt, data, is_running)\n gen_map(updatedAt, 'country', data, '00_000_000', candidate_info, is_running=is_running)\n for county_code, towns in mapping_county_town_vill.items():\n if county_code == '10_020': # 2022嘉義市長選舉延後\n continue\n county_code = county_code + '_000'\n gen_map(updatedAt, 'county', data, county_code, towns, is_running=is_running)\n if IS_STARTED:\n continue\n for town_code, vills in towns.items():\n town_code = county_code[:-3] + town_code\n gen_map(updatedAt, 'town', polling_data='',\n scope_code = town_code, sub_region=vills)\n return\n\n\nif __name__ == '__main__':\n if IS_STARTED:\n jsonfile, is_running = request_cec_by_type()\n if jsonfile:\n updatedAt = datetime.strptime(jsonfile[\"ST\"], '%m%d%H%M%S')\n updatedAt = f\"{datetime.now().year}-{datetime.strftime(updatedAt, '%m-%d %H:%M:%S')}\"\n mayor_data = parse_cec_mayor(jsonfile[\"TC\"])\n if IS_TV:\n try:\n sht_data, source = parse_tv_sht()\n gen_tv_mayor(updatedAt, source, sht_data, mayor_data, is_running)\n print('tv mayor done')\n except googleapiclient.errors.HttpError:\n print('sht failed')\n gen_mayor(updatedAt, mayor_data, is_running)\n print(\"mayor done\")\n else:\n print('problem of cec data ')\n if IS_TV:\n sht_data, source = parse_tv_sht()\n if 'cec' not in source.values():\n gen_tv_mayor(source=source, sht_data=sht_data, is_running=True)\n print('tv mayor done')\n else:\n if IS_TV:\n gen_tv_mayor()\n gen_mayor()\n print(\"mayor done\")\n # upload_multiple_folders(2022)","repo_name":"yatiti84/openrelation-elections","sub_path":"mayor.py","file_name":"mayor.py","file_ext":"py","file_size_in_byte":15384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16391083605","text":"import sys\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom ChangePixelColor import AlterPixelsColor\nfrom PIL._util import isPath\nfrom PIL import Image\nimport cStringIO\n\n\nLastImages = []\n\ndef ImageContent(image):\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n image.save(buffer, \"PNG\")\n strio = cStringIO.StringIO()\n strio.write(buffer.data())\n buffer.close()\n strio.seek(0)\n return strio\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return (qRgba(r, g, b, a) & 0xffffffff)\n\ndef align8to32(bytes, width, mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\n '1': 1,\n 'L': 8,\n 'P': 8,\n }[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] + b'\\x00' * extra_padding)\n\n return b''.join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do 
this?\n if str is bytes:\n im = unicode(im.toUtf8(), \"utf-8\")\n else:\n im = str(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i:i+3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n try:\n data = im.tobytes(\"raw\", \"BGRA\")\n except SystemError:\n # workaround for earlier versions\n r, g, b, a = im.split()\n im = Image.merge(\"RGBA\", (b, g, r, a))\n format = QImage.Format_ARGB32\n else:\n raise ValueError(\"unsupported image mode %r\" % im.mode)\n\n # must keep a reference, or Qt will crash!\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\n 'data': __data, 'im': im, 'format': format, 'colortable': colortable\n }\n\nclass ImageQt(QImage):\n\n def __init__(self, im):\n im_data = _toqclass_helper(im)\n QImage.__init__(self,\n im_data['data'], im_data['im'].size[0],\n im_data['im'].size[1], im_data['format'])\n if im_data['colortable']:\n self.setColorTable(im_data['colortable'])\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n self.initUI()\n\t\n def color_picker(self, widget):\n color = QColorDialog.getColor()\n widget.setStyleSheet(\"QWidget { background-color: %s}\" % color.name())\n \n \n def initUI(self):\n \n hbox = QHBoxLayout(self)\n self.ChangeColor = QLabel(\"Change Color\", self)\n \n self.Searchbox0 = QLabel(\"\", self)\n self.Searchbox0.setStyleSheet(\"QWidget { background-color:Blue }\" )\n \n self.SearchColor0 = QPushButton(\"Search Color\", self)\n self.SearchColor0.clicked.connect(lambda:self.color_picker(self.Searchbox0))\n \n self.Alterbox = QLabel()\n self.Alterbox.setStyleSheet(\"QWidget { background-color:Green }\" )\n \n self.AlterColor = QPushButton(\"Alter Color\", self)\n self.AlterColor.clicked.connect(lambda:self.color_picker(self.Alterbox))\n self.AlterColor.clicked.connect(self.alterColorImage)\n self.slidervalue = QLabel()\n self.slidervalue.setNum(25) \n self.slider = QSlider(Qt.Horizontal)\n self.slider.setMinimum(0)\n self.slider.setMaximum(50)\n self.slider.setValue(25)\n self.slider.setTickInterval(1)\n self.slider.setSingleStep(1)\n self.slider.valueChanged.connect(self.slidervalue.setNum)\n self.Transparency = QLabel(\"Transparency\", self)\n \n self.Searchbox1 = QLabel(\"\", self)\n self.Searchbox1.setStyleSheet(\"QWidget { background-color:Blue }\" )\n \n self.SearchColor1 = QPushButton(\"Search Color\", self)\n self.SearchColor1.clicked.connect(lambda:self.color_picker(self.Searchbox1))\n self.SearchColor1.clicked.connect(self.apply_transparency)\n bottom = QFrame()\n bottom.setFrameShape(QFrame.StyledPanel)\n \n splitter1 = QSplitter(Qt.Vertical)\n splitter1.addWidget(self.ChangeColor)\n splitter1.addWidget(self.SearchColor0)\n splitter1.addWidget(self.Searchbox0)\n splitter1.addWidget(self.AlterColor)\n splitter1.addWidget(self.Alterbox)\n splitter1.addWidget(self.slidervalue)\n splitter1.addWidget(self.slider)\n splitter1.addWidget(self.Transparency)\n splitter1.addWidget(self.SearchColor1)\n splitter1.addWidget(self.Searchbox1)\n splitter1.addWidget(bottom)\n splitter1.setSizes([10,10,10,10,5,5,10,10,10,10,160])\n 
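        # Editor's note: the eleven values passed to setSizes() map, in insertion
        # order, onto the eleven widgets added to splitter1 above; the trailing 160
        # gives the bottom QFrame most of the initial vertical space.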
self.scrollArea = QScrollArea()\n self.scrollArea.setBackgroundRole(QPalette.Dark)\n self.right = QLabel()\n self.right.setSizePolicy(QSizePolicy.Ignored,\n QSizePolicy.Ignored)\n self.right.setScaledContents(True)\n self.scrollArea.setWidget(self.right)\n \n splitter2 = QSplitter(Qt.Horizontal)\n splitter2.addWidget(splitter1)\n splitter2.addWidget(self.scrollArea)\t\n splitter2.setSizes([40,350])\n hbox.addWidget(splitter2)\n\t\t\n self.setLayout(hbox)\n self.show()\n \n def alterColorImage(self):\n SearchRGBA = self.getColorWidget(self.Searchbox0)\n AlterRGBA = self.getColorWidget(self.Alterbox)\n LastImages.append(self.image.copy())\n stream = AlterPixelsColor(ImageContent(self.image), SearchRGBA,AlterRGBA,int(self.slidervalue.text()))\n self.image = ImageQt(stream)\n self.right.setPixmap(QPixmap.fromImage(self.image))\n \n def apply_transparency(self):\n SearchRGBA = self.getColorWidget(self.Searchbox1)\n LastImages.append(self.image.copy())\n AlterPixelsColor(ImageContent(self.image), SearchRGBA,(0, 0, 0, 0), int(self.slidervalue.text()))\n self.image = QImage(self.fileName)\n self.right.setPixmap(QPixmap.fromImage(self.image)) \n \n def getColorWidget(self,widget):\n palette = widget.palette()\n color = palette.color(widget.backgroundRole())\n rgba = color.red(), color.green(), color.blue(), color.alpha()\n return rgba\n\nclass ImageViewer(QMainWindow):\n def __init__(self):\n super(ImageViewer, self).__init__()\n\n self.printer = QPrinter()\n self.scaleFactor = 0.0\n \n self.createActions()\n self.createMenus()\n \n self.setWindowTitle(\"Image Viewer\")\n self.ex = Example()\n self.setCentralWidget(self.ex)\n self.resize(500, 400)\n self.show()\n def open(self):\n self.ex.fileName = QFileDialog.getOpenFileName(self, \"Open File\",\n QDir.currentPath())\n if self.ex.fileName:\n self.ex.image = QImage(self.ex.fileName)\n if self.ex.image.isNull():\n QMessageBox.information(self, \"Image Viewer\",\n \"Cannot load %s.\" % self.ex.fileName)\n return\n\n self.ex.right.setPixmap(QPixmap.fromImage(self.ex.image))\n self.scaleFactor = 1.0\n\n self.printAct.setEnabled(True)\n self.fitToWindowAct.setEnabled(True)\n self.updateActions()\n\n if not self.fitToWindowAct.isChecked():\n self.ex.right.adjustSize()\n \n def createActions(self):\n self.openAct = QAction(\"&Open...\", self, shortcut=\"Ctrl+O\",\n triggered=self.open)\n \n self.saveAct = QAction(\"&Save...\", self, shortcut=\"Ctrl+S\",\n triggered=self.save)\n \n self.undoAct = QAction(\"&Undo...\", self, shortcut=\"Ctrl+Z\",\n triggered=self.undo)\n \n self.printAct = QAction(\"&Print...\", self, shortcut=\"Ctrl+P\",\n enabled=False, triggered=self.print_)\n\n self.exitAct = QAction(\"E&xit\", self, shortcut=\"Ctrl+Q\",\n triggered=self.close)\n\n self.zoomInAct = QAction(\"Zoom &In (25%)\", self,\n shortcut=\"Ctrl++\", enabled=False, triggered=self.zoomIn)\n\n self.zoomOutAct = QAction(\"Zoom &Out (25%)\", self,\n shortcut=\"Ctrl--\", enabled=False, triggered=self.zoomOut)\n\n self.normalSizeAct = QAction(\"&Normal Size\", self,\n shortcut=\"Ctrl+N\", enabled=False, triggered=self.normalSize)\n\n self.fitToWindowAct = QAction(\"&Fit to Window\", self,\n enabled=False, checkable=True, shortcut=\"Ctrl+F\",\n triggered=self.fitToWindow)\n\n self.aboutAct = QAction(\"&About\", self, triggered=self.about)\n\n self.aboutQtAct = QAction(\"About &Qt\", self,\n triggered=qApp.aboutQt)\n def createMenus(self):\n self.fileMenu = QMenu(\"&File\", self)\n self.fileMenu.addAction(self.openAct)\n self.fileMenu.addAction(self.printAct)\n 
self.fileMenu.addAction(self.saveAct)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.exitAct)\n\n self.editMenu = QMenu(\"&Edit\", self)\n self.editMenu.addAction(self.undoAct)\n \n self.viewMenu = QMenu(\"&View\", self)\n self.viewMenu.addAction(self.zoomInAct)\n self.viewMenu.addAction(self.zoomOutAct)\n self.viewMenu.addAction(self.normalSizeAct)\n self.viewMenu.addSeparator()\n self.viewMenu.addAction(self.fitToWindowAct)\n\n self.helpMenu = QMenu(\"&Help\", self)\n self.helpMenu.addAction(self.aboutAct)\n self.helpMenu.addAction(self.aboutQtAct)\n\n \n self.menuBar().addMenu(self.fileMenu)\n self.menuBar().addMenu(self.editMenu)\n self.menuBar().addMenu(self.viewMenu)\n self.menuBar().addMenu(self.helpMenu)\n\n \n def print_(self):\n dialog = QPrintDialog(self.printer, self)\n if dialog.exec_():\n painter = QPainter(self.printer)\n rect = painter.viewport()\n size = self.imageLabel.pixmap().size()\n size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)\n painter.setViewport(rect.x(), rect.y(), size.width(), size.height())\n painter.setWindow(self.imageLabel.pixmap().rect())\n painter.drawPixmap(0, 0, self.imageLabel.pixmap())\n\n \n def save(self):\n stream = ImageContent(self.ex.image)\n image = Image.open(stream)\n filename = str(self.ex.fileName)\n image.format = filename[filename.index(\".\")+1:].upper()\n image.save(filename)\n \n\n def zoomIn(self):\n self.scaleImage(1.25)\n\n def zoomOut(self):\n self.scaleImage(0.8)\n \n def undo(self):\n self.ex.image = LastImages[-1]\n LastImages.pop()\n self.ex.right.setPixmap(QPixmap.fromImage(self.ex.image))\n\n def normalSize(self):\n self.ex.right.adjustSize()\n self.scaleFactor = 1.0\n\n def fitToWindow(self):\n fitToWindow = self.fitToWindowAct.isChecked()\n self.scrollArea.setWidgetResizable(fitToWindow)\n if not fitToWindow:\n self.normalSize()\n\n self.updateActions()\n \n def about(self):\n QMessageBox.about(self, \"About Image Viewer\",\n \"
<p>The Image Viewer example shows how to combine \"\n                \"QLabel and QScrollArea to display an image. QLabel is \"\n                \"typically used for displaying text, but it can also display \"\n                \"an image. QScrollArea provides a scrolling view around \"\n                \"another widget. If the child widget exceeds the size of the \"\n                \"frame, QScrollArea automatically provides scroll bars.</p>\"\n                \"<p>The example demonstrates how QLabel's ability to scale \"\n                \"its contents (QLabel.scaledContents), and QScrollArea's \"\n                \"ability to automatically resize its contents \"\n                \"(QScrollArea.widgetResizable), can be used to implement \"\n                \"zooming and scaling features.</p>\"\n                \"<p>In addition the example shows how to use QPainter to \"\n                \"print an image.</p>
\")\n\n def updateActions(self):\n self.zoomInAct.setEnabled(not self.fitToWindowAct.isChecked())\n self.zoomOutAct.setEnabled(not self.fitToWindowAct.isChecked())\n self.normalSizeAct.setEnabled(not self.fitToWindowAct.isChecked())\n\n def scaleImage(self, factor):\n self.scaleFactor *= factor\n self.imageLabel.resize(self.scaleFactor * self.imageLabel.pixmap().size())\n\n self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), factor)\n self.adjustScrollBar(self.scrollArea.verticalScrollBar(), factor)\n\n self.zoomInAct.setEnabled(self.scaleFactor < 3.0)\n self.zoomOutAct.setEnabled(self.scaleFactor > 0.333)\n\n def adjustScrollBar(self, scrollBar, factor):\n scrollBar.setValue(int(factor * scrollBar.value()\n + ((factor - 1) * scrollBar.pageStep()/2)))\n\n\n\ndef main():\n app = QApplication(sys.argv)\n ex = ImageViewer()\n sys.exit(app.exec_())\n\t\nif __name__ == '__main__':\n main()\n","repo_name":"Joneyviana/GuiEditorImage","sub_path":"Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32980994240","text":"def contar_caractere(string, caractere):\n cont = 0\n for i in range(len(string)):\n if string[i] == caractere:\n cont += 1\n return cont\n\npalavra = input(\"Adicione uma palavra: \")\ncar = input(\"Adicione um caractere: \")\nprint(contar_caractere(palavra, car))\n","repo_name":"marianavieiras/Projetos-em-Python","sub_path":"Lista_04/exercicio4-5.py","file_name":"exercicio4-5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"21273541133","text":"#!/usr/bin/env python3\n\"\"\"\nA class to aggregate time windows, source-receiver information and misfit\nusing Pandas.\n\"\"\"\nimport os\nimport pyasdf\nimport traceback\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom copy import deepcopy\nfrom fnmatch import filter as fnf\nfrom obspy.geodetics import gps2dist_azimuth\nfrom pyatoa import logger\nfrom pyatoa.utils.form import format_event_name\nfrom pyatoa.visuals.insp_plot import InspectorPlotter\n\n\nclass Inspector(InspectorPlotter):\n \"\"\"\n This plugin object will collect information from a Pyatoa run folder and\n allow the User to easily understand statistical information or generate\n statistical plots to help understand a seismic inversion.\n\n Inherits plotting capabilities from InspectorPlotter class to reduce clutter\n \"\"\"\n\n def __init__(self, tag=\"default\", verbose=True):\n \"\"\"\n Inspector will automatically search for relevant file names using the\n tag attribute. 
If nothing is found, internal dataframes will be empty.\n\n :type tag: str\n :param tag: tag of a previously saved workflow to be used for reading\n in existing data from disk\n :type verbose: bool\n :param verbose: detail the files that are being read and their status\n \"\"\"\n self.windows = pd.DataFrame()\n self.sources = pd.DataFrame()\n self.receivers = pd.DataFrame()\n self.tag = tag\n self.verbose = verbose\n\n # Placeholder attributes for getters\n self._models = None\n self._srcrcv = None\n self._step_misfit = None\n self._event_misfit = None\n self._station_misfit = None\n\n # Try to load an already created Inspector\n try:\n self.read(tag=self.tag)\n except FileNotFoundError:\n pass\n\n def _get_str(self):\n \"\"\"\n Get the string representation once and save as internal attribute\n \"\"\"\n # Get a list of internal public methods\n try:\n str_out = (f\"{len(self.events):<4} event(s)\\n\"\n f\"{len(self.stations):<4} station(s)\\n\"\n f\"{len(self.iterations):<4} iteration(s)\\n\"\n f\"{self.evaluations:<4} evaluation(s)\")\n\n except KeyError:\n str_out = (f\"{0:<4} event(s)\\n\"\n f\"{0:<4} station(s)\\n\"\n f\"{0:<4} iteration(s)\\n\"\n f\"{0:<4} evaluation(s)\\n\")\n return str_out\n\n def __str__(self):\n \"\"\"\n Return a list of all variables and functions available for quick ref\n \"\"\"\n return self._get_str()\n\n def __repr__(self):\n return self._get_str()\n\n def _try_print(self, a):\n \"\"\"Try-except catch for property print statements\"\"\"\n try:\n return self.windows.loc[:, a].unique()\n except KeyError:\n try:\n return self.sources.loc[:, a]\n except KeyError:\n return []\n\n @property\n def keys(self):\n \"\"\"Shorthand to access the keys of the Windows dataframe\"\"\"\n return self.windows.keys()\n\n @property\n def events(self):\n \"\"\"Return an array of all event ids\"\"\"\n return self._try_print(\"event\")\n\n @property\n def stations(self):\n \"\"\"Return an array of all stations\"\"\"\n return self._try_print(\"station\")\n\n @property\n def networks(self):\n \"\"\"Return an array of all stations\"\"\"\n return self._try_print(\"network\")\n\n @property\n def netsta(self):\n \"\"\"Return a Dataframe containing unique network-station idents\"\"\"\n try:\n return pd.concat([self.windows.loc[:, \"network\"],\n self.windows.loc[:, \"station\"]],\n axis=1).drop_duplicates().reset_index(drop=True)\n except KeyError:\n return []\n\n @property\n def srcrcv(self):\n \"\"\"Return a dataframe with source-receiver information, dists and baz\"\"\"\n if self._srcrcv is None:\n self.get_srcrcv()\n return self._srcrcv\n\n @property\n def pairs(self):\n \"\"\"Determine the number of unique source-receiver pairs\"\"\"\n cats = [\"iteration\", \"step\", \"event\", \"station\"]\n df = self.windows.groupby(cats).count()\n # Pick an arbitrary label as all the counts will be the same\n df = df.groupby(cats[:2]).count()[[\"network\"]]\n\n return df.rename({\"network\": \"count\"}, axis=1)\n\n @property\n def iterations(self):\n \"\"\"Return an array of all iteration\"\"\"\n return self._try_print(\"iteration\")\n\n @property\n def steps(self):\n \"\"\"Returns a pandas. 
Series of iteration with values listing steps\"\"\"\n try:\n return self.windows.groupby(\"iteration\").apply(\n lambda x: x[\"step\"].unique()\n )\n except KeyError:\n return []\n \n @property\n def models(self):\n \"\"\"Return a dict of model numbers related to a unique iteration/step\"\"\"\n if self._models is None:\n self.get_models()\n return self._models\n\n @property\n def initial_model(self):\n \"\"\"Return tuple of the iteration and step count corresponding M00\"\"\"\n try:\n return self.steps.index[0], self.steps[0][0]\n except TypeError:\n logger.warning(\"Inspector has no 'steps' data, returning None\")\n return None, None\n\n @property\n def final_model(self):\n \"\"\"Return tuple of iteration and step count for final accepted model\"\"\"\n try:\n return self.steps.index[-1], self.steps[-1][-1]\n except TypeError:\n logger.warning(\"Inspector has no 'steps' data, returning None\")\n return None, None\n\n @property\n def good_models(self):\n \"\"\"Return models that are only status 0 or 1 (initial or success)\"\"\"\n if self._models is None:\n self.get_models()\n return self.models[self.models.state.isin([0, 1])]\n\n @property\n def restarts(self):\n \"\"\"\n Try to guess the indices of restarts for convergence plot based on \n misfit increase in adjacent good models as well as discontinous misfit \n values for the final line search model and subsequent initial model.\n Not guaranteed to catch everything so may require manual review using \n the convergence() function\n \"\"\"\n if self._models is None:\n self.get_models()\n\n # Find out where the misfit values increase instead of decrease\n misfit = self.good_models.misfit.round(decimals=3)\n misfit_increase = np.where(misfit.diff() > 0)[0]\n mi_idx = misfit.iloc[misfit_increase].index.values\n\n # Find out where the same model shows a discontinuous misfit\n dm = self.good_models[[\"model\", \"misfit\"]].groupby(\n \"model\").diff().misfit.round(3)\n dm_idx = dm[abs(dm) > 0].index.values\n\n restart_indices = np.concatenate((mi_idx, dm_idx))\n\n return self.models.iloc[np.unique(restart_indices)]\n\n @property\n def evaluations(self):\n \"\"\"Returns the number of iterations, or the sum of all step counts\"\"\"\n try:\n return sum(self.steps.apply(len).values)\n except AttributeError:\n return 0\n\n @property\n def mags(self):\n \"\"\"Return a dictionary of event magnitudes\"\"\"\n return self._try_print(\"magnitude\")\n\n @property\n def times(self):\n \"\"\"Return a dictionary of event origin times\"\"\"\n return self._try_print(\"time\")\n\n @property\n def depths(self):\n \"\"\"Return a dictionary of event depths in units of meters\"\"\"\n return self._try_print(\"depth_km\")\n\n def _get_srcrcv_from_dataset(self, ds):\n \"\"\"\n Get source and receiver information from dataset, this includes\n latitude and longitude values for both, and event information including\n magnitude, origin time, id, etc.\n\n Returns Dataframes for sources and receivers iff they are not already\n contained in the class dataframes, to avoid duplicates.\n\n Returns empty DataFrames if no unique info was found.\n\n :type ds: pyasdf.ASDFDataSet\n :param ds: dataset to query for distances\n :rtype source: pandas.core.frame.DataFrame\n :return source: single row Dataframe containing event info from dataset\n :rtype receivers: multiindexed dataframe containing unique station info\n \"\"\"\n # Create a dataframe with source information, ignore duplicates\n event_id = format_event_name(ds.events[0])\n # Some events, like FORCESOLUTIONS, do not contain 
information on magni.\n try:\n magnitude = ds.events[0].preferred_magnitude().mag\n except AttributeError:\n magnitude = None\n\n if event_id not in self.sources.index:\n src = {\n \"event_id\": format_event_name(ds.events[0]),\n \"time\": str(ds.events[0].preferred_origin().time),\n \"magnitude\": magnitude,\n \"depth_km\": ds.events[0].preferred_origin().depth * 1E-3,\n \"latitude\": ds.events[0].preferred_origin().latitude,\n \"longitude\": ds.events[0].preferred_origin().longitude,\n }\n source = pd.DataFrame([list(src.values())],\n columns=list(src.keys())\n )\n source.set_index(\"event_id\", inplace=True)\n\n self.sources = pd.concat([self.sources, source])\n\n # Loop through all the stations in the dataset to create a dataframe\n networks, stations, locations = [], [], []\n latitudes, longitudes = [], []\n for sta, sta_info in ds.get_all_coordinates().items():\n # Append station information one time globally by checking name\n net, sta = sta.split(\".\")\n if not (net, sta) in self.receivers.index:\n networks.append(net)\n stations.append(sta)\n latitudes.append(sta_info[\"latitude\"])\n longitudes.append(sta_info[\"longitude\"])\n\n # Create a list of tuples for multiindexing\n if networks:\n tuples = list(zip(*[networks, stations]))\n idx = pd.MultiIndex.from_tuples(tuples,\n names=[\"network\", \"station\"])\n receivers = pd.DataFrame([latitudes, longitudes],\n index=[\"latitude\", \"longitude\"],\n columns=idx\n )\n self.receivers = pd.concat([self.receivers, receivers.T])\n\n def _get_windows_from_dataset(self, ds):\n \"\"\"\n Get window and misfit information from dataset auxiliary data\n Model and Step information should match between the two\n auxiliary data objects MisfitWindows and AdjointSources\n\n TODO: break this into _get_windows_from_dataset and \n _get_adjsrcs_from_dataset?\n\n :type ds: pyasdf.ASDFDataSet\n :param ds: dataset to query for misfit:\n :rtype: pandas.DataFrame\n :return: a dataframe object containing information per misfit window\n \"\"\"\n eid = format_event_name(ds.events[0])\n\n # Initialize an empty dictionary that will be used to initalize\n # a Pandas DataFrame\n window = {\"event\": [], \"iteration\": [], \"step\": [], \"network\": [],\n \"station\": [], \"location\": [], \"channel\": [], \"component\": [],\n \"misfit\": [], \"length_s\": [],\n }\n # These are direct parameter names of the MisfitWindow aux data objects\n winfo = {\"dlnA\": [], \"window_weight\": [], \"max_cc_value\": [],\n \"relative_endtime\": [], \"relative_starttime\": [],\n \"cc_shift_in_seconds\": [], \"absolute_starttime\": [],\n \"absolute_endtime\": [],\n }\n\n misfit_windows = ds.auxiliary_data.MisfitWindows\n adjoint_sources = ds.auxiliary_data.AdjointSources\n\n # Initiation loop to get iteration and step count, allows for the case\n # where no step count is given (e.g., iteration == 'default')\n iters, steps = [], []\n for iter_ in misfit_windows.list():\n iters.append(iter_)\n for step in misfit_windows[iter_].list():\n # Ensure that step counts are formatted like: 's00'\n # if not then we DONT have step counts in the dataset\n if not step.startswith(\"s\") and not len(step) == 3:\n step = \"\"\n steps.append(step)\n\n # Pulling out important information from the windows and adj src.\n for iter_, step in zip(iters, steps):\n # If any entries exist for a given event/model/step\n # ignore appending them to the internal structure as they've\n # already been collected\n if not self.windows.empty and \\\n not self.isolate(iter_, step, eid).empty:\n continue\n\n # 
Explicitely allow for case with no step count in dataset\n misfit_window_eval = misfit_windows[iter_]\n adjoint_source_eval = adjoint_sources[iter_]\n if step:\n misfit_window_eval = misfit_window_eval[step]\n adjoint_source_eval = adjoint_source_eval[step]\n\n for win in misfit_window_eval:\n # pick apart information from this window\n cha_id = win.parameters[\"channel_id\"]\n net, sta, loc, cha = cha_id.split(\".\")\n component = cha[-1]\n\n try:\n # Workaround for potential mismatch between channel\n # names of windows and adjsrcs, search for w/ wildcard\n adj_tag = fnf(adjoint_source_eval.list(),\n f\"{net}_{sta}_*{component}\"\n )[0]\n\n # This misfit value will be the same for mult windows\n window[\"misfit\"].append(adjoint_source_eval[\n adj_tag].parameters[\"misfit\"])\n except IndexError:\n if self.verbose:\n print(f\"No matching adjoint source for {cha_id}\")\n window[\"misfit\"].append(np.nan)\n\n # winfo keys match the keys of the Pyflex Window objects\n for par in winfo:\n winfo[par].append(win.parameters[par])\n\n # get identifying information for this window\n window[\"event\"].append(eid)\n window[\"network\"].append(net)\n window[\"station\"].append(sta)\n window[\"location\"].append(loc)\n window[\"channel\"].append(cha)\n window[\"component\"].append(component)\n window[\"iteration\"].append(iter_)\n window[\"step\"].append(step)\n\n # useful to get window length information\n window[\"length_s\"].append(\n win.parameters[\"relative_endtime\"] -\n win.parameters[\"relative_starttime\"]\n )\n\n # Only add to internal structure if something was collected\n if window[\"event\"]:\n window.update(winfo)\n self.windows = pd.concat([self.windows, pd.DataFrame(window)],\n ignore_index=True)\n\n def _parse_nonetype_eval(self, iteration, step_count):\n \"\"\"\n Whenever a user does not choose an iteration or step count, e.g., in\n plotting functions, this function defines default values based on the\n initial model (if neither given), or the last step count for a given\n iteration (if only iteration is given). 
Only step count is not allowed\n\n :type iteration: str\n :param iteration: chosen iteration, formatted as e.g., 'i01'\n :type step_count: str\n :param step_count: chosen step count, formatted as e.g., 's00'\n :rtype: tuple of str\n :return: (iteration, step_count) default values for the iteration\n and step_count\n \"\"\"\n # Default iteration and step count if None are given\n if iteration is None and step_count is None:\n iteration, step_count = self.initial_model\n print(f\"No iteration or step count given, defaulting to initial \"\n f\"model: {iteration}{step_count}\")\n elif iteration and (step_count is None):\n step_count = self.steps[iteration][-1]\n print(f\"No step count given, defaulting to final step count within\"\n f\"given iteration: {iteration}{step_count}\")\n elif (iteration is None) and (step_count is not None):\n raise ValueError(\"'step_count' cannot be provided by itself, you \"\n \"must also set the variable: 'iteration'\")\n return iteration, step_count\n \n def discover(self, path=\"./\", ignore_symlinks=True):\n \"\"\"\n Allow the Inspector to scour through a path and find relevant files,\n appending them to the internal structure as necessary.\n\n :type path: str\n :param path: path to the pyasdf.asdf_data_set.ASDFDataSets that were\n outputted by the Seisflows workflow\n :type ignore_symlinks: bool\n :param ignore_symlinks: skip over symlinked HDF5 files when discovering\n \"\"\"\n dsfids = glob(os.path.join(path, \"*.h5\"))\n # remove symlinks from the list if requested\n if ignore_symlinks:\n dsfids = [_ for _ in dsfids if not os.path.islink(_)]\n for i, dsfid in enumerate(dsfids):\n if self.verbose:\n print(f\"{os.path.basename(dsfid):<25} \"\n f\"{i+1:0>3}/{len(dsfids):0>3}\", end=\"...\"\n )\n try:\n self.append(dsfid)\n if self.verbose:\n print(\"done\")\n except KeyError as e:\n if self.verbose:\n print(f\"error: {e}\")\n traceback.print_exc()\n continue\n\n return self\n\n def append(self, dsfid, srcrcv=True, windows=True):\n \"\"\"\n Simple function to parse information from a\n pyasdf.asdf_data_setASDFDataSet file and append it to the currect\n collection of information.\n\n :type dsfid: str\n :param dsfid: fid of the dataset\n :type srcrcv: bool\n :param srcrcv: gather source-receiver information\n :type windows: bool\n :param windows: gather window information\n \"\"\"\n try:\n with pyasdf.ASDFDataSet(dsfid) as ds:\n if srcrcv:\n self._get_srcrcv_from_dataset(ds)\n if windows:\n try:\n self._get_windows_from_dataset(ds)\n except AttributeError as e:\n if self.verbose:\n print(\"error reading dataset: \"\n \"missing auxiliary data\")\n return\n except OSError:\n if self.verbose:\n print(f\"error reading dataset: already open\")\n return\n\n def extend(self, windows):\n \"\"\"\n Extend the current Inspector data frames with the windows from another\n Inspector. This is useful for when an inversion has been run in legs, so\n two individual inspectors constitute a single inversion.\n\n .. note::\n The current inspector is considered leg A, and the argument\n 'windows' is considered leg B. Leg B will have its iteration numbers\n changed to reflect this\n\n .. warning::\n This will only work if all the events and stations are the same.\n That is, only two identical inversion scenarios can be used.\n\n :type windows: pandas.core.data_frame.DataFrame or list of DataFrames\n :param windows: Windows from a separate inspector object that will be\n used to extend the current Inspector. 
Can also be provided as a list\n of DataFrames to extend multiple times.\n \"\"\"\n def convert(val):\n \"\"\"Convenience function to convert between int and str repr\"\"\"\n if isinstance(val, str):\n return int(val[1:])\n elif isinstance(val, int):\n return f\"i{val:0>2}\"\n\n # To allow for list arguments\n if not isinstance(windows, list):\n windows = [windows]\n\n for win in windows:\n # Ensure that inplace changes won't affect original data\n windows_ext = win.copy()\n\n # Determine the new B iteration values based on the\n # final iteration of leg A\n final_iter_a = self.iterations[-1]\n for iter_ in windows_ext.iteration.unique():\n shifted_iter = convert(convert(iter_) + convert(final_iter_a))\n windows_ext.iteration.replace(iter_, shifted_iter, inplace=True)\n\n self.windows = pd.concat([self.windows, windows_ext])\n\n # Redo get models since iterations have changed\n if self._models is not None:\n self.get_models()\n\n return self\n\n def save(self, path=\"./\", fmt=\"csv\", tag=None):\n \"\"\"\n Save the downloaded attributes into JSON files for easier re-loading.\n\n .. note::\n fmt == 'hdf' requires 'pytables' to be installed in the environment\n\n :type tag: str\n :param tag: tag to use to save files, defaults to the class tag\n but allows for the option of overwriting that\n :type path: str\n :param path: optional path to save to, defaults to cwd\n :type fmt: str\n :param fmt: format of the files to write, default csv\n \"\"\"\n if tag is None:\n tag = self.tag\n if fmt == \"hdf\":\n try:\n import pytables\n except ImportError:\n fmt = \"csv\"\n print(\"format 'hdf' requires pytables, defaulting to 'csv'\")\n\n if fmt == \"csv\":\n write_check = 0\n if not self.sources.empty:\n self.sources.to_csv(os.path.join(path, f\"{tag}_src.csv\"))\n write_check += 1\n if not self.receivers.empty:\n self.receivers.to_csv(os.path.join(path, f\"{tag}_rcv.csv\"))\n write_check += 1\n if not self.windows.empty:\n self.windows.to_csv(os.path.join(path, f\"{tag}.csv\"),\n index=False)\n write_check += 1\n if write_check == 0:\n logger.warning(\"Inspector empty, will not write to disk\")\n elif fmt == \"hdf\":\n with pd.HDFStore(os.path.join(path, f\"{tag}.hdf\")) as s:\n s[\"sources\"] = self.sources\n s[\"receivers\"] = self.receivers\n s[\"windows\"] = self.windows\n else:\n raise NotImplementedError\n\n def write(self, **kwargs):\n \"\"\"Same as Inspector.save(), but I kept writing .write()\"\"\"\n self.save(**kwargs)\n\n def read(self, path=\"./\", fmt=None, tag=None):\n \"\"\"\n Load previously saved attributes to avoid re-processing data.\n\n :type tag: str\n :param tag: tag to use to look for files, defaults to the class tag\n but allows for the option of overwriting that\n :type path: str\n :param path: optional path to file, defaults to cwd\n :type fmt: str\n :param fmt: format of the files to read, default csv\n \"\"\"\n if tag is None:\n tag = self.tag\n\n # Dynamically determine file format\n if not fmt:\n tag = tag.split(\".\")[0] # remove extension if there is one\n if os.path.exists(os.path.join(path, f\"{tag}.csv\")):\n fmt = \"csv\"\n elif os.path.exists(os.path.join(path, f\"{tag}.hdf\")):\n fmt = \"hdf\"\n else:\n raise FileNotFoundError\n\n if fmt == \"csv\":\n self.sources = pd.read_csv(os.path.join(path, f\"{tag}_src.csv\"))\n self.sources.set_index(\"event_id\", inplace=True)\n\n self.receivers = pd.read_csv(os.path.join(path, f\"{tag}_rcv.csv\"))\n self.receivers.set_index([\"network\", \"station\"], inplace=True)\n\n self.windows = pd.read_csv(os.path.join(path, 
f\"{tag}.csv\"))\n elif fmt == \"hdf\":\n with pd.HDFStore(os.path.join(path, f\"{tag}.hdf\")) as s:\n self.sources = s[\"sources\"]\n self.receivers = s[\"receivers\"]\n self.windows = s[\"windows\"]\n else:\n raise NotImplementedError\n\n def copy(self):\n \"\"\"\n Return a deep copy of the Inspector\n \"\"\"\n return deepcopy(self)\n\n def reset(self):\n \"\"\"\n Simple function to wipe out all the internal attributes\n \"\"\"\n self.windows = pd.DataFrame()\n self.sources = pd.DataFrame()\n self.receivers = pd.DataFrame()\n\n def isolate(self, iteration=None, step_count=None, event=None,\n network=None, station=None, channel=None, component=None,\n keys=None, exclude=None, unique_key=None):\n \"\"\"\n Returns a new dataframe that is grouped by a given index if variable is\n None, defaults to returning all available values\n\n :type event: str\n :param event: event id e.g. '2018p130600' (optional)\n :type iteration: str\n :param iteration: iteration e.g. 'i00' (optional)\n :type step_count: str\n :param step_count: step count e.g. 's00' (optional)\n :type station: str\n :param station: station name e.g. 'BKZ' (optional)\n :type network: str\n :param network: network name e.g. 'NZ' (optional)\n :type channel: str\n :param channel: channel name e.g. 'HHE' (optional)\n :type component: str\n :param component: component name e.g. 'Z' (optional)\n :type unique_key: str\n :param unique_key: isolates model, event and station information, \n alongside a single info key, such as dlnA.\n Useful for looking at one variable without have to write out long \n lists to 'exclude' or 'keys'\n :type keys: list\n :param keys: list of keys to retain in returned dataset, 'exclude'\n will override this variable, best to use them separately\n :type exclude: list\n :param exclude: list of keys to remove from returned dataset\n :rtype: pandas.DataFrame\n :return: DataFrame with selected rows based on selected column values\n \"\"\"\n df = self.windows\n df = df.loc[(df[\"event\"] == (event or df[\"event\"].to_numpy())) &\n (df[\"iteration\"] == (\n iteration or df[\"iteration\"].to_numpy())) &\n (df[\"step\"] == (step_count or df[\"step\"].to_numpy())) &\n (df[\"station\"] == (station or df[\"station\"].to_numpy())) &\n (df[\"network\"] == (network or df[\"network\"].to_numpy())) &\n (df[\"channel\"] == (channel or df[\"channel\"].to_numpy())) &\n (df[\"component\"] == (\n component or df[\"component\"].to_numpy()))\n ]\n if unique_key is not None:\n # return the unique key alongside identifying information\n unique_keys = [\"event\", \"iteration\", \"step\", \"network\", \"station\", \n \"channel\", \"comp\", unique_key]\n df = df.loc[:, df.columns.intersection(unique_keys)]\n if exclude is not None:\n if not isinstance(exclude, list):\n exclude = [exclude]\n # delete excluded keys from key list one by one\n df_keys = df.keys().to_numpy()\n for e in exclude:\n df_keys = df_keys[df_keys != e]\n if keys is not None:\n keys = np.append(df_keys, keys)\n else:\n keys = df_keys\n if keys is not None:\n # 'exclude' may produce repeat keys so run unique beforehand\n df = df.loc[:, df.columns.intersection(np.unique(keys))]\n return df\n\n def nwin(self, level=\"step\"):\n \"\"\"\n Find the cumulative length of misfit windows for a given iter/step,\n or the number of misfit windows for a given iter/step.\n\n .. note::\n Neat trick to select just by station:\n insp.windows(level='station').query(\"station == 'BFZ'\")\n\n :type level: str\n :param level: Level to get number of windows by. 
Default is 'step'\n\n * step: to get the total window length and number of windows for the\n given step count.\n * station: to get this on a per-station basis,\n useful for identifying sta quality.\n :rtype: pandas.DataFrame\n :return: a DataFrame with indices corresponding to iter, step,\n columns listing the number of windows (nwin) and the cumulative\n length of windows in seconds (length_s)\n \"\"\"\n group_list = [\"iteration\", \"step\", \"length_s\"]\n if level in [\"station\", \"event\"]:\n group_list.insert(2, level)\n elif level == \"step\":\n pass\n else:\n raise TypeError(\n \"nwin() argument 'level' must be 'station', 'event', 'step'\")\n\n windows = self.windows.loc[:, tuple(group_list)]\n windows.sort_values(group_list, inplace=True)\n\n group = windows.groupby(group_list[:-1]).length_s\n df = pd.concat([group.apply(len).rename(\"nwin\"), group.sum()],\n axis=1)\n if level == \"step\":\n return df\n else:\n # Only sort by window number if level is 'station' or 'event'\n return df.sort_values(\"nwin\", ascending=False)\n\n def misfit(self, level=\"step\", reset=False):\n \"\"\"\n Sum the total misfit for a given iteration based on the individual\n misfits for each misfit window, and the number of sources used.\n Calculated misfits are stored internally to avoid needing to recalculate\n each time this function is called\n\n .. note::\n To get per-station misfit on a per-step basis\n df = insp.misfits(level=\"station\").query(\"station == 'TOZ'\")\n df.groupby(['iteration', 'step']).sum()\n\n :type level: str\n :param level: Default is 'step'\n 'station': unscaled misfit on a per-station basis\n 'step': to get total misfit for a given step count.\n 'event': to get this on a per-event misfit.\n :type reset: bool\n :param reset: reset internally stored attribute and re-calculate misfit\n :rtype: dict\n :return: total misfit for each iteration in the class\n \"\"\"\n # We will try to access internal attributes first to save time\n if not reset:\n if level == \"step\" and self._step_misfit is not None:\n return self._step_misfit\n elif level == \"station\" and self._station_misfit is not None:\n return self._station_misfit\n elif level == \"event\" and self._event_misfit is not None:\n return self._event_misfit\n\n # Various levels to sort the misfit by\n group_list = [\"iteration\", \"step\", \"event\", \"station\", \"component\", \n \"misfit\"]\n misfits = self.windows.loc[:, tuple(group_list)]\n\n # Count the number of windows on a per station basis\n nwin = misfits.groupby(\n group_list[:-1]).misfit.apply(len).rename(\"nwin\")\n\n # Misfit is unique per component, not window, drop repeat components\n misfits.drop_duplicates(subset=group_list[:-1], keep=\"first\", \n inplace=True)\n\n # Group misfit and window on a per station basis, collect together\n nwin = nwin.groupby(group_list[:-2]).sum()\n misfits = misfits.groupby(\n group_list[:-2]).misfit.sum().rename(\"unscaled_misfit\")\n df = pd.concat([misfits, nwin], axis=1)\n\n # No formal definition of station misfit so we just define it as the\n # misfit for a given station, divided by number of windows\n if level == \"station\":\n df[\"misfit\"] = df.apply(\n lambda row: row.unscaled_misfit / row.nwin, axis=1\n )\n # Event misfit function defined by Tape et al. (2010) Eq. 
6\n elif level in [\"event\", \"step\"]:\n # Group misfits to the event level and sum together windows, misfit\n df = df.groupby(group_list[:3]).sum() \n df[\"misfit\"] = df.apply(\n lambda row: row.unscaled_misfit / (2 * row.nwin), axis=1\n )\n if level == \"step\":\n # Sum the event misfits if step-wise misfit is requested\n misfits = df.loc[:, \"misfit\"]\n group = misfits.groupby([\"iteration\", \"step\"])\n df = pd.concat([group.apply(len).rename(\"n_event\"),\n group.sum().rename(\"summed_misfit\")], axis=1)\n # Misfit function a la Tape et al. (2010) Eq. 7\n df[\"misfit\"] = df.apply(\n lambda row: row.summed_misfit / row.n_event, axis=1\n )\n df.drop(labels=\"summed_misfit\", axis=1)\n else:\n raise NotImplementedError(\n \"level must be 'station', 'event' or 'step'\")\n\n # Set internal attribute for easier access at next request\n if level == \"step\":\n self._step_misfit = df\n elif level == \"station\":\n self._station_misfit = df\n elif level == \"event\":\n self._event_misfit = df\n\n return df\n\n def stats(self, level=\"event\", choice=\"mean\", key=None, iteration=None,\n step_count=None):\n \"\"\"\n Calculate the per-level statistical values for DataFrame\n\n :type level: str\n :param level: get statistical values per 'event' or 'station'\n :type choice: str\n :param choice: Pandas function, 'mean', 'std', 'var', etc.\n :type key: windows column header, e.g. 'cc_shift_in_seconds'\n :type iteration: str\n :param iteration: filter for a given iteration\n :type step_count: str\n :param step_count: filter for a given step count\n :rtype: pandas.DataFrame\n :return: DataFrame containing the `choice` of stats for given options\n \"\"\"\n group_list = [\"iteration\", \"step\", level]\n\n df = getattr(self.windows.groupby(group_list), choice)(\n numeric_only=True)\n if iteration is not None:\n df = df.loc[iteration]\n if step_count is not None:\n df = df.loc[step_count]\n if key is not None:\n df = df[key]\n\n return df\n\n def minmax(self, iteration=None, step_count=None, keys=None,\n quantities=None, pprint=True):\n \"\"\"\n Calculate and print the min/max values for a whole slew of parameters\n for a given iteration and step count. Useful for understanding the\n worst/ best case scenarios and their relation to the average.\n\n :type iteration: str\n :param iteration: filter for a given iteration\n :type step_count: str\n :param step_count: filter for a given step count\n :type keys: list of str\n :param keys: keys to calculate minmax values for, must be a subset of\n Inspector.windows.keys()\n :type quantities: list of str\n :param quantities: quantities to get values for, e.g. 
min, max, median,\n must be an attribute of pandas.core.series.Series\n :type pprint: bool\n :param pprint: pretty print the resulting values\n :rtype: dict\n :return: dictionary containing the minmax stats\n \"\"\"\n if iteration is None:\n iteration, step_count = self.final_model\n if keys is None:\n keys = [\"misfit\", \"length_s\", \"dlnA\", \"max_cc_value\",\n \"cc_shift_in_seconds\"]\n if quantities is None:\n quantities = [\"min\", \"max\", \"mean\", \"median\", \"std\"]\n\n minmax_dict = {}\n df = self.windows[self.windows.iteration == iteration]\n df = df[df.step == step_count]\n\n minmax_dict[\"nwin\"] = len(df)\n minmax_dict[\"len\"] = df.length_s.sum()\n\n for key in keys:\n for quantity in quantities:\n minmax_dict[f\"{key}_{quantity}\"] = getattr(df[key], quantity)()\n\n if pprint:\n max_key_len = max([len(_) for _ in minmax_dict.keys()])\n for key, val in minmax_dict.items():\n print(f\"{key + ':':<{max_key_len}} {val:.4f}\")\n\n return minmax_dict\n\n def compare(self, iteration_a=None, step_count_a=None, iteration_b=None,\n step_count_b=None):\n \"\"\"\n Compare the misfit and number of windows on an event by event basis\n between two evaluations. Provides absolute values as well as\n differences. Final dataframe is sorted by the difference in misfit,\n showing the most and least improved events.\n\n :type iteration_a: str\n :param iteration_a: initial iteration to use in comparison\n :type step_count_a: str\n :param step_count_a: initial step count to use in comparison\n :type iteration_b: str\n :param iteration_b: final iteration to use in comparison\n :type step_count_b: str\n :param step_count_b: final step count to use in comparison\n :rtype: pandas.core.data_frame.DataFrame\n :return: a sorted data frame containing the difference of misfit and\n number of windows between final and initial\n \"\"\"\n # Assuming if first arg isnt given, default to first/last model\n if iteration_a is None:\n iteration_a, step_count_a = self.initial_model\n if iteration_b is None:\n iteration_b, step_count_b = self.final_model\n\n # If initial or final models not given, nothing to compare\n if None in [iteration_a, step_count_a, iteration_b, step_count_b]:\n logger.warning(\"Cannot locate model indices to compare model data\")\n return None\n\n misfit = self.misfit(level=\"event\")\n msft_a = misfit.loc[iteration_a, step_count_a]\n msft_b = misfit.loc[iteration_b, step_count_b]\n\n # Doesn't really make sense to compare unscaled misfit so drop column\n msft_a = msft_a.drop([\"unscaled_misfit\"], axis=1).copy()\n msft_b = msft_b.drop([\"unscaled_misfit\"], axis=1).copy()\n\n # For renaming and access to renamed columns\n initial = f\"{iteration_a}{step_count_a}\"\n final = f\"{iteration_b}{step_count_b}\"\n\n msft_a.rename({\"nwin\": f\"nwin_{initial}\",\n \"misfit\": f\"misfit_{initial}\"},\n axis=\"columns\", inplace=True)\n msft_b.rename({\"nwin\": f\"nwin_{final}\", \"misfit\": f\"misfit_{final}\"},\n axis=\"columns\", inplace=True)\n\n df = pd.merge(msft_a, msft_b, left_index=True, right_index=True)\n df[\"diff_misfit\"] = df[f\"misfit_{final}\"] - df[f\"misfit_{initial}\"]\n df[\"diff_nwin\"] = df[f\"nwin_{final}\"] - df[f\"nwin_{initial}\"]\n\n return df.sort_values(by=\"diff_misfit\")\n\n def compare_windows(self, iteration_a=None, step_count_a=None, \n iteration_b=None, step_count_b=None):\n \"\"\"\n Compare individual, matching misfit windows between two evaluations.\n \n .. 
note::\n This will only work/make sense if the windows were fixed between \n the two evaluations, such that they share the exact same window\n selections.\n\n :type iteration_a: str\n :param iteration_a: initial iteration to use in comparison\n :type step_count_a: str\n :param step_count_a: initial step count to use in comparison\n :type iteration_b: str\n :param iteration_b: final iteration to use in comparison\n :type step_count_b: str\n :param step_count_b: final step count to use in comparison\n :rtype: pandas.core.data_frame.DataFrame\n :return: a data frame containing differences of windowing parameters\n between final and initial models\n \"\"\"\n # These are the window values that will be different between two evals\n comp_values = [\"misfit\", \"dlnA\", \"window_weight\", \"max_cc_value\",\n \"cc_shift_in_seconds\"]\n\n # Assuming if first arg isn't given, default to first/last model\n if iteration_a is None:\n iteration_a, step_count_a = self.initial_model\n if iteration_b is None:\n iteration_b, step_count_b = self.final_model\n\n # Use copies to ensure any inplace changes don't make it back to self\n windows_a = self.isolate(iteration_a, step_count_a).copy()\n windows_b = self.isolate(iteration_b, step_count_b).copy()\n \n assert(len(windows_a) == len(windows_b)), \\\n (\"the number of windows does not match between the two \"\n \"evaluations, windows cannot be compared\")\n\n # We are using references of the windows to make inplace changes which\n # throws chained assignment warnings. This is acceptable so ignore\n evals = []\n with pd.option_context(\"mode.chained_assignment\", None):\n for _win in [windows_a, windows_b]:\n eval = f\"{_win.iteration.iloc[0]}{_win.step.iloc[0]}\"\n evals.append(eval)\n # Drop unnecessary columns that are not useful in comparison\n _win.drop([\"length_s\", \"relative_endtime\", \"absolute_starttime\",\n \"absolute_endtime\", \"iteration\", \"step\"],\n axis=1, inplace=True)\n # Rename columns so they don't get merged into one another\n for column in comp_values:\n _win.rename({column: f\"{column}_{eval}\"},\n axis=\"columns\", inplace=True)\n # Set the index as a column so that the user can figure out\n # the windows index in the original dataframe\n _win[f\"index_{eval}\"] = _win.index\n\n # Merge the evaluations using shared attributes i.e. src rcv info\n df = pd.merge(windows_a, windows_b)\n # Take differences of all the comparison values, 'final - initial'\n initial, final = evals\n for val in comp_values:\n df[f\"diff_{val}\"] = df[f\"{val}_{final}\"] - df[f\"{val}_{initial}\"]\n\n return df\n\n def filter_sources(self, lat_min=None, lat_max=None, lon_min=None,\n lon_max=None, depth_min=None, depth_max=None,\n mag_min=None, mag_max=None, min_start=None,\n max_start=None):\n \"\"\"\n Go through misfits and windows and remove events that fall outside\n a certain bounding box. 
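For example, filter_sources(mag_min=4.5, depth_max=60.) keeps only events of magnitude 4.5 or greater with depth of at most 60 km. 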
Return sources that fall within the box.\n Bounds are inclusive of given values.\n\n :type lat_min: float\n :param lat_min: minimum latitude in degrees\n :type lat_max: float\n :param lat_max: maximum latitude in degrees\n :type lon_min: float\n :param lon_min: minimum longitude in degrees\n :type lon_max: float\n :param lon_max: maximum longitude in degrees\n :type depth_min: float\n :param depth_min: minimum depth of event in km, depth is positive\n :type depth_max: float\n :param depth_max: maximum depth of event in km, depth is positive\n :type mag_min: float\n :param mag_min: minimum magnitude\n :type mag_max: float\n :param mag_max: maximum magnitude\n :type min_start: obspy.UTCDateTime()\n :param min_start: minimum origintime of event\n :type max_start: obspy.UTCDateTime()\n :param max_start: maximum origintime of event\n \"\"\"\n sources = self.sources.copy()\n if lat_min:\n sources = sources.loc[sources[\"latitude\"] >= lat_min]\n if lat_max:\n sources = sources.loc[sources[\"latitude\"] <= lat_max]\n if lon_min:\n sources = sources.loc[sources[\"longitude\"] >= lon_min]\n if lon_max:\n sources = sources.loc[sources[\"longitude\"] <= lon_max]\n if depth_min:\n sources = sources.loc[sources[\"depth_km\"] >= depth_min]\n if depth_max:\n sources = sources.loc[sources[\"depth_km\"] <= depth_max]\n if mag_min:\n sources = sources.loc[sources[\"magnitude\"] >= mag_min]\n if mag_max:\n sources = sources.loc[sources[\"magnitude\"] <= mag_max]\n if min_start or max_start:\n # Convert strings to datetime objects for datetime manipulations\n sources[\"time\"] = pd.to_datetime(sources[\"time\"])\n if min_start:\n sources = sources.loc[\n sources[\"time\"] >= min_start].set_index(\"event_id\")\n if max_start:\n sources = sources.loc[\n sources[\"time\"] <= max_start].set_index(\"event_id\")\n\n return sources\n\n def get_models(self):\n \"\"\"\n Return a sorted list of misfits which correspond to accepted models,\n label discards of the line search, and differentiate the final accepted\n line search evaluation from the previous iteration and the initial\n evaluation of the current iteration.\n\n .. 
note::\n State and status is given as:\n 0 == INITIAL function evaluation for the model;\n 1 == SUCCESS -ful function evaluation for the model;\n -1 == DISCARD trial step from line search.\n\n :rtype: pandas.core.data_frame.DataFrame\n :return: a dataframe containing model numbers, their corresponding\n iteration, step count and misfit value, and the status of the\n function evaluation.\n \"\"\"\n misfit = self.misfit()\n models = {\"model\": [], \"iteration\": [], \"step_count\": [], \"misfit\": [],\n \"status\": [], \"state\": []\n }\n\n # Model lags iteration by 1\n for m, iter_ in enumerate(self.iterations):\n # First we collect misfit values for each step for reference\n misfits_ = [float(misfit.loc[iter_].loc[_].misfit) for _ in\n self.steps[iter_]\n ]\n\n # Then we loop through the steps and pick out the smallest misfit\n for s, step in enumerate(self.steps[iter_]):\n # Initial evaluation, accepted misfits\n if step == \"s00\":\n model = m\n status = 0\n # Line search, mix of discards and final misfit\n else:\n model = m + 1\n if misfits_[s] == min(misfits_):\n status = 1\n else:\n status = -1\n\n models[\"model\"].append(f\"m{model:0>2}\")\n models[\"misfit\"].append(misfits_[s])\n models[\"iteration\"].append(iter_)\n models[\"step_count\"].append(step)\n models[\"state\"].append(status)\n models[\"status\"].append({0: \"INITIAL\", \n 1: \"SUCCESS\", \n -1: \"DISCARD\"}[status]\n )\n\n self._models = pd.DataFrame(models)\n\n def get_srcrcv(self):\n \"\"\"\n Retrieve information regarding source-receiver pairs including distance,\n backazimuth and theoretical traveltimes for a 1D Earth model.\n\n :rtype: pandas.core.frame.DataFrame\n :return: separate dataframe with distance and backazimuth columns, that\n may be used as a lookup table\n \"\"\"\n if self.sources.empty or self.receivers.empty:\n return []\n\n srcrcv_dict = {\"event\": [], \"network\": [], \"station\": [],\n \"distance_km\": [], \"backazimuth\": []\n }\n\n for eid, elat, elon, edpth in zip(self.sources.index.to_numpy(),\n self.sources.latitude.to_numpy(),\n self.sources.longitude.to_numpy(),\n self.sources.depth_km.to_numpy()\n ):\n for rid, rlat, rlon in zip(self.receivers.index,\n self.receivers.latitude.to_numpy(),\n self.receivers.longitude.to_numpy()\n ):\n gcd, _, baz = gps2dist_azimuth(lat1=elat, lon1=elon,\n lat2=rlat, lon2=rlon,\n )\n net, sta = rid\n srcrcv_dict[\"event\"].append(eid)\n srcrcv_dict[\"network\"].append(net)\n srcrcv_dict[\"station\"].append(sta)\n srcrcv_dict[\"distance_km\"].append(gcd * 1E-3)\n srcrcv_dict[\"backazimuth\"].append(baz)\n\n self._srcrcv = pd.DataFrame(srcrcv_dict)\n\n def get_unique_models(self, float_precision=3):\n \"\"\"\n Find all accepted models (status 0 or 1) that have a unique misfit\n value. Because some forward evaluations are repeats of the previous\n line search evaluation, they will effectively be the same evaluation so\n they can be removed\n\n :type float_precision: int\n :param float_precision: identical misfit values will differ after some\n decimal place. 
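For example, with float_precision=3, misfits 1.23456 and 1.23467 both round to 1.235 and count as the same evaluation; 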
this value determines which decimal place to\n truncate the values for comparison\n \"\"\"\n models = self.good_models\n models.reset_index(drop=True, inplace=True)\n misfit = models.misfit.round(decimals=float_precision)\n identical_misfit = np.where(misfit.diff() == 0)[0]\n models.drop(axis=0, index=identical_misfit, inplace=True)\n models.reset_index(drop=True, inplace=True)\n\n return models\n","repo_name":"adjtomo/pyatoa","sub_path":"pyatoa/core/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":50564,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"12720158893","text":"import itertools\nfrom functools import partial\nfrom typing import Iterable\n\nimport numpy as np\nfrom pandas.tseries.frequencies import to_offset\nimport pytest\n\nfrom gluonts.dataset.common import Dataset\nfrom gluonts.dataset.loader import (\n DataBatch,\n InferenceDataLoader,\n TrainDataLoader,\n)\nfrom gluonts.mx.batchify import _pad_arrays, batchify, stack\nfrom gluonts.testutil.dummy_datasets import get_dataset\nfrom gluonts.transform import (\n ContinuousTimeInstanceSplitter,\n ContinuousTimeUniformSampler,\n ContinuousTimePredictionSampler,\n)\n\nNUM_BATCHES = 22\n\n\n@pytest.fixture\ndef loader_factory():\n def train_loader(\n dataset: Dataset,\n prediction_interval_length: float,\n context_interval_length: float,\n is_train: bool = True,\n override_args: dict = None,\n ) -> Iterable[DataBatch]:\n if override_args is None:\n override_args = {}\n\n if is_train:\n sampler = ContinuousTimeUniformSampler(\n num_instances=10,\n min_past=context_interval_length,\n min_future=prediction_interval_length,\n )\n else:\n sampler = ContinuousTimePredictionSampler(\n min_past=context_interval_length\n )\n\n splitter = ContinuousTimeInstanceSplitter(\n future_interval_length=prediction_interval_length,\n past_interval_length=context_interval_length,\n instance_sampler=sampler,\n freq=to_offset(\"H\"),\n )\n\n kwargs = dict(\n dataset=dataset,\n transform=splitter,\n batch_size=10,\n stack_fn=partial(batchify, dtype=np.float32, variable_length=True),\n )\n\n kwargs.update(override_args)\n\n if is_train:\n return itertools.islice(TrainDataLoader(**kwargs), NUM_BATCHES)\n else:\n return InferenceDataLoader(**kwargs)\n\n return train_loader\n\n\ndef test_train_loader_shapes(loader_factory):\n loader = loader_factory(get_dataset(), 1.0, 1.5)\n\n d = next(iter(loader))\n\n field_names = [\n \"past_target\",\n \"past_valid_length\",\n \"future_target\",\n \"future_valid_length\",\n ]\n\n assert all([key in d for key in field_names])\n\n assert d[\"past_target\"].shape[2] == d[\"future_target\"].shape[2] == 2\n assert d[\"past_target\"].shape[0] == d[\"future_target\"].shape[0] == 10\n assert (\n d[\"past_valid_length\"].shape[0]\n == d[\"future_valid_length\"].shape[0]\n == 10\n )\n\n\ndef test_train_loader_length(loader_factory):\n loader = loader_factory(get_dataset(), 1.0, 1.5)\n\n assert len(list(loader)) == NUM_BATCHES\n\n\ndef test_inference_loader_shapes(loader_factory):\n loader = loader_factory(\n dataset=get_dataset(),\n prediction_interval_length=1.0,\n context_interval_length=1.5,\n is_train=False,\n override_args={\"batch_size\": 10},\n )\n\n batches = list(loader)\n assert len(batches) == 1\n batch = batches[0]\n\n assert batch[\"past_target\"].shape[2] == 2\n assert batch[\"past_target\"].shape[0] == 3\n assert batch[\"past_valid_length\"].shape[0] == 3\n\n\ndef test_inference_loader_shapes_small_batch(loader_factory):\n 
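# With batch_size=2 and the three series in the dummy dataset, the inference\n # loader should yield two batches: one full batch and one remainder.\n 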
loader = loader_factory(\n dataset=get_dataset(),\n prediction_interval_length=1.0,\n context_interval_length=1.5,\n is_train=False,\n override_args={\"batch_size\": 2},\n )\n\n batches = list(loader)\n assert len(batches) == 2\n batch = batches[0]\n\n assert batch[\"past_target\"].shape[2] == 2\n assert batch[\"past_target\"].shape[0] == 2\n assert batch[\"past_valid_length\"].shape[0] == 2\n\n\ndef test_train_loader_short_intervals(loader_factory):\n loader = loader_factory(\n dataset=get_dataset(),\n prediction_interval_length=0.001,\n context_interval_length=0.0001,\n is_train=True,\n override_args={\"batch_size\": 5},\n )\n\n batches = list(loader)\n batch = batches[0]\n\n assert (\n batch[\"past_target\"].shape[1] == batch[\"future_target\"].shape[1] == 1\n )\n assert (\n batch[\"past_target\"].shape[0] == batch[\"future_target\"].shape[0] == 5\n )\n\n\ndef test_inference_loader_short_intervals(loader_factory):\n loader = loader_factory(\n dataset=get_dataset(),\n prediction_interval_length=0.001,\n context_interval_length=0.0001,\n is_train=False,\n override_args={\"batch_size\": 5},\n )\n\n batches = list(loader)\n batch = batches[0]\n\n assert batch[\"past_target\"].shape[1] == 1\n\n\n@pytest.mark.parametrize(\"is_right_pad\", [True, False])\ndef test_variable_length_stack(is_right_pad):\n arrays = [d[\"target\"].T for d in list(iter(get_dataset()))]\n\n stacked = stack(\n arrays,\n variable_length=True,\n is_right_pad=is_right_pad,\n )\n\n assert stacked.shape[0] == 3\n assert stacked.shape[1] > 0\n assert stacked.shape[2] == 2\n\n\n@pytest.mark.parametrize(\"is_right_pad\", [True, False])\ndef test_variable_length_stack_zerosize(is_right_pad):\n arrays = [np.zeros(shape=(0, 2)) for _ in range(5)]\n\n stacked = stack(\n arrays,\n variable_length=True,\n is_right_pad=is_right_pad,\n )\n\n assert stacked.shape[0] == 5\n assert stacked.shape[1] == 1\n assert stacked.shape[2] == 2\n\n\n@pytest.mark.parametrize(\"axis\", [0, 1])\n@pytest.mark.parametrize(\"is_right_pad\", [True, False])\ndef test_pad_arrays_axis(axis: int, is_right_pad: bool):\n arrays = [d[\"target\"] for d in list(iter(get_dataset()))]\n if axis == 0:\n arrays = [x.T for x in arrays]\n\n padded_arrays = _pad_arrays(arrays, axis, is_right_pad=is_right_pad)\n\n assert all(a.shape[axis] == 8 for a in padded_arrays)\n assert all(a.shape[1 - axis] == 2 for a in padded_arrays)\n\n\ndef test_pad_arrays_pad_left():\n arrays = [d[\"target\"] for d in list(iter(get_dataset()))]\n padded_arrays = _pad_arrays(arrays, 1, is_right_pad=False)\n\n for padded_array in padded_arrays[1:]:\n assert np.allclose(padded_array[:, 0], 0)\n","repo_name":"awslabs/gluonts","sub_path":"test/mx/test_variable_length.py","file_name":"test_variable_length.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":3904,"dataset":"github-code","pt":"67"} +{"seq_id":"10389564408","text":"import pandas as pd\nimport datetime as dt\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nBase=automap_base()\nBase.prepare(engine,reflect=True)\nStation=Base.classes.station\nMeasurement=Base.classes.measurement\nsession=Session(bind=engine)\nmax_stn='USC00519281'\n\nfrom flask import Flask, jsonify\napp=Flask(__name__)\nfd=dt.date(2016,8,23)\n@app.route(\"/\")\ndef home():\n return (\n f\"Routes available ...

\"\n f\"Home : /
\" \n f\"Precipitation : /api/v1.0/precipitation
\"\n f\"Stations : /api/v1.0/stations
\"\n f\"Temperatures : /api/v1.0/tobs
\"\n f\"Temperatures from Date : /api/v1.0/ (Use YYYY-MM-DD format)
\"\n f\"Temperatures between Dates: /api/v1.0// (Use YYYY-MM-DD format)
\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precip():\n engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n Base=automap_base()\n Base.prepare(engine,reflect=True)\n Station=Base.classes.station\n Measurement=Base.classes.measurement\n session=Session(bind=engine)\n max_stn='USC00519281'\n sel=[Measurement.date,Measurement.prcp]\n prcp_query=session.query(*sel).\\\n filter(Measurement.station==max_stn).\\\n filter(Measurement.datefd).all()\n temp_df=pd.DataFrame(daily_temps,columns=[\"date\",\"tobs\"])\n temp_df.set_index(temp_df['date'],inplace=True)\n temp_df=temp_df.drop(['date'],axis=1)\n temp_dict=temp_df.to_dict()['tobs']\n session.close()\n return jsonify(temp_dict)\n\n@app.route(\"/api/v1.0/\")\ndef tempfrom(start):\n engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n Base=automap_base()\n Base.prepare(engine,reflect=True)\n Station=Base.classes.station\n Measurement=Base.classes.measurement\n session=Session(bind=engine)\n max_stn='USC00519281'\n sd_yr=int(start[0:4])\n sd_mo=int(start[5:7])\n sd_dy=int(start[8:10])\n start_date=dt.date(sd_yr,sd_mo,sd_dy)\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n tempfrom_query=session.query(*sel).\\\n filter(Measurement.station==max_stn).\\\n filter(Measurement.date>start_date).all()\n tempfrom_dict={'min_temp':tempfrom_query[0][0],'avg_temp':round(tempfrom_query[0][1],1),'max_temp':tempfrom_query[0][2]}\n session.close()\n return jsonify(tempfrom_dict)\n\n@app.route(\"/api/v1.0//\")\ndef tempbetween(start,end):\n sd_yr=int(start[0:4])\n sd_mo=int(start[5:7])\n sd_dy=int(start[8:10])\n start_date=dt.date(sd_yr,sd_mo,sd_dy)\n ed_yr=int(end[0:4])\n ed_mo=int(end[5:7])\n ed_dy=int(end[8:10])\n end_date=dt.date(ed_yr,ed_mo,ed_dy)\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n tempin_query=session.query(*sel).\\\n filter(Measurement.station==max_stn).\\\n filter(Measurement.date>start_date).\\\n filter(Measurement.date to issu markers \n\n\ndef set_vm():\n i=vm.vid_maker()\n return i \n\n@pytest.mark.ytd\ndef test_get_vid_len():\n \"\"\" tests whether invoking wizard works and variables are initiated \"\"\"\n i=set_vm()\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid2.webm')\n audio_fp=i.path_join('tests','tests_inputs','test_input_speech.wav')\n print(i.get_vid_len(vid_fp=vid_fp))\n print(i.get_vid_len(vid_fp=audio_fp))\n return \n\ndef test_extract_sound_from_vid():\n i=set_vm()\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm')\n out_dir=i.path_join('tests','tests_outputs')\n i.split_sound_and_video(vid_fp=vid_fp,out_dir=out_dir,do_audio=True,do_video=False)\n\ndef test_extract_vid_from_vid():\n i=set_vm()\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm')\n out_dir=i.path_join('tests','tests_outputs')\n i.split_sound_and_video(vid_fp=vid_fp,out_dir=out_dir,do_audio=False,do_video=True)\n\ndef test_cut_vid():\n i=set_vm()\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm')\n out_fp=i.path_join('tests','tests_outputs','vm_test_output_cut_vid.webm')\n i.torch_cut_vid(vid_fp=vid_fp,out_fp=out_fp,st_flt=8,en_flt=30)\n \ndef test_cut_audio():\n i=set_vm()\n audio_fp=i.path_join('tests','tests_inputs','test_input_background.wav')\n audio_fp=i.path_join('tests','tests_inputs','test_input_speech.wav')\n out_fp=i.path_join('tests','tests_outputs','vm_test_output_cut_audio.wav')\n 
i.torch_cut_audio(audio_fp=audio_fp,out_fp=out_fp,st_flt=0,en_flt=30)\n\ndef test_boomerangize():\n i=set_vm()\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_boomerang.webm')\n out_fp=i.path_join('tests','tests_outputs','vm_test_output_boomerang.webm')\n i.torch_boomerang(vid_fp=vid_fp,out_fp=out_fp)\n \ndef test_overlay_audios():\n i=set_vm()\n background_fp=i.path_join('tests','tests_inputs','test_input_background.wav')\n speech_fp=i.path_join('tests','tests_inputs','test_input_speech.wav')\n out_fp=i.path_join('tests','tests_outputs','vm_test_output_add_background.wav')\n i.overlay_audios(background_fp=background_fp,speech_fp=speech_fp,out_fp=out_fp)\n \ndef test_overlay_audio_and_video():\n audio_fp=i.path_join('tests','tests_inputs','test_input_background.wav')\n vid_fp=i.path_join('tests','tests_inputs','fiuty12345.webm')\n out_fp=i.path_join('tests','tests_outputs','vm_test_output_overlay_audio_and_video.webm')\n i.overlay_audio_and_video(vid_fp=vid_fp,audio_fp=audio_fp,out_fp=out_fp)\n \ndef test_chopify_video():\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm')\n out_dir=i.path_join('tests','tests_outputs')\n i.chopify_video(vid_fp=vid_fp,out_dir=out_dir,N=10)\n \ndef test_chopify_to_timestamps():\n vid_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm')\n out_dir=i.path_join('tests','tests_outputs')\n timestamps=[0,10,20,50]\n i.chopify_to_timestamps(vid_fp=vid_fp,out_dir=out_dir,timestamps=timestamps)\n \ndef test_concat_streams():\n fp0=i.path_join('tests','tests_inputs','_part_0.webm')\n fp1=i.path_join('tests','tests_inputs','_part_1.webm')\n fp2=i.path_join('tests','tests_inputs','_part_2.webm')\n out_fp=i.path_join('tests','tests_outputs','test_concat_streams.webm')\n i.concat_streams(fps=[fp0,fp1,fp2],out_fp=out_fp)\n \ndef test_workflow(background_fp = None, speech_fp = None, yt_raw_fp=None):\n if background_fp is None:\n background_fp=i.path_join('tests','tests_inputs','test_input_background.wav') # get background from outside \n if speech_fp is None: \n speech_fp=i.path_join('tests','tests_inputs','test_speech.wav') # get speech file from outside \n if yt_raw_fp is None:\n yt_raw_fp=i.path_join('tests','tests_inputs','vm_test_input_vid.webm') # get raw yt vid \n \n tests_outputs_dir=i.path_join('tests','tests_outputs')\n # 1. split yt video and audio \n if 1: # takes long time \n yt_vid_video_fp,yt_vid_audio_fp=i.split_sound_and_video(vid_fp=yt_raw_fp,out_dir=tests_outputs_dir,do_audio=True,do_video=True)\n else:\n yt_vid_video_fp=i.path_join('tests','tests_outputs','vm_test_input_vid.webm')\n yt_vid_audio_fp=i.path_join('tests','tests_outputs','vm_test_input_vid.wav')\n\n # 2. combine speech and background together towards speech len \n speech_and_background_fp=i.path_join('tests','tests_outputs','speech_and_background.wav')\n speech_and_background_fp=i.overlay_audios(background_fp=background_fp,speech_fp=speech_fp,out_fp=speech_and_background_fp)\n \n # 3. cut yt vid to some lengths \n if 1:\n cut_yt_vid_fp=i.path_join('tests','tests_outputs','cut_yt_vid.webm')\n cut_yt_vid_fp=i.torch_cut_vid(vid_fp=yt_vid_video_fp,out_fp=cut_yt_vid_fp,st_flt=7,dur_flt=10)\n else:\n cut_yt_vid_fp=i.path_join('tests','tests_outputs','cut_yt_vid.webm')\n \n # 4. boomerangize video so it matches speech len \n if 1:\n boomerang_fp=i.path_join('tests','tests_outputs','boomerang.webm')\n boomerang_fp=i.torch_boomerang(vid_fp=cut_yt_vid_fp,out_fp=boomerang_fp,n=0)\n else:\n boomerang_fp=i.path_join('tests','tests_outputs','boomerang.webm')\n \n #5. 
concat video further so it matches speech len \n if 1:\n final_vid_fp=i.path_join('tests','tests_outputs','final_vid.webm')\n tgt_len=i.get_vid_len(speech_fp)\n N= int(tgt_len // (i.get_vid_len(boomerang_fp)) + 1)\n final_vid_fp=i.concat_streams(fps=[boomerang_fp for i in range(N)],out_fp=final_vid_fp )\n background_len=i.get_vid_len(speech_and_background_fp)\n final_vid_fp=i.torch_cut_vid(vid_fp=final_vid_fp,out_fp=final_vid_fp,st_flt=0,en_flt=background_len)\n \n else:\n final_vid_fp=i.path_join('tests','tests_outputs','final_vid.webm')\n \n # 6. concat speech with background and final vid together \n final_mov_fp=i.path_join('tests','tests_outputs','final_mov.webm')\n final_mov_fp=i.overlay_audio_and_video(vid_fp=final_vid_fp,audio_fp=speech_and_background_fp,out_fp=final_mov_fp)\n \n \nif __name__=='__main__':\n i=vm.vid_maker()\n out_dir=i.path_join('tests','tests_outputs')\n i.clear_dir(out_dir)\n if 0:\n test_get_vid_len()\n test_extract_sound_from_vid()\n test_extract_vid_from_vid()\n test_cut_vid()\n test_cut_audio()\n test_extract_sound_from_vid()\n test_extract_vid_from_vid()\n test_boomerangize()\n test_overlay_audios()\n test_overlay_audio_and_video()\n test_chopify_video()\n test_concat_streams()\n \n# test_extract_sound_from_vid()\n# test_workflow()\n test_chopify_to_timestamps()\n\n\n\n","repo_name":"pawelofficial/yt-tts","sub_path":"src2/tests/test_vidmaker.py","file_name":"test_vidmaker.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"991280073","text":"import email\n\n\ndef get_message(path):\n \"\"\"\n Parse an email message from the given file path\n :param path: path to the email file\n :return message: message object\n \"\"\"\n with open(path, 'r', encoding='gbk', errors='ignore') as f:\n content = f.read()\n return email.message_from_string(content)\n\n\ndef get_payload(message):\n \"\"\"\n Get the email body\n :param message: the email message\n :return payload: the email body\n \"\"\"\n payload = ''\n if message.is_multipart():\n for part in message.get_payload():\n payload += part.get_payload()\n else:\n payload = message.get_payload()\n return payload\n\n\ndef get_mail_content(path):\n \"\"\"\n Extract the text content of an email from its file path\n :param path: path to the email file\n :return content: the email text\n \"\"\"\n return get_payload(get_message(path))\n\n\nif __name__ == '__main__':\n print(get_payload(get_message('/home/vimsucks/JupyterNotebook/dataset/trec06c/data/001/002')))\n","repo_name":"Scotty1027/undergraduate-thesis","sub_path":"nb_spam/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22228996987","text":"import nose\n\nfrom pandas import DataFrame\nimport numpy as np\n\nimport pandas.util.testing as tm\n\nfrom pandas.io.json import json_normalize, nested_to_record\n\ndef _assert_equal_data(left, right):\n if not left.columns.equals(right.columns):\n left = left.reindex(columns=right.columns)\n\n tm.assert_frame_equal(left, right)\n\n\nclass TestJSONNormalize(tm.TestCase):\n\n def setUp(self):\n self.state_data = [\n {'counties': [{'name': 'Dade', 'population': 12345},\n {'name': 'Broward', 'population': 40000},\n {'name': 'Palm Beach', 'population': 60000}],\n 'info': {'governor': 'Rick Scott'},\n 'shortname': 'FL',\n 'state': 'Florida'},\n {'counties': [{'name': 'Summit', 'population': 1234},\n {'name': 'Cuyahoga', 'population': 1337}],\n 'info': {'governor': 'John Kasich'},\n 'shortname': 'OH',\n 'state': 'Ohio'}]\n\n def test_simple_records(self):\n 
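# Flat records have nothing to normalize, so json_normalize should\n # match the plain DataFrame constructor on the same records.\n 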
recs = [{'a': 1, 'b': 2, 'c': 3},\n {'a': 4, 'b': 5, 'c': 6},\n {'a': 7, 'b': 8, 'c': 9},\n {'a': 10, 'b': 11, 'c': 12}]\n\n result = json_normalize(recs)\n expected = DataFrame(recs)\n\n tm.assert_frame_equal(result, expected)\n\n def test_simple_normalize(self):\n result = json_normalize(self.state_data[0], 'counties')\n expected = DataFrame(self.state_data[0]['counties'])\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(self.state_data, 'counties')\n\n expected = []\n for rec in self.state_data:\n expected.extend(rec['counties'])\n expected = DataFrame(expected)\n\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(self.state_data, 'counties', meta='state')\n expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n def test_more_deeply_nested(self):\n data = [{'country': 'USA',\n 'states': [{'name': 'California',\n 'cities': [{'name': 'San Francisco',\n 'pop': 12345},\n {'name': 'Los Angeles',\n 'pop': 12346}]\n },\n {'name': 'Ohio',\n 'cities': [{'name': 'Columbus',\n 'pop': 1234},\n {'name': 'Cleveland',\n 'pop': 1236}]}\n ]\n },\n {'country': 'Germany',\n 'states': [{'name': 'Bayern',\n 'cities': [{'name': 'Munich', 'pop': 12347}]\n },\n {'name': 'Nordrhein-Westfalen',\n 'cities': [{'name': 'Duesseldorf', 'pop': 1238},\n {'name': 'Koeln', 'pop': 1239}]}\n ]\n }\n ]\n\n result = json_normalize(data, ['states', 'cities'],\n meta=['country', ['states', 'name']])\n # meta_prefix={'states': 'state_'})\n\n ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,\n 'states.name': ['California', 'California', 'Ohio', 'Ohio',\n 'Bayern', 'Nordrhein-Westfalen',\n 'Nordrhein-Westfalen'],\n 'name': ['San Francisco', 'Los Angeles', 'Columbus',\n 'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],\n 'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}\n\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_shallow_nested(self):\n data = [{'state': 'Florida',\n 'shortname': 'FL',\n 'info': {\n 'governor': 'Rick Scott'\n },\n 'counties': [{'name': 'Dade', 'population': 12345},\n {'name': 'Broward', 'population': 40000},\n {'name': 'Palm Beach', 'population': 60000}]},\n {'state': 'Ohio',\n 'shortname': 'OH',\n 'info': {\n 'governor': 'John Kasich'\n },\n 'counties': [{'name': 'Summit', 'population': 1234},\n {'name': 'Cuyahoga', 'population': 1337}]}]\n\n result = json_normalize(data, 'counties',\n ['state', 'shortname',\n ['info', 'governor']])\n ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',\n 'Cuyahoga'],\n 'state': ['Florida'] * 3 + ['Ohio'] * 2,\n 'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],\n 'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,\n 'population': [12345, 40000, 60000, 1234, 1337]}\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_meta_name_conflict(self):\n data = [{'foo': 'hello',\n 'bar': 'there',\n 'data': [{'foo': 'something', 'bar': 'else'},\n {'foo': 'something2', 'bar': 'else2'}]}]\n\n self.assertRaises(ValueError, json_normalize, data,\n 'data', meta=['foo', 'bar'])\n\n result = json_normalize(data, 'data', meta=['foo', 'bar'],\n meta_prefix='meta')\n\n for val in ['metafoo', 'metabar', 'foo', 'bar']:\n self.assertTrue(val in result)\n\n def test_record_prefix(self):\n result = json_normalize(self.state_data[0], 'counties')\n expected = DataFrame(self.state_data[0]['counties'])\n tm.assert_frame_equal(result, expected)\n\n result = 
json_normalize(self.state_data, 'counties',\n meta='state',\n record_prefix='county_')\n\n expected = []\n for rec in self.state_data:\n expected.extend(rec['counties'])\n expected = DataFrame(expected)\n expected = expected.rename(columns=lambda x: 'county_' + x)\n expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestNestedToRecord(tm.TestCase):\n\n def test_flat_stays_flat(self):\n recs = [dict(flat1=1,flat2=2),\n dict(flat1=3,flat2=4),\n ]\n\n result = nested_to_record(recs)\n expected = recs\n self.assertEqual(result, expected)\n\n def test_one_level_deep_flattens(self):\n data = dict(flat1=1,\n dict1=dict(c=1,d=2))\n\n result = nested_to_record(data)\n expected = {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1}\n\n self.assertEqual(result,expected)\n\n def test_nested_flattens(self):\n data = dict(flat1=1,\n dict1=dict(c=1,d=2),\n nested=dict(e=dict(c=1,d=2),\n d=2))\n\n result = nested_to_record(data)\n expected = {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1,\n 'nested.d': 2,\n 'nested.e.c': 1,\n 'nested.e.d': 2}\n\n self.assertEqual(result,expected)\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',\n '--pdb-failure', '-s'], exit=False)\n","repo_name":"pyparallel/pyparallel","sub_path":"Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/io/tests/test_json_norm.py","file_name":"test_json_norm.py","file_ext":"py","file_size_in_byte":7823,"program_lang":"python","lang":"en","doc_type":"code","stars":579,"dataset":"github-code","pt":"67"} +{"seq_id":"15363888196","text":"from Crypto.Signature import PKCS1_v1_5\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Hash import SHA256\nfrom Crypto.Util.number import bytes_to_long, long_to_bytes\nfrom base64 import b64decode, urlsafe_b64decode\nfrom gmpy2 import gcd, mpz\nimport requests\n\nurl = 'https://web-jwt-b9766b1f.chal-2021.duc.tf/'\njwt0 = requests.get(f'{url}get_token').text\njwt1 = requests.get(f'{url}/get_token').text\ntarget_bit_length = 2048\njwt_list = [\n jwt0,\n jwt1\n]\n\n\ndef b64urldecode(b64: str) -> str:\n return urlsafe_b64decode(b64+(\"=\" * (len(b64) % 4)))\n\n\ndef parse(jwt: str) -> (bytes, bytes):\n tokens = jwt.split(\".\")\n return \".\".join(tokens[0:2]), b64urldecode(tokens[2])\n\n\ndef get_rsa_mc(jwt: str) -> int:\n inp, sig = parse(jwt)\n h = SHA256.new(inp.encode())\n m = bytes_to_long(\n PKCS1_v1_5.pkcs1_15._EMSA_PKCS1_V1_5_ENCODE(h, target_bit_length // 8)\n )\n c = bytes_to_long(sig)\n return mpz(m), mpz(c)\n\n\ndef get_pubkey(n: int, e: int) -> str:\n k = RSA.construct([n, e])\n return k.export_key(\"PEM\")\n\n\nms = []\ncs = []\nfor jwt in jwt_list:\n m, c = get_rsa_mc(jwt)\n ms.append(m)\n cs.append(c)\n\nassert len(ms) > 0 and len(cs) == len(ms)\n\ne = 65537\nn = pow(cs[0], e) - ms[0]\nfor i in range(1, len(ms)):\n m = ms[i]\n c = cs[i]\n n = gcd(n, pow(c, e) - m)\n\nfor i in range(2, 1000):\n while n % i == 0:\n n //= i\nn = int(n)\n\nprint(n)\nprint(get_pubkey(n, e))\n","repo_name":"th13vn/ctf-source-saved","sub_path":"_duCTF/JWT/getpub.py","file_name":"getpub.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16889957475","text":"# -*- coding: utf-8 -*-\nimport sys\nif sys.getdefaultencoding() != 'utf-8':\n reload(sys)\n sys.setdefaultencoding('utf-8')\nimport scrapy\nimport time\nimport json\nfrom ..items import ScrapyRedisMongodbItem\nclass XpathRule(object):\n total_page = 
\"//div[@class='pagenav']/a/text()\"\n urlist = \"//table[@class='tablelist']//tr[@class='even']//a/@href|//table[@class='tablelist']//tr[@class='odd']//a/@href\"\n\nclass CctvSpider(scrapy.Spider):\n name = \"tencent\"\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'scrapy_redis_mongodb.pipelines.RedisPipeline': 300\n }}\n start_urls = ['http://hr.tencent.com/position.php']\n\n def parse(self, response):\n try:\n total_page = response.xpath(XpathRule.total_page).extract()[-2]\n except:\n return\n for p in xrange(int(total_page)):\n url = \"http://hr.tencent.com/position.php?&start={}#a\".format((p)*10)\n yield scrapy.Request(url, self.parse_detail)\n\n def parse_detail(self, response):\n urls = response.xpath(XpathRule.urlist).extract()\n for url in urls:\n item = ScrapyRedisMongodbItem()\n item['url'] = \"http://hr.tencent.com/\"+url\n yield item\n\n\n","repo_name":"pythonPCS/scrapy-redis-mongo-mysql-news","sub_path":"scrapy_redis_mongodb-master/scrapy_redis_mongodb/spiders/scrapy_news.py","file_name":"scrapy_news.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"26115382559","text":"from django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.views.generic import ListView, CreateView, UpdateView\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404\n\nfrom .forms import RouteForm\n\nfrom .models import Route\n\n\ndef home(request):\n return render(request, 'routes_form/test_page3.html')\n\n\ndef search(request):\n routes = Route.objects.all()\n return render(request, 'routes_form/search_route_page.html', {'routes': routes})\n\n\ndef url_parsing(request):\n user_input = request.GET.get('inputValue')\n\n urlAjax = Route.objects.get(pk=int(user_input))\n # (pk=int(user_input))\n # urlAjax2 = urlAjax.url\n urlRequestList = parsing(urlAjax.url)\n data = {\n 'url': urlRequestList,\n 'about': urlAjax.about,\n }\n # print(data)\n return JsonResponse(data)\n\n\n# FORMS\n\n\ndef getForm(request):\n form = RouteForm()\n if request.method == 'POST':\n form = RouteForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/search')\n\n return render(request, 'routes_form/create_route_page.html', {'form': form})\n\n\n# print(parsing(a))\n\n# Parsing requestURL\ndef parsing(requestURL):\n # Extracting coordinates from URL to one string\n coordinatesFromURL = requestURL[(requestURL.index(\n 'profile/') + len('profile/')): requestURL.index('?')]\n coordinateStringList = coordinatesFromURL.split(\n ';') # Creating a list of Coordinates as Strings\n # Creating a two-dimensional list from coordinates\n coordinatePairs = [i.split(',') for i in coordinateStringList]\n # Convert strings to decimals\n stringToFloat = [[float(j) for j in i] for i in coordinatePairs]\n return stringToFloat # Returning prepared pairs of coordinates in a list\n\n\n# {% extends 'routes_form/base.html' %}\n# {% load static %}\n\n# {% block content_form %}\n# {% include 'routes_form/partials/_search_form.html' %}\n# {% endblock %}\n\n# {% block script %}\n# \n# {% endblock %}\n","repo_name":"lager-trip/test-repository","sub_path":"routes_form/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38715089702","text":"\"\"\"\nProblem:\n\nGiven a list of words, return the shortest unique prefix of each word. 
For example, given the list:\n\ndog\ncat\napple\napricot\nfish\n\nReturn the list:\n\nd\nc\napp\napr\nf\n\"\"\"\n\nfrom typing import Dict, List, Optional\n\n\ndef get_unique_prefix_for_string(\n dictionary: Dict[str, int], string: str, string_list: List[str]\n) -> Optional[str]:\n prefix = \"\"\n for char in string:\n prefix += char\n if prefix not in dictionary:\n return prefix\n # if a string with the current prefix exists, the prefix for the string is\n # updated\n prev_str_with_same_prefix = string_list[dictionary[prefix]]\n prev_prefix = prefix\n prev_str_index = dictionary[prefix]\n\n del dictionary[prefix]\n try:\n prev_prefix = prev_str_with_same_prefix[: len(prev_prefix) + 1]\n except:\n return\n dictionary[prev_prefix] = prev_str_index\n\n\ndef get_unique_prefix(string_list: List[str]) -> List[str]:\n dictionary = {}\n # generating the unique prefix\n for index, string in enumerate(string_list):\n prefix = get_unique_prefix_for_string(dictionary, string, string_list)\n if not prefix:\n raise ValueError(\"Unique Prefix Generation not possible\")\n dictionary[prefix] = index\n return list(dictionary.keys())\n\n\nif __name__ == \"__main__\":\n print(get_unique_prefix([\"dog\", \"cat\", \"apple\", \"apricot\", \"fish\"]))\n\n\n\"\"\"\nSPECS:\n\nTIME COMPLEXITY: O(n)\nSPACE COMPLEXITY: O(n)\n\"\"\"\n","repo_name":"ruppysuppy/Daily-Coding-Problem-Solutions","sub_path":"Solutions/162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"67"} +{"seq_id":"25053486086","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fft import fft, fftfreq, rfft, rfftfreq\n\nRATE = 64000\n\nfilename = 'data/Fulldata_20220330_25000Hz.dat'\nfulldata = np.loadtxt(filename, delimiter = \",\")\nprint(len(fulldata))\n\nDCoffset = np.mean(fulldata)\nnormalAllData = fulldata-DCoffset\nplt.figure()\nplt.plot(normalAllData)\nplt.show()\n\nyf = rfft(normalAllData)\nxf = rfftfreq(len(normalAllData), 1/RATE)\nplt.figure()\nplt.plot(xf,np.abs(yf),'r-o')\nplt.show()","repo_name":"ececli/Ultrasonic_Ranging","sub_path":"Function_Test/test_channel_frequency_response/analyze_micData_v1.py","file_name":"analyze_micData_v1.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"37485038636","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom .models import Employee_category, Employee_designation, Employee_Qualification, Employee_Information\nfrom .forms import EmployeeForm, CategoryForm, DesignationForm, QualificationForm\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n\ndef employee_home(request):\n\treturn render(request, 'employee/employee_home.html')\n\ndef employee_information(request):\n\tall_employee = Employee_Information.objects.all()\n\tpaginator = Paginator(all_employee, 3)\n\tpage = request.GET.get('page')\n\tpage_listings = paginator.get_page(page)\n\n\tif 'get_employee' in request.GET:\n\t\tget_employee = request.GET['get_employee']\n\t\tif get_employee:\n\t\t\tpage_listings = all_employee.filter(Employee_name__icontains=get_employee)\n\n\tdata = {'all_employee': page_listings}\n\treturn render(request, 'employee/employee_information.html', data)\n\ndef add_category(request):\n\tall_category = Employee_category.objects.all()\n\tcategory_form = CategoryForm()\n\tif 
request.method == 'POST':\n\t\tcategory_form = CategoryForm(request.POST)\n\t\tif category_form.is_valid():\n\t\t\tcategory_name = category_form.cleaned_data.get('category_name')\n\t\t\tcategory_form.save()\n\t\t\tmessages.success(request, f'Category Added Successfully : ( {category_name} )')\n\t\t\treturn redirect('add_category')\n\t\telse:\n\t\t\treturn HttpResponse(\"Error\")\n\tdata = {'all_category': all_category, 'category_form': category_form}\n\treturn render(request, 'employee/add_category.html', data)\n\ndef category_update(request, pk):\n\tget_category = Employee_category.objects.get(id=pk)\n\tcategory_form = CategoryForm(instance=get_category)\n\tif request.method == 'POST':\n\t\tcategory_form = CategoryForm(request.POST, instance=get_category)\n\t\tif category_form.is_valid():\n\t\t\tcategory_name = get_category.category_name\n\t\t\tcategory_form.save()\n\t\t\tmessages.warning(request, f'Category Updated ( {category_name} ) ')\n\t\t\treturn redirect('add_category')\n\tdata = {'category_form': category_form}\n\treturn render(request, 'employee/category_update.html', data)\n\ndef category_remove(request, pk):\n\tget_category = Employee_category.objects.get(id=pk)\n\tif request.method == 'POST':\n\t\tcategory_name = get_category.category_name\n\t\tget_category.delete()\n\t\tmessages.error(request, f'Category Deleted : ( {category_name} )')\n\t\treturn redirect('add_category')\n\tdata = {'get_category': get_category}\n\treturn render(request, 'employee/category_remove.html')\n\ndef employee_designation(request):\n\tall_designation = Employee_designation.objects.all()\n\tdesignation_form = DesignationForm()\n\tif request.method == 'POST':\n\t\tdesignation_form = DesignationForm(request.POST)\n\t\tif designation_form.is_valid():\n\t\t\tdesignation_name = designation_form.cleaned_data.get('designation_name')\n\t\t\tdesignation_form.save()\n\t\t\tmessages.success(request, f'Designation Added Successfully : ( {designation_name} ) ')\n\t\t\treturn redirect('employee_designation')\n\t\telse:\n\t\t\treturn HttpResponse(\"Error\")\n\tdata = {'all_designation': all_designation, 'designation_form': designation_form}\n\treturn render(request, 'employee/designation.html', data)\n\ndef designation_remove(request, pk):\n\tget_designation = Employee_designation.objects.get(id=pk)\n\tif request.method == 'POST':\n\t\tdesignation_name = get_designation.designation_name\n\t\tget_designation.delete()\n\t\tmessages.error(request, f'Designation Deleted : ( {designation_name} )')\n\t\treturn redirect('employee_designation')\n\tdata = {'get_designation': get_designation}\n\treturn render(request, 'employee/designation_remove.html', data)\n\ndef designation_update(request, pk):\n\tget_designation = Employee_designation.objects.get(id=pk)\n\tdesignation_form = DesignationForm(instance=get_designation)\n\tif request.method == 'POST':\n\t\tdesignation_form = DesignationForm(request.POST, instance=get_designation)\n\t\tif designation_form.is_valid():\n\t\t\tdesignation_name = get_designation.designation_name\n\t\t\tdesignation_form.save()\n\t\t\tmessages.warning(request, f'Designation Updated : ( {designation_name} ) ')\n\t\t\treturn redirect('employee_designation')\n\t\telse:\n\t\t\treturn HttpResponse('Error')\n\tdata = {'designation_form': designation_form}\n\treturn render(request, 'employee/designation_update.html', data)\n\ndef add_qualification(request):\n\tall_qualification = Employee_Qualification.objects.all()\n\tqualification_form = QualificationForm()\n\tif request.method == 
'POST':\n\t\tqualification_form = QualificationForm(request.POST)\n\t\tif qualification_form.is_valid():\n\t\t\tqualification_name = qualification_form.cleaned_data.get('qualification_name')\n\t\t\tqualification_form.save()\n\t\t\tmessages.success(request, f'Data Added Successfully ( {qualification_name} ) ')\n\t\t\treturn redirect('add_qualification')\n\t\telse:\n\t\t\treturn HttpResponse(\"Fail\")\n\tdata = {'all_qualification': all_qualification, 'qualification_form': qualification_form}\n\treturn render(request, 'employee/add_qualification.html', data)\n\ndef qualification_update(request, pk):\n\tget_qualification = Employee_Qualification.objects.get(id=pk)\n\tqualification_form = QualificationForm(instance=get_qualification)\n\tif request.method == 'POST':\n\t\tqualification_form = QualificationForm(request.POST, instance=get_qualification)\n\t\tif qualification_form.is_valid():\n\t\t\tqualification_name = get_qualification.qualification_name\n\t\t\tqualification_form.save()\n\t\t\tmessages.warning(request, f'Qualification Updated ( {qualification_name} ) ')\n\t\t\treturn redirect('add_qualification')\n\t\telse:\n\t\t\treturn HttpResponse('Error')\n\tdata = {'qualification_form': qualification_form}\n\treturn render(request, 'employee/qualification_update.html', data)\n\ndef qualification_remove(request, pk):\n\tget_qualification = Employee_Qualification.objects.get(id=pk)\n\tif request.method == 'POST':\n\t\tqualification_name = get_qualification.qualification_name\n\t\tget_qualification.delete()\n\t\tmessages.error(request, f'Qualification Deleted ( {qualification_name} ) ')\n\t\treturn redirect('add_qualification')\n\tdata = {'get_qualification': get_qualification}\n\treturn render(request, 'employee/qualification_remove.html', data)\n\ndef add_employee(request):\n\temploye_form = EmployeeForm()\n\tif request.method == 'POST':\n\t\temploye_form = EmployeeForm(request.POST, request.FILES)\n\t\tif employe_form.
is_valid():\n\t\t\temployee_name = employe_form.cleaned_data.get('Employee_name')\n\t\t\temploye_form.save()\n\t\t\tmessages.success(request, f'Employee Added Successfully ( {employee_name} )')\n\t\t\treturn redirect('employee_information')\n\t\telse:\n\t\t\tmessages.error(request, 'Employee Added Fail...')\n\t\t\treturn redirect('add_employee')\n\tdata = {'employe_form': employe_form}\n\treturn render(request, 'employee/add_employee.html', data)\n\ndef employee_update(request, pk):\n\tget_employee = Employee_Information.objects.get(id=pk)\n\temployee_form = EmployeeForm(instance=get_employee)\n\tif request.method == 'POST':\n\t\temploye_form = EmployeeForm(request.POST, request.FILES, instance=get_employee)\n\t\tif employe_form.is_valid():\n\t\t\temployee_name = employe_form.cleaned_data.get('Employee_name')\n\t\t\temploye_form.save()\n\t\t\tmessages.warning(request, f'Employee Updated Successfully ( {employee_name} )')\n\t\t\treturn redirect('employee_information')\n\t\telse:\n\t\t\treturn HttpResponse(\"Fail...\")\n\tdata = {'employe_form': employee_form}\n\treturn render(request, 'employee/employee_update.html', data)\n\ndef employee_remove(request, pk):\n\tget_employee = Employee_Information.objects.get(id=pk)\n\tif request.method == 'POST':\n\t\temployee_name = get_employee.Employee_name\n\t\tget_employee.delete()\n\t\tmessages.error(request, f'Employee Deleted : ( {employee_name} )')\n\t\treturn redirect('employee_information')\n\tdata = {'get_employee': get_employee}\n\treturn render(request, 'employee/employee_remove.html', data)\n\n\n\n\n\n","repo_name":"ARIFULLAHkhan/college_website","sub_path":"employee/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1198455871","text":"'''\nHansu (한수)\n\nProblem\nIf the digits of a positive integer X form an arithmetic sequence, that number is called a hansu. An arithmetic sequence is a sequence in which the difference between every two consecutive numbers is constant. Given N, write a program that prints how many hansu are greater than or equal to 1 and less than or equal to N.\n\nInput\nThe first line contains a natural number N less than or equal to 1,000.\n\nOutput\nOn the first line, print the number of hansu greater than or equal to 1 and less than or equal to N.\n'''\nimport sys\n\ndef isApNum(n):\n    if n < 100:\n        return True\n    nset = set()\n    for i in range(len(str(n)) - 1):\n        nset.add(int(str(n)[i]) - int(str(n)[i+1]))\n    if len(nset) == 1:\n        return True\n    else:\n        return False\n\nn = int(sys.stdin.readline())\ncount = 0\nfor i in range(1, n+1):\n    if isApNum(i):\n        count += 1\nprint(count)","repo_name":"JustYOLO/codeChallenge","sub_path":"BOJ/Day_7/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29712595754","text":"def merge_sort(data):\n    if len(data) <= 1:\n        return data\n    index = len(data) // 2\n    lst1 = data[:index]\n    lst2 = data[index:]\n    left = merge_sort(lst1)\n    right = merge_sort(lst2)\n    return merge(left, right)\n\ndef merge(lst1, lst2):\n    merged = []\n    i = j = 0\n    while i < len(lst1) and j < len(lst2):\n        if lst1[i] <= lst2[j]:\n            merged.append(lst1[i])\n            i += 1\n        else:\n            global num\n            num = num + len(lst1)-i\n            merged.append(lst2[j])\n            j += 1\n    if i == len(lst1):\n        for h in lst2[j:]:\n            merged.append(h)\n    elif j == len(lst2):\n        for h in lst1[i:]:\n            merged.append(h)\n    return merged\n\n##Data Processing\nimport numpy as np\nfile = np.loadtxt('/Users/jingweili/Documents/人工智能培训/斯坦福算法课/IntegerArray.txt')\nnum = 0\nprint(merge_sort(file))\nprint(num)\n","repo_name":"stonyjerry/Stanford-Algorithm","sub_path":"InversionsCount.py","file_name":"InversionsCount.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37397344716","text":"import random\nfrom hangman_guessing_list import guess_list\nfrom hangman_life import *\n# Const\nMAX_TRIES = 6\n\n\ndef print_number_of_tries(number_of_tries):\n    print(f'You have {number_of_tries} attempts left')\n\n\ndef print_already_guessing(letter_guess):\n    print(f'You have already guessed the letter {letter_guess}')\n\n\nif __name__ == '__main__':\n    # Pick a random word from the words in guess_list\n    guessing_word = random.choice(guess_list).lower()\n    word_letters_len = len(guessing_word)\n    # Keep the game running until the game is over\n    game_over = False\n    tries = MAX_TRIES\n    print(game_name)\n    print(guessing_word)\n    # Create a list containing \"_\" for each letter in the guessing_word\n    # to store the correctly guessed letters\n    result = []\n    wrong_result = []\n    for i in range(word_letters_len):\n        result += '_'\n    # Iterate over the user input to keep the game running until the game is over\n    while not game_over:\n        user_guessing = input(\"Guess a letter: \")\n        # Check if user_guessing was already guessed\n        if user_guessing in result:\n            print_already_guessing(user_guessing)\n            print_number_of_tries(tries)\n        elif user_guessing in wrong_result:\n            print_already_guessing(user_guessing)\n            print_number_of_tries(tries)\n        # Check if user_guessing is right or wrong\n        else:\n            for position in range(word_letters_len):\n                letter = guessing_word[position]\n                # If right\n                if letter == user_guessing:\n                    result[position] = letter\n            # If wrong\n            if user_guessing not in guessing_word:\n                print(f'You guessed {user_guessing}, this letter is not in the word')\n                tries -= 1\n                wrong_result.append(user_guessing)\n                print_number_of_tries(tries)\n                if tries == 0:\n                    print(\"GAME OVER, You lose\")\n                    print(f'The right word is {guessing_word}')\n                    game_over = True\n        # Join all the guessed letters and 
turn them into a string\n            print(' '.join(result))\n\n            # If right\n            if '_' not in result:\n                print(\"You are a WINNER, Congratulations\")\n                game_over = True\n        # Print the correct shape after every try\n        print(lives[tries])\n\n\n","repo_name":"NadavHalevy/HangmanGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71390368535","text":"# imports\n\nprint(\"\"\"\n|******************|\n|    Desafio037    |\n|******************|\n\"\"\")\nprint(\"Numeric Conversion\")\n\nnum = int(input(\"Enter a number: \"))\n\nescolha = False\nwhile not escolha:\n    base = int(input(\"Which base do you want to convert it to? 1-Binary, 2-Octal or 3-Hexadecimal \"))\n    escolha = True\n    if base == 1:\n        tipo = \"Binary\"\n        result = bin(num)[2:]\n    elif base == 2:\n        tipo = \"Octal\"\n        result = oct(num)[2:]\n    elif base == 3:\n        tipo = \"Hexadecimal\"\n        result = hex(num)[2:]\n    else:\n        print(\"Invalid option. Choose again!\")\n        escolha = False\n\nprint(\"The number {} in {} is {}\".format(num, tipo, result))\n","repo_name":"iamtheluiz/curso_em_video_python","sub_path":"Mundo 2/aula12/desafio037.py","file_name":"desafio037.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7344506074","text":"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n# define the values we will be working with\r\nstuffdict = {'couch_s': (300, 75),\r\n             'couch_b': (500, 80),\r\n             'bed': (400, 100),\r\n             'closet': (200, 50),\r\n             'bed_s': (200, 40),\r\n             'desk': (200, 70),\r\n             'table': (300, 80),\r\n             'tv_table': (200, 30),\r\n             'armchair': (100, 30),\r\n             'bookshelf': (200, 60),\r\n             'cabinet': (150, 20),\r\n             'game_table': (150, 30),\r\n             'hammock': (250, 45),\r\n             'diner_table_with_chairs': (250, 70),\r\n             'stools': (150, 30),\r\n             'mirror': (100, 20),\r\n             'instrument': (300, 70),\r\n             'plant_1': (25, 10),\r\n             'plant_2': (30, 20),\r\n             'plant_3': (45, 25),\r\n             'sideboard': (175, 30),\r\n             'chest_of_drawers': (25, 40),\r\n             'guest_bed': (250, 40),\r\n             'standing_lamp': (20, 30),\r\n             'garbage_can': (30, 35),\r\n             'bar_with_stools': (200, 40),\r\n             'bike_stand': (100, 80),\r\n             'chest': (150, 25),\r\n             'heater': (100, 25)\r\n             }\r\n\r\ndef knapsack(cap, values, weights):\r\n    items = []\r\n    for i in range(len(values)):\r\n        itemInfo = {\r\n            'vpw': values[i] / weights[i],\r\n            'weight': weights[i]\r\n        }\r\n        if len(items) == 0:\r\n            items.append(itemInfo)\r\n        else:\r\n            k = 0\r\n            while k < len(items) and items[k]['vpw'] > itemInfo['vpw']:\r\n                k += 1\r\n            items.insert(k, itemInfo)\r\n    total = 0\r\n    cap_left = cap\r\n    for item in items:\r\n        if cap_left - item['weight'] >= 0:\r\n            total += item['weight'] * item['vpw']\r\n            cap_left -= item['weight']\r\n        elif cap_left > 0:\r\n            total += item['vpw'] * cap_left\r\n            cap_left = 0\r\n    return total\r\n\r\n\r\ndef plot_memtable(V, stuffdict):\r\n    plt.figure(figsize=(30,15))\r\n    item_list = list(stuffdict.keys())\r\n    item_list.insert(0, 'empty')\r\n    sns.heatmap(V, yticklabels=item_list)\r\n    plt.yticks(size=25)\r\n    plt.xlabel('Area', size=25)\r\n    plt.ylabel('Added item', size=25)\r\n    plt.title('Value for Area with Set of Items', size=30)\r\n    plt.show()\r\n\r\ncap = 2000\r\nweights = [300, 500, 400, 200, 200, 200, 300, 200, 100, 200, 150, 150, 250, 250, 150, 100, 300, 25, 30, 45, 175, 25, 250, 20, 30, 200, 100, 150, 100]\r\nvalues = [75, 80, 100, 50, 40, 70, 80, 30, 30, 60, 20, 30, 45, 70, 30, 
20, 70, 10, 20, 25, 30, 40, 40, 30, 35, 40, 80, 25, 25]\r\n\r\nprint(knapsack(cap, values, weights))\r\n\r\n","repo_name":"CTF-one-love/PraktikaG236N008","sub_path":"PZ8/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23696491518","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\nTC = int(input())\n\n\ndef bfs(v):\n que = []\n visited = [0] * (V + 1)\n que.append(v)\n visited[v] = 1\n while que:\n v = que.pop(0)\n\n for w in L[v]:\n if not visited[w]:\n que.append(w)\n visited[w] = visited[v] + 1\n if visited[G]:\n return visited[G] - 1\n return visited[G]\n\n\nfor tc in range(1, TC + 1):\n V, E = map(int, input().split())\n L = [[] for _ in range(V + 1)]\n for e in range(E):\n v1, v2 = map(int, input().split())\n L[v1].append(v2)\n L[v2].append(v1)\n S, G = map(int, input().split())\n print('#{} {}'.format(tc, bfs(S)))\n","repo_name":"rogonu/TIL","sub_path":"SSAFY_1학기/08-26/5102.py","file_name":"5102.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37210440891","text":"import os\nimport sys\nfrom timeit import Timer\n\nimport quma\nfrom quma.tests import util\n\nloops = int(sys.argv[1]) if len(sys.argv) > 1 else 20000\n\nhere = os.path.dirname(__file__)\nsql_path = os.path.join(here, '../quma/tests/fixtures/scripts/qmark')\n\n\ndef get_db():\n db = quma.Database(\n 'sqlite:///:memory:',\n sql_path,\n persist=True,\n changeling=True,\n cache=True,\n )\n db.execute(util.CREATE_USERS)\n db.execute(util.INSERT_USERS)\n return db\n\n\ndef db_namespaces():\n db = get_db()\n for _ in range(loops):\n with db.cursor as cursor:\n db.get_users(cursor)\n db.root.get_users(cursor)\n db.get_test(cursor)\n db.root.get_test(cursor)\n db.user.add(\n cursor,\n name='Test User',\n email='test.user@example.com',\n city='Test City',\n ).run()\n cursor.commit()\n\n db.user.by_name(cursor, name='User 1').one()\n db.user.by_name(cursor, name='Test User').one()\n db.users.all(cursor, name='Test User').first()\n db.users.all(cursor, name='Test User').all()\n db.user.remove(cursor, name='Test User').run()\n cursor.commit()\n\n\ndef cursor_namespaces():\n db = get_db()\n for _ in range(loops):\n with db.cursor as cursor:\n cursor.get_users()\n cursor.root.get_users()\n cursor.get_test()\n cursor.root.get_test()\n cursor.user.add(\n name='Test User',\n email='test.user@example.com',\n city='Test City',\n ).run()\n cursor.commit()\n\n cursor.user.by_name(name='User 1').one()\n cursor.user.by_name(name='Test User').one()\n cursor.users.all(name='Test User').first()\n cursor.users.all(name='Test User').all()\n cursor.user.remove(name='Test User').run()\n cursor.commit()\n\n\nheader = '\\n{} loops:'.format(loops)\nprint(header)\nprint('-' * (len(header) - 1))\nt = Timer(lambda: db_namespaces())\nprint('db api: ', t.timeit(number=1), 'seconds')\nt = Timer(lambda: cursor_namespaces())\nprint('cur api: ', t.timeit(number=1), 'seconds')\n","repo_name":"ebenefuenf/quma","sub_path":"bin/cursor_vs_db.py","file_name":"cursor_vs_db.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"3246404051","text":"from keras.applications import (\n ResNet50,\n ResNet50V2,\n ResNet101,\n ResNet101V2,\n ResNet152,\n ResNet152V2,\n)\n\nBASE_NETWORKS_MAPPING = {\n \"ResNet50\": {\n \"backbone\": 
ResNet50,\n        \"image_size\": 224,\n    },\n    \"ResNet50V2\": {\n        \"backbone\": ResNet50V2,\n        \"image_size\": 224,\n    },\n    \"ResNet101\": {\n        \"backbone\": ResNet101,\n        \"image_size\": 224,\n    },\n    \"ResNet101V2\": {\n        \"backbone\": ResNet101V2,\n        \"image_size\": 224,\n    },\n    \"ResNet152\": {\n        \"backbone\": ResNet152,\n        \"image_size\": 224,\n    },\n    \"ResNet152V2\": {\n        \"backbone\": ResNet152V2,\n        \"image_size\": 224\n    },\n}\n# Specify the name of the network to use as the base model\nNETWORK = \"ResNet50\"\n\n# The path of the directory containing images that can be used for training\n# You can split those into 3 sub-datasets: a training dataset, a validation dataset and a test dataset.\nTRAINING_IMAGES_PATH = \"images/training\"\n\n# The path of the directory containing images that can be used for testing the trained model\n# Note that this is different from the test dataset above\nTEST_IMAGES_PATH = \"images/test\"\n\n# The path of the hdf5 file that manages the training dataset\nTRAIN_HDF5 = \"hdf5/train.hdf5\"\n\n# The path of the hdf5 file that manages the validation dataset\nVAL_HDF5 = \"hdf5/val.hdf5\"\n\n# The path of the hdf5 file that manages the test dataset\nTEST_HDF5 = \"hdf5/test.hdf5\"\n\n# The trained model will be saved to this path\nMODEL_PATH = \"output/result.model\"\n\n# Training curves will be saved in this folder\nOUTPUT_PATH = \"output\"\n\n# Number of training epochs\nEPOCHS = 1\n\n# We use SGD, so this specifies the batch size.\n# Choose this number depending on your hardware.\nBATCH_SIZE = 8\n\n# This specifies the number of classes. It is set to 2 because the demo dataset consists of dogs & cats.\n# If you want to build digit recognition, set this to 10.\nCLASS_NUMS = 2\n","repo_name":"lifelonglearner127/image_classification_pipeline","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40581433079","text":"\"\"\"\nn = 10, d = 1 \nthe k*k are 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100\nWe are using the digit 1 in: 1, 16, 81, 100. 
The total count is then 4.\n\nThe function, when given n = 25 and d = 1 as arguments, should return 11 since\nthe k*k that contain the digit 1 are:\n1, 16, 81, 100, 121, 144, 169, 196, 361, 441.\nSo there are 11 occurrences of the digit 1 in the squares of numbers between 0 and 25\n(note that 121 contains the digit 1 twice).\n\"\"\"\ndef nb_dig(n, d):\n    k = ''\n    for x in range(n + 1):\n        square = str(x * x)\n        for y in square:\n            if y == str(d):\n                k += y\n\n    return len(k)\n\nprint(nb_dig(5750, 2))","repo_name":"activus-d/Codewars_Python","sub_path":"Count-the-Digit.py","file_name":"Count-the-Digit.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9160791242","text":"num = [[],[]]\nvalor = 0\nfor c in range(1,8):\n    valor = int(input(f'Enter value #{c}: '))\n    if valor % 2 == 0:\n        num[0].append(valor)\n    else:\n        num[1].append(valor)\nprint('-='*30)\nprint(f'The even values entered were {sorted(num[0])}')\nprint(f'The odd values entered were {sorted(num[1])}')\nprint('-='*30)","repo_name":"Naelbsiqueira/ProjetoeEstudos","sub_path":"ExerciciosPy/ex085.py","file_name":"ex085.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70836324055","text":"import json\nimport os\nimport requests\n\nfrom flask import Flask, jsonify, render_template, redirect, request, session\nfrom flask_session import Session\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')\napp.config['GITHUB_CLIENT_ID'] = os.environ.get('GITHUB_CLIENT_ID')\napp.config['GITHUB_CLIENT_SECRET'] = os.environ.get('GITHUB_CLIENT_SECRET')\n\napp.secret_key = app.config['SECRET_KEY']\napp.config['SESSION_TYPE'] = 'filesystem'\nsess = Session()\nsess.init_app(app)\n\n@app.route('/')\ndef index():\n    if session.get('access_token'):\n        return redirect(\"/fork\", code=302)\n\n    return render_template('index.html')\n\n@app.route('/callback')\ndef callback():\n    args = request.args\n\n    endpoint = f\"https://github.com/login/oauth/access_token?client_id={app.config['GITHUB_CLIENT_ID']}&client_secret={app.config['GITHUB_CLIENT_SECRET']}&code={args.get('code')}\"\n    print(endpoint)\n    headers = {\n        'Accept': 'application/json',\n    }\n    response = requests.post(endpoint, headers=headers)\n    if response.status_code != 200:\n        return jsonify(error=response.reason), response.status_code\n    response_dict = json.loads(response.text)\n\n    session['access_token'] = response_dict.get('access_token')\n\n    return redirect(\"/fork\", code=302)\n\n@app.route('/fork', methods = ['GET'])\ndef fork_repo_form():\n    if not session.get('access_token'):\n        return redirect(\"/\", code=302)\n\n    return render_template('fork.html')\n\n@app.route('/fork', methods = ['POST'])\ndef fork_repo():\n    \"\"\"https://docs.github.com/en/rest/repos/forks?apiVersion=2022-11-28#create-a-fork\"\"\"\n    if not request.form.get('owner'):\n        return jsonify(error='Missing \"owner\" value in the form data'), 400\n    owner = request.form['owner']\n\n    if not request.form.get('repo'):\n        return jsonify(error='Missing \"repo\" value in the form data'), 400\n    repo = request.form['repo']\n\n    if not session.get('access_token'):\n        return jsonify(error='Missing \"access_token\" value for this user'), 400\n    access_token = session['access_token']\n    \n    endpoint = f'https://api.github.com/repos/{owner}/{repo}/forks'\n    headers = {\n        'Accept': 'application/vnd.github+json',\n        'X-GitHub-Api-Version': '2022-11-28',\n        'Authorization': f'Bearer 
{access_token}',\n }\n payload = {\n 'name': f'{repo}',\n 'default_branch_only': True\n }\n response = requests.post(endpoint, headers=headers, data=json.dumps(payload))\n\n response_dict = json.loads(response.text)\n if response_dict.get('message'):\n return jsonify(error=response_dict['message']), response.status_code\n\n return jsonify(message='Repo successfully forked'), 200\n\nif __name__ == '__main__':\n app.run()","repo_name":"zachfire9/github-utilities","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10234273544","text":"#import urllib\nimport time\nimport re\nfrom html.parser import HTMLParser\nimport urllib.request\nupdate = ''\nwith urllib.request.urlopen('http://www.thepremiereresidential.com/properties/san-diego/la-jolla-international-gardens/availability/') as response:\n update = str(response.read())\n\n# create a subclass and override the handler methods\nclass InitialParser(HTMLParser):\n root_links = []\n listing_section = False\n pattern = re.compile('^(\\\\\\\\n|\\\\\\\\t| )')\n def handle_starttag(self, tag, attrs):\n if tag == 'article':\n for (key, value) in attrs:\n if key == 'class':\n self.listing_section = True\n\n def handle_endtag(self, tag):\n if tag == 'article':\n self.listing_section = False\n\n def handle_data(self, data): \n if self.listing_section and not self.pattern.match(data):\n print(data)\n\n# instantiate the parser and fed it some HTML\nparser = InitialParser()\nparser.feed(update)\n\ntime.sleep(3)\n\n","repo_name":"e4chang/glowing-lamp","sub_path":"src/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35844251617","text":"import random\nimport csv\n\n# List of all games\nProductsList = [100, 'Fifa', 14.99, 8, 'active', 105, 'Call of Duty', 19.99, 12, 'active', 110, 'NBA 2k 22', 17.99, 24,\n 'active', 115, 'Overwatch', 9.99, 0, 'inactive', 120, 'Fortnite', 9.99, 4, 'active', 125, 'Madden 22',\n 19.99, 20, 'active', 130, 'Halo', 8.99, 0, 'active', 135, 'Star Wars Battlefront', 5.99, 2, 'active',\n 140, 'Super Mario Bros Wii', 7.99, 1, 'inactive', 145, 'Apex Legends', 15.99, 14, 'active', 150,\n 'Minecraft', 12.99, 9, 'active', 155, 'Grand Theft Auto V', 4.99, 5, 'inactive', 160,\n 'Plants vs Zombies', 5.99, 4, 'active']\n\nCostumersList = []\nOrdersList = []\noption = 0\norder_number = 1722542\namount_due = 0\nspaces = 0\n\n\n# Display the catalog except for the inactive products\ndef ShowCatalog():\n print('Code Game Price Stock')\n print('------------------------------------')\n for count in range(0, len(ProductsList), 5):\n if ProductsList[count + 4] == 'active':\n print(ProductsList[count], end=\" \")\n if len(ProductsList[count + 1]) < 22:\n spaces = 22 - len(ProductsList[count + 1])\n print(ProductsList[count + 1], \" \" * spaces, end='')\n if len(str(ProductsList[count + 2])) < 7:\n spaces = 7 - len(str(ProductsList[count + 2]))\n print('$' + str(ProductsList[count + 2]), \" \" * spaces, end='')\n print(ProductsList[count + 3])\n\n\ndef UpdateProducts():\n print('Products update')\n print('---------------')\n game_code = int(input('Enter the video game code: '))\n\n if game_code in ProductsList:\n position = ProductsList.index(game_code)\n gameInfo(game_code)\n\n modify = input('''\nEnter feature to modify\na.Price\nb.Stock\nc.Status\nd.Cancel update\n\n>''')\n 
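# Added note (editor's sketch, not in the original): ProductsList is a flat\n    # list with a stride of 5 per product -- [code, name, price, stock, status,\n    # code, name, ...] -- which is why the lookups in this file use offsets\n    # position + 1 .. position + 4 from the matched code, e.g.\n    # ProductsList[ProductsList.index(100) + 1]  ->  'Fifa'\n    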
# Replacing the user input of the selected update for the current info in the products list\n        if modify == 'a':\n            new_price = float(input('Enter new price: '))\n            ProductsList[position + 2] = new_price\n            print('Product successfully updated')\n        elif modify == 'b':\n            new_stock = int(input('Enter new stock quantity: '))\n            ProductsList[position + 3] = new_stock\n            print('Product successfully updated')\n        elif modify == 'c':\n            new_status = input('Enter new status: ')\n            ProductsList[position + 4] = new_status\n            print('Product successfully updated')\n\n    else:\n        print('Product inexistent')\n\n\ndef RegisterNewCostumer():\n    # Join the two inputs into a new string using indexing to select the letters\n    print('Register new costumer')\n    print('--------------------')\n    costumer_name = input(\"Enter costumer's name: \")\n    costumer_phone = input(\"Enter costumer's phone number: \")\n    costumer_code = str(costumer_phone[-3:]) + costumer_name.upper()[0:3]\n    if costumer_code in CostumersList:\n        code_position = CostumersList.index(costumer_code)\n        print('Costumer already registered with the code:', CostumersList[code_position])\n    else:\n        CostumersList.append(costumer_code)\n        print('Costumer registered with the code:', costumer_code)\n\n\ndef CreateOrder(order_number):\n    amount_due = 0\n    print('Create new order')\n    print('----------------')\n    costumer_code = input('Enter costumer code: ')\n    # Validate if the costumer is registered, if the product exists, if the product is active, and if the product is in stock\n    if costumer_code in CostumersList:\n        run_buy_order = 'yes'\n        order_created = \"no\"\n        while run_buy_order.lower() == 'yes':\n            game_code = int(input('Enter the product code: '))\n\n            if game_code in ProductsList:\n                if ProductsList[ProductsList.index(game_code) + 4] == 'active':\n                    if ProductsList[ProductsList.index(game_code) + 3] > 0:\n                        gameInfo(game_code)\n\n                        amount_to_buy = int(input('Enter amount to buy: '))\n                        while amount_to_buy > ProductsList[ProductsList.index(game_code) + 3]:\n                            print('Not enough stock')\n                            amount_to_buy = int(input('Enter amount to buy: '))\n                        # Calculate the price with discounts and append the info to the order list\n                        else:\n                            price = ProductsList[ProductsList.index(game_code) + 2]\n                            total_price = price * amount_to_buy\n\n                            ProductsList[ProductsList.index(game_code) + 3] = ProductsList[ProductsList.index(\n                                game_code) + 3] - amount_to_buy\n\n                            # apply the discount to this line item only, then add it to the running total\n                            if amount_to_buy >= 5 and amount_to_buy < 10:\n                                total_price = round(total_price * 0.95, 2)\n                                print('Congratulations, you have a 5% discount!')\n                            elif amount_to_buy >= 10 and amount_to_buy < 20:\n                                total_price = round(total_price * 0.90, 2)\n                                print('Congratulations, you have a 10% discount!')\n                            elif amount_to_buy >= 20:\n                                random_discount = random.randint(20, 30)\n                                total_price = round(total_price * ((100 - random_discount) / 100), 2)\n                                print('Congratulations, you have a ' + str(random_discount) + '% discount!')\n                            amount_due = round(amount_due + total_price, 2)\n\n                            OrdersList.append(order_number)\n                            OrdersList.append(costumer_code)\n                            OrdersList.append(game_code)\n                            OrdersList.append(amount_to_buy)\n                            OrdersList.append(total_price)\n                            order_created = \"yes\"\n\n                        run_buy_order = input('Do you want to add another game?(yes/no): ')\n                    else:\n                        run_buy_order = input(\n                            'Product out of stock, do you want to search for a different product?(yes/no): ')\n\n                else:\n                    run_buy_order = input('Product inactive, do you want to search for a different product?(yes/no): ')\n            else:\n                run_buy_order = input('Product not existent, do you want to search for a different product?(yes/no): ')\n\n        if order_created == \"yes\":\n            print()\n            print('Order', order_number, 'has been created')\n            print('Amount due: $' + str(amount_due))\n            order_number += 1\n    else:\n        print('Costumer not registered, please select option 3 to register')\n\n# The game info is displayed in more than one place, so it is factored out into a helper\ndef gameInfo(game_code):\n    position = ProductsList.index(game_code)\n    print('Name:', ProductsList[position + 1])\n    print('Price:', '$' + str(ProductsList[position + 2]))\n    print('Stock:', ProductsList[position + 3])\n    print('Status:', ProductsList[position + 4])\n\n# Look for the order number in the orders list and if it is in the list, print the info\ndef ShowOrder():\n    display_order = int(input('Enter the order to display: '))\n    if display_order in OrdersList:\n        print('Order number:', OrdersList[OrdersList.index(display_order)])\n        print('Costumer:', OrdersList[OrdersList.index(display_order) + 1])\n        print('Product  Description    Quantity  Price   Total')\n\n    spaces = 0\n    if display_order in OrdersList:\n        for count in range(0, len(OrdersList), 5):\n            if OrdersList[count] == display_order:\n                product_position = ProductsList[ProductsList.index(OrdersList[count + 2])]\n                print(OrdersList[count + 2], end=\"  \")\n                if len(str(ProductsList[ProductsList.index(product_position) + 1])) < 14:\n                    spaces = 14 - len(str(ProductsList[ProductsList.index(product_position) + 1]))\n                print(ProductsList[ProductsList.index(product_position) + 1], \" \" * spaces, end=\" \")\n                print(OrdersList[count + 3], end=\"        \")\n                print('$' + str(ProductsList[ProductsList.index(product_position) + 2]), end=\"   \")\n                print('$' + str(round(OrdersList[count + 4], 2)))\n\n# Open a new file and use the writer to create a CSV file of the catalog\ndef SaveData():\n    print(\"Generating CSV file...\")\n    catalogFile = open(\"catalog.csv\", \"w\", newline=\"\")\n    writer = csv.writer(catalogFile)\n\n    header = ['Code', 'Product', 'Price', 'Stock', 'Status']\n    writer.writerow(header)\n\n    for count in range(0, len(ProductsList), 5):\n        row = list()\n        row.append(ProductsList[count])\n        row.append(ProductsList[count + 1])\n        row.append('$' + str(ProductsList[count + 2]))\n        row.append(ProductsList[count + 3])\n        row.append(ProductsList[count + 4])\n        writer.writerow(row)\n\n    catalogFile.close()\n    print(\"Catalog CSV file is ready\")\n\n# Go through the catalog and print the product name and the stock if the stock is less than 5\n# Go through the orders list and if the costumer code is in the list print that order's info\ndef ExtraReports():\n    print('''\na) Report of low stock products\nb) Report of orders for a costumer\n    ''')\n    option = input('Pick your report option: ')\n    if option == 'a':\n        print('Report of low stock products')\n        print('----------------------------')\n        for count in range(0, len(ProductsList), 5):\n            if ProductsList[count + 3] < 5:\n                print(ProductsList[count + 1], '-', ProductsList[count + 3])\n    if option == 'b':\n        costumer_code = input('Enter the costumer code: ')\n        if costumer_code in OrdersList:\n            for count in range(0, len(OrdersList), 5):\n                if OrdersList[count + 1] == costumer_code:\n                    position_in_prodList = ProductsList.index(OrdersList[count + 2])\n                    print(OrdersList[count + 2], end=\"  \")\n                    if len(str(ProductsList[position_in_prodList + 1])) < 14:\n                        spaces = 14 - 
len(str(ProductsList[position_in_prodList + 1]))\n print(ProductsList[position_in_prodList + 1], \" \" * spaces, end=\" \")\n print(OrdersList[count + 3], end=\" \")\n print('$' + str(ProductsList[position_in_prodList + 2]), end=\" \")\n print('$' + str(OrdersList[count + 4]))\n\n else:\n print('No orders for this costumer')\n\n# Keep showing the menu until the costumer exits\nwhile option != 8:\n print('''\nGame Stop Order Management System\n=================================\n1) Show catalog of products\n2) Update products\n3) Register new costumer\n4) Create Order\n5) Show order\n6) Save data\n7) Extra reports\n8) Exit\n ''')\n\n option = int(input('Select an option from 1 to 8: '))\n print()\n\n while option < 1 or option > 8:\n print('Invalid option')\n option = int(input('Select an option from 1 to 8: '))\n print()\n\n if option == 1:\n ShowCatalog()\n\n if option == 2:\n UpdateProducts()\n\n if option == 3:\n RegisterNewCostumer()\n\n if option == 4:\n CreateOrder(order_number)\n\n if option == 5:\n ShowOrder()\n\n if option == 6:\n SaveData()\n\n if option == 7:\n ExtraReports()\n","repo_name":"mnoriega-mx/Personal-School-Projects","sub_path":"OrderManagementSysyem/OrderManagementSystem.py","file_name":"OrderManagementSystem.py","file_ext":"py","file_size_in_byte":11866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15475536651","text":"def getUnitAndValue(inVar):\n inVar = str(inVar)\n number = ''\n unit = ''\n for l in inVar:\n if(l.isdigit() or l == ',' or l == '.'):\n if l == ',':\n l = '.'\n number += l\n else:\n unit += l\n number = float(number)\n return (number, unit)\n\ndef getBytes(inVar):\n tmp = getUnitAndValue(inVar)\n number = tmp[0]\n unit = tmp[1]\n del tmp\n #IS\n if(unit == 'k' or unit == 'K' or unit == 'KB'):\n return int(number * 1000)\n elif(unit == 'm' or unit == 'M' or unit == 'MB'):\n return int(number * 1000000)\n elif(unit == 'g' or unit == 'G' or unit == 'GB'):\n return int(number * 1000000000)\n elif(unit == 't' or unit == 'T' or unit == 'TB'):\n return int(number * 1000000000000)\n elif(unit == 'p' or unit == 'P' or unit == 'PB'):\n return int(number * 1000000000000000)\n elif(unit == 'e' or unit == 'E' or unit == 'EB'):\n return int(number * 1000000000000000000)\n elif(unit == 'z' or unit == 'Z' or unit == 'ZB'):\n return int(number * 1000000000000000000000)\n elif(unit == 'y' or unit == 'Y' or unit == 'YB'):\n return int(number * 1000000000000000000000000)\n #BU\n elif(unit == 'KiB'):\n return int(number * 1024)\n elif(unit == 'MiB'):\n return int(number * 1048576)\n elif(unit == 'GiB'):\n return int(number * 1073741824)\n elif(unit == 'TiB'):\n return int(number * 1099511627776)\n elif(unit == 'PiB'):\n return int(number * 1125899906842624)\n elif(unit == 'EiB'):\n return int(number * 1152921504606846976)\n elif(unit == 'ZiB'):\n return int(number * 1180591620717411303424)\n elif(unit == 'YiB'):\n return int(number * 1208925819614629174706176)\n elif(unit == '' or unit == 'b' or unit == 'B'):\n return int(number)\n else:\n print('Fatal error during conversion of %s, is an effective unit of measure? Exiting...' 
% (str(inVar)))\n exit()\n\ndef getHumanValue(inVar, si=True):\n inVar = int(inVar)\n #si\n if si:\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str((inVar))+'KB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'MB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'GB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'TB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'PB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'EB')\n if(inVar >= 1000):\n inVar = inVar/1000\n if(inVar < 1000):\n return(str(int(inVar))+'ZB')\n if(inVar >= 1000):\n inVar = inVar/1000\n #if(inVar < 1000):\n return(str(int(inVar))+'YB')\n else:\n return(str(int(inVar))+'B')\n #bu\n else:\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str((inVar))+'KiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'MiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'GiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'TiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'PiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'EiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n if(inVar < 1024):\n return(str(int(inVar))+'ZiB')\n if(inVar >= 1024):\n inVar = inVar/1024\n #if(inVar < 1024):\n return(str(int(inVar))+'YiB')\n else:\n return(str(int(inVar))+'B')\n\ndef optimize(inVar, si=True):\n return getHumanValue(getBytes(inVar), si)\n","repo_name":"marcomg/openhjsplit","sub_path":"libitunitsconversion.py","file_name":"libitunitsconversion.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"38575726042","text":"from ..components import NLInstance\nimport numpy as np\n\n\ndef sign(val):\n if val > 0:\n return 1\n elif val == 0:\n return 0\n else:\n return -1\n\n\nclass TriodeModel(NLInstance):\n def __init__(self, mu=106, ex=1.46, kg=1572, kp=464, kvb=179.0, rg=2000, vt=2.5e-3):\n \"\"\"\n Triode Model for state-space simulation\n \"\"\"\n self.mu = mu\n self.ex = ex # equal with KX\n self.kg = kg\n self.kp = kp\n self.kvb = kvb\n self.rg = rg\n self.vt = vt\n\n def compute_e1(self, vn):\n vak = vn[0]\n vgk = vn[1]\n\n e1 = (vak / self.kp) * np.log(1 + np.exp(self.kp * (1. / self.mu + vgk / np.sqrt(self.kvb + vak * vak))))\n return e1\n\n def compute_de1(self, vn, e1):\n res = np.zeros(2)\n if e1 >= 0:\n vak = vn[0]\n vgk = vn[1]\n\n temp_exp = np.exp(self.kp * (vgk / np.sqrt(self.kvb + vak * vak) + 1. 
/ self.mu))\n\n res[0] = np.log(temp_exp + 1) / self.kp - vgk * vak * vak * temp_exp / (\n np.power(self.kvb + vak * vak, 1.5) * (temp_exp + 1.))\n res[1] = vak * temp_exp / (np.sqrt(vak * vak + self.kvb) * (temp_exp + 1))\n return res\n\n def compute_ia(self, vn):\n e1 = self.compute_e1(vn)\n if e1 >= 0:\n ia = np.power(e1, self.ex) * (sign(e1)+1) / self.kg\n else:\n ia = 0\n return ia\n\n def compute_dia(self, vn, e1=None):\n res = np.zeros(2)\n if e1 is None:\n e1 = self.compute_e1(vn)\n if e1 >= 0:\n de1 = self.compute_de1(vn=vn, e1=e1)\n temp_val = (1 + sign(e1)) * self.ex * np.power(e1, self.ex - 1) / self.kg\n res[0] = temp_val * de1[0]\n res[1] = temp_val * de1[1]\n\n return res\n\n def compute_ig(self, vn):\n vgk = vn[1]\n ig = np.log(1 + np.exp(vgk / self.vt)) * self.vt / self.rg\n return ig\n\n def compute_dig(self, vn):\n res = np.zeros(2)\n vgk = vn[1]\n res[1] = np.exp(vgk / self.vt) / (self.rg * np.exp(vgk / self.vt) + self.rg)\n # res[1] = 1./(self.rg * np.exp(-vgk/self.vt) + self.rg)\n return res\n\n def compute_i(self, vn):\n vn = vn.astype(np.longdouble)\n res = np.zeros((2, 1))\n res[0, 0] = self.compute_ia(vn) # ia\n res[1, 0] = self.compute_ig(vn) # ig\n\n return res\n\n def compute_ji(self, vn):\n vn = vn.astype(np.longdouble)\n dia = self.compute_dia(vn).reshape((1, 2))\n dig = self.compute_dig(vn).reshape((1, 2))\n res = np.concatenate((dia, dig), axis=0)\n\n return res\n\n\nclass TriodeModelImproved(TriodeModel):\n def __init__(self, mu=106, ex=1.46, kg=1572, kp=464, kvb=179.0, rg=2000, vt=2.5e-3, kn=0.5, v_gamma=0.35, v_ct=0.49):\n super().__init__(mu=mu, ex=ex, kg=kg, kp=kp, kvb=kvb, rg=rg, vt=vt)\n self.kn = kn\n self.v_gamma = v_gamma\n self.v_ct = v_ct\n\n def compute_e1(self, vn):\n vak = vn[0]\n vgk = vn[1]\n\n e1 = (vak / self.kp) * np.log(1 + np.exp(self.kp * (1 / self.mu + (vgk + self.v_ct) / np.sqrt(self.kvb + vak*vak))))\n\n return e1\n\n def compute_de1(self, vn, e1):\n res = np.zeros(2)\n if e1 >= 0:\n vak = vn[0]\n vgk_ct = vn[1] + self.v_ct\n\n temp_exp = np.exp(self.kp * (vgk_ct / np.sqrt(self.kvb + vak * vak) + 1. 
/ self.mu))\n\n res[0] = np.log(temp_exp + 1) / self.kp - vgk_ct * vak * vak * temp_exp / (\n np.power(self.kvb + vak * vak, 1.5) * (temp_exp + 1.))\n res[1] = vak * temp_exp / (np.sqrt(vak * vak + self.kvb) * (temp_exp + 1))\n return res\n\n def compute_ig(self, vn):\n vgk = vn[1]\n\n if vgk < (self.v_gamma - self.kn):\n result = 0\n elif vgk > (self.v_gamma + self.kn):\n result = (vgk - self.v_gamma) / self.rg\n else:\n a = 1 / (4 * self.kn * self.rg)\n b = (self.kn - self.v_gamma) / (2 * self.kn * self.rg)\n c = (-1.0 * a * np.power(self.v_gamma - self.kn, 2.0)) - (b * (self.v_gamma - self.kn))\n result = a * np.power(vgk, 2.0) + b * vgk + c\n\n return result\n\n def compute_dig(self, vn):\n vgk = vn[1]\n\n result = np.zeros(2)\n\n if vgk < (self.v_gamma - self.kn):\n pass\n elif vgk > (self.v_gamma + self.kn):\n result[0] = 0\n result[1] = 1 / self.rg\n\n else:\n a = 1 / (4 * self.kn * self.rg)\n b = (self.kn - self.v_gamma) / (2 * self.kn * self.rg)\n\n result[0] = 0\n result[1] = a * 0.5 * vgk + b\n\n return result\n\n\nclass TB12AX7(TriodeModel):\n def __init__(self):\n super().__init__(mu=100, ex=1.4, kg=1060, kp=600, kvb=300, rg=2000)\n\n\nclass TB12AX7Improved(TriodeModelImproved):\n def __init__(self):\n super().__init__(mu=100, ex=1.4, kg=1060, kp=600, kvb=300, rg=2000)\n\n","repo_name":"bramantyois/nkss","sub_path":"nkss/semiconductors/triodemodel.py","file_name":"triodemodel.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72255925335","text":"'''\nCréez un programme qui affiche le N-ème élément de la célèbre suite de Fibonacci. (0, 1, 1, 2) étant le début de la suite et le premier élément étant à l’index 0.\n\n\nExemples d’utilisation :\n$> python exo.py 3\n2\n$>\n\nAfficher -1 si le paramètre est négatif ou mauvais.\n'''\n\nimport sys\n\n# Recuperation du numeros d'index et teste d'erreurs\ndef input_data():\n nb=0\n l = sys.argv[1:]\n if len(l)>1:\n print(-1)\n sys.exit()\n try:\n nb= int(l[0])\n except:\n print(-1)\n sys.exit()\n if nb<0:\n print(-1)\n sys.exit()\n return nb\n\n# suite de fibonacci (J'ai mit par default les deux premiere variables de la liste)\ndef fibonacci(nb):\n n=1\n n0=0\n nf=0\n l=[0,1]\n while nb>0:\n nf=n0+n\n l.append(nf)\n n0=n\n n=nf\n nb-=1\n return l\n\n\n# teste de la valeur d'index si bien superieur a 1 + application de la suite avec le numeros d'index >1 + affichage de la derniere\n# valeur de la liste. 
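\n# Added example (editor's note, not part of the original exercise): running\n# \"python exo.py 3\" walks this path and prints 2, because the zero-indexed\n# sequence begins 0, 1, 1, 2.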
\ndef afficher_valeur_index(nb):\n if nb==0:\n print(0)\n elif nb==1:\n print(1)\n else:\n nb= nb-1\n l=fibonacci(nb)\n print (l[-1])\n\n# Appel des fonctions : \nnb= input_data()\nafficher_valeur_index(nb)\n\n","repo_name":"Sinhay13/Epreuve_eau","sub_path":"eau003.py","file_name":"eau003.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19292019948","text":"import io\nimport subprocess\nimport os\nimport pandas as pd\n\nimport sdi_utils.gensolution as gs\nimport sdi_utils.set_logging as slog\nimport sdi_utils.textfield_parser as tfp\nimport sdi_utils.tprogress as tp\n\ntry:\n api\nexcept NameError:\n class api:\n\n queue = list()\n\n class Message:\n def __init__(self,body = None,attributes = \"\"):\n self.body = body\n self.attributes = attributes\n\n def send(port,msg) :\n if port == outports[1]['name'] :\n api.queue.append(msg)\n\n class config:\n ## Meta data\n config_params = dict()\n tags = {'sdi_utils':''}\n version = \"0.0.1\"\n operator_name = 'repl_table_csv'\n operator_description = \"table to csv\"\n operator_description_long = \"Converts table to csv stream.\"\n add_readme = dict()\n debug_mode = True\n config_params['debug_mode'] = {'title': 'Debug mode',\n 'description': 'Sending debug level information to log port',\n 'type': 'boolean'}\n\n drop_header = False\n config_params['drop_header'] = {'title': 'Drop header',\n 'description': 'Drop header (not only for the first run).',\n 'type': 'boolean'}\n\n only_header = False\n config_params['only_header'] = {'title': 'Only header',\n 'description': 'Only header (for preparation purpose).',\n 'type': 'boolean'}\n\n drop_columns = 'None'\n config_params['drop_columns'] = {'title': 'Drop Columns',\n 'description': 'List of columns to drop.',\n 'type': 'string'}\n\nlist_headers = set()\n\ndef process(msg):\n\n global list_dicts\n\n att = dict(msg.attributes)\n att['operator'] = 'repl_table_csv'\n logger, log_stream = slog.set_logging(att['operator'], loglevel=api.config.debug_mode)\n\n header = [c[\"name\"] for c in msg.attributes['table']['columns']]\n df = pd.DataFrame(msg.body,columns=header)\n\n drop_columns = tfp.read_list(api.config.drop_columns)\n if drop_columns :\n logger.info('Drop columns: {}'.format(drop_columns))\n df = df.drop(columns = drop_columns)\n\n if df.shape[0] == 0 :\n att['data_outcome'] = False\n api.send(outports[2]['name'],api.Message(attributes=att,body = att['data_outcome']))\n logger.info('No data received, msg to port error_status sent.')\n logger.info('Process ended: {}'.format(time_monitor.elapsed_time()))\n api.send(outports[0]['name'], log_stream.getvalue())\n return 0\n\n att['data_outcome'] = True\n\n # always sort the columns alphabetically because DB columns do not have an order\n df = df[sorted(df.columns)]\n\n if api.config.drop_header and api.config.only_header :\n err_stat = \"Contradicting configuration - Drop header: {} Only header: {}\".format(api.config.drop_header, api.config.only_header)\n raise ValueError(err_stat)\n\n if api.config.only_header:\n df = df.head(n= 0)\n data_str = df.to_csv(index=False)\n # drop headers if it is part of multiple calls (key: table name and cols)\n elif api.config.drop_header :\n data_str = df.to_csv(index=False,header=False)\n else :\n if 'base_table' in att :\n col_str = att['base_table'] + '-' + ' '.join(df.columns.tolist())\n else:\n col_str = att['table_name'] + '-' + ' '.join(df.columns.tolist())\n if col_str in list_headers :\n data_str = 
df.to_csv(index=False,header=False)\n else :\n data_str = df.to_csv(index=False)\n list_headers.add(col_str)\n\n att[\"file\"] = {\"connection\": {\"configurationType\": \"Connection Management\", \"connectionID\": \"unspecified\"}, \\\n \"path\": \"open\", \"size\": 0}\n\n logger.info('CSV-table: {}.{} ({} - {})'.format(att['schema_name'],att['table_name'],df.shape[0],df.shape[1]))\n\n msg = api.Message(attributes=att,body = data_str)\n api.send(outports[1]['name'],msg)\n log = log_stream.getvalue()\n if len(log)>0 :\n api.send(outports[0]['name'], log_stream.getvalue())\n\n\ninports = [{'name': 'data', 'type': 'message.table',\"description\":\"Input message with table\"}]\noutports = [{'name': 'log', 'type': 'string',\"description\":\"Logging data\"}, \\\n {'name': 'csv', 'type': 'message.file',\"description\":\"Output data as csv\"},\\\n {'name': 'error', 'type': 'message',\"description\":\"Error status\"}]\n\n\n#api.set_port_callback(inports[0]['name'], process)\n\ndef test_operator() :\n #api.config.drop_header = False\n #api.config.only_header = True\n\n attributes = {\"table\":{\"columns\":[{\"class\":\"string\",\"name\":\"header1\",\"nullable\":True,\"size\":80,\"type\":{\"hana\":\"NVARCHAR\"}},\n {\"class\":\"string\",\"name\":\"header2\",\"nullable\":True,\"size\":3,\"type\":{\"hana\":\"NVARCHAR\"}},\n {\"class\":\"string\",\"name\":\"header3\",\"nullable\":True,\"size\":10,\"type\":{\"hana\":\"NVARCHAR\"}}],\n \"name\":\"test.table\",\"version\":1}, 'base_table':'TABLE'}\n table = [ [(j*3 + i) for i in range(0,3)] for j in range (0,5)]\n msg = api.Message(attributes=attributes, body=table)\n print(table)\n process(msg)\n process(msg)\n process(msg)\n\n for m in api.queue :\n print(m.body)\n\n\n\nif __name__ == '__main__':\n test_operator()\n if True :\n subprocess.run([\"rm\", '-r','../../../solution/operators/sdi_replication_' + api.config.version])\n gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)\n solution_name = api.config.operator_name + '_' + api.config.version\n subprocess.run([\"vctl\", \"solution\", \"bundle\",'../../../solution/operators/sdi_replication_' + api.config.version, \\\n \"-t\", solution_name])\n subprocess.run([\"mv\", solution_name + '.zip', '../../../solution/operators'])\n","repo_name":"thhapke/di_replication","sub_path":"src/di_replication/repl_table_csv/repl_table_csv.py","file_name":"repl_table_csv.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"8822434970","text":"from ngsolve import *\nfrom xfem import *\nfrom xfem.lsetcurv import LevelSetMeshAdaptation\nfrom math import pi\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rc('legend',fontsize=15)\nplt.rc('axes',titlesize=15)\nplt.rc('axes',labelsize=15)\nplt.rc('xtick',labelsize=14)\nplt.rc('ytick',labelsize=14)\nnp.random.seed(123)\nfrom unf_interf_prob import SolveZNoCut \n\ndef GeomExp(problem, show_plots=False ):\n domain_type = problem.domain_type\n problem_type = problem.problem_type \n mu = problem.mu \n k = problem.k\n print(\"Running GeomExp\")\n for order_geom in [1,2,3]:\n l2_errors_order = [] \n grad_errors_order = [] \n l2_errors_order_NoIF = [] \n ndofs_order = [] \n orders = [1,2,3]\n n_ref_max = 8\n \n for order in orders:\n stabi_dict = { }\n stabi_dict[\"gamma-CIP\"] = 10**(-2)\n if order == 1:\n stabi_dict[\"gamma-CIP\"] = 10**(-3)\n stabi_dict[\"gamma-GLS\"] = 10**(-2)\n stabi_dict[\"alpha-stab\"] = 1e-4\n stabi_dict[\"gamma-IF\"] = 
1e-2\n            stabi_dict[\"gamma-data\"] = 10**5\n            stabi_dict[\"gamma-Geom\"] = 1e-2\n            \n            l2_errors = [ ]\n            grad_errors = [ ]\n            ndofs = [ ]\n            n_refs = n_ref_max-order\n            all_refs = range(n_refs)\n            for n_ref in all_refs: \n                vtk_output = False\n                if order == 3 and order_geom in [1,3] and n_ref == all_refs[-1]:\n                    vtk_output = True\n                result = SolveZNoCut(problem=problem, order = order, n_refs = n_ref, order_geom=order_geom, order_dual = order, stabi_dict=stabi_dict, geom_stab_all_el = True, vtk_output = vtk_output)\n                l2_errors.append(result[\"rel-l2-err\"])\n                grad_errors.append(result[\"rel-h1sem-err\"])\n                ndofs.append(result[\"ndof\"])\n\n            l2_errors_order.append(l2_errors)\n            grad_errors_order.append(grad_errors)\n            ndofs_order.append(ndofs)\n\n            mesh_width = np.array(ndofs)**(-1/2)\n            name_str = problem.lset_name + \"-\" + problem_type + \"-p{0}\".format(order)+\"-q{0}\".format(order_geom)+\"-mus({0},{1})\".format(int(mu[0]),int(mu[1]))+\"-ks({0},{1})\".format(int(k[0]),int(k[1]))+\".dat\" \n            results = [np.array(ndofs,dtype=float),mesh_width, np.array(l2_errors,dtype=float), np.array(grad_errors,dtype=float) ]\n            header_str = \"ndof h rel-L2-err-B rel-H1sem-err-B\"\n            np.savetxt(fname =\"../data/{0}\".format(name_str),\n                       X = np.transpose(results),\n                       header = header_str,\n                       comments = '')\n    if show_plots:\n        mesh_width = np.array(ndofs_order[0])**(-1/2)\n        for idx,order,marker in zip( [0,1,2],orders,[\"o\",\"s\",\"+\"] ): \n            plt.loglog(ndofs_order[idx], l2_errors_order[idx],label=\"p={0}\".format(order),marker=\"o\")\n            plt.loglog(ndofs_order[idx], grad_errors_order[idx],label=\"p={0}\".format(order),marker=\"o\",linestyle=\"dashed\")\n        plt.loglog(ndofs_order[0],5*mesh_width,label=\"$\\mathcal{O}(h)$\", color='gray', linestyle='dashed' )\n        plt.loglog(ndofs_order[0],100*mesh_width**2,label=\"$\\mathcal{O}(h^2)$\", color='gray', linestyle='dotted')\n        plt.legend()\n        plt.xlabel(\"ndof\")\n        plt.show() \n","repo_name":"UCL/interface-uc-unfitted-iso","sub_path":"scripts/GeomStudy.py","file_name":"GeomStudy.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31176598586","text":"from flask import Flask, request\nfrom pymongo import MongoClient, InsertOne\nimport requests\nfrom bson.json_util import dumps\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom sklearn.metrics.pairwise import cosine_similarity as distance\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer\nfrom textblob import TextBlob\n\nimport pandas as pd\nimport numpy as np\n\nimport os\nfrom config import dbURL\n\n### Connection to MongoDB\nclient = MongoClient(dbURL)\ndb = client[\"GoT\"]\ncollection_cha = db[\"characters\"]\ncollection_con = db[\"conversations\"]\n\n\n### Flask end-points\nimport random\napp = Flask(__name__)\n\n\n\n@app.route('/character_insert/<name>') ### this function inserts a new document into our MongoDB\ndef character_insert(name):      ### It requires a name to create the character document in our collection\n\n    id = 0\n    lastId = list(collection_cha.find({},{'_id':1}).sort('$natural',-1).limit(1))    ### we don't want the extra large ids that Mongo inserts by default,\n    if len(lastId) > 0:                                                              ### so we will create our auto-increment id. In addition, if the query doesn't detect any id\n        for e in lastId:                                                             ### it will assign the id = 1\n            for _,v in e.items():\n                id = v + 1\n        \n        collection_cha.insert_one( {\n        \"_id\": id,\n        \"c_name\": name} )\n\n    else:\n        id = 1\n        collection_cha.insert_one( {\n        \"_id\": id,\n        \"c_name\": name} )\n\n    # return a small confirmation so Flask does not get a None response\n    return dumps({'_id': id, 'c_name': name})\n\n\n\n@app.route('/conversation_insert/<house>/<name>/<line>') ### this function inserts a new document into our MongoDB\ndef conversation_insert(house,name,line): ### It requires a house, name and line text to create the conversation document in our second collection\n    house = str(house)\n    print(house)\n    name = str(name)\n    line = str(line)\n\n    \n    id = 0                          ### In the same way as before, it will assign auto incremental ids to every document.\n    lastId = list(collection_con.find({},{'_id':1}).sort('$natural',-1).limit(1)) \n    if len(lastId) > 0: \n        for e in lastId: \n            for _,v in e.items():\n                id = v + 1\n\n        c_id_list = list(collection_cha.find({'c_name':name},{'_id':1}))\n        id_c = [value for dictionary in c_id_list for key,value in dictionary.items()][0]    ### Here we search for the character id, which is in the characters collection,\n                                                                                             ### to assign it to the conversation\n        collection_con.insert_one( {                                                         ### finally we create the document with all this info;\n            \"_id\": id,                                                                      ### we will have one document per line in the show\n            \"house\": house,\n            \"id_c\": id_c,\n            \"c_name\": name,\n            \"line\": line\n        } )\n    else:\n        id = 1\n        c_id_list = list(collection_cha.find({'c_name':name},{'_id':1}))\n        id_c = [value for dictionary in c_id_list for key,value in dictionary.items()][0]\n\n        collection_con.insert_one( {\n            \"_id\": id,\n            \"house\": house,\n            \"id_c\": id_c,\n            \"c_name\": name,\n            \"line\": line\n        } )\n\n    # return a small confirmation so Flask does not get a None response\n    return dumps({'_id': id})\n\n\n\n\n\n@app.route('/get_user_id/<name>') ### this function retrieves the character id from our MongoDB\ndef get_user_id(name):\n    match = list(collection_cha.find({\"c_name\":name},{\"_id\":1,\"c_name\":1}))\n    return match[0]\n\n\n@app.route('/get_character_lines/<id_c>') ### this function retrieves all the lines for a character id from our MongoDB\ndef get_character_lines(id_c):\n    id_c = int(id_c)\n    match = collection_con.find({\"id_c\":id_c},{\"_id\":1,\"line\":1})\n    return dumps(match)\n\n@app.route('/get_house_conversation/<house>') ### this function retrieves all the lines for a house from our MongoDB\ndef get_house_conversation(house):\n    match = collection_con.find({\"house\":house},{\"_id\":1,\"c_name\":1,\"line\":1})\n    return dumps(match)\n\n\n\n@app.route('/get_house_characters/<house>') ### this function retrieves all the characters that belong to a house from our MongoDB\ndef get_house_characters(house):\n    match = collection_con.find({\"house\":house}).distinct(\"c_name\")\n    return dumps(match)\n\n\n\n@app.route('/character_friend_recommender/<name>') ### this function analyzes all messages from all characters and tells\ndef character_friend_recommender(name):      ### us who the recommended friend for our character is\n\n    characters = list(collection_con.find({}).distinct(\"c_name\"))\n\n    sentiment_text = {}                       ### we start the analysis of sentiment\n\n    for character in characters:\n        lines = []\n        text = \"\"\n        text_clean = \"\"\n\n        match = list(collection_con.find({\"c_name\":character}))    ### we get all the info of the character\n\n        for dictionary in match:                                   ### then we make a dictionary with his lines\n            lines.append(dictionary[\"line\"])\n\n        for line in lines:                                         ### after that, we get a string with all the text\n            text += line + ' '\n        \n        # removing symbols from the text to improve the analysis.\n        words = nltk.word_tokenize(text)\n        
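# Added note (editor's, not in the original): each character's lines are\n        # tokenized and stop-word-filtered into one bag-of-words document;\n        # the cosine similarity between those documents is what drives the\n        # recommendation below.\n        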
stop_words = set(stopwords.words('english'))\n        tokens_clean = [e for e in words if e not in stop_words]\n        \n        for word in tokens_clean:\n            text_clean += word + ' '\n        \n        sentiment_text[character] = text_clean             ### now we have a dictionary with character:text\n\n    count_vectorizer = CountVectorizer()\n    sparse_matrix = count_vectorizer.fit_transform(sentiment_text.values())\n\n\n    doc_term_matrix = sparse_matrix.todense()\n    df_sentiment = pd.DataFrame(doc_term_matrix,                                    ### we create our data frame\n                      columns=count_vectorizer.get_feature_names(), \n                      index=sentiment_text.keys())\n\n    similarity_matrix = distance(df_sentiment,df_sentiment)\n\n    sim_df = pd.DataFrame(similarity_matrix, columns=sentiment_text.keys(), index=sentiment_text.keys())\n\n    np.fill_diagonal(sim_df.values, 0) # Remove diagonal max values and set those to 0\n\n    sim_df_idmax = pd.DataFrame(sim_df.idxmax())                  ### now we have the similarity matrix and we can proceed to get the recommended friend\n    return (f\"The recommended friend for {name} is:\" + \" \" + np.asarray(sim_df_idmax.loc[name])[0])\n\n\n@app.route('/character_sentiment/<name>') ### this function analyzes the sentiment of a character\ndef character_sentiment(name):\n\n    lines = []\n    sentiments = []\n    polarity = []\n    subjectivity = []\n    match = list(collection_con.find({\"c_name\":name}))\n\n    for dictionary in match:\n        lines.append(dictionary[\"line\"])\n\n\n    for line in lines:\n        \n        sentiments.append(TextBlob(line).sentiment) ### here we use textblob to get the sentiment of all the character lines\n\n        \n    for sentiment in sentiments:                    ### now we append the values to a list so we can get the mean\n        if sentiment[0] != 0.0 or sentiment[1] != 0.0:\n            polarity.append(sentiment[0])\n            subjectivity.append(sentiment[1])\n    \n    single_line_analysis_list = []\n    zipped = list(zip(lines,sentiments))\n    for single_line_analysis in zipped:\n        single_line_analysis_list.append(single_line_analysis)\n\n    ### finally we have the mean of polarity and subjectivity and the analysis for every line\n\n    return \\\n    (f\"The polarity of {name} is {np.mean(polarity)}, and its subjectivity is {np.mean(subjectivity)}\") + \\\n    (f\" \") + \\\n    (f\"Also, if you want to check the stats line by line take a look over here:\") + \\\n    (f\" \") + \\\n    (f\"{single_line_analysis_list}\")\n\n\n@app.route('/house_sentiment/<house>') ### house version of character sentiment\ndef house_sentiment(house):\n\n    lines = []\n    sentiments = []\n    polarity = []\n    subjectivity = []\n    match = list(collection_con.find({\"house\":house}))\n\n    for dictionary in match:\n        lines.append(dictionary[\"line\"])\n\n\n    for line in lines:\n        \n        sentiments.append(TextBlob(line).sentiment)\n\n        \n    for sentiment in sentiments:\n        if sentiment[0] != 0.0 or sentiment[1] != 0.0:\n            polarity.append(sentiment[0])\n            subjectivity.append(sentiment[1])\n    \n    single_line_analysis_list = []\n    zipped = list(zip(lines,sentiments))\n    for single_line_analysis in zipped:\n        single_line_analysis_list.append(single_line_analysis)\n\n\n    return \\\n    (f\"The polarity of {house} is {np.mean(polarity)}, and its subjectivity is {np.mean(subjectivity)}\") + \\\n    (f\" \") + \\\n    (f\"Also, if you want to check the stats line by line take a look over here:\") + \\\n    (f\" \") + \\\n    (f\"{single_line_analysis_list}\")\n\n\n\n@app.route('/house_friend_recommender/<conversation>') ### house version of the character recommender\ndef house_friend_recommender(conversation):\n\n    houses = list(collection_con.find({}).distinct(\"house\"))\n\n    sentiment_text = {}\n\n    for house in houses:\n        lines = []\n        text = \"\"\n        text_clean = \"\"\n\n        match = list(collection_con.find({\"house\":house}))\n\n        for dictionary in match:\n            lines.append(dictionary[\"line\"])\n\n        for line in lines:\n            text += line + ' '\n        \n        words = nltk.word_tokenize(text)\n        stop_words = set(stopwords.words('english'))\n        tokens_clean = [e for e in words if e not in stop_words]\n        \n        for word in tokens_clean:\n            text_clean += word + ' '\n        \n        sentiment_text[house] = text_clean\n\n    count_vectorizer = CountVectorizer()\n    sparse_matrix = count_vectorizer.fit_transform(sentiment_text.values())\n\n\n    doc_term_matrix = sparse_matrix.todense()\n    df_sentiment = pd.DataFrame(doc_term_matrix, \n                      columns=count_vectorizer.get_feature_names(), \n                      index=sentiment_text.keys())\n\n    similarity_matrix = distance(df_sentiment,df_sentiment)\n\n    sim_df = pd.DataFrame(similarity_matrix, columns=sentiment_text.keys(), index=sentiment_text.keys())\n\n    np.fill_diagonal(sim_df.values, 0) \n\n    sim_df_idmax = pd.DataFrame(sim_df.idxmax())\n    return (f\"The recommended house for {conversation} is:\" + \" \" + np.asarray(sim_df_idmax.loc[conversation])[0])\n\n\n@app.route('/character_house_recommender/<name>') ### here we will recommend a house to a character!\ndef character_house_recommender(name):      ### it is similar to what we have done so far\n    houses = list(collection_con.find({}).distinct(\"house\"))\n\n\n    sentiment_text = {}\n\n    for house in houses:\n        lines = []\n        text = \"\"\n        text_clean = \"\"\n\n        match = list(collection_con.find({\"house\":house}))\n\n        for dictionary in match:                       ### the main difference is that we have to remove the lines of our character from the house to avoid always \n            if dictionary[\"c_name\"] != name:           ### recommending his own house\n                lines.append(dictionary[\"line\"])\n        \n        for line in lines:\n            text += line + ' '\n        \n        words = nltk.word_tokenize(text)\n        stop_words = set(stopwords.words('english'))\n        tokens_clean = [e for e in words if e not in stop_words]\n        \n        for word in tokens_clean:\n            text_clean += word + ' '\n        \n        sentiment_text[house] = text_clean\n    \n    \n    character_lines = []                               ### here we add our character and his lines to our dictionary\n    character_text = \"\"\n    character_text_clean = \"\"\n    match = list(collection_con.find({\"c_name\":name}))\n    \n    for dictionary in match:\n        if dictionary[\"c_name\"] == name:\n            character_lines.append(dictionary[\"line\"])\n\n    for line in character_lines:\n        character_text += line + ' '\n\n    character_words = nltk.word_tokenize(character_text)\n    stop_words = set(stopwords.words('english'))\n    tokens_clean_character = [e for e in character_words if e not in stop_words]\n    for word in tokens_clean_character:\n        character_text_clean += word + ' '\n    \n    sentiment_text[name] = character_text_clean\n    \n\n    count_vectorizer = CountVectorizer()\n    sparse_matrix = count_vectorizer.fit_transform(sentiment_text.values())\n\n\n    doc_term_matrix = sparse_matrix.todense()\n    df_sentiment = pd.DataFrame(doc_term_matrix, \n                      columns=count_vectorizer.get_feature_names(), \n                      index=sentiment_text.keys())\n\n    similarity_matrix = distance(df_sentiment,df_sentiment)\n\n    sim_df = pd.DataFrame(similarity_matrix, columns=sentiment_text.keys(), index=sentiment_text.keys())\n\n    np.fill_diagonal(sim_df.values, 0) \n\n    sim_df_idmax = pd.DataFrame(sim_df.idxmax())\n    return (f\"The recommended house for {name} is:\" + \" \" + np.asarray(sim_df_idmax.loc[name])[0])\n\n\n\n\napp.run(\"0.0.0.0\", os.getenv(\"PORT\"), 
debug=True)\n\n","repo_name":"guillermomar/API-Natural-Language-Analysis","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":13720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71418967893","text":"# Source: Paho MQTT documentation (https://www.eclipse.org/paho/index.php?page=clients/python/docs/index.php)\n# Source: Google etc..\n\nimport paho.mqtt.client as mqtt # Import Paho MQTT client library\nimport json # Import json library\n\n\n# Callback when the computer successfully connects to the broker\ndef on_connect(client, userdata, flags, rc):\n print(f\"Connected to broker with result code {rc}\")\n\n\n# Callback when the computer receives a message from the broker\ndef on_message(client, userdata, message):\n try:\n # Decode and load JSON payload\n received_payload = json.loads(message.payload.decode(\"utf-8\"))\n\n # Print received message\n print(\n f\"Received Message with ID {received_payload['message_id/counter']}, Temperature: {received_payload['temperature']}, Time: {received_payload['time']}\")\n\n # Acknowledge the message by sending an ACK\n ack_payload = {'ack_for_message_id': received_payload['message_id/counter']}\n ack_payload_json = json.dumps(ack_payload)\n client.publish(\"temperature/ack\", ack_payload_json)\n print(f\"Sent ACK for message id {received_payload['message_id/counter']}\")\n\n except Exception as e:\n print(f\"Error: {e}\")\n\n\n# Initialize MQTT client\nclient = mqtt.Client(\"Computer\")\nclient.on_connect = on_connect # Assign on_connect function\nclient.on_message = on_message # Assign on_message function\n\n# Try connecting to broker\ntry:\n client.connect(\"broker.hivemq.com\", 1883) # This is to the Websocket at: https://www.hivemq.com/demos/websocket-client/\nexcept:\n print(\"Could not connect to MQTT broker\")\n exit(1)\n\n# Start the client loop\nclient.loop_start()\n\n# Subscribe to temperature messages\nclient.subscribe(\"temperature/up\")\n\n# Keep the script running\ntry:\n while True:\n pass\nexcept KeyboardInterrupt:\n print(\"Exiting...\")\n client.loop_stop()\n","repo_name":"Fatalityh/MQTT-Python","sub_path":"computer.py","file_name":"computer.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39456754838","text":"#%% Change working directory from the workspace root to the ipynb file location. 
Turn this addition off with the DataScience.changeDirOnImportExport setting\nimport os\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom graspy.plot import *\nfrom graspy.simulations import sample_edges\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed\nfrom graspy.utils import *\n\ntry:\n os.chdir(os.path.join(os.getcwd(), \"weighted_graph_models\"))\n print(os.getcwd())\nexcept:\n pass\n\nfrom wLSM_utils import *\n\n\nget_ipython().run_line_magic(\"matplotlib\", \"inline\")\n\n#%%\n############################\npois_range = np.array(range(1, 40, 5))\nhw_scale_range = 0.5 / (pois_range)\n\npois_range = [5]\nhw_scale_range = [1] # [0.5]\np = pois_range[0]\nh = hw_scale_range[0]\n\nn_hw_nodes = 1000\nn_modified_verts = 100\nn_blob1_verts = 100\nn_blob2_verts = 100\nn_blob3_verts = 100\npois_scale0 = p\npois_scale1 = p * 10\nhw_scale = h\nn_components = 7\nacorn = np.random.seed(8888)\n############################\n\nX = sample_hw_latent(\n n_hw_nodes, n_modified_verts, pois_scale0, pois_scale1, acorn=acorn\n)\n\n# could turn this on to add some sbm masses\n# mu1 = np.array([0.2, 0.05, 0.05])\n# mu2 = np.array([0.05, 0.2, 0.05])\n# mu3 = np.array([0.05, 0.05, 0.2])\n# X = np.concatenate((X, np.tile(mu1, (n_blob1_verts, 1))))\n# X = np.concatenate((X, np.tile(mu2, (n_blob2_verts, 1))))\n# X = np.concatenate((X, np.tile(mu3, (n_blob3_verts, 1))))\n\nn_verts = X.shape[0]\n\nP = hw_scale * X @ X.T\n\ngraph_uw = sample_edges(P, directed=False, loops=False)\nprint(np.mean(graph_uw))\nverts = np.array(range(n_verts))\nverts_mod = np.random.choice(range(n_hw_nodes), n_modified_verts, replace=False)\n\nlambda_mat = X @ X.T * pois_scale0\nlambda_mat[np.ix_(verts_mod, verts_mod)] = P[np.ix_(verts_mod, verts_mod)] * pois_scale1\n\ngraph_w = np.random.poisson(lambda_mat)\ngraph_w = symmetrize(graph_w)\ngraph_w = np.multiply(graph_w, graph_uw)\nheatmap(graph_w, transform=\"log\")\n\nase = AdjacencySpectralEmbed(n_components=n_components)\n\n# different options for regularizing\ngraph_embed = graph_w\n# graph_embed = graph_embed / np.sum(graph_embed, axis=0)[:,np.newaxis]\n# graph_embed = graph_w\n# graph_embed = augment_diagonal(graph_embed)\n# graph_embed += 1 / graph_embed.size\n# graph_embed = symmetrize(graph_embed)\nlam_mat = concentration_regularize(graph_w, sum_edges=True, weight=10)\n# graph_embed = graph_w * lam_mat\nheatmap(graph_embed)\n# graph_embed = pass_to_ranks(graph_embed)\nXhat = ase.fit_transform(graph_embed)\n\nlabels = np.isin(verts, verts_mod)\n# pairplot(X, labels=labels, legend_name=\"Modified\")\npairplot(Xhat, labels=labels, legend_name=\"Modified\")\nscreeplot(graph_embed, cumulative=False, show_first=20)\n\n# expectation = np.sum(hw_scale ** 2 * (X @ X.T) ** 2 * pois_scale0)\nexpectation = P * lambda_mat\nexpectation = expectation.sum()\nprint(expectation)\nprint(graph_w.sum())\n\n# lse_embed = LaplacianSpectralEmbed(form=\"R-DAD\", regularizer=10).fit_transform(graph_w)\n# pairplot(lse_embed, labels=labels)\n\n# pairplot(X)\n#%%\nlatent = AdjacencySpectralEmbed().fit_transform(lambda_mat)\npairplot(latent, labels=labels, legend_name=\"Modified\")\n\nl2 = X.copy()\nl2 *= np.sqrt(p)\nl2[verts_mod] = 10 * np.sqrt(p) * X[verts_mod]\npairplot(l2, labels=labels, legend_name=\"Modified\")\n\n#%%\nfrom scipy.linalg import orthogonal_procrustes\n\n# Do not perturb anything\nn_verts = 1000\npois_scale0 = 5\nX = sample_hw_latent(n_verts)\nY = np.sqrt(pois_scale0) * X\nP = X @ X.T\nY[verts_mod] = (\n np.sqrt(pois_scale1) * X[verts_mod]\n) # if we do this we get 
perfect recovery\nlambda_mat = pois_scale0 * P\nlambda_mat[np.ix_(verts_mod, verts_mod)] = pois_scale1 * P[np.ix_(verts_mod, verts_mod)]\n# lambda_mat = Y @ Y.T\nlatent = AdjacencySpectralEmbed(n_components=3).fit_transform(lambda_mat)\n# pairplot(latent)\nR, scale = orthogonal_procrustes(latent, Y)\nlatent = latent @ R\npl = np.concatenate((latent, Y), axis=0)\nlabels = np.array(n_verts * [\"estimated\"] + n_verts * [\"true\"])\npairplot(pl, labels)\n\n#%%\n\n","repo_name":"neurodata/weighted_graph_models","sub_path":"bped_wLSM_take1.py","file_name":"bped_wLSM_take1.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11576379098","text":"import logging\nimport uuid\nfrom typing import List, Union\n\nfrom sqlalchemy.orm import Session\n\nfrom core.db.models.engine import Engine\nfrom core.db.models.face import Face\nfrom core.db.models.user import User\nfrom core.exceptions.app import InputError\nfrom core.exceptions.db import DbError\n\nlogger = logging.getLogger('db')\n\n\ndef create_engine(\n    sess: Session,\n    provider: str,\n    description: str = None,\n    users: List[uuid.UUID] = []\n) -> Engine:\n    try:\n        existing_users = sess.query(User).filter(\n            User.user_id.in_(users)\n        ).all()\n        if len(existing_users) != len(users):\n            logger.error(\"Can't create engine, as some users are not in DB.\")\n            logger.error(\"Request users:[%s] DB users:[%s]\" %\n                (\n                    ', '.join([str(u) for u in users]),\n                    ', '.join([str(u.user_id) for u in existing_users])\n                )\n            )\n            raise ValueError(\"Invalid users\")\n\n        engine = Engine(\n            engine_id=uuid.uuid4(),\n            provider=provider,\n            description=description,\n            users=existing_users\n        )\n        sess.add(engine)\n        sess.commit()\n        sess.refresh(engine)\n        logger.debug(f'Create engine in db: id={engine.engine_id}')\n        return engine\n    except ValueError as ex:\n        raise InputError(str(ex))\n    except Exception:\n        sess.rollback()\n        raise DbError('Can not create engine in db')\n\n\ndef update_engine(\n    sess: Session,\n    engine_id: uuid.UUID,\n    provider: str,\n    description: str = None\n) -> int:\n    try:\n        engine = sess.query(Engine).filter(\n            Engine.engine_id == engine_id\n        ).first()\n        if engine is None:\n            raise ValueError(\"No such engine\")\n\n        rows = sess.query(Engine).filter(\n            Engine.engine_id == engine_id\n        ).update(\n            {\n                Engine.provider: provider,\n                Engine.description: description,\n            }\n        )\n        sess.commit()\n        logger.debug(f'Update engine in db: id={engine_id}')\n        return rows\n    except ValueError as ex:\n        raise InputError(str(ex))\n    except Exception:\n        sess.rollback()\n        raise DbError('Can not update engine in db')\n\n\ndef get_engine(\n    sess: Session, \n    engine_id: uuid.UUID\n) -> Union[Engine, None]:\n    return sess.query(Engine).filter(Engine.engine_id == engine_id).first()\n\n\ndef get_engines(\n    sess: Session, \n    skip: int = 0, \n    limit: int = 100\n) -> List[Engine]:\n    try:\n        return sess.query(Engine).offset(skip).limit(limit).all()\n    except Exception:\n        raise DbError('Can not get engines from db')\n\n\ndef get_engines_count(sess: Session) -> int:\n    try:\n        return sess.query(Engine).count()\n    except Exception:\n        raise DbError('Can not get engines from db')\n\n\ndef delete_engine(sess: Session, engine_id: uuid.UUID) -> int:\n    try:\n        engine = sess.query(Engine).filter(\n            Engine.engine_id == engine_id\n        ).first()\n        if engine is None:\n            raise ValueError(\"No such engine\")\n        engine.users = []\n        engine.faces = []\n        sess.add(engine)\n        sess.commit()\n        sess.refresh(engine)
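\n\n        ### with the user/face links cleared above, the engine row and its faces can be deleted safely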
\n        rows = sess.query(Engine).filter(\n            Engine.engine_id == engine_id\n        ).delete()\n        sess.query(Face).filter(Face.engine_id == engine_id).delete()\n        sess.commit()\n        return rows\n    except ValueError as ex:\n        raise InputError(str(ex))\n    except Exception:\n        sess.rollback()\n        raise DbError('Can not delete engine in db')\n","repo_name":"gosha20777/mipt-networks-2022","sub_path":"app/core/db/operations/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"322936188","text":"import numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense\r\nnum_samples = 10\r\n# Load positive and negative samples\r\npositive_samples = []\r\nnegative_samples = []\r\n\r\n# Load positive samples\r\nfor i in range(num_samples):\r\n    filename = f\"audio_data/{i}.npy\"\r\n    audio = np.load(filename, allow_pickle=True)\r\n    positive_samples.append(audio)\r\n\r\n# Load negative samples\r\nfor i in range(num_samples):\r\n    filename = f\"background_sound/{i}.npy\"\r\n    audio = np.load(filename, allow_pickle=True)\r\n    negative_samples.append(audio)\r\n\r\n# Create labels (1 for positive samples, 0 for negative samples)\r\nlabels = np.concatenate((np.ones(num_samples), np.zeros(num_samples)))\r\n\r\n# Combine positive and negative samples\r\nall_samples = np.concatenate((positive_samples, negative_samples))\r\n\r\n# Split the data into training and testing sets\r\nX_train, X_test, y_train, y_test = train_test_split(all_samples, labels, test_size=0.2, random_state=42)\r\n\r\n# Normalize the audio samples\r\nX_train = X_train / np.max(np.abs(X_train))\r\nX_test = X_test / np.max(np.abs(X_test))\r\n\r\n# Reshape the data for compatibility with Conv1D layer\r\nX_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\r\nX_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\r\n\r\n# Build the model architecture\r\nmodel = Sequential()\r\nmodel.add(Conv1D(32, 3, activation='relu', input_shape=(X_train.shape[1], 1)))\r\nmodel.add(MaxPooling1D(2))\r\nmodel.add(Conv1D(64, 3, activation='relu'))\r\nmodel.add(MaxPooling1D(2))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\n# Compile the model\r\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n# Train the model\r\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32)\r\n\r\n# Save the trained model\r\nmodel.save(\"wake_word_model.h5\")\r\n\r\nprint(\"Model training completed.\")\r\n","repo_name":"ARNAB-BOTMAS/Srishti_project","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71828275414","text":"\"\"\"create project table\n\nRevision ID: 2a106f365f40\nRevises: 0bdf146ebf68\nCreate Date: 2021-07-06 12:52:04.926130\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2a106f365f40'\ndown_revision = '0bdf146ebf68'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('status',\n        sa.Column('status_id', sa.Integer(), nullable=False),\n        sa.Column('description', sa.String(length=100), nullable=False),\n        sa.PrimaryKeyConstraint('status_id')\n    )\n    op.create_table('project',\n        sa.Column('project_id', sa.Integer(), nullable=False),\n        sa.Column('user_id', sa.Integer(), nullable=True),\n        sa.Column('status_id', sa.Integer(), nullable=True),\n        sa.Column('name', sa.String(length=128), nullable=False),\n        sa.Column('description', sa.String(length=255), nullable=False),\n        sa.Column('deadline', sa.String(length=20), nullable=False),\n        sa.ForeignKeyConstraint(['status_id'], ['status.status_id'], ),\n        sa.ForeignKeyConstraint(['user_id'], ['user.user_id'], ),\n        sa.PrimaryKeyConstraint('project_id')\n    )\n    op.create_index(op.f('ix_project_name'), 'project', ['name'], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_project_name'), table_name='project')\n    op.drop_table('project')\n    op.drop_table('status')\n    # ### end Alembic commands ###\n","repo_name":"v-danh/Flask-Web-App","sub_path":"migrations/versions/2a106f365f40_create_project_table.py","file_name":"2a106f365f40_create_project_table.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"4504847921","text":"import datetime\nfrom typing import Union\n\n\nclass RType:\n    def __init__(self, value: str):\n        self.value = value\n\n    @classmethod\n    def cast(cls, value: str) -> Union[int, float, datetime.date, datetime.time, str]:\n        return cls(value)._cast()\n\n    def _cast(self):\n        try:\n            value = int(self.value)\n        except ValueError:\n            try:\n                value = float(self.value)\n            except ValueError:\n                try:\n                    value = datetime.datetime.strptime(self.value, \"%d/%m/%Y\").date()\n                except ValueError:\n                    try:\n                        value = datetime.datetime.strptime(\n                            self.value, \"%Y/%m/%d\"\n                        ).date()\n                    except ValueError:\n                        try:\n                            hour, minute = self.value.split(\":\")\n                        except ValueError:\n                            value = self.value\n                        else:\n                            value = datetime.time(int(hour), int(minute))\n        return value\n","repo_name":"centaurialpha/pireal","sub_path":"src/pireal/core/rtypes.py","file_name":"rtypes.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"67"}
+{"seq_id":"7497739227","text":"# Calculates monthly snowfall statistics from daily data and\n# writes them to a netCDF file\nimport utilities as util\nimport datetime as dt\nimport xarray as xr\n\ndef read_month(fileGlob, reanalysis, variable):\n    \"\"\"Workaround to get monthly data\"\"\"\n    ds = xr.open_mfdataset(fileGlob, concat_dim='time', data_vars='different')\n    ds.set_coords(['latitude','longitude'], inplace=True)\n    ds.coords['time'] = util.make_time_coordinate(fileGlob)\n    ds.load()\n    return ds\n\ndef get_month_snow(fileGlob, reanalysis, threshold=0.):\n    '''\n    Generates a data set containing snowfall statistics for a given month\n\n    fileGlob   - glob pattern for the month's daily files\n    reanalysis - name of reanalysis\n\n    Returns\n    -------\n    xarray Dataset\n    '''\n\n    import xarray as xr\n\n    from constants import vnamedict\n    varName = vnamedict[reanalysis]['SNOW']['name']\n\n    try:\n        #ds = util.read_month(fileGlob, reanalysis, 'SNOW')\n        ds = read_month(fileGlob, reanalysis, 'SNOW')\n    except:\n        print ( '% get_month_snow: Unable to open {}'.format(fileGlob) )\n        raise\n
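\n    ### 'snow' is the monthly total; 'snowday' counts the days with snowfall above the threshold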
\n    dsOut = xr.Dataset({'snow': ds['PRECSNO'].sum(dim='time', keep_attrs=True),\n                        'snowday': ds['PRECSNO'].where(ds['PRECSNO'] > threshold).count(dim='time', keep_attrs=True)})\n    dsOut = dsOut.where(dsOut.latitude > -999.)\n\n    return dsOut\n\ndef process_daily_snow(reanalysis, variable, start_date=None, end_date=None,\n                       threshold=1., verbose=False, grid=None):\n    '''Processes monthly snowfall statistics for a date range'''\n\n    if not start_date: start_date='19790101'\n    if not end_date: end_date=dt.datetime.today().strftime('%Y%m%d')\n\n    if verbose:\n        print ( '% Processing {} from {} for {} to {}'.format(variable, reanalysis, start_date, end_date) )\n\n    fileList = util.make_fileList(reanalysis, variable, (start_date, end_date), grid=grid)\n\n    for f in fileList:\n\n        if verbose:\n            print ( '   Generating statistics for {}'.format(util.date_from_filename(f).strftime('%Y%m')) )\n\n        ds = get_month_snow(f, reanalysis, threshold=threshold)\n\n        #try:\n        #    ds = get_month_snow(f, reanalysis)\n        #    print (ds)\n        #except:\n        #    pass # Not a good way to do this\n\n        filo = util.make_outfile(f, reanalysis, variable)\n        if verbose:\n            print ( '   Writing statistics to {}'.format(filo) )\n        ds.to_netcdf(filo)\n\n    return\n\nif __name__ == \"__main__\":\n\n    import argparse\n\n    parser = argparse.ArgumentParser(description='Estimates monthly total snowfall from daily reanalysis snowfall')\n    parser.add_argument('reanalysis', metavar='reanalysis', type=str,\n                        help='Name of reanalysis: CFSR, ERAI, MERRA, MERRA2, etc.')\n    parser.add_argument('variable', metavar='variable', type=str,\n                        help='Variable name')\n    parser.add_argument('--start_date', '-sd', type=str, action='store', default=None,\n                        help='Date to start processing (YYYYMMDD)')\n    parser.add_argument('--end_date', '-ed', type=str, action='store', default=None,\n                        help='Date to end processing (YYYYMMDD)')\n    parser.add_argument('--threshold', '-t', type=float, action='store', default=1.,\n                        help='Threshold for wetday')\n    parser.add_argument('-v', '--verbose', action='store_true')\n    parser.add_argument('--grid', '-g', type=str, action='store', default=None)\n\n    args = parser.parse_args()\n\n    process_daily_snow(args.reanalysis, args.variable,\n                       start_date=args.start_date, end_date=args.end_date,\n                       threshold=args.threshold, verbose=args.verbose, grid=args.grid)\n\n\n\n\n","repo_name":"andypbarrett/SnowOnSeaIce","sub_path":"source/precipitation/process_daily_snowfall.py","file_name":"process_daily_snowfall.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"36271474799","text":"import pyaudio\nimport argparse\nimport wave\nimport time\nimport curses\nfrom curses import wrapper\nimport numpy as np\n\np = pyaudio.PyAudio()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', type=str, required=True)\n\nargs = parser.parse_args()\n\n# open file\nwf = wave.open(args.input, 'rb')\nn_channels = wf.getnchannels()\n\n\n# Print the visualizer\n# data is a 1D array of [0, 1) normalized values\ndef print_visualizer(data, scr):\n    # getting screen length and height\n    length = curses.COLS\n    height = curses.LINES\n    print(data)\n\n    num_buckets = len(data)\n    bucket_width = length//num_buckets\n\n    bucket_str = \"#\"*bucket_width\n    for i in range(num_buckets):\n        # printing out the column\n        bucket_height = int(data[i]*height)\n        for j in range(bucket_height):\n            scr.addstr(i*bucket_width, j, bucket_str)\n        # a= 0\n\n    scr.refresh()\n\n# if __name__ == '__main__':\ndef main(stdscr):\n    # initialize curses\n    # stdscr = 
curses.initscr()\n curses.noecho()\n curses.cbreak()\n stdscr.keypad(True)\n visualizer_data = []\n\n def callback(in_data, frame_count, time_info, status):\n data = wf.readframes(frame_count)\n int_data = np.frombuffer(data, dtype=np.uint16)\n if int_data.size < frame_count*n_channels:\n # pad with zeros\n int_data = np.pad(int_data, (0, frame_count*n_channels - int_data.size), 'constant')\n int_data = int_data.reshape(frame_count, n_channels)\n avg_data = np.mean(int_data, 1) # take average across channels\n # print(avg_data.shape)\n raw_fft_mag = np.abs(np.fft.rfft(avg_data))\n # print_visualizer(raw_fft_mag, stdscr)\n # stdscr.addstr(1,2, \"POOPOO\")\n visualizer_data = raw_fft_mag\n\n # To play out data, just leave this in\n return (data, pyaudio.paContinue)\n\n def UGETCHAR_(scr):\n h = scr.getch()\n if h == 3:\n raise KeyboardInterrupt\n if h == 26:\n raise EOFError\n return h\n\n\n # stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n # channels=wf.getnchannels(), \n # rate=wf.getframerate(), \n # output=True, \n # stream_callback=callback)\n\n # stream.start_stream()\n\n i = 0\n # while stream.is_active():\n while True:\n UGETCHAR_(stdscr) # exit when ctrl+C\n # print_visualizer(visualizer_data, stdscr)\n stdscr.addstr(1,2, \"POOPOO %d\" % i)\n stdscr.refresh()\n \n time.sleep(0.1)\n\n # stream.stop_stream()\n # stream.close()\n wf.close()\n\n p.terminate()\n\n curses.nocbreak()\n stdscr.keypad(False)\n curses.echo()\n curses.endwin()\n\n\n# # wrapper(main)\n# main(0)","repo_name":"stefantquach/FFT_music_visualizer","sub_path":"play_music.py","file_name":"play_music.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30927082260","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.properties import StringProperty, ListProperty\n\n\nclass CheckboxScreen(Screen):\n question = None\n text = StringProperty(\"Checkbox Question\")\n options = ListProperty([\"option 1\", \"option 2\", \"option 3\", \"option 4\"])\n\n def select(self, param):\n # unselect if already selected\n if self.options[param] in self.question.response:\n self.question.response.remove(self.options[param])\n else:\n self.question.response.append(self.options[param])\n self.question.response = sorted(self.question.response)\n\n def set_questions(self, qs: list):\n self.questions = qs\n\n def on_pre_enter(self):\n self.reset()\n q_num = 0\n for q in self.questions:\n if q.type == \"checkbox\":\n q_num = self.questions.index(q) + 1\n\n self.question = q\n self.text = q.text + f\" (Question {q_num}/5)\"\n self.options = q.options\n\n def reset(self):\n self.ids.check1.active = False\n self.ids.check2.active = False\n self.ids.check3.active = False\n self.ids.check4.active = False\n","repo_name":"ojas-sanghi/FBLA-Quiz","sub_path":"kivy_code/checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73266215253","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'APNTokenModel'\n db.create_table(u'apn_apntokenmodel', (\n ('token', self.gf('django.db.models.fields.CharField')(max_length=255, primary_key=True)),\n ))\n db.send_create_signal(u'apn', 
['APNTokenModel'])\n\n\n    def backwards(self, orm):\n        # Deleting model 'APNTokenModel'\n        db.delete_table(u'apn_apntokenmodel')\n\n\n    models = {\n        u'apn.apntokenmodel': {\n            'Meta': {'object_name': 'APNTokenModel'},\n            'token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'})\n        }\n    }\n\n    complete_apps = ['apn']","repo_name":"m3lm4n/mediahackday","sub_path":"apn/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25672288386","text":"# -*- coding: utf-8 -*-\n\nprint('                            Test No. 2\\n\\n')\nprint('                                 Task No. 1')\nprint('''\n1. Given a list, print the elements that occur in the list only\nonce. The elements must be printed in the order in which they appear in\nthe list.\n''')\na = [1, 2, 34, 34, 2, 5, 6, 7]\na1 = []\nfor i in a:\n    if a.count(i) == 1:\n        a1.append(i)\nprint(f'Answer:\\nIn the list {a}\\nonly the numbers {a1} occur exactly once')\nprint('===============================================================================================================')\nprint('                                 Task No. 2')\nprint('''\n2. Given a list of numbers, count how many pairs of equal elements it contains.\nAny two elements that are equal to each other form one pair,\nwhich must be counted.\n''')\n\nb = [1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 9]\ncount = 0\nfor i in range(len(b)):\n    for j in range(i + 1, len(b)):\n        if b[i] == b[j]:\n            count += 1\nprint(f'Answer:\\nIn the list {b}\\nthe number of pairs = {count}')\nprint('===============================================================================================================')\nprint('                                 Task No. 3')\nprint('''\n3. Given two tuples:\nC_1 = (35, 78, 21, 37, 2, 98, 6, 100, 231)\nC_2 = (45, 21, 124, 76, 5, 23, 91, 234)\nDetermine:\n1) which tuple has the larger sum of elements, and print the corresponding\nmessage on the screen (The sum is larger in tuple - ..)\n2) print the indices of the minimum and maximum elements\nof these tuples\n''')\n\nC_1 = (35, 78, 21, 37, 2, 98, 6, 100, 231)\nC_2 = (45, 21, 124, 76, 5, 23, 91, 234)\n\nif sum(C_1) < sum(C_2):\n    print(f'Answer No. 1_a:\\nThe sum is larger in tuple C_2 = {sum(C_2)}')\nelse:\n    print(f'Answer No. 1_a:\\nThe sum is larger in tuple C_1 = {sum(C_1)}')\nprint('Answer No. 1_b:\\nMin & Max index Tuple C_1')\nprint(f'The maximum value in tuple C_1 is at index - {C_1.index(max(C_1))} ')\nprint(f'The minimum value in tuple C_1 is at index - {C_1.index(min(C_1))} ')\nprint('Min & Max index Tuple C_2')\nprint(f'The maximum value in tuple C_2 is at index - {C_2.index(max(C_2))} ')\nprint(f'The minimum value in tuple C_2 is at index - {C_2.index(min(C_2))} ')\nprint('===============================================================================================================')\nprint('                                 Task No. 4')\nprint('''\n4. Create a dictionary from the string ' An apple a day keeps the doctor away' as\nfollows: use the characters of the string as keys, and let the values be\nthe numbers of times each letter occurs in the string.\n''')\nstr = 'An apple a day keeps the doctor away'\nstr = str.split( )\ns = []\nfor i in str:\n    s += i\n\ndict = {symbol: s.count(symbol) for symbol in s}\nprint(f'Answer:\\nDictionary = {dict}')\nprint('===============================================================================================================')\nprint('                                 Task No. 5')\nprint('''\n5. A customer comes to a confectionery. They want to buy one or several kinds\nof products, and also to find out their ingredients.\nImplement the confectionery.\nOffer a choice:\n\n''')\n\ngoods = {\n    'Pancakes': [\"Milk, Egg, Butter, Salt, Baking soda, Sugar\", \"Price = 1.50 RUB per 100 g\"],\n    'Fritters': ['Milk, Egg, Vegetable oil, Salt, Sugar, Baking powder, Kefir', \"Price = 1.00 RUB per 100 g\"],\n    'Cake': ['Egg, Sugar, Sour cream, Condensed milk, Vanilla extract, Flour, Vinegar, Berries, Baking soda, Wafer crumbs',\n             \"Price = 2.75 RUB per 100 g\"],\n    'Eclair': ['Milk, Flour, Egg, Sugar, Vanilla sugar, Butter, Cocoa, Salt', \"Price = 3.20 RUB per 100 g\"],\n    'Muffins': ['Flour, Milk, Egg, Salt, Sugar, Baking powder, Lemon, Vanilla sugar, Butter',\n                \"Price = 2.80 RUB per 100 g\"]\n}\n\nprint('Good afternoon!\\nCan I help you with anything?')\nmenu = ' Pancakes;\\n Fritters;\\n Cake;\\n Eclair;\\n Muffins;\\n'\nask = input('Would you like the menu? ').title()\nif ask == 'No':\n    print('Answer:', 'Goodbye')\nelse:\n    print(menu)\n    while True:\n        good = input('What have you chosen?: ').title()\n        if good == 'No':\n            break\n        ask = input('What would you like to know: Ingredients or Price?: ').title()\n        if ask == 'Ingredients':\n            print('Answer:', good, '-', 'Ingredients:', '-', goods[good][0])\n            ask = input('Anything else?: ').title()\n            if ask == 'Price':\n                print('Answer:', good, '-', goods[good][1])\n            else:\n                break\n        elif ask == 'Price':\n            print('Answer:', good, '-', goods[good][1])\n            ask = input('Anything else?: ').title()\n            if ask == 'Ingredients':\n                print('Answer:', good, '-', 'Ingredients:', '-', goods[good][0])\n            else:\n                break\n        else:\n            ask == 'Ingredients And Price'\n            print(good, '-', goods[good][0], '-', goods[good][1])\n            ask = input('Anything else?: ').title()\n            if ask == 'Yes':\n                continue\n            else:\n                ask == 'No'\n                break\n    print(f'Answer:\\nGoodbye')\nprint('Next')\nprint('===============================================================================================================')\nprint('                                 Task No. 6')\nprint('''\n6. Given two lists of numbers, count how many numbers are contained in\nboth the first list and the second.\n''')\nlist_1 = [1, 2, 3, 4, 55, 3, 232, 331, 543, 3]\nlist_2 = [1, 2, 3, 4, 34, 3, 322, 15]\nall_list = [ ]\ncount = 0\nfor i in list_1:  # take the first number from list_1\n    for j in list_2:  # take the second value from list_2\n        if i == j:  # compare these values\n            count += 1\n            all_list.append(i + j)  # if the values are equal, add them together and append to the new list\nprint(f'Answer:\\nThe number of matches between the lists = {count}')\nprint(f'Found equal numbers in the lists and added each found pair together - {all_list}')\nprint(f'The total sum of the matched numbers between the lists = {sum(all_list)}')\nprint('===============================================================================================================')\nprint('                                 Task No. 7')\nprint('''\n7. Write a program demonstrating how try\\\\except\\\\finally works\n''')\ntry:\n    a = input('Enter int: ')\n    b = input('Enter int: ')\n    c = int(a) / int(b)\n    print(c)\nexcept ZeroDivisionError:\n    print('One of the numbers = 0, and you cannot divide by 0')\nexcept ValueError:\n    print('Cannot convert the string to a number!')\nexcept TypeError:\n    print('Type error: unsupported operand types for /: str and str')\nfinally:\n    print(f'Answer:\\nThis is how try\\\\except\\\\finally works')\nprint('===============================================================================================================')\nprint('                                 Task No. 8')\nprint('''\n8. A text file contains, line by line, the surname and first name of the students\nin a class and their mark for a test. Print all students whose mark is\nless than 3 points and compute the class average.\n''')\nwith open(\"Class.txt\", encoding='utf8') as file:\n    count = 0\n    summa = 0\n    name_student = []\n    student_3_mark = []\n    for line in file:\n        count += 1\n        for i in range(len(line)):\n            if line[i].isdigit():\n                if int(line[i]) >= 0:\n                    name_student += line.rsplit(' , ''\\n')\n                if int(line[i]) <= 3:\n                    student_3_mark += line.rsplit(' , ')\n                num = int(line[i])\n                summa += num\n    average = summa / count\n    print(('').join(map(str, name_student)))\n    print(f'The class average mark = {round(average, 2)}\\n')\n    print('List of students with a mark of 3 or lower')\n    print(('').join(map(str, student_3_mark)))\nprint('===============================================================================================================')\n","repo_name":"RussWolF04/Control_practice","sub_path":"Control_Practic_work/Practice_work_2/Kontrolnaya/Kontrol_Job_2.py","file_name":"Kontrol_Job_2.py","file_ext":"py","file_size_in_byte":10454,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"24496696437","text":"class User:\n    def sign_in(self):\n        print(\"Logged in\")\n\n\nclass Wizard(User):\n    def __init__(self, name, power):\n        self.name = name\n        self.power = power\n\n    def attack(self):\n        print(f\"{self.name} is attacking with the {self.power} spell \")\n\n\nclass Archer(User):\n    def __init__(self, name, number_of_arrows):\n        self.name = name\n        self.number_of_arrows = number_of_arrows\n    def attack(self):\n        print(f\"attacking with arrows: arrows left: {self.number_of_arrows}\")\n\n\nwizard1 = Wizard(\"Andres\", \"Firaga 3\")\nprint(wizard1.sign_in())\nprint(wizard1.power)\narcher1 = Archer(\"green Arrow\", 55)\nprint(archer1.sign_in())\nprint(archer1.name)\nprint(archer1.number_of_arrows)\n# Polymorphism: the attack methods share the same name but react differently\n# depending on the object that calls them\narcher1.attack()\nwizard1.attack()\nprint(\"---------------Using abstraction------------------------\")\n# Polymorphism abstraction\ndef player_attack(char):\n    return char.attack()\nplayer_attack(wizard1)\nplayer_attack(archer1)\n\n# Another way, using a for loop\nprint(\"----------------Using for loop-----------------------\")\nfor char in [wizard1, archer1]:\n    char.attack()\n\n# Check if the object is an instance\nprint(isinstance(wizard1,Wizard))","repo_name":"andresparrab/Python_Learning","sub_path":"Classes/PlayerCharacter2.py","file_name":"PlayerCharacter2.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"3768581403","text":"#Allows us to import individual classes from the Tkinter module, without having to also import these\n#constants, in the GUI 
class.\nimport tkinter as tk\n\nN = tk.N\nS = tk.S\nE = tk.E\nW = tk.W\nNE = tk.NE\nSE = tk.SE\nNW = tk.NW\nSW = tk.SW\nWORD = tk.WORD\nEND = tk.END\nCENTER = tk.CENTER\n","repo_name":"JHaley1513/JTunes","sub_path":"tkConstants.py","file_name":"tkConstants.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71979681173","text":"import pandas\nimport numpy as np\n\n\ndef find_closest_date(timepoint, time_series, add_time_delta_column=True):\n print(time_series)\n deltas = np.abs(time_series - timepoint)\n print(deltas)\n idx_closest_date = np.argmin(deltas)\n print(idx_closest_date)\n res = {\"closest_date\": time_series.loc[idx_closest_date]}\n idx = ['closest_date']\n if add_time_delta_column:\n res[\"closest_delta\"] = deltas[idx_closest_date]\n idx.append('closest_delta')\n return pandas.Series(res, index=idx)\n\n\ndef merge_data(ranks, matches):\n \"\"\" matches[['closest', 'days_bt_x_and_y']] = matches.date.apply(\n lambda x: find_closest_date(x, ranks.loc[ranks['date'] == max(ranks['date'])]['date'])) \"\"\"\n\n ranks = ranks.loc[ranks['date'] == '2018-12-17'].drop_duplicates()\n # merge winner id to get rank and points\n\n matches = matches.merge(ranks, how='left',\n left_on=['winner_id'], right_on=['id'])\n\n matches = matches.drop(columns=['name', 'state', 'id', 'date_y'])\n matches = matches.rename(\n columns={'date_x':'date','rank': 'winner_rank', 'points': 'winner_points'})\n\n # merge loser id to get rank and points\n\n matches = matches.merge(ranks, how='left',\n left_on=['loser_id'], right_on=['id'])\n\n matches = matches.drop(columns=['name', 'state', 'id', 'date_y'])\n matches = matches.rename(\n columns={'date_x':'date','rank': 'loser_rank', 'points': 'loser_points'})\n\n matches['winner_rank'] = matches['winner_rank'].fillna(0)\n matches['winner_points'] = matches['winner_points'].fillna(0)\n matches['loser_rank'] = matches['loser_rank'].fillna(0)\n matches['loser_points'] = matches['loser_points'].fillna(0)\n\n matches['winner_rank'] = matches['winner_rank'].astype(int)\n matches['winner_points'] = matches['winner_points'].astype(int)\n matches['loser_rank'] = matches['loser_rank'].astype(int)\n matches['loser_points'] = matches['loser_points'].astype(int)\n\n return matches\n\n\nmatches2018 = pandas.read_csv('data/2018.csv')\nmatches2019 = pandas.read_csv('data/2019.csv')\nmatches2020 = pandas.read_csv('data/2020.csv')\n\nmatches2018['date'] = matches2018['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\nmatches2019['date'] = matches2019['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\nmatches2020['date'] = matches2020['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\n\nranks2018 = pandas.read_csv('data/2018ranks.csv')\nranks2019 = pandas.read_csv('data/2019ranks.csv')\nranks2020 = pandas.read_csv('data/2020ranks.csv')\n\nranks2018['date'] = ranks2018['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\nranks2019['date'] = ranks2019['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\nranks2020['date'] = ranks2020['date'].apply(\n lambda x: pandas.to_datetime(str(x), format='%Y-%m-%d'))\n\n\n#ranks = pandas.concat([ranks2018, ranks2019, ranks2020], ignore_index=True)\n\nfinal2018 = merge_data(ranks2018, matches2018)\nfinal2019 = merge_data(ranks2019, matches2019)\nfinal2020 = merge_data(ranks2020, 
matches2020)\n\nfinal2018.to_csv('data/final2018.csv', index=False)\nfinal2019.to_csv('data/final2019.csv', index=False)\nfinal2020.to_csv('data/final2020.csv', index=False)\n","repo_name":"jelenajaksic/asm_project","sub_path":"data_merge.py","file_name":"data_merge.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27978694378","text":"\"\"\"Base module\"\"\"\nfrom selenium.webdriver import Remote as RemoteWebDriver\nfrom selenium.common.exceptions import NoAlertPresentException, \\\n    NoSuchElementException\n\nimport math\n\n\nclass BasePage():\n    \"\"\"This is the base class for working with the browser\"\"\"\n    def __init__(self, browser: RemoteWebDriver, url, timeout=10):\n        self.browser = browser\n        self.url = url\n        self.browser.implicitly_wait(timeout)\n\n    def open(self):\n        self.browser.get(self.url)\n\n    def is_element_present(self, how, what):\n        try:\n            self.browser.find_element(how, what)\n        except NoSuchElementException:\n            return False\n        return True\n\n    def solve_quiz_and_get_code(self):\n        \"\"\"\n        Method for computing the formulas from the stepik task\n        \"\"\"\n        alert = self.browser.switch_to.alert\n        x = alert.text.split(\" \")[2]\n        answer = str(math.log(abs((12 * math.sin(float(x))))))\n        alert.send_keys(answer)\n        alert.accept()\n        try:\n            alert = self.browser.switch_to.alert\n            alert_text = alert.text\n            print(f\"Your code: {alert_text}\")\n            alert.accept()\n        except NoAlertPresentException:\n            print(\"No second alert presented\")\n","repo_name":"Stynic/page_object_stepic_automation","sub_path":"pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43467719758","text":"C = int(input())\n\nanswer_list=[]\n\nfor i in range(0,C):\n    num_list= list(map(int,input().split()))\n    sum=0\n    mean=0\n    alpha=0\n    for j in range(1,num_list[0]+1):\n        sum +=num_list[j]\n\n    mean= sum/num_list[0]\n\n    for k in range(1,num_list[0]+1):\n        if num_list[k] > mean:\n            alpha +=1\n\n    percent= (alpha/num_list[0])*100\n\n    answer_list.append(percent)\n\n\nfor c in range(0,len(answer_list)):\n    pop=answer_list[c]\n    print(f\"{pop:.3f}%\")\n","repo_name":"ship26/Coding_test","sub_path":"BJ_4344.py","file_name":"BJ_4344.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73833665814","text":"__all__ = ['get_message_bot_from_remote', 'register_bot',\n           'register_bot_domain', 'init_bot']\n\nimport json\nimport requests\nimport logging\nfrom faq_bot.model.i18n_data import get_i18n_content\nfrom faq_bot.constant import PRIVATE_KEY_PATH, DEVELOP_API_DOMAIN, \\\n    API_BO, CALLBACK_ADDRESS, PHOTO_URL, BOT_NAME\nfrom conf.config import API_ID, DOMAIN_ID, ADMIN_ACCOUNT, LOCAL_ADDRESS, \\\n    SERVER_CONSUMER_KEY\nfrom faq_bot.common.utils import auth_post, auth_get\nfrom faq_bot.common.global_data import get_value, set_value\nimport gettext\n_ = gettext.gettext\n\ndef headers():\n    my_headers = {\n        \"consumerKey\": SERVER_CONSUMER_KEY,\n        \"Content-Type\": \"application/json\",\n        \"charset\": \"UTF-8\"\n    }\n    return my_headers\n\n\ndef get_message_bot_from_remote():\n    \"\"\"\n    Get a message bot.\n\n    reference\n    - `Common Message Property `_\n\n    :return: bot no\n    \"\"\"\n    url = API_BO[\"bot\"]\n    response = auth_get(url, headers=headers())
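\n    ### a non-200 response is logged and treated as \"bot not found\", so the caller falls back to registering a new bot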
\n    if response.status_code != 200:\n        logging.info(\"get message bot failed: code:%d content:%s\" % (\n            response.status_code, response.text))\n        return None\n\n    content = json.loads(response.content)\n    bots = content.get(\"bots\", None)\n    if bots is None:\n        return None\n    for bot in bots:\n        name = bot.get(\"name\", None)\n        photo_url = bot.get(\"photoUrl\", None)\n        if name == BOT_NAME and photo_url == PHOTO_URL:\n            return bot.get(\"botNo\", None)\n    return None\n\n\ndef register_bot():\n    \"\"\"\n    Register a message bot.\n\n    reference\n    - `Common Message Property `_\n\n    :param photo_address: access address of the user's avatar.\n        If you need to change the user image,\n        please replace the corresponding file in image/ (PNG files only).\n    :return: bot no\n    \"\"\"\n    url = API_BO[\"bot\"]\n    fmt = _(\"FAQ Ask Bot\")\n    a = lambda x, y: {\"language\": x, \"name\": y}\n    b = lambda x, y: {\"language\": x, \"description\": y}\n    data = {\n        \"name\": BOT_NAME,\n        \"i18nNames\": get_i18n_content(fmt, \"register_bot\", function=a),\n        \"photoUrl\": PHOTO_URL,\n        \"description\": BOT_NAME,\n        \"i18nDescriptions\": get_i18n_content(fmt, \"register_bot\", function=b),\n        \"managers\": [ADMIN_ACCOUNT],\n        \"submanagers\": [],\n        \"useGroupJoin\": False,\n        \"useDomainScope\": False,\n        \"useCallback\": True,\n        \"callbackUrl\": CALLBACK_ADDRESS,\n        \"callbackEvents\": [\"text\", \"location\", \"sticker\", \"image\"]\n    }\n\n    response = auth_post(url, data=json.dumps(data), headers=headers())\n    if response.status_code != 200:\n        raise Exception(\"register bot failed: code:%d text:%s\" % (response.status_code, response.text))\n\n    tmp = json.loads(response.content)\n    bot_no = tmp.get('botNo', None)\n    if bot_no is None:\n        raise Exception(\"register bot failed: bot no is None\")\n    return bot_no\n\n\ndef register_bot_domain(bot_no):\n    \"\"\"\n    Register a message bot domain.\n\n    reference\n    - `Common Message Property `_\n\n    :param bot_no: bot no\n    \"\"\"\n    url = API_BO[\"bot\"] + \"/\" + str(bot_no) + \"/domain/\" + str(DOMAIN_ID)\n    data = {\"usePublic\": True, \"usePermission\": False}\n    response = auth_post(url, data=json.dumps(data), headers=headers())\n    if response.status_code != 200:\n        raise Exception(\"register bot domain failed: code:%d content:%s\" % (\n            response.status_code, response.text))\n\n\ndef init_bot():\n    \"\"\"\n    Initialize bot info. 
If the BOT is not registered, the system will fail to start.\n\n Before BOT registration,\n If BOT has been registered, it does not need to be re registered.\n\n reference\n - `Common Message Property `_\n\n \"\"\"\n bot_no = get_message_bot_from_remote()\n if bot_no is None:\n bot_no = register_bot()\n register_bot_domain(bot_no)\n\n # todo set cache\n set_value(\"bot_no\", str(bot_no))\n","repo_name":"lineworks/samplebot_faq_ask_bot_v1.0","sub_path":"faq_bot/externals/register_bot.py","file_name":"register_bot.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"40148507308","text":"import numpy.oldnumeric as Numeric\nfrom opengltk.OpenGL import GL\n\nfrom DejaVu.IndexedPolygons import IndexedPolygons\nfrom DejaVu.Geom import Geom\nfrom DejaVu.IndexedGeom import IndexedGeom\nimport datamodel\nimport viewerConst\nfrom DejaVu.viewerFns import checkKeywords\n\ndef uniq(objectSequence):\n \"\"\" returns a list with no duplicates\"\"\"\n d = {}\n for o in objectSequence: d[id(o)] = o\n return d.values()\n\n\nclass CallBackFunction:\n \"\"\"Class to allow to specify arguments to a callback function\"\"\"\n\n def __init__(self, function, *args, **kw):\n self.function = function\n self.args = args\n self.kw = kw\n\n def __call__(self, *args, **kw):\n args = self.args + args\n kw.update(self.kw)\n return apply(self.function, args, kw)\n\n\nclass Box(IndexedPolygons):\n \"\"\"3-D Polygon with 8 vertices.\n Box has attributes center and xside, yside, zside.\n\n if vertices are supplied, \n vertices[2] + vertices[4] are used to define xside, yside, zside \n + center\n\n In addition to a set of vertices, a Box can be defined by setting \n any of these combinations:\n center + side\n center + xside, yside, zside\n origin + side (origin is 'smallest' valued point of vertices)\n origin + xside, yside, zside\n origin + center\n two cornerPoints where cornerPoints are ordered:\n center - halfsides, center + halfsides\n\n Precedence among these possible parameters is defined as follows:\n side < [x,y,z]side < center < origin < cornerPoints < vertices\n \"\"\"\n \n \n keywords = IndexedPolygons.keywords + [\n 'maxCube',\n 'minCube',\n 'vertices',\n 'side',\n 'xside',\n 'yside',\n 'zside',\n 'center',\n 'origin',\n 'cornerPoints',\n ]\n\n\n def __init__(self, name=None, check=1, **kw):\n\n self.maxCube = None\n self.minCube = None\n\n if not kw.get('origin') \\\n and not kw.get('center') \\\n and not kw.get('cornerPoints') \\\n and not kw.get('minCube'):\n kw['origin'] = (0, 0, 0)\n\n #set up some defaults:\n materials = kw.get('materials')\n #print 'in BOX materials kw=', materials\n if not materials:\n kw['materials'] = ((0,0,1),(0,1,0),(0,0,1),(0,1,0),(1,0,0),(1,0,0),)\n #print 'in BOX after test for materials, kwmaterials=', kw['materials']\n\n vertices = kw.get('vertices')\n if vertices is not None:\n assert len(vertices)==8\n pt1 = Numeric.array(vertices[2])\n pt2 = Numeric.array(vertices[4])\n self.center = tuple(Numeric.multiply(Numeric.add(pt1,pt2), 0.5))\n self.xside, self.yside, self.zside = \\\n Numeric.subtract(pt2,pt1)\n else:\n #set up center \n center = kw.get('center')\n #if not center: center = (0.,0.,0.)\n self.center = center\n\n #set up sides\n side = kw.get('side')\n if side:\n self.xside = side\n self.yside = side\n self.zside = side\n else:\n xside = kw.get('xside')\n if not xside: xside = 1.0\n self.xside = xside\n yside = kw.get('yside')\n if not yside: yside = 1.0\n self.yside = 
yside\n            zside = kw.get('zside')\n            if not zside: zside = 1.0\n            self.zside = zside\n\n        #NB faces shouldn't change\n        self.faces=((0,3,2,1),\n                    (3,7,6,2),\n                    (7,4,5,6),\n                    (0,1,5,4),\n                    (1,2,6,5),\n                    (0,4,7,3))\n        self.funcs = {}\n        fkeys = ['center', 'origin', 'centerOrigin', 'xside', \\\n                 'yside', 'zside','maxCube', 'minCube']\n        fs = [self.getVertsFromCenter, self.getVertsFromOrigin, \\\n              self.getVertsFromCenterOrigin,\n              CallBackFunction(self.getVertsFromSide, 'xside'),\n              CallBackFunction(self.getVertsFromSide, 'yside'),\n              CallBackFunction(self.getVertsFromSide, 'zside'),\n              self.setMaxCube, self.setMinCube]\n        for i in range(len(fkeys)):\n            self.funcs[fkeys[i]] = fs[i]\n\n\n        self.pickableVertices = 1\n        kw['frontPolyMode'] = 'line'\n        apply( IndexedPolygons.__init__, (self, name, check), kw )\n        self.inheritMaterial = 0\n\n        #print 'calling self.Set with ', kw.get('materials')\n        #apply(self.Set,(), kw)\n\n        # register functions to compute normals\n        self.VertexNormalFunction(self.ComputeVertexNormals)\n        self.vertexSet.normals.ComputeMode( viewerConst.AUTO )\n        self.FaceNormalFunction(self.ComputeFaceNormals)\n        self.faceSet.normals.ComputeMode( viewerConst.AUTO )\n        self._PrimitiveType()\n        self.GetNormals()\n        self.oldFPM = GL.GL_LINE\n\n\n    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):\n        \"\"\"set data for this object: Set polygon's vertices, faces, normals or materials\ncheck=1 : verify that all the keywords present can be handled by this func\nredo=1 : append self to viewer.objectsNeedingRedo\nupdateOwnGui=True : allow updating owngui at the end of this func\n\"\"\"\n        redoFlags = 0\n\n        #newKeyList is the list of keys specified in the call to Set\n        newKeyList = kw.keys()\n\n        # Setting both center AND origin is a special case\n        # which sets all side lengths\n        centerOrigin = 0\n        if 'center' in newKeyList and 'origin' in newKeyList:\n            centerOrigin = 1\n\n        side = kw.get( 'side')\n        if side:\n            kw['xside'] = side\n            kw['yside'] = side\n            kw['zside'] = side\n            newKeyList.extend(['xside','yside', 'zside'])\n            newKeyList = uniq(newKeyList)\n\n        apply(self.updateVal, (['xside','yside','zside'],),kw)\n\n        #these are either 1 or None\n        self.maxCube = kw.get('maxCube')\n        self.minCube = kw.get('minCube')\n        if self.minCube and self.maxCube:\n            self.center = [ (self.minCube[0] + self.maxCube[0]) * .5 ,\n                            (self.minCube[1] + self.maxCube[1]) * .5 ,\n                            (self.minCube[2] + self.maxCube[2]) * .5 ]\n\n        # kl is used to implement this precedence:\n        # side < [x,y,z]side < center < origin < cornerPoints < vertices\n        # vertices are dealt with last\n        cornerPoints = None\n        kl = ['xside', 'yside', 'zside','minCube', 'maxCube', 'center', 'origin']\n        for key in kl:\n            if key in newKeyList:\n                newVal = kw.get(key)\n                if not newVal: continue\n                if key in ['center','origin'] and centerOrigin:\n                    self.center = kw.get('center')\n                    newVal = kw.get('origin')\n                    newKeyList.remove('center')\n                    newKeyList.remove('origin')\n                    f = self.funcs['centerOrigin']\n                else:\n                    del kw[key]\n                    f = self.funcs[key]\n                cornerPoints = apply(f, (newVal,),{})\n\n        #if cornerPoints are specified, they override other info\n        newcornerPoints = kw.get('cornerPoints')\n        if newcornerPoints:\n            cornerPoints = newcornerPoints\n\n        if cornerPoints:\n            ptList = self.getVertsFromCornerPts(cornerPoints)\n        else:\n            ptList = None\n\n        #vertices override everything: set center+sides\n        newVertices = kw.get('vertices')\n        if newVertices is not None:\n            assert len(newVertices)==8\n            pt1 = Numeric.array(newVertices[2])\n            pt2 = Numeric.array(newVertices[4])
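\n            ### vertices[2] and vertices[4] are opposite corners of the box, so their midpoint gives the center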
\n            self.center = tuple(Numeric.multiply(Numeric.add(pt1,pt2), 0.5))\n            self.xside, self.yside, self.zside = \\\n                Numeric.subtract(pt2,pt1)\n            redoFlags |= self._redoFlags['redoDisplayListFlag']\n        elif ptList:\n            assert len(ptList)==8\n            kw['vertices'] = ptList\n            redoFlags |= self._redoFlags['redoDisplayListFlag']\n\n        if kw.get('faces') is None:\n            kw['faces'] = self.faces\n\n        redoFlags |= apply( IndexedPolygons.Set, (self, check, 0), kw )\n\n        return self.redoNow(redo, updateOwnGui, redoFlags)\n\n\n    def updateVal(self, keyList, **kw):\n        for item in keyList:\n            itemVal = kw.get(item)\n            if itemVal:\n                setattr(self, item, itemVal)\n                #exec('self.'+item+'='+str(itemVal))\n\n\n    def setMaxCube(self, val):\n        side = max(self.xside, self.yside, self.zside)\n        self.xside = side\n        self.yside = side\n        self.zside = side\n        return self.getVertsFromCenter(self.center)\n\n\n    def setMinCube(self, val):\n        side = min(self.xside, self.yside, self.zside)\n        self.xside = side\n        self.yside = side\n        self.zside = side\n        return self.getVertsFromCenter(self.center)\n\n\n    def getVertsFromSide(self, sideStr, value):\n        setattr(self, sideStr, value)\n        #exec('self.'+sideStr+'=' + str(value))\n        return self.getVertsFromCenter(self.center)\n\n\n    def getVertsFromCenterOrigin(self, origin):\n        #in this case, the origin is pt0\n        x0,y0,z0 = origin\n        x1,y1,z1 = self.center\n        self.xside = x0 + 2*(x1-x0)\n        self.yside = y0 + 2*(y1-y0)\n        self.zside = z0 + 2*(z1-z0)\n        pt1 = (self.xside, self.yside, self.zside)\n        return (origin, pt1)\n\n\n    def halfPt(self, pt):\n        return Numeric.multiply(pt, 0.5)\n\n\n    def getVertsFromOrigin(self, origin):\n        #set the new center here, also\n        side = Numeric.array((self.xside, self.yside, self.zside))\n        self.center = tuple(Numeric.add(origin, Numeric.multiply(side, 0.5)))\n        pt1 = tuple(Numeric.add(origin, side))\n        return (origin, pt1)\n\n\n    def getVertsFromCenter(self, center):\n        self.center = center\n        halfSide = Numeric.multiply(Numeric.array((self.xside, self.yside, self.zside)), 0.5)\n        pt1 = tuple(Numeric.add(center,halfSide))\n        pt0 = tuple(Numeric.subtract(center,halfSide))\n        return (pt0, pt1)\n\n\n    def getVertsFromCornerPts(self, cornerPoints):\n        # cornerPoints = (center-halfsides, center+halfsides)\n        # cornerPoints = (pt0, pt1)\n        x1,y1,z1 = cornerPoints[1]\n        x0,y0,z0 = cornerPoints[0]\n        self.xside = x1-x0\n        self.yside = y1-y0\n        self.zside = z1-z0\n        center = (x1-(x1-x0)/2., y1-(y1-y0)/2., z1-(z1-z0)/2.)\n        self.center = center\n        # maxCube sets the box to a cube with side = largest of x,y,z-side\n        # minCube sets the box to a cube with side = smallest of x,y,z-side\n        # maxCube has precedence over minCube\n        if self.maxCube or self.minCube:\n            if self.maxCube:\n                side = max((x1-x0,y1-y0,z1-z0))\n            elif self.minCube:\n                side = min((x1-x0,y1-y0,z1-z0))\n            self.xside = side\n            self.yside = side\n            self.zside = side\n            pt1 = tuple(Numeric.add(center,(side/2.,side/2,side/2)))\n            pt0 = tuple(Numeric.subtract(center,(side/2,side/2,side/2)))\n            x1,y1,z1 = pt1\n            x0,y0,z0 = pt0\n        #build the list of 8 pts\n        ptList=((x1,y1,z0),\n                (x0,y1,z0),\n                (x0,y0,z0),\n                (x1,y0,z0),\n                (x1,y1,z1),\n                (x0,y1,z1),\n                (x0,y0,z1),\n                (x1,y0,z1))\n        return ptList\n\n\n    def DisplayFunction(self):\n        if self.frontPolyMode != self.oldFPM:\n            self.RedoDisplayList()\n        IndexedPolygons.DisplayFunction(self)\n\n\n    def Draw(self):\n        #print\"Box.Draw\"\n        self.oldFPM = self.frontPolyMode\n        if self.frontPolyMode == GL.GL_LINE:\n            GL.glDisable(GL.GL_LIGHTING)\n            #c is an 8x3 array\n            c = self.vertexSet.vertices.array\n            #lines parallel to the x-axis should be red, y -> green and z -> blue\n            col = ((1,0,0),(0,1,0),(0,0,1))\n            #these groups of 4 pairs of points define lines parallel to the x, y, and z axes
y, and z axes\n alines=[[(c[0],c[1]),(c[2],c[3]),(c[4],c[5]),(c[6],c[7])],\n [(c[0],c[3]),(c[1],c[2]),(c[4],c[7]),(c[5],c[6])],\n [(c[0],c[4]),(c[1],c[5]),(c[3],c[7]),(c[2],c[6])]]\n namectr=0\n for i in range(3):\n if not self.inheritMaterial:\n GL.glColor3fv(col[i])\n for vpairs in alines[i]:\n GL.glPushName(namectr)\n GL.glBegin(GL.GL_LINES)\n GL.glVertex3dv(list(vpairs[0]))\n GL.glVertex3dv(list(vpairs[1]))\n GL.glEnd()\n GL.glPopName()\n namectr=namectr+1\n self.viewer.enableOpenglLighting()\n return 1\n else:\n return IndexedPolygons.Draw(self)\n\n\nclass GridBox(Box):\n \"\"\" Specialized Box whose dimensions are controlled by spacing and npts\n in a grid. A GridBox has additional attributes xspacing, yspacing, \n zspacing and xnpts,ynpts and znpts.\n \"\"\"\n\n keywords = Box.keywords + [\n 'npts',\n 'xnpts',\n 'ynpts',\n 'znpts',\n 'spacing',\n 'xspacing',\n 'yspacing',\n 'zspacing'\n ]\n\n\n def __init__(self, name=None, check=1, **kw):\n\n npts = kw.get('npts')\n if not npts: \n xnpts = kw.get('xnpts')\n if not xnpts:\n kw['xnpts'] = 40\n ynpts = kw.get('ynpts')\n if not ynpts:\n kw['ynpts'] = 40\n znpts = kw.get('znpts')\n if not ynpts:\n kw['znpts'] = 40\n spacing = kw.get('spacing')\n if not spacing: \n spacing = (.375,.375,.375)\n kw['spacing'] = (.375, .375, .375)\n\n apply( Box.__init__, (self, name, check), kw )\n\n\n def Set(self, check=1, redo=1, updateOwnGui=True, **kw):\n \"\"\"set data for this object\ncheck=1 : verify that all the keywords present can be handle by this func \nredo=1 : append self to viewer.objectsNeedingRedo\nupdateOwnGui=True : allow to update owngui at the end this func\n\"\"\"\n redoFlags = apply( Box.Set, (self, check, 0), kw)\n\n #newKeyList is list of keys specified in call to Set\n newKeyList = kw.keys()\n npts= kw.get('npts')\n if npts:\n kw['xnpts'] = npts\n kw['ynpts'] = npts\n kw['znpts'] = npts\n newKeyList.extend(['xnpts','ynpts', 'znpts'])\n\n spacing = kw.get('spacing')\n if spacing:\n assert len(spacing)==3\n kw['xspacing'] = spacing[0]\n kw['yspacing'] = spacing[1]\n kw['zspacing'] = spacing[2]\n newKeyList.extend(['xspacing','yspacing', 'zspacing'])\n\n newKeyList = uniq(newKeyList)\n\n #update all 6 attributes\n apply(self.updateVal, (['xnpts','ynpts','znpts', 'xspacing',\\\n 'yspacing', 'zspacing'],),kw)\n\n if 'xnpts' in newKeyList or 'xspacing' in newKeyList:\n kw['xside'] = self.xnpts*self.xspacing\n\n if 'ynpts' in newKeyList or 'yspacing' in newKeyList:\n kw['yside'] = self.ynpts*self.yspacing\n\n if 'znpts' in newKeyList or 'zspacing' in newKeyList:\n kw['zside'] = self.znpts*self.zspacing\n\n return self.redoNow(redo, updateOwnGui, redoFlags)\n","repo_name":"MolecularFlipbook/FlipbookApp","sub_path":"mfb/MGLToolsPckgs/DejaVu/Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":15476,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"36412299056","text":"import os\nimport shutil\nfrom secrets import token_urlsafe\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.timezone import now\n\n\nclass Dir(models.Model):\n name = models.CharField(max_length=30)\n dirs = models.ManyToManyField('Dir', blank=True)\n files = models.ManyToManyField('File', blank=True)\n downloads = models.IntegerField(default=0)\n created_date = models.DateTimeField(default=now, editable=False)\n owner = models.ForeignKey('users.User', related_name='dirs', on_delete=models.CASCADE, default=1)\n\n def __str__(self):\n return '#{} {}'.format(self.id, 
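\n# Illustrative note (editor addition, not part of the original file): Set()\n# resolves keyword precedence as side < x/y/zside < center < origin <\n# cornerPoints < vertices, so a call such as\n#   box.Set(side=4.0, vertices=v8)   # v8: a hypothetical list of 8 corners\n# effectively ignores 'side' and recomputes center and side lengths from the\n# 8 vertices, as the precedence comment in Set() above documents.\n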
","repo_name":"MolecularFlipbook/FlipbookApp","sub_path":"mfb/MGLToolsPckgs/DejaVu/Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":15476,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"36412299056","text":"import os\nimport shutil\nfrom secrets import token_urlsafe\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.timezone import now\n\n\nclass Dir(models.Model):\n    name = models.CharField(max_length=30)\n    dirs = models.ManyToManyField('Dir', blank=True)\n    files = models.ManyToManyField('File', blank=True)\n    downloads = models.IntegerField(default=0)\n    created_date = models.DateTimeField(default=now, editable=False)\n    owner = models.ForeignKey('users.User', related_name='dirs', on_delete=models.CASCADE, default=1)\n\n    def __str__(self):\n        return '#{} {}'.format(self.id, self.name)\n    \n    def safe_delete(self, user, add_message):\n        \"\"\"remove only files created by user and dirs that don't contain files/dirs created by other users\"\"\"\n        if self.owner == user:\n            for file in self.files.all():\n                file.safe_delete(user, lambda x: None)  # do not add messages\n\n            for dir in self.dirs.all():\n                dir.safe_delete(user, lambda x: None)  # do not add messages\n\n            updated_dir = Dir.objects.get(pk=self.id)\n\n            if not len(updated_dir.files.all()) and not len(updated_dir.dirs.all()):\n                self.delete()\n                add_message(f'Directory {self.name} was removed')\n            else:\n                add_message(f'Only your own content was removed from the {self.name} directory')\n\n    def safe_rename(self, user, name, parent_dir_id, add_message):\n        if self.owner == user:\n            self.name = self.correct_name(name, parent_dir_id, lambda x: None, self.name)  # do not add messages\n            self.save()\n            add_message(f'Directory was renamed to {self.name}')\n\n    def inc_download(self):\n        Dir.objects.filter(pk=self.pk).update(downloads=models.F('downloads') + 1)\n\n    def archieve_token(self):\n        token = token_urlsafe(16)\n        path_with_token = os.path.join(settings.ARCHIVES_ROOT, token)\n\n        while os.path.exists(path_with_token):\n            token = token_urlsafe(16)\n            path_with_token = os.path.join(settings.ARCHIVES_ROOT, token)\n        \n        os.mkdir(path_with_token)\n\n        self.prepare_dir(self.id, path_with_token)\n\n        shutil.make_archive(path_with_token, 'zip', path_with_token)\n        shutil.rmtree(path_with_token)\n\n        return token\n    \n    @classmethod\n    def prepare_dir(self, id, space):\n        \"\"\"Recursively prepare the directory structure for archiving\"\"\"\n        dir_record = self.objects.get(pk=id)\n        dir_record.inc_download()\n\n        for the_file in dir_record.files.all():\n            the_file.inc_download()\n            filename = os.path.basename(the_file.file.name)\n\n            file_path = os.path.join(settings.MEDIA_ROOT, filename)\n            new_path = os.path.join(space, filename)\n\n            shutil.copy(file_path, new_path)\n            if filename != the_file.full_name:\n                os.rename(new_path, os.path.join(space, the_file.full_name))\n        \n        for the_dir in dir_record.dirs.all():\n            next_dir_path = os.path.join(space, the_dir.name)\n            os.mkdir(next_dir_path)\n            self.prepare_dir(the_dir.id, next_dir_path)\n\n    @classmethod\n    def add_new(self, value, parent_dir_id, user, add_message):\n        \"\"\"Create a new record only if the name is valid\"\"\"\n        if len(value) > 30:\n            add_message('The name should be no longer than 30 characters')\n        else:\n            parent_dir = self.objects.get(pk=parent_dir_id)\n            if value.lower() in list(map(lambda x: x.name.lower(), parent_dir.dirs.all())):\n                add_message('A directory with that name already exists')\n            else:\n                instance = self(name=value, owner=user)\n                instance.save()\n                parent_dir.dirs.add(self.objects.get(pk=instance.id))\n\n    @classmethod\n    def correct_name(self, name, parent_dir_id, add_message, exclude=None):\n        \"\"\"Correct the name, appending a random token on collisions\"\"\"\n        parent_dir = self.objects.get(pk=parent_dir_id)\n        existing_names = list(map(lambda x: x.name.lower(), parent_dir.dirs.all().exclude(name=exclude)))\n\n        if len(name) > 30 or name.lower() in existing_names:\n            name_with_token = name[:21] + '_' + token_urlsafe()[:8]\n\n            while name_with_token.lower() in existing_names:\n                name_with_token = name[:21] + '_' + token_urlsafe()[:8]\n\n            add_message(f'Directory {name} was renamed to {name_with_token}')\n            return name_with_token\n\n        return name\n\n    @classmethod\n    def upload(self, files, relpaths, parent_dir_id, user, add_message):\n        paths = [path.split('/')[:-1] for path in relpaths]\n        # file_dir_ids is a list of the dir ids in which the files are located\n        file_dir_ids = []\n        \n        dir_inst = self(name=self.correct_name(paths[0][0], parent_dir_id, add_message), owner=user)\n        dir_inst.save()\n        d = [{\n            dir_inst.id: {\n                'name': paths[0][0],\n                'cont': []\n            }\n        }]\n        self.objects.get(pk=parent_dir_id).dirs.add(self.objects.get(pk=dir_inst.id))\n        saved_id = dir_inst.id  # currently the root id\n\n        for path in paths:\n            d_link = d  # reference to the object behind the d variable\n\n            for part in path:\n                id = 0\n\n                for index, i in enumerate(d_link):\n                    key, value = list(i.items())[0]\n\n                    if value['name'] == part: \n                        id = key\n                        break\n\n                if id:  # if the part was found\n                    d_link = d_link[index][id]['cont']\n                    saved_id = id\n                else:  # create a new Dir record\n                    dir_inst = self(name=self.correct_name(part, saved_id, add_message), owner=user)\n                    dir_inst.save()\n                    self.objects.get(pk=saved_id).dirs.add(self.objects.get(pk=dir_inst.id))\n                    d_link.append({dir_inst.id: {\n                        'name': part,\n                        'cont': []\n                    }})\n                    d_link = d_link[-1][dir_inst.id]['cont']\n                    saved_id = dir_inst.id\n            \n            file_dir_ids.append(saved_id)\n        \n        for id, the_file in enumerate(files):\n            File.upload(the_file, file_dir_ids[id], user, add_message)\n
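\n\n# Illustrative sketch (editor addition; _collect_upload_notices is a\n# hypothetical helper, not part of the original app): shows the calling\n# convention of Dir.upload, which expects browser-style relative paths for\n# each uploaded file plus a callable that receives user-facing messages.\ndef _collect_upload_notices(user, parent_dir_id, files, relpaths):\n    notices = []\n    Dir.upload(files, relpaths, parent_dir_id, user, notices.append)\n    return notices\n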
\n\nclass File(models.Model):\n    name = models.CharField(max_length=30)\n    ext = models.CharField(max_length=10)\n    file = models.FileField()\n    downloads = models.IntegerField(default=0)\n    created_date = models.DateTimeField(default=now, editable=False)\n    mmtype = models.CharField(max_length=100, default='content/file')\n    owner = models.ForeignKey('users.User', related_name='files', on_delete=models.CASCADE, default=1)\n\n    @property\n    def full_name(self):\n        return f'{self.name}.{self.ext}'\n\n    def __str__(self):\n        return f'#{self.id} {self.name}.{self.ext}'\n    \n    def inc_download(self):\n        File.objects.filter(pk=self.pk).update(downloads=models.F('downloads') + 1)\n        Stats.objects.filter(pk=settings.STATS_ID).update(file_downloads=models.F('file_downloads') + 1)\n\n    def safe_delete(self, user, add_message):\n        if self.owner == user:\n            os.remove(os.path.join(settings.MEDIA_ROOT, self.file.name))\n            self.delete()\n            add_message(f'File {self.full_name} was removed')\n    \n    def safe_rename(self, user, name, parent_dir_id, add_message):\n        if self.owner == user:\n            self.name = self.correct_name(name, parent_dir_id, self.ext, lambda x: None, self.name)  # do not add messages\n            self.save()\n            add_message(f'File was renamed to {self.full_name}')\n\n    @classmethod\n    def correct_name(self, name, parent_dir_id, ext, add_message, exclude=None):\n        \"\"\"Correct the name, appending a random token on collisions\"\"\"\n        parent_dir = Dir.objects.get(pk=parent_dir_id)\n        existing_names = list(map(lambda x: (x.name + x.ext).lower(), parent_dir.files.all().exclude(name=exclude)))\n\n        if len(name) > 30 or (name + ext).lower() in existing_names:\n            name_with_token = name[:21] + '_' + token_urlsafe()[:8]\n\n            while name_with_token.lower() in existing_names:\n                name_with_token = name[:21] + '_' + token_urlsafe()[:8]\n            add_message(f'File {name} was renamed to {name_with_token}.{ext}')\n\n            return name_with_token\n\n        return name\n    \n    @classmethod\n    def upload(self, the_file, parent_dir_id, user, add_message):\n        if the_file.size > settings.MAX_UPLOAD_SIZE:\n            return add_message(f'{the_file.name} was not uploaded: its size exceeds {settings.MAX_UPLOAD_SIZE_LABEL}')\n\n        last_dot_index = the_file.name.rfind('.')\n        if len(the_file.name[last_dot_index + 1:]) > 10:\n            return add_message(f'{the_file.name} was not uploaded: its extension is longer than 10 characters')\n\n        instance = self(\n            file=the_file, \n            name=self.correct_name(the_file.name[:last_dot_index], parent_dir_id, the_file.name[last_dot_index + 1:], add_message),\n            ext=the_file.name[last_dot_index + 1:],\n            mmtype=the_file.content_type,\n            owner=user\n        )\n        instance.save()\n        parentDir = Dir.objects.get(pk=parent_dir_id)\n        parentDir.files.add(self.objects.get(pk=instance.id))\n        Stats.objects.filter(pk=settings.STATS_ID).update(file_uploads=models.F('file_uploads') + 1)\n\n\nclass Stats(models.Model):\n    file_downloads = models.BigIntegerField(default=0)\n    file_uploads = models.BigIntegerField(default=0)\n\n    def __str__(self):\n        return f'{self.id}'\n    ","repo_name":"IINamelessII/MAKHROVYI","sub_path":"backend/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"73343331093","text":"from venus.split.split_strategy import SplitStrategy\nfrom venus.common.utils import ReluApproximation, OSIPMode\nimport multiprocessing\nimport torch\n\nclass Logger():\n    LOGFILE: str = \"venus_log.txt\"\n    SUMFILE: str = \"venus_summary.txt\"\n    VERBOSITY_LEVEL: int = 0\n\nclass Solver():\n    # Gurobi time limit per MILP in seconds;\n    # Default: -1 (No time limit)\n    TIME_LIMIT: int = 1800\n    # Frequency of Gurobi callbacks for ideal cuts\n    # Cuts are added every 1 in pow(milp_nodes_solved, IDEAL_FREQ)\n    IDEAL_FREQ: float = 1\n    # Frequency of Gurobi callbacks for dependency cuts\n    # Cuts are added every 1 in pow(milp_nodes_solved, DEP_FREQ)\n    DEP_FREQ: float = 1\n    # Whether to use Gurobi's default cuts \n    DEFAULT_CUTS: bool = False\n    # Whether to use ideal cuts\n    IDEAL_CUTS: bool = True\n    # Whether to use inter-dependency cuts\n    INTER_DEP_CUTS: bool = True\n    # Whether to use intra-dependency cuts\n    INTRA_DEP_CUTS: bool = False\n    # Whether to use inter-dependency constraints\n    INTER_DEP_CONSTRS: bool = True\n    # Whether to use intra-dependency constraints\n    INTRA_DEP_CONSTRS: bool = True\n    # whether to monitor the number of MILP nodes solved and initiate\n    # splitting only after BRANCH_THRESHOLD is reached.\n    MONITOR_SPLIT: bool = False\n    # Number of MILP nodes solved before initiating splitting. Splitting\n    # will be initiated only if MONITOR_SPLIT is True.\n    BRANCH_THRESHOLD: int = 500\n    # Whether to print gurobi output\n    PRINT_GUROBI_OUTPUT: bool = False\n    # Gurobi feasibility tolerance\n    FEASIBILITY_TOL: float = 10e-6\n\n    def callback_enabled(self):\n        \"\"\"\n        Returns \n\n        True iff the MILP SOLVER is using a callback function.\n        \"\"\"\n        if self.IDEAL_CUTS or self.INTER_DEP_CUTS or self.INTRA_DEP_CUTS or self.MONITOR_SPLIT:\n            return True\n        else:\n            return False\n\n    def dep_cuts_enabled(self):\n        \"\"\"\n        Returns \n\n        True iff the MILP SOLVER is using dependency cuts.\n        \"\"\"\n        if self.INTER_DEP_CUTS or self.INTRA_DEP_CUTS:\n            return True\n        else:\n            return False\n\n    \nclass Verifier():\n    # complete or incomplete verification\n    COMPLETE: bool = True\n    # number of parallel processes solving subproblems\n    # VER_PROC_NUM: int = multiprocessing.cpu_count()\n    VER_PROC_NUM: int = multiprocessing.cpu_count()\n    # console output\n    CONSOLE_OUTPUT: bool = True\n    # pgd step size - The epsilon will be divided by this number.\n    PGD_EPS: float = 10\n    # pgd number of iterations\n    PGD_NUM_ITER: int = 10\n\nclass Splitter():\n    # Maximum depth for node splitting. \n    BRANCHING_DEPTH: int = 7\n    # determines when the splitting process can idle because there are\n    # many unprocessed jobs in the jobs queue\n    LARGE_N_OF_UNPROCESSED_JOBS: int = 500\n    # sleeping interval for when the splitting process idles\n    SLEEPING_INTERVAL: int = 3\n    # the number of input dimensions still considered to be small\n    # so that the best split can be chosen exhaustively\n    SMALL_N_INPUT_DIMENSIONS: int = 6\n    # splitting strategy\n    SPLIT_STRATEGY: SplitStrategy = SplitStrategy.NODE\n    # the stability ratio weight for computing the difficulty of a problem\n    STABILITY_RATIO_WEIGHT: float = 1\n    # the value of fixed ratio above which the splitting can stop in any\n    # case\n    STABILITY_RATIO_CUTOFF: float = 0.7\n    # the number of parallel splitting processes is 2^d where d is the\n    # number of the parameter\n    SPLIT_PROC_NUM: int = 2\n    # maximum splitting depth\n    MAX_SPLIT_DEPTH: int = 1000\n\nclass SIP():\n\n    def __init__(self):\n        # relu approximation\n        self.RELU_APPROXIMATION = ReluApproximation.MIN_AREA\n        # optimise memory\n        self.OPTIMISE_MEMORY = False\n        # formula simplification\n        self.SIMPLIFY_FORMULA: bool = True\n        # whether to use osip for convolutional layers\n        self.OSIP_CONV = OSIPMode.OFF\n        # number of optimised nodes during osip for convolutional layers\n        self.OSIP_CONV_NODES = 200\n        # whether to use osip for fully connected layers\n        self.OSIP_FC = OSIPMode.OFF\n        # number of optimised nodes during osip for fully connected layers\n        self.OSIP_FC_NODES = 3\n        # osip timelimit in seconds\n        self.OSIP_TIMELIMIT = 7\n\n    def is_osip_enabled(self):\n        return self.OSIP_CONV == OSIPMode.ON or self.OSIP_FC == OSIPMode.ON\n\n    def is_split_osip_enabled(self):\n        return self.OSIP_CONV == OSIPMode.SPLIT or self.OSIP_FC == OSIPMode.SPLIT\n\n    def is_osip_conv_enabled(self):\n        return self.OSIP_CONV == OSIPMode.ON\n\n    def is_osip_fc_enabled(self, depth=None):\n        return self.OSIP_FC == OSIPMode.ON\n\n    def copy(self):\n        sip_cf = SIP()\n        sip_cf.RELU_APPROXIMATION = self.RELU_APPROXIMATION\n        sip_cf.OSIP_CONV = self.OSIP_CONV\n        sip_cf.OSIP_CONV_NODES = self.OSIP_CONV_NODES\n        sip_cf.OSIP_FC = self.OSIP_FC\n        sip_cf.OSIP_FC_NODES = self.OSIP_FC_NODES\n        sip_cf.OSIP_TIMELIMIT = self.OSIP_TIMELIMIT\n\n        return sip_cf\n\n\nclass Config:\n    \"\"\"\n    Venus's Parameters\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        \"\"\"\n        self.LOGGER = Logger()\n        self.SOLVER = Solver()\n        self.SPLITTER = Splitter()\n        self.VERIFIER = Verifier()\n        self.SIP = SIP()\n        self.PRECISION = torch.float32\n        self.DEVICE = torch.device('cpu')\n        self._user_set_params = set()\n\n    def set_param(self, param, value):\n        if value is None: return\n        self._user_set_params.add(param)\n        if param == 'logfile':\n            self.LOGGER.LOGFILE = value\n        elif param == 'sumfile':\n            self.LOGGER.SUMFILE = value\n        elif param == 'time_limit':\n            self.SOLVER.TIME_LIMIT = int(value)\n        elif param == 'intra_dep_constrs':\n            self.SOLVER.INTRA_DEP_CONSTRS = value\n        elif param == 'intra_dep_cuts': \n            self.SOLVER.INTRA_DEP_CUTS = value\n        elif param == 'inter_dep_constrs': \n            self.SOLVER.INTER_DEP_CONSTRS = value\n        elif param == 'inter_dep_cuts':\n            self.SOLVER.INTER_DEP_CUTS = value\n        elif param == 'ideal_cuts':\n            self.SOLVER.IDEAL_CUTS = value\n        elif param == 'monitor_split':\n            self.SOLVER.MONITOR_SPLIT = value\n        elif param == 'branching_threshold':\n            self.SOLVER.BRANCH_THRESHOLD = int(value)  # the Solver attribute is named BRANCH_THRESHOLD\n        elif param == 'ver_proc_num':\n            self.VERIFIER.VER_PROC_NUM = int(value) \n        elif param == 'split_proc_num': \n            self.SPLITTER.SPLIT_PROC_NUM = int(value)\n        elif param == 'branching_depth':\n            self.SPLITTER.BRANCHING_DEPTH = int(value)\n        elif param == 'stability_ratio_cutoff':\n            self.SPLITTER.STABILITY_RATIO_CUTOFF = float(value)\n        elif param == 'split_strategy':\n            if value == 'node':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.NODE\n            elif value == 'input':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.INPUT\n            elif value == 'inputnodealt':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.INPUT_NODE_ALT\n            elif value == 'nodeinput':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.NODE_INPUT\n            elif value == 'inputnode':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.INPUT_NODE\n            elif value == 'none':\n                self.SPLITTER.SPLIT_STRATEGY = SplitStrategy.NONE\n        elif param == 'osip_conv':  # key matches set_param('osip_conv', ...) in set_user below\n            if value == 'on':\n                self.SIP.OSIP_CONV = OSIPMode.ON\n            elif value == 'off':\n                self.SIP.OSIP_CONV = OSIPMode.OFF\n            elif value == 'split':\n                self.SIP.OSIP_CONV = OSIPMode.SPLIT\n        elif param == 'osip_conv_nodes':\n            self.SIP.OSIP_CONV_NODES = int(value) \n        elif param == 'osip_fc':\n            if value == 'on':\n                self.SIP.OSIP_FC = OSIPMode.ON\n            elif value == 'off':\n                self.SIP.OSIP_FC = OSIPMode.OFF\n            elif value == 'split':\n                self.SIP.OSIP_FC = OSIPMode.SPLIT\n        elif param == 'osip_fc_nodes':\n            self.SIP.OSIP_FC_NODES = int(value) \n        elif param == 'osip_timelimit':\n            self.SIP.OSIP_TIMELIMIT = int(value)\n        elif param == 'relu_approximation':\n            if value == 'min_area':\n                self.SIP.RELU_APPROXIMATION = ReluApproximation.MIN_AREA\n            elif value == 'identity':\n                self.SIP.RELU_APPROXIMATION = ReluApproximation.IDENTITY\n            elif value == 'venus':\n                self.SIP.RELU_APPROXIMATION = ReluApproximation.VENUS_HEURISTIC\n            elif value == 'parallel':\n                self.SIP.RELU_APPROXIMATION = ReluApproximation.PARALLEL\n            elif value == 'zero':\n                self.SIP.RELU_APPROXIMATION = ReluApproximation.ZERO\n        elif param == 'complete':\n            self.VERIFIER.COMPLETE = value\n        elif param == 'console_output':\n            self.VERIFIER.CONSOLE_OUTPUT = value\n        elif param == 'precision':\n            self.PRECISION = value\n\n    def set_param_if_not_set(self, param, value):\n        if not param in self._user_set_params:\n            self.set_param(param,value)\n\n    def set_nn_defaults(self, nn):\n        if nn.is_fc():\n            self.set_fc_defaults(nn)\n        else: \n            self.set_conv_defaults(nn)\n\n    def set_fc_defaults(self, nn):\n        self.set_param_if_not_set('inter_deps', False)\n        self.set_param_if_not_set('relu_approximation', 'venus')\n        relus = nn.get_n_relu_nodes()\n        if nn.head.input_size < 10:\n            self.set_param_if_not_set('inter_dep_constrs', False)\n            self.set_param_if_not_set('intra_dep_constrs', False)\n            self.set_param_if_not_set('inter_dep_cuts', False)\n            self.set_param_if_not_set('monitor_split', False)\n            self.set_param_if_not_set('stability_ratio_cutoff', 0.75)\n            self.set_param_if_not_set('split_strategy', 'input')\n            self.set_param_if_not_set('split_proc_num', 2)\n        else: \n            self.set_param_if_not_set('split_proc_num', 0)\n            self.set_param_if_not_set('inter_dep_constrs', True)\n            self.set_param_if_not_set('intra_dep_constrs', True)\n            self.set_param_if_not_set('inter_dep_cuts', True)\n            self.set_param_if_not_set('monitor_split', True)\n            self.set_param_if_not_set('split_strategy', 'node')\n            if relus < 1000:\n                self.set_param_if_not_set('branching_depth', 2)\n                self.set_param_if_not_set('branching_threshold', 10000)\n            elif relus < 2000:\n                self.set_param_if_not_set('branching_depth', 2)\n                self.set_param_if_not_set('branching_threshold', 5000)\n            else:\n                self.set_param_if_not_set('branching_depth', 7)\n                self.set_param_if_not_set('branching_threshold', 300)\n
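\n    # Illustrative note (editor addition, not part of the original file):\n    # because set_param records every explicitly set key in _user_set_params,\n    # the defaults above and below can never override a user's CLI choice, e.g.\n    #   cf = Config()\n    #   cf.set_param('branching_depth', 5)              # recorded as user-set\n    #   cf.set_param_if_not_set('branching_depth', 2)   # no effect\n    #   cf.SPLITTER.BRANCHING_DEPTH                     # still 5\n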
\n    def set_conv_defaults(self, nn):\n        self.set_param_if_not_set('stability_ratio_cutoff', 0.9)\n        relus = nn.get_n_relu_nodes()\n        if relus <= 10000 and len(nn.node) <=5:\n            self.set_param_if_not_set('relu_approximation', 'venus')\n        else:\n            self.set_param_if_not_set('relu_approximation', 'min_area')\n        if relus > 4000:\n            self.set_param_if_not_set('intra_dep_constrs', False)\n            self.set_param_if_not_set('inter_dep_cuts', False) \n        if relus <= 10000:\n            self.set_param_if_not_set('branching_depth', 2)\n            self.set_param_if_not_set('branching_threshold', 50)\n        else:\n            self.set_param_if_not_set('monitor_split', False)\n            self.set_param_if_not_set('split_strategy', 'none')\n\n    def set_user(self, u_params):\n        self.set_param('logfile', u_params.logfile)\n        self.set_param('sumfile', u_params.sumfile)\n        self.set_param('time_limit', u_params.timeout)\n        self.set_param('intra_dep_constrs', u_params.intra_dep_constrs)\n        self.set_param('intra_dep_cuts', u_params.intra_dep_cuts)\n        self.set_param('inter_dep_constrs', u_params.inter_dep_constrs)\n        self.set_param('inter_dep_cuts', u_params.inter_dep_cuts)\n        self.set_param('ideal_cuts', u_params.ideal_cuts)\n        self.set_param('monitor_split', u_params.monitor_split)\n        self.set_param('branching_depth', u_params.branching_depth)\n        self.set_param('branching_threshold', u_params.branching_threshold)\n        self.set_param('ver_proc_num', u_params.ver_proc_num)\n        self.set_param('split_proc_num', u_params.split_proc_num)\n        self.set_param('stability_ratio_cutoff', u_params.stability_ratio_cutoff)\n        self.set_param('split_strategy', u_params.split_strategy)\n        self.set_param('osip_conv', u_params.osip_conv)\n        self.set_param('osip_conv_nodes', u_params.osip_conv_nodes)\n        self.set_param('osip_fc', u_params.osip_fc)\n        self.set_param('osip_fc_nodes', u_params.osip_fc_nodes)\n        self.set_param('osip_timelimit', u_params.osip_timelimit)\n        self.set_param('relu_approximation', u_params.relu_approximation)\n        self.set_param('complete', u_params.complete)\n        self.set_param('console_output', u_params.console_output)\n","repo_name":"vas-group-imperial/venus2","sub_path":"venus/common/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":13376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"41770429358","text":"# import modules and the other program files\r\n\r\nimport Karte\r\nimport random as rd\r\nfrom tkinter import *\r\n\r\n\r\n# the main loop in which all steps are carried out\r\ndef __main__():\r\n    # needed so that the while loop can be terminated\r\n    hoereAuf = False\r\n\r\n    def wennSchliessenGedrueckt():\r\n        # makes the change to the variable hoereAuf visible beyond this function\r\n        nonlocal hoereAuf\r\n        hoereAuf = True\r\n\r\n    tk = Tk()\r\n    tk.protocol(\"WM_DELETE_WINDOW\", wennSchliessenGedrueckt)\r\n    # the list in which all ant objects are recorded so they can be accessed at any time\r\n    ameisenliste = []\r\n    # the configurable nest coordinates\r\n    nestposition_x = int(input(\"Which x-coordinate should the nest have?\"))\r\n    nestposition_y = int(input(\"Which y-coordinate should the nest have?\"))\r\n    # the configurable number of food sources\r\n    futterquellen_anzahl = int(input(\"How many food sources should there be?\"))\r\n\r\n    # the configurable number of ants\r\n    ameisenzahl = int(input(\"How many ants should there be?\"))\r\n    # the evaporation time states how many rounds it takes to break down one pheromone point\r\n    verdunstungszeit = int(input(\"How many rounds should it take until a pheromone point is broken down?\"))\r\n    # the board on which the ants move\r\n    spielfeld = Karte.Karte(nestposition_x, nestposition_y, futterquellen_anzahl, tk)\r\n\r\n    # here the function that creates the ants is called\r\n    ameisenliste_fuellen(spielfeld, ameisenliste, ameisenzahl)\r\n\r\n    # once all objects have been created, the simulation runs in a loop\r\n    while (not hoereAuf):\r\n        # first all ants are moved\r\n        bewege(spielfeld, ameisenliste)\r\n        # then part of the pheromones evaporates\r\n        pheromon_update(spielfeld, verdunstungszeit)\r\n        tk.update()\r\n    # destroys the graphics window\r\n    tk.destroy()\r\n    print(\"A total of\", spielfeld.nest.gesammeltes_futter, \"food was collected.\")\r\n\r\n\r\n# the function in which each ant is moved individually\r\ndef bewege(spielfeld, ameisen):\r\n    for ameise in ameisen:\r\n        # call the function that returns all board fields closer to the nest than the ant\r\n        felder_zu_nest = spielfeld.gib_feld_naeher_nest(ameise.feld)\r\n        # here the ant's next field is computed when it is not carrying food\r\n        if ameise.futter is False:\r\n            # at first, all surrounding fields are possible\r\n            moegliche_felder = spielfeld.gebe_umliegende_felder(ameise.feld)\r\n            # this is the list of the fields with the most pheromones\r\n            pheromon_felder = []\r\n            # here the fields with the most pheromones are picked out of the surrounding fields\r\n            for moegliches_feld in moegliche_felder:\r\n                if moegliches_feld not in felder_zu_nest and moegliches_feld.pheromone>0:\r\n                    if len(pheromon_felder) == 0:\r\n                        pheromon_felder.append(moegliches_feld)\r\n                    elif moegliches_feld.pheromone > pheromon_felder[0].pheromone:\r\n                        for feld in pheromon_felder:\r\n                            pheromon_felder.remove(feld)\r\n                        pheromon_felder.append(moegliches_feld)\r\n                    elif moegliches_feld.pheromone == pheromon_felder[0].pheromone:\r\n                        pheromon_felder.append(moegliches_feld)\r\n\r\n            # but if there are no fields left in the list\r\n            if len(pheromon_felder) == 0:\r\n                # this assignment makes all surrounding fields available again and\r\n                pheromon_felder = moegliche_felder\r\n            # a random field is chosen from pheromon_felder and assigned as the ant's field\r\n            # I do this so that I can move the ant's graphic later\r\n            ameise_feld = rd.choice(pheromon_felder)\r\n            # if the ant has now reached a food source\r\n            if type(ameise.feld) is Karte.Futterquelle:\r\n                # if the food source still has food\r\n                if ameise.feld.futter > 0:\r\n                    # the ant picks up one unit of food\r\n                    ameise.futter = True\r\n                    ameise.feld.futter -= 1\r\n        # if the ant is already carrying food\r\n        elif ameise.futter is True:\r\n            # the possible fields are all fields closer to the nest\r\n            moegliche_felder = felder_zu_nest\r\n            # a random one of these is chosen\r\n            ameise_feld = rd.choice(moegliche_felder)\r\n        # now a pheromone point is added to the field if the ant is carrying food\r\n        if ameise.futter is True:\r\n            ameise.feld.pheromone += 1\r\n        # here the ant's graphic is moved\r\n        spielfeld.karte.move(ameise.grafik, 3 * (ameise_feld.x - ameise.feld.x), 3 * (ameise_feld.y - ameise.feld.y))\r\n        # the ant is assigned a new field\r\n        ameise.feld = ameise_feld\r\n        # now the ant drops its food if it has reached the nest\r\n        if type(ameise.feld) is Karte.Nest and ameise.futter is True:\r\n            ameise.futter = False\r\n            spielfeld.nest.gesammeltes_futter += 1\r\n\r\n\r\n# here the pheromone points of all fields are reduced; they evaporate\r\ndef pheromon_update(spielfeld, verdunstungszeit):\r\n    for spalte in spielfeld.felder:\r\n        for feld in spalte:\r\n            # if a field's pheromone points are greater than zero\r\n            if feld.pheromone > 0:\r\n                # the field turns yellow in the visualisation\r\n                if type(feld) is Karte.Feld:\r\n                    spielfeld.karte.itemconfigure(feld.grafik, fill=\"yellow\")\r\n                elif type(feld) is Karte.Futterquelle:\r\n                    spielfeld.karte.itemconfigure(feld.grafik, fill=\"orange\")\r\n                # and pheromones are subtracted from the field\r\n                feld.pheromone -= (1 / verdunstungszeit)\r\n                # and if the field now has no pheromones left\r\n                if feld.pheromone <= 0:\r\n                    # the field takes on its previous colour again\r\n                    spielfeld.karte.itemconfigure(feld.grafik, fill=feld.farbe)\r\n                    if type(feld)is Karte.Futterquelle:\r\n                        if feld.futter==0:\r\n                            feld.farbe=\"green\"\r\n\r\n# the function in which all ant objects are created\r\ndef ameisenliste_fuellen(spielfeld, ameisenliste, ameisenanzahl):\r\n    # the starting point is needed because each ant always stores the field it is currently on\r\n    startpunkt = spielfeld.nest\r\n    karte = spielfeld.karte\r\n    # now one ant is created for every integer from 0 up to the final number of ants\r\n    for ameisen_nr in range(0, ameisenanzahl):\r\n        # the ant object is created\r\n        ameise = ameise_erstellen(startpunkt, karte)\r\n        # and added to the ant list\r\n        ameisenliste.append(ameise)\r\n\r\n\r\n# the ant class with its various attributes\r\nclass Ameise:\r\n    def __init__(self, feld, karte):\r\n        self.futter = False\r\n        self.feld = feld\r\n        self.grafik = karte.create_rectangle(self.feld.k[0] + 1,\r\n                                             self.feld.k[1] + 1,\r\n                                             self.feld.k[2] - 1,\r\n                                             self.feld.k[3] - 1, fill=\"black\")\r\n\r\n\r\n# the function in which a single ant is created\r\ndef ameise_erstellen(feld, karte):\r\n    # the ant is given the attributes that were defined beforehand\r\n    ameise = Ameise(feld, karte)\r\n    return ameise\r\n\r\n\r\n# this is where the program starts\r\nif __name__ == \"__main__\":\r\n    # by calling the function __main__()\r\n    __main__()\r\n","repo_name":"janneklink/Ameisen","sub_path":"Standardschleife.py","file_name":"Standardschleife.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"12781168085","text":"n, w, l = map(int, input().split())\r\ntrucks = list(map(int, input().split()))\r\n \r\nbridge = [0] * w \r\nweight, time = 0, 0 \r\n \r\nwhile True:\r\n    out = bridge.pop(0) \r\n    weight -= out \r\n \r\n    if trucks: \r\n        if weight + trucks[0] <= l: \r\n            bridge.append(trucks[0]) \r\n            weight += trucks[0] \r\n            trucks.pop(0) \r\n        else: \r\n            bridge.append(0) \r\n    time += 1 \r\n \r\n    if not bridge: \r\n        break \r\nprint(time)
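\r\n\r\n# Illustrative note (editor addition, not part of the original solution):\r\n# bridge.pop(0) shifts the whole list, so each tick costs O(w); an equivalent\r\n# formulation with collections.deque makes every queue operation O(1), e.g.\r\n#   from collections import deque\r\n#   bridge = deque([0] * w)\r\n#   out = bridge.popleft()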
트럭/트럭.py","file_name":"트럭.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"8084140258","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport datetime\nimport errno\nimport functools\nimport hashlib\nimport logging\nimport os\nimport random\nimport re\nimport shutil\nimport sys\nimport time\n\nimport requests\nfrom bs4 import Tag\nfrom requests import Response\nfrom typing import Dict, Optional, Pattern, Union\nimport json\n\nuser_agents = [\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'\n]\n\n\nLOG_FILE = 'log.txt'\nlogging_set_up = False\n\n\ndef _setup_logging():\n time_format = '%Y-%m-%d %H:%M:%S'\n log_formatter = logging.Formatter(\"%(asctime)s %(levelname)-5.5s %(message)s\", time_format)\n log_level = os.getenv('LOGLEVEL', 'INFO')\n root_logger = logging.getLogger()\n root_logger.setLevel(log_level)\n if root_logger.hasHandlers():\n # https://stackoverflow.com/questions/7173033/duplicate-log-output-when-using-python-logging-module\n root_logger.handlers.clear()\n\n file_handler = logging.FileHandler(LOG_FILE, encoding='utf-8')\n file_handler.setFormatter(log_formatter)\n root_logger.addHandler(file_handler)\n\n if os.getenv('LOGTOCONSOLE', '0') == '1':\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_formatter)\n root_logger.addHandler(console_handler)\n\n\ndef get_logger(name=None):\n if not logging_set_up:\n _setup_logging()\n return logging.getLogger(name)\n\n\nlog = get_logger(__name__)\n\n\ndef perror(msg):\n \"\"\"печать ошибки на экран, не может быть стёрто\"\"\"\n log.error(f'Ошибка отображена пользователю: {msg}')\n sys.stdout.write(f'\\rОшибка: {msg}\\n')\n\n\ndef ptext(msg):\n \"\"\"печать обычного сообщения на экран, не может быть стёрто\"\"\"\n log.info(f'Сообщение отображено пользователю: {msg}')\n sys.stdout.write(f'\\r{msg}\\n')\n\n\ndef progress(msg):\n \"\"\"печать строки прогресса, стирает текущую строку\"\"\"\n sys.stdout.write(f'\\r{msg}')\n\n\ndef mkdirs_for_regular_file(filename: str):\n \"\"\"Создаёт все необходимые директории чтобы можно было записать указанный файл\"\"\"\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError as e: # Guard against race condition\n if e.errno != errno.EEXIST:\n raise\n\n\ndef cut_bom(s: str):\n bom = codecs.BOM_UTF8.decode(\"utf-8\")\n return s[len(bom):] if s.startswith(bom) else s\n\n\ndef to_float(s: str, fallback=0.0):\n try:\n return float(s)\n except ValueError:\n return fallback\n\n\ndef md5_hex(s: str) -> str:\n md5 = hashlib.md5()\n md5.update(s.encode('utf-8'))\n return md5.hexdigest()\n\n\ndef gwar_fix_json(s: str, a: bool 
= False) -> str:\n s = ' '.join(s.split())\n s = s.replace('\"', \"\\'\")\n s = s.replace(\"'\", '\"')\n if a:\n # https://stackoverflow.com/questions/50947760/how-to-fix-json-key-values-without-double-quotes\n s = re.sub(\"(\\w+):\", r'\"\\1\":', s)\n json_s = json.loads(s)\n return json_s\n\n\ndef random_pause(target_pause: float):\n return random.uniform(\n target_pause - target_pause * 0.5,\n target_pause + target_pause * 0.5)\n\n\ndef select_one_required(root: Tag, selector: str) -> Tag:\n tag = root.select_one(selector)\n if not tag:\n raise Exception(f'Не найден элемент по пути {selector}')\n return tag\n\n\ndef select_one_text_required(root: Tag, selector: str):\n tag = root.select_one(selector)\n if not tag:\n raise Exception(f'Не найден элемент по пути {selector}')\n text = tag.text.strip()\n if not text:\n raise Exception(f'Не найден text у элемента по пути {selector}')\n return text\n\n\ndef select_one_text_optional(root: Tag, selector: str):\n tag = root.select_one(selector)\n if not tag:\n raise Exception(f'Не найден элемент по пути {selector}')\n text = tag.text if tag else ''\n return text.strip()\n\n\ndef select_one_attr_required(root: Tag, selector: str, attr_name: str):\n tag = root.select_one(selector)\n if not tag:\n raise Exception(f'Не найден элемент по пути {selector}')\n val: str = tag.get(attr_name)\n val = val.strip() if val else val\n if not val:\n raise Exception(f'Не найден аттрибут {attr_name} у элемента по пути {selector}')\n return val\n\n\ndef safe_file_name(value: str):\n if not value:\n return value\n value = re.sub(r'[^\\w\\s()\\[\\]{}.,-]+', ' ', value, flags=re.UNICODE)\n value = re.sub(r'[\\s]+', ' ', value)\n value = value.strip(' \\t.,') # точка на конце запрещена в Windows\n return value\n\n\nlast_time_connected: Optional[datetime.datetime] = None\n\n\ndef pausable(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n global last_time_connected\n bro: Browser = args[0]\n if last_time_connected and bro.pause:\n pause = random_pause(bro.pause) - (datetime.datetime.now() - last_time_connected).total_seconds()\n else:\n pause = 0\n if pause > 0:\n log.info(f'Сплю %.3f сек' % pause)\n time.sleep(pause)\n last_time_connected = datetime.datetime.now()\n return func(*args, **kwargs)\n return wrapper\n\n\nclass Browser:\n\n def __init__(self, pause: float):\n self.pause = pause\n\n @pausable\n def get_text(self, url: str, headers: Dict = None, content_type: str = None):\n headers = self._prepare_headers(headers)\n log.info(f'Запрашиваю GET {url}')\n log.info(f'Заголовки: {headers}')\n response = requests.get(url, headers=headers)\n log.info(f'Ответ: {response.status_code} {response.reason}')\n log.info(f'Заголовки: {response.headers}')\n self._validate_response(response, url, content_type)\n return response.text\n\n @pausable\n def post_text(self, url: str, headers: Dict = None, data: Dict = None, content_type: str = None):\n headers = self._prepare_headers(headers)\n log.info(f'Запрашиваю POST {url}')\n log.info(f'Заголовки: {headers}')\n response = requests.post(url, headers=headers, data=json.dumps(data))\n log.info(f'Ответ: {response.status_code} {response.reason}')\n log.info(f'Заголовки: {response.headers}')\n self._validate_response(response, url, content_type)\n return response.text\n\n @pausable\n def download(self, url: str,\n fpath: str,\n headers: Dict = None,\n content_type: Union[str, Pattern] = None,\n skip_if_file_exists=False):\n global last_time_connected\n progress(f' - Скачиваю {url}')\n if skip_if_file_exists and 
os.path.exists(fpath) and os.stat(fpath).st_size > 0:\n log.info(f'Пропускаю скачанный файл: {fpath}')\n last_time_connected = None\n return\n headers = self._prepare_headers(headers)\n log.info(f'Запрашиваю GET {url}')\n log.info(f'Заголовки: {headers}')\n response = requests.get(url, stream=True, headers=headers)\n log.info(f'Ответ: {response.status_code} {response.reason}')\n log.info(f'Заголовки: {response.headers}')\n self._validate_response(response, url, content_type)\n mkdirs_for_regular_file(fpath)\n with open(fpath, 'wb') as fd:\n shutil.copyfileobj(response.raw, fd)\n length = os.stat(fpath).st_size\n ptext(f' - Сохранено в файл {fpath} ({length} байт)')\n\n def _prepare_headers(self, additional_headers: Dict):\n headers = additional_headers if additional_headers else {}\n headers.update({'User-Agent': random.choice(user_agents)})\n return headers\n\n def _validate_response(self, response: Response, url, expected_ct: Union[str, Pattern]):\n if not response.ok:\n raise Exception(f'Не удалось скачать файл {url} - {response.status_code} {response.reason}')\n if expected_ct:\n actual_ct: str = response.headers.get('content-type')\n if actual_ct:\n if isinstance(expected_ct, Pattern):\n if not expected_ct.match(actual_ct):\n perror(f'Некорректный content-type {actual_ct} по адресу {url}')\n else:\n if actual_ct != expected_ct:\n perror(f'Некорректный content-type {actual_ct} по адресу {url}')\n\n\nif __name__ == '__main__':\n print(safe_file_name(\" Привет -.—.– Москва 1989 XVII () {} [] ,. Hello ?!|/\\\\ - ӘәӨөҮү \"))\n","repo_name":"aliasn3t/booklead","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9811,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"35404151621","text":"def sumar(n1, n2):\n s=float(n1+n2)\n return(s)\ndef restar(n1, n2):\n r=float(n1-n2)\n return(r)\ndef multiplicar(n1, n2):\n m=float(n1*n2)\n return(m)\ndef dividir(n1, n2):\n d=float(n1/n2)\n return(d)\n\nif __name__==\"__main__\":\n x=float(input(\"Ingrese un número: \"))\n y=float(input(\"Ingrese otro número: \"))\n op=int(0)\n\n while op!=5:\n op=int(input(\"Ingrese: \\n 1 para SUMAR \\n 2 para RESTAR \\n 3 para MULTIPLICAR \\n 4 para DIVIDIR \\n 5 para FINALIZAR \\n ==>\"))\n\n if(op==1):\n print(sumar(x, y))\n elif(op==2):\n print(restar(x, y))\n elif(op==3):\n print(multiplicar(x, y))\n elif(op==4):\n print(dividir(x, y))\n elif(op==5):\n print(\"programa finalizado\")\n else:\n print(\"Eso no es 1, 2, 3, 4, o 5!!\")\n print(\">:(\")\n print(\"\\n\")\n\n","repo_name":"mateoiba30/Programas-escuela","sub_path":"2021/dlog/Phyton/función2.py","file_name":"función2.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31317471337","text":"#!/usr/local/bin/python3\n\nimport subprocess\nimport optparse\nimport os\nimport re\n\n\ndef get_arguments():\n # Parse the passed arguments\n parser = optparse.OptionParser()\n\n parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface to change its MAC address\")\n parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC address (random generated if not set).\")\n\n (options, arguments) = parser.parse_args()\n\n # parser.error(\"[-] die\")\n return options\n\n\ndef random_mac():\n # TO DO\n v = 'xx'\n return v\n\n\ndef get_current_mac(interface):\n # Check if MAC has been changed correctly\n ifconfig_result = subprocess.check_output([\"ifconfig\", 
interface]).decode('utf-8')\n\n spoofed_mac = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_result)\n\n if spoofed_mac:\n return spoofed_mac.group(0)\n else:\n print(\"[-] Could not find MAC address\")\n\n\ndef check_os():\n # Check the Operating System\n from sys import platform\n if platform == \"linux\" or platform == \"linux2\":\n print(\"[+] You are running: Linux\")\n return \"Linux\"\n elif platform == \"darwin\":\n print(\"[+] You are running: OSX\")\n return \"OSX\"\n elif platform == \"win32\":\n print(\"[+] You are running: Windows\")\n return \"Windows\"\n else:\n print(\"[+] You are running: Other Operating System\")\n return \"Other\"\n\n\nopsys = check_os()\noptions = get_arguments()\ninterface = options.interface\nnew_mac = options.new_mac\n\n\n# If the \"interface\" argument is empty, list the existing ones and ask the user:\nif not interface:\n print(\"[+] Please select the device you want to change the MAC address for:\")\n subprocess.call(\"networksetup -listallhardwareports | grep Device\", shell=True)\n print(\" \")\n interface = input(\"Device > \")\n\n\n# only for Linux\nif opsys == \"linux\":\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"down\"])\n\nif not new_mac:\n # random MAC changer\n print(\"[+] Changing MAC interface for \" + interface + \" to random MAC\")\n os.system(\"sudo macchanger -r \" + interface)\nelse:\n # user defined\n print(\"[+] Changing MAC interface for \" + interface + \" to \" + new_mac)\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"ether\", new_mac])\n\n# only for Linux\nif opsys == \"linux\":\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"up\"])\n\n\ncurrent_mac = get_current_mac(interface)\nprint(\"Current MAC = \" + str(current_mac))\n\nprint(random_mac())\n","repo_name":"entpnomad/ethical-hacking","sub_path":"macchanger.py","file_name":"macchanger.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73343331093","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom distutils.core import setup\nimport glob\nimport PyRoom\n\nauthor = 'The Pyroom Team'\nurl = 'http://www.pyroom.org'\n\nsetup(\n name='PyRoom',\n version = PyRoom.__VERSION__,\n url = url,\n author = author,\n description = 'PyRoom is a distraction-free, fullscreen text editor',\n packages = ['PyRoom',],\n package_data = {'PyRoom':['interface.glade']},\n data_files = [\n ('/usr/share/pyroom/themes', glob.glob('themes/*.theme')),\n ('/usr/share/pyroom', ['pyroom.png']),\n ('/usr/share/applications', ['pyroom.desktop']),\n ],\n scripts=['pyroom',],\n)\n","repo_name":"genewoo/pyroom","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"33406089561","text":"# messing around with colexifications using CLICS database\n# citation: Rzymski, Tresoldi et al. 2019. The Database of Cross-Linguistic Colexifications, reproducible analysis of cross- linguistic polysemies. 
DOI: doi.org/10.17613/5awv-6w15\n\n# instructions for downloading/compiling the data: https://github.com/clics/clics3\n# I have already done this on 2021-07-09 but am not committing all that stuff to my repo (it's in other repos out there as listed in datasets.txt)\n# to check what datasets you have, run `clics datasets` in a terminal\n\n# want to be able to mess with CLICS data my own way, not just run their code to create the GML graph\n# e.g. take some subset of languages and make a semantic map out of just those ones\n# e.g. perform random walk on concepts to create new semantic map for conlang\n\n\nimport os\nimport csv\nimport random\nimport itertools\nimport math\nimport scipy.stats\n# import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\n# import pandas as pd\n\nimport sys\nsys.path.insert(0, \"/home/wesley/programming/\")\nfrom BinomialObservation import BinomialObservation # might need sys.path to look at parent dir\n\nclass Concept:\n def __init__(self, concept_id, concept_gloss, source_glosses):\n self.concept_id = concept_id\n self.concept_gloss = concept_gloss\n self.source_glosses = source_glosses # list of all strings used as gloss for this same concept\n\n def __repr__(self):\n return f\"\"\n\n def __hash__(self):\n return hash(repr(self))\n\n def __eq__(self, other):\n if type(other) is not Concept:\n return NotImplemented\n if self.concept_id == other.concept_id:\n assert self.concept_gloss == other.concept_gloss, f\"two concepts with same id but different gloss:\\n{self}\\n{other}\"\n return True\n else:\n return False\n\n\nclass ConceptEncoding:\n def __init__(self, language, form, concept):\n self.language = language\n self.form = form\n assert type(concept) is Concept\n self.concept = concept\n\n def __repr__(self):\n return f\"\"\n\n def __hash__(self):\n return hash(repr(self))\n\n def __eq__(self, other):\n if type(other) is not ConceptEncoding:\n return NotImplemented\n return self.language == other.language and self.form == other.form and self.concept == other.concept\n\n\ndef get_subdirs_with_data():\n directory = \"/home/wesley/programming/Language/src/\"\n subdirs = [f for f in os.scandir(directory) if f.is_dir()]\n database_names = []\n file_dirs = []\n for subdir in subdirs:\n database_names.append(subdir.name)\n if subdir.name == \"lexibank-hantganbangime\":\n # for some reason this one has an extra src/ and a subdir with the same name EXCEPT hyphen is replace with underscore. WHY??\n file_dir = \"src/lexibank_hantganbangime/cldf\"\n else:\n file_dir = \"cldf\"\n file_dir_path = os.path.join(directory, subdir, file_dir)\n file_dirs.append(file_dir_path)\n return file_dirs, database_names\n\n\ndef get_filepaths_for_filename(default_filename, exceptions_db_to_filename=None):\n # some dbs are stupid and made their files have a different name, why?! 
especially freaking pylexirumah doesn't follow the rules\n # exceptions_db_to_filename is meant to account for this\n file_dirs, database_names = get_subdirs_with_data()\n fps = []\n for file_dir, db_name in zip(file_dirs, database_names):\n if exceptions_db_to_filename is not None and db_name in exceptions_db_to_filename:\n filename = exceptions_db_to_filename[db_name]\n else:\n filename = default_filename\n\n fp = os.path.join(file_dir, filename)\n if os.path.exists(fp):\n fps.append(fp)\n else:\n print(f\"Warning: forms file does not exist: {fp}\")\n return fps, database_names\n\n\n\ndef get_forms_filepaths():\n return get_filepaths_for_filename(\"forms.csv\")\n\n\ndef get_languages_filepaths():\n return get_filepaths_for_filename(\"languages.csv\", {\"pylexirumah\": \"lects.csv\"})\n\n\ndef get_parameters_filepaths():\n return get_filepaths_for_filename(\"parameters.csv\", {\"pylexirumah\": \"concepts.csv\"})\n\n\ndef get_raw_concepts():\n concepts_by_database = get_raw_concepts_by_database()\n res = []\n for db, concepts_by_id in concepts_by_database.items():\n for idx, concept in concepts_by_id.items():\n assert type(concept) is dict\n res.append(concept)\n return res\n\n\ndef get_dict_rows(fp):\n with open(fp) as f:\n reader = csv.DictReader(f)\n rows = [row for row in reader]\n return rows\n\n\ndef get_raw_concept_from_row(row, db_name):\n if db_name == \"pylexirumah\":\n # no Concepticon_Gloss here\n # the concept's name is in the ID field instead of Name, will use this as gloss if there is no Concepticon_Gloss for the associated Concepticon_ID\n source_gloss = row[\"ID\"]\n d = {\"Concepticon_ID\": row[\"Concepticon_ID\"], \"Source_Gloss\": source_gloss}\n else:\n # in the parameters.csv, it's ID not Parameter_ID\n source_gloss = row[\"Name\"]\n d = {\"Concepticon_ID\": row[\"Concepticon_ID\"], \"Concepticon_Gloss\": row[\"Concepticon_Gloss\"], \"Source_Gloss\": source_gloss}\n return d\n\n\ndef get_raw_concepts_by_database():\n parameters_fps, db_names = get_parameters_filepaths()\n concepts = {}\n for parameters_fp, db_name in zip(parameters_fps, db_names):\n # print(f\"getting concepts from database {db_name}\")\n concepts[db_name] = {}\n rows = get_dict_rows(parameters_fp)\n\n for row in rows:\n raw_concept = get_raw_concept_from_row(row, db_name)\n assert row[\"ID\"] not in concepts[db_name], f\"duplicate ID: {row['ID']}\"\n concepts[db_name][row[\"ID\"]] = raw_concept\n\n return concepts\n\n\ndef get_rows_from_fp(fp, database_name):\n rows = get_dict_rows(fp)\n for row in rows:\n assert \"fp\" not in row\n row[\"fp\"] = fp # store which file this record came from\n assert \"database_name\" not in row\n row[\"database_name\"] = database_name\n return rows\n\n\ndef get_rows_from_fps(fps, database_names):\n rows = []\n all_keys = set()\n keys_in_all_fps = None\n for fp, db_name in zip(fps, database_names):\n fp_rows = get_rows_from_fp(fp, db_name)\n fp_keys = set(fp_rows[0].keys())\n all_keys |= fp_keys\n if keys_in_all_fps is None:\n keys_in_all_fps = fp_keys\n else:\n keys_in_all_fps &= fp_keys\n # don't want to do this with initializing it as empty set because then it will just stay empty\n rows += fp_rows\n\n # some of them don't have the same keys, leave those keys out (don't set them to some default)\n # so that KeyError is raised if you try to use one\n # print(f\"all keys:\\n{sorted(all_keys)}\\nkeys in all files:\\n{sorted(keys_in_all_fps)}\")\n return rows, all_keys, keys_in_all_fps\n\n\ndef print_keys_of_rows(rows, keys):\n max_key_len = max(len(k) for k in keys)\n 
keys = sorted(keys)\n for i, row in enumerate(rows):\n print(f\"row {i}\")\n for k in keys:\n val = row.get(k)\n print(f\"- {k.ljust(max_key_len+1)}: {val}\")\n print()\n\n\ndef show_key_statistics(rows, keys):\n # for each key, show how many rows have it (and proportion) and some examples of what is in it\n for k in sorted(keys):\n rows_with_key = [row for row in rows if k in row and row[k] != \"\"]\n print(f\"key {k} is in {len(rows_with_key)} rows out of {len(rows)} ({100*len(rows_with_key)/len(rows):.2f}%). Examples of its values:\")\n sample_rows = random.sample(rows_with_key, min(5, len(rows_with_key)))\n for row in sample_rows:\n print(f\"{k} : {row[k]}\")\n print()\n\n\ndef construct_concepticon_id_gloss_correspondence(raw_concepts):\n gloss_to_id = {}\n id_to_gloss = {}\n for rc in raw_concepts:\n # some have ID with no gloss, some have gloss with no ID\n # some have both, which is great\n # some have neither, in which case we consider it not to be a part of the concepticon\n # (but note that the underlying data has mistakes, so something may actually be part of the concepticon when it shouldn't be,\n # or vice versa, or it may be labeled as the wrong concept, e.g. example of bark(dog) labeled as bark(tree))\n\n if has_id(rc) and has_gloss(rc):\n concept_id = rc[\"Concepticon_ID\"]\n concept_gloss = rc[\"Concepticon_Gloss\"]\n if concept_id in id_to_gloss:\n existing_gloss = id_to_gloss[concept_id]\n assert concept_gloss == existing_gloss, f\"gloss conflict for id {concept_id}: {concept_gloss} != {existing_gloss}\"\n else:\n id_to_gloss[concept_id] = concept_gloss\n if concept_gloss in gloss_to_id:\n existing_id = gloss_to_id[concept_gloss]\n assert concept_id == existing_id, f\"id conflict for gloss {concept_gloss}: {concept_id} != {existing_id}\"\n else:\n gloss_to_id[concept_gloss] = concept_id\n\n assert len(gloss_to_id) == len(id_to_gloss)\n for k,v in gloss_to_id.items():\n assert id_to_gloss[v] == k\n for k,v in id_to_gloss.items():\n assert gloss_to_id[v] == k\n return id_to_gloss, gloss_to_id\n\n\ndef has_id(raw_concept):\n return \"Concepticon_ID\" in raw_concept and raw_concept[\"Concepticon_ID\"] != \"\"\n\n\ndef has_gloss(raw_concept):\n return \"Concepticon_Gloss\" in raw_concept and raw_concept[\"Concepticon_Gloss\"] != \"\"\n\n\ndef has_source_gloss(raw_concept):\n rc = raw_concept\n return \"Source_Gloss\" in rc and rc[\"Source_Gloss\"] != \"\"\n\n\ndef get_source_gloss(raw_concept):\n rc = raw_concept\n return rc[\"Source_Gloss\"]\n\n\ndef construct_concept_objects(raw_concepts, id_to_gloss, gloss_to_id):\n expected_fields = [\"Concepticon_ID\", \"Concepticon_Gloss\", \"Source_Gloss\"]\n for c in raw_concepts:\n assert all(k in expected_fields for k in c.keys()), f\"concept has unexpected field: {c}\"\n\n for rc in raw_concepts:\n rc[\"has_id\"] = has_id(rc)\n rc[\"has_gloss\"] = has_gloss(rc)\n\n no_id_no_gloss = [rc for rc in raw_concepts if not rc[\"has_id\"] and not rc[\"has_gloss\"]]\n no_id_yes_gloss = [rc for rc in raw_concepts if not rc[\"has_id\"] and rc[\"has_gloss\"]]\n yes_id_no_gloss = [rc for rc in raw_concepts if rc[\"has_id\"] and not rc[\"has_gloss\"]]\n yes_id_yes_gloss = [rc for rc in raw_concepts if rc[\"has_id\"] and rc[\"has_gloss\"]]\n\n # detect impostors: concepts with a Concepticon_Gloss but no Concepticon_ID, and for whose gloss there is no Concepticon_ID,\n # i.e. 
the concept is not actually in the concepticon, or if it is, we don't know the ID\n glosses_of_extra_concepts = set()\n for rc in no_id_yes_gloss:\n # again don't prematurely optimize this\n gloss = rc[\"Concepticon_Gloss\"]\n assert gloss != \"\", \"shouldn't have passed has_gloss check\"\n # now want to know if any other concept links this gloss to some id in concepticon\n shared_gloss_rcs_with_id = [rc for rc in yes_id_yes_gloss if rc[\"Concepticon_Gloss\"] == gloss]\n if len(shared_gloss_rcs_with_id) == 0:\n # true impostor, there is no ID matching this Concepticon_Gloss\n glosses_of_extra_concepts.add(gloss)\n else:\n ids = set(rc[\"Concepticon_ID\"] for rc in shared_gloss_rcs_with_id)\n assert len(ids) == 1, f\"more than one Concepticon_ID found for the same Concepticon_Gloss: {ids}\"\n concept_id = list(ids)[0]\n # add it to the correspondence\n id_to_gloss[concept_id] = gloss\n gloss_to_id[gloss] = concept_id\n\n # detect orphans: concepts with a Concepticon_ID but no Concepticon_Gloss anywhere\n orphan_ids = set()\n for rc in yes_id_no_gloss:\n concept_id = rc[\"Concepticon_ID\"]\n assert concept_id != \"\", \"shouldn't have passed has_id check\"\n # now want to know if this id is linked to a gloss somewhere else\n shared_id_rcs_with_gloss = [rc for rc in yes_id_yes_gloss if rc[\"Concepticon_ID\"] == concept_id]\n if len(shared_id_rcs_with_gloss) == 0:\n # true orphan, there is no Concepticon_Gloss for this ID anywhere\n orphan_ids.add(concept_id)\n if has_source_gloss(rc):\n gloss = get_source_gloss(rc)\n # print(f\"orphan id {concept_id} has source gloss {gloss}\")\n id_to_gloss[concept_id] = gloss\n gloss_to_id[gloss] = concept_id\n else:\n raise Exception(f\"orphan id {concept_id} has no source gloss\")\n else:\n glosses = set(rc[\"Concepticon_Gloss\"] for rc in shared_id_rcs_with_gloss)\n assert len(glosses) == 1, f\"more than one Concepticon_Gloss found for the same Concepticon_ID: {glosses}\"\n gloss = list(glosses)[0]\n # add it to the correspondence\n id_to_gloss[concept_id] = gloss\n gloss_to_id[gloss] = concept_id\n # the orphan IDs are found in:\n # lexibank-naganorgyalrongic/cldf/parameters.csv\n # pylexirumah/cldf/concepts.csv\n\n # the ones with no id or gloss should be treated each as their own concept, based on the name/ID(for pylexirumah) field\n for rc in no_id_no_gloss:\n gloss = get_source_gloss(rc)\n glosses_of_extra_concepts.add(gloss)\n\n glosses_of_extra_concepts = sorted(glosses_of_extra_concepts)\n # print(\"glosses_of_extra_concepts:\", glosses_of_extra_concepts)\n for i, g in enumerate(glosses_of_extra_concepts):\n new_id = f\"EC{i+1}\" # EC stands for EXTRACONCEPT\n assert new_id not in id_to_gloss, new_id\n assert g not in gloss_to_id, g\n id_to_gloss[new_id] = g\n gloss_to_id[g] = new_id\n\n # now make correspondence from ID to Concept objects, and then go fill in all the glosses from the raw concepts\n id_to_concept = {}\n for concept_id in id_to_gloss:\n concept_gloss = id_to_gloss[concept_id]\n c = Concept(concept_id, concept_gloss, [])\n id_to_concept[concept_id] = c\n\n # now go through the raw concepts and add the source gloss to the correct Concept object\n for rc in raw_concepts:\n if rc[\"has_id\"]:\n concept_id = rc[\"Concepticon_ID\"]\n c = id_to_concept[concept_id]\n elif rc[\"has_gloss\"]:\n concept_gloss = rc[\"Concepticon_Gloss\"]\n # some of the ID-less ones have a gloss which is in the concepticon, but some have one which is not (a concepticon-impostor, if you will)\n try:\n concept_id = gloss_to_id[concept_gloss]\n except 
KeyError:\n # if it's an impostor, there is no such concepticon gloss in the real concepticon, so this is an extra concept\n # but its gloss should still already be in the extraconcepts list\n raise Exception(f\"impostor: {rc} should have been added to id-gloss correspondence but it wasn't\")\n c = id_to_concept[concept_id]\n else:\n concept_gloss = get_source_gloss(rc)\n concept_id = gloss_to_id[concept_gloss]\n c = id_to_concept[concept_id]\n c.source_glosses.append(concept_gloss)\n \n return id_to_concept, id_to_gloss, gloss_to_id\n\n\ndef get_language_from_row(row, language_id_to_glottocode_by_database):\n db_name = row[\"database_name\"]\n id_to_glottocode = language_id_to_glottocode_by_database[db_name]\n if \"Language_ID\" in row:\n language_id = row[\"Language_ID\"]\n elif \"Lect_ID\" in row:\n language_id = row[\"Lect_ID\"]\n else:\n raise Exception(f\"can't get language from row: {row}\")\n\n try:\n glottocode = id_to_glottocode[language_id]\n except KeyError:\n raise KeyError(f\"language_id {language_id} not found in\\n{id_to_glottocode}\")\n assert glottocode != \"\", f\"row {row} has blank language code\"\n return glottocode\n\n\ndef get_language_id_to_glottocode_dict_by_database():\n language_fps, db_names = get_languages_filepaths()\n d = {}\n for language_fp, db_name in zip(language_fps, db_names):\n id_to_glottocode = {}\n rows = get_dict_rows(language_fp)\n # we are reading languages.csv right now\n for row in rows:\n language_id = row[\"ID\"]\n glottocode = row[\"Glottocode\"]\n if glottocode == \"\":\n # failing this, use the ISO 639-3 (occurs in Basque (West) in lexibank-diacl, for instance)\n iso_code = row[\"ISO639P3code\"]\n if iso_code == \"\":\n # failing THAT, use the name! (occurs in Proto-Albanian in lexibank-diacl)\n lang_name = row[\"Name\"]\n if lang_name == \"\":\n raise ValueError(f\"database {db_name} has blank Glottocode, ISO code, and name in row {row}\")\n else:\n code = lang_name\n else:\n code = iso_code\n else:\n code = glottocode\n\n id_to_glottocode[language_id] = code\n\n d[db_name] = id_to_glottocode\n return d\n\n\ndef get_parameter_id_to_concept_by_database(id_to_concept, id_to_gloss, gloss_to_id):\n parameters_fps, db_names = get_parameters_filepaths()\n d = {}\n for parameters_fp, db_name in zip(parameters_fps, db_names):\n this_db_id_to_concept = {}\n rows = get_dict_rows(parameters_fp)\n # we are reading parameters.csv or concepts.csv\n for row in rows:\n raw_concept = get_raw_concept_from_row(row, db_name)\n parameter_id = row[\"ID\"]\n if has_id(raw_concept):\n # has concepticon ID\n concept_id = raw_concept[\"Concepticon_ID\"]\n elif has_gloss(raw_concept):\n concept_gloss = raw_concept[\"Concepticon_Gloss\"]\n concept_id = gloss_to_id[concept_gloss]\n elif has_source_gloss(raw_concept):\n source_gloss = get_source_gloss(raw_concept)\n concept_id = gloss_to_id[source_gloss]\n else:\n raise Exception(f\"cannot get concept for parameter raw_concept {raw_concept}\")\n concept = id_to_concept[concept_id]\n this_db_id_to_concept[parameter_id] = concept\n d[db_name] = this_db_id_to_concept\n return d\n\n\ndef get_form_from_row(row):\n # could use Value, but I think it sometimes tends to collapse distinctions e.g. 
removing diacritics, creating spurious colexifications\n    return row[\"Form\"]\n\n\ndef get_concept_parameter_id_from_row(row):\n    db_name = row[\"database_name\"]\n    if db_name == \"pylexirumah\":\n        return row[\"Concept_ID\"]  # pylexirumah names this column differently from the other databases\n    else:\n        return row[\"Parameter_ID\"]  # this will map via parameters.csv to the concepticon\n\n\ndef get_concept_from_row(row, id_to_concept, id_to_gloss, gloss_to_id, parameter_id_to_concept_by_database):\n    db_name = row[\"database_name\"]\n    parameter_id = get_concept_parameter_id_from_row(row)\n    return parameter_id_to_concept_by_database[db_name][parameter_id]\n\n\ndef get_all_concept_encodings(rows, id_to_concept, id_to_gloss, gloss_to_id, language_id_to_glottocode_by_database, parameter_id_to_concept_by_database):\n    all_concept_encodings = []\n    for row in rows:\n        lang = get_language_from_row(row, language_id_to_glottocode_by_database)\n        form = get_form_from_row(row)\n        concept = get_concept_from_row(row, id_to_concept, id_to_gloss, gloss_to_id, parameter_id_to_concept_by_database)\n        # print(f\"language {lang} codes {concept} as {form}\")\n        concept_encoding = ConceptEncoding(lang, form, concept)\n        all_concept_encodings.append(concept_encoding)\n    return all_concept_encodings\n\n\ndef get_random_colexification(colexification_sets_by_language):\n    for i in range(100):\n        lang, colexification_set_by_form = random.choice(list(colexification_sets_by_language.items()))\n        sets_with_colexifications = [s for s in colexification_set_by_form.values() if len(s) > 1]\n        if len(sets_with_colexifications) == 0:\n            print(f\"language {lang} has no colexifications\")\n            continue\n        chosen_set = random.choice(sets_with_colexifications)\n        pair = random.sample(list(chosen_set), 2)  # random.sample requires a sequence, not a set, on recent Pythons\n        return pair\n    print(\"failed to find a colexification\")\n    return None\n\n\ndef get_colexification_sets_by_language(concept_encodings):\n    d = {}\n    for ce in concept_encodings:\n        lang = ce.language\n        if lang not in d:\n            d[lang] = {}\n        form = ce.form\n        if ce.form not in d[lang]:\n            d[lang][ce.form] = set()\n        d[lang][ce.form].add(ce)\n    return d\n\n\ndef add_pair_to_symmetric_sparse_matrix(d, pair, value_change=1):\n    c0, c1 = pair\n    if c0 not in d:\n        d[c0] = {c1: 0}\n    if c1 not in d:\n        d[c1] = {c0: 0}\n    if c1 not in d[c0]:\n        d[c0][c1] = 0\n    if c0 not in d[c1]:\n        d[c1][c0] = 0\n    # must stay symmetric; honor value_change instead of hard-coding 1\n    d[c0][c1] += value_change\n    d[c1][c0] += value_change\n    return d\n\n\ndef validate_symmetric_sparse_matrix(d):\n    for c0 in d.keys():\n        for c1 in d[c0].keys():\n            v = d[c0][c1]\n            assert v > 0  # only store non-zero values since the matrix will be very sparse\n            assert d[c1][c0] == v  # must be symmetric
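\n\n\ndef _demo_symmetric_counter():\n    # Illustrative sketch of the counter above ('CA' and 'CB' are hypothetical concept ids).\n    # Each call records the pair under both orderings, so lookups never depend on key order.\n    m = add_pair_to_symmetric_sparse_matrix({}, (\"CA\", \"CB\"))\n    m = add_pair_to_symmetric_sparse_matrix(m, (\"CA\", \"CB\"))\n    assert m[\"CA\"][\"CB\"] == m[\"CB\"][\"CA\"] == 2\n    validate_symmetric_sparse_matrix(m)  # passes: entries are positive and mirrored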
\n\n\ndef get_concept_closeness_matrix(colexification_sets_by_language):\n    d = {}\n    for language, colexifications_by_form in colexification_sets_by_language.items():\n        for form, concept_encodings in colexifications_by_form.items():\n            concepts = [ce.concept for ce in concept_encodings]\n            pairs = itertools.combinations(concepts, 2)\n            for pair in pairs:\n                d = add_pair_to_symmetric_sparse_matrix(d, pair)\n\n    validate_symmetric_sparse_matrix(d)\n    return d\n\n\ndef get_concept_cooccurrence_count_matrix(colexification_sets_by_language):\n    # for each pair of concepts, count how many times they both occur in the same language's dataset\n    # (used as the number of trials for binomial estimation of colexification probability)\n    # only include non-zero values, sparse matrix\n    # this is really slow, possibly due to the large number of pairs (combinations(n, 2) approaches n**2/2)\n    d = {}\n    lang_i = 0\n    n_langs = len(colexification_sets_by_language.keys())\n    for language, colexifications_by_form in colexification_sets_by_language.items():\n        all_concepts = set()\n        for form, concept_encodings in colexifications_by_form.items():\n            concepts_encoded = [ce.concept for ce in concept_encodings]\n            all_concepts |= set(concepts_encoded)\n            # use a set because some concepts may be encoded by more than one form\n        for pair in itertools.combinations(all_concepts, 2):\n            d = add_pair_to_symmetric_sparse_matrix(d, pair)\n        lang_i += 1  # previously never incremented, which left the progress report stuck at 0\n        print(f\"after language {language} (progress {lang_i}/{n_langs}), cooccurrence count now has length {len(d)}\")\n\n    validate_symmetric_sparse_matrix(d)\n    return d\n\n\ndef summarize_confidence_intervals(binomial_observation):\n    b = binomial_observation\n    p_hat = b.get_probability_estimator()\n    print(f\"binomial observation of {b.successes}/{b.trials}, p_hat = {p_hat}\")\n    confidences = np.arange(0.001, 1.000, 0.001)  # arange is right-exclusive; a confidence of 1 gives a Wilson bound of nan\n    nci_lows = []\n    nci_highs = []\n    wci_lows = []\n    wci_highs = []\n    p_hats = []\n    for confidence_level in confidences:\n        nci = b.get_normal_approximation_ci(confidence_level)\n        wci = b.get_wilson_ci(confidence_level)\n        nci_lows.append(nci[0])\n        nci_highs.append(nci[1])\n        wci_lows.append(wci[0])\n        wci_highs.append(wci[1])\n        # print(confidence_level, nci, wci)\n    plt.plot(confidences, nci_lows, c=\"g\", label=\"Normal\")\n    plt.plot(confidences, nci_highs, c=\"g\")\n    plt.plot(confidences, wci_lows, c=\"r\")\n    plt.plot(confidences, wci_highs, c=\"r\", label=\"Wilson\")\n    plt.legend()\n    plt.show()\n\n    random_ps = [b.choose_random_possible_probability() for i in range(100)]\n    plt.hist(random_ps, bins=25)\n    plt.show()
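\n\n\ndef _wilson_interval_sketch(successes, trials, z=1.96):\n    # Illustrative sketch of the standard Wilson score interval that\n    # BinomialObservation.get_wilson_ci is assumed to implement (z = 1.96 is ~95%).\n    # Unlike the normal approximation, it stays inside [0, 1] even for 0 or all successes.\n    p = successes / trials\n    denom = 1 + z ** 2 / trials\n    center = (p + z ** 2 / (2 * trials)) / denom\n    half = (z / denom) * (p * (1 - p) / trials + z ** 2 / (4 * trials ** 2)) ** 0.5\n    return center - half, center + half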
\n\n\nif __name__ == \"__main__\":\n    fps, db_names = get_forms_filepaths()\n    rows, all_keys, keys_in_all_fps = get_rows_from_fps(fps, db_names)\n\n    raw_concepts = get_raw_concepts()\n    id_to_gloss, gloss_to_id = construct_concepticon_id_gloss_correspondence(raw_concepts)\n    id_to_concept, id_to_gloss, gloss_to_id = construct_concept_objects(raw_concepts, id_to_gloss, gloss_to_id)\n\n    language_id_to_glottocode_by_database = get_language_id_to_glottocode_dict_by_database()\n    parameter_id_to_concept_by_database = get_parameter_id_to_concept_by_database(id_to_concept, id_to_gloss, gloss_to_id)\n    all_concept_encodings = get_all_concept_encodings(rows, id_to_concept, id_to_gloss, gloss_to_id, language_id_to_glottocode_by_database, parameter_id_to_concept_by_database)\n    colexification_sets_by_language = get_colexification_sets_by_language(all_concept_encodings)\n\n    concept_closeness_matrix = get_concept_closeness_matrix(colexification_sets_by_language)\n    smaller_colexification_sets_by_language = dict(random.sample(list(colexification_sets_by_language.items()), 5))  # debug subset; the cooccurrence counting takes forever, and random.sample needs a sequence\n    concept_cooccurrence_matrix = get_concept_cooccurrence_count_matrix(smaller_colexification_sets_by_language)\n\n    for i in range(100):\n        while True:\n            c0 = random.choice(list(concept_cooccurrence_matrix.keys()))\n            c1 = random.choice(list(concept_cooccurrence_matrix[c0].keys()))\n            successes = concept_closeness_matrix[c0][c1] if c0 in concept_closeness_matrix and c1 in concept_closeness_matrix[c0] else 0\n            if successes > 0:\n                break\n        trials = concept_cooccurrence_matrix[c0][c1]\n        b = BinomialObservation(successes, trials)\n        print(c0, c1)\n        # summarize_confidence_intervals(b)\n\n    # TODO: also keep track of how often a pair of concepts even exists in a language;\n    # combining that with the number of times it is colexified gives a Wilson confidence interval for the binomial probability\n    # when rolling whether to colexify something in the random walk, the simple binomial estimator can be used,\n    # but it would be better to use a pdf derived from the Wilson intervals somehow (e.g. to get a non-zero probability of colexifying something whose colexification has never been seen)\n\n","repo_name":"Kuhron/programming","sub_path":"Language/CLICS.py","file_name":"CLICS.py","file_ext":"py","file_size_in_byte":26580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"73126787094","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom Clustering.StandardScaler import StandardScaler\nfrom dataset.Dataset import Dataset\n\n\nclass PCA(object):\n\n    def __init__(self):\n        self.directory = \"/path/to/train/\"\n        self.test_path = \"/path/to/test/\"\n        self.features = [\"weight\", \"hive_temperature\", \"hive_humidity\", \"wind_speed\", \"wind_speed_max\",\n                         \"weather_temperature_min\", \"weather_temperature_max\", \"weather_temperature\",\n                         \"weather_humidity\", \"rain\", \"rain_min\", \"rain_max\"]\n\n    def __load_pre_data(self):\n        df = pd.DataFrame(pd.read_csv(self.directory))\n        for sub_dir in os.listdir(self.directory):\n            if os.path.isfile(self.directory + sub_dir) or sub_dir.startswith(\".\"):\n                continue\n            for file in os.listdir(self.directory + sub_dir):\n                if file.endswith(\".csv\"):\n                    df_new = pd.DataFrame(pd.read_csv(self.directory + sub_dir + \"/\" + file))\n                    df = pd.concat([df, df_new], ignore_index=True)  # DataFrame.append was removed in pandas 2.0\n        df.drop('datetime', axis=1, inplace=True)\n        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\n        return list(df.values)\n\n    def __sort_eigen(self, values, vectors):\n        swapped = True\n        while swapped:\n            swapped = False\n            for i in range(len(values) - 1):\n                if values[i] < values[i + 1]:\n                    values, vectors = self.__swap(values, vectors, i, i + 1)\n                    swapped = True\n        return values, vectors\n\n    @staticmethod\n    def __swap(values, vectors, i, j):\n        values = np.array(values)\n        vectors = np.array(vectors)\n        temp_val = values[i]\n        values[i] = values[j]\n        values[j] = temp_val\n\n        temp_vec = list(vectors[:, i])\n        vectors[:, i] = list(vectors[:, j])\n        vectors[:, j] = list(temp_vec)\n\n        return values, vectors\n\n    def get_pca_dataset(self, variation):\n        x = np.array(self.__load_pre_data())\n\n        sc = StandardScaler()\n        for i in range(12):\n            sc.fit(x[:, i])\n            x[:, i] = sc.transform(x[:, i])\n\n        x = np.array(x)\n        cov_matrix = np.cov(x.T)\n\n        val, vec = np.linalg.eig(cov_matrix)\n        val, vec = self.__sort_eigen(val, vec)\n\n        var = sum(val)\n        perc = 0\n\n        n_components = 0\n        while perc < variation:\n            perc += val[n_components] / var\n            n_components += 1\n\n        dataset = Dataset()\n        x, y, x_val, y_val = dataset.get_data(shuffle=True)\n        x_test, y_test = dataset.get_data(test=True)\n\n        x = np.array([np.array(j @ vec)[:, :n_components] for j in x])\n        x_val = np.array([np.array(j @ vec)[:, :n_components] for j in x_val])\n        x_test = np.array([np.array(j @ vec)[:, :n_components] for j in x_test])\n\n        return x, np.array(y), x_val, np.array(y_val), x_test, np.array(y_test)
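\n\n    def _n_components_sketch(self, eigenvalues, variation):\n        # Illustrative, vectorized form of the selection rule in the while-loop above:\n        # pick the smallest k whose leading eigenvalues explain 'variation' of the variance.\n        ratios = np.cumsum(eigenvalues) / np.sum(eigenvalues)\n        return int(np.searchsorted(ratios, variation) + 1)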
\n\n    def get_n_most_important(self, n):\n        x = np.array(self.__load_pre_data())\n        sc = StandardScaler()\n        for i in range(12):\n            sc.fit(x[:, i])\n            x[:, i] = sc.transform(x[:, i])\n\n        x = np.array(x)\n        cov_matrix = np.cov(x.T)\n        val, vec = np.linalg.eig(cov_matrix)\n        val, vec = self.__sort_eigen(val, vec)\n\n        most_important = [np.abs(vec[:, i]).argmax() for i in range(n)]\n        most_important = set(most_important)\n        j = n\n        while len(most_important) < n and j < len(vec):\n            most_important.add(np.abs(vec[:, j]).argmax())\n            j += 1\n\n        features = list(pd.DataFrame(\n            pd.read_csv(self.directory)).keys())\n        features.remove('datetime')\n        features.remove('Unnamed: 0')\n\n        most_important_names = []\n        for i in most_important:\n            most_important_names.append(features[i])\n        return most_important_names\n\n\npca = PCA()\nprint(pca.get_n_most_important(5))\n","repo_name":"julschoen/bees","sub_path":"FeatureSelection/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"20383124226","text":"### Import the required libraries\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport cv2  # open-source library for face recognition, object identification, image stitching, and more\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n\n### Build the VGG19 network\nclass VGG19(Sequential):\n    def __init__(self, input_shape):\n\n        super().__init__()\n        self.add(Conv2D(64, kernel_size=(3,3), padding='same',\n                        activation='relu', input_shape=input_shape))\n        self.add(Conv2D(64, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n        self.add(Conv2D(128, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(128, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n        self.add(Conv2D(256, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(256, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(256, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(256, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(Conv2D(512, kernel_size=(3,3), padding='same',\n                        activation='relu'))\n        self.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\n        self.add(Flatten())\n        self.add(Dense(4096, activation='relu'))\n        self.add(Dropout(0.5))\n        self.add(Dense(4096, activation='relu'))\n        self.add(Dropout(0.5))\n        self.add(Dense(1000, activation='softmax'))\n\n        self.compile(optimizer=tf.keras.optimizers.Adam(0.003),\n                     loss='categorical_crossentropy',\n                     metrics=['accuracy'])\n\n\n### Print the VGG19 model summary\nmodel = VGG19(input_shape=(224,224,3))\nmodel.summary()\n\n\n### Download the pretrained VGG19 weights and define the classes\nmodel.load_weights('chapter06/data/vgg19_weights_tf_dim_ordering_tf_kernels.h5')  # load the weights of a pretrained VGG19 model\nclasses = {282: 'cat',\n           681: 'notebook, notebook computer',\n           970: 'alp'}  # only three classes are used here for validation; for the full class list of all images, see the classes.txt file under \"../chap6/data/\"
\n\n\n### Load an image and predict\nimage1 = cv2.imread('chapter06/data/labtop.jpg')\n#image1 = cv2.imread('chapter06/data/starrynight.jpeg')\n#image1 = cv2.imread('chapter06/data/cat.jpg')\nimage1 = cv2.resize(image1, (224,224))\nplt.figure()\nplt.imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))  # cv2 loads BGR; convert so matplotlib shows true colors\nimage1 = image1[np.newaxis, :]  # expand dims (add a batch axis)\npredicted_value = np.argmax(model.predict(image1), axis=-1)  # Sequential.predict_classes was removed in recent Keras\nplt.title(classes[predicted_value[0]])  # set the plot title to the predicted class\n","repo_name":"heechul90/study-tensorflow2","sub_path":"chapter06/VGGNet.py","file_name":"VGGNet.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71721540052","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport random\nfrom flask import Flask, render_template, jsonify, request\nimport json\nfrom db_handler import DBHandler\nfrom inference.cf import CFHandler\nimport os\n\napp = Flask(__name__, static_folder='../static/dist', template_folder='../static')\n\n# @app.route('/', defaults={'path': ''})\n# @app.route('/')\n# def catch_all(path):\n#     # return 'You want path: %s' % path\n#     return render_template('index.html', data = path )\n\n\nuse_semantic = False\nuse_db = False\n\n# Revolet\n# Forcing a re-download does not work; the model and its associated files must be deleted manually.\ncf = CFHandler('109jikL9APOQLYLWVbh0BPzsKh4Fj1E5y','1d4uAconWl18s629ZUMaZlXkopD2Vfzh3', '1-JiAeAJ_0XWXxkRteRP1my84x57cpVmg',\n               force_download=False)\n\n# Sum\n# Take paper2data (authors, citing authors) from CF (Revolet)\nif use_semantic:\n    from inference.semantic import SemanticHandler\n    semantic = SemanticHandler('1-n-GephieNUHJ-pXeHIoX-rjMWRtssKj', '11rB0mV9o5Uk78d8bIjzDR4obnHtn2bwc', '1-H_P6t33LNMfaAuD7qtj7b9hD-RviLvQ')\n\ndb_handler = DBHandler(cf, mock=not use_db)\n\ndef get_best_authors(title, author_ids, top=10):\n    if len(author_ids) > 0:\n        cf_recommended_authors = cf.get_recommended_authors(author_ids, top)\n    else:\n        cf_recommended_authors = []\n\n    # TODO: we ignore the weight here, but we should use it\n    cf_recommended_authors = [author_score_tuple[0] for author_score_tuple in cf_recommended_authors]\n\n    if title is not None and use_semantic:\n        semantic_recommended_authors = semantic.get_recommended_authors(title, top)\n    else:\n        semantic_recommended_authors = []\n\n    cf_author2idx = {author: idx for idx, author in enumerate(cf_recommended_authors)}\n    semantic_author2idx = {author: idx for idx, author in enumerate(semantic_recommended_authors)}\n\n    author2average_idx = {}\n    for idx, author in enumerate(cf_recommended_authors):\n        if author in semantic_author2idx:\n            author2average_idx[author] = (idx + semantic_author2idx[author]) / 2\n        else:\n            author2average_idx[author] = idx\n    for idx, author in enumerate(semantic_author2idx):\n        if author in cf_author2idx:\n            author2average_idx[author] = (idx + cf_author2idx[author]) / 2\n        else:\n            author2average_idx[author] = idx\n\n    authors_idx_tuples = sorted([(author, avg_idx) for author, avg_idx in author2average_idx.items()],\n                                key=lambda x: x[1])\n\n    # TODO: Gather information from dbhandler\n    ranked_authors = [authors_idx_tuple[0] for authors_idx_tuple in authors_idx_tuples]\n    # author2data = db_handler.get_authors_data(ranked_authors)\n    author2data = db_handler.get_authors_data_from_scholarly(ranked_authors)\n\n    print(author2data)\n    print(authors_idx_tuples)\n\n    authors_with_extended_data = []\n    for (author_id, score) in authors_idx_tuples:\n        author_data = {'id': author_id,\n                       'score': len(authors_idx_tuples) - score,\n                       'interests': 'Author not in db',\n                       
'affiliation': 'Author not in db',\n # TODO: Author should ALWAYS be in DB\n 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=Smr99uEAAAAJ',\n 'name': 'Author not in db',\n 'citedby': 'Author not in db',\n 'papers': []\n }\n\n if author_id in author2data:\n extended_data = author2data[author_id]\n for field in ['interests', 'affiliation', 'img', 'name', 'citedby', 'papers']:\n if field in extended_data:\n author_data[field] = extended_data[field]\n\n authors_with_extended_data.append(author_data)\n\n return authors_with_extended_data\n\n\n\n@app.route('/')\ndef index():\n context = {'name': 'itai'}\n # data = json.dumps(data)\n return render_template('index.html', data=context)\n\n\n@app.route('/search_authors', methods=['POST'])\ndef search_authors():\n # POST request\n if request.method == 'POST':\n query = request.get_json()['query']\n\n #TODO: NOT IMPLEMENTED\n print(\"Query: \", query)\n\n # # TODO: fetch string...\n def mock_object(i):\n if random.random() > 0.5:\n return {'id': str(i),\n 'name': 'Shaul Markovitch',\n 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=bYcqNlgAAAAJ',\n 'affiliation': 'Professor of Computer Science, Technion - Israel Institute of Technology',\n 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n 'name': 'this is the second best paper in the world, for sure, no doubt. really',\n 'year': 2019}],\n 'score': random.random() * 10,\n }\n else:\n return {\n 'id': str(i),\n 'name': 'somebody',\n 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=Smr99uEAAAAJ',\n 'affiliation': 'Professor of some university',\n 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n 'name': 'this is the second best paper in the world, for sure, no doubt. really',\n 'year': 2019}],\n 'score': random.random() * 10,\n }\n\n recommendedAuthors = list(map(\n lambda i: mock_object(i), range(5)\n ))\n print('Returning recommended authors {}'.format(len(recommendedAuthors)))\n return json.dumps(recommendedAuthors)\n\n\n@app.route('/search_paper', methods=['POST'])\ndef search_paper():\n # POST request\n if request.method == 'POST':\n title = request.get_json()['title']\n authors = request.get_json()['authors']\n\n print(title, authors)\n\n return json.dumps(get_best_authors(title, authors))\n\n # # TODO: fetch string...\n # def mock_object(i):\n # if random.random() > 0.5:\n # return {'id': str(i),\n # 'name': 'Shaul Markovitch',\n # 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=bYcqNlgAAAAJ',\n # 'affiliation': 'Professor of Computer Science, Technion - Israel Institute of Technology',\n # 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n # 'name': 'this is the second best paper in the world, for sure, no doubt. really',\n # 'year': 2019}],\n # 'score': random.random() * 10,\n # }\n # else:\n # return {\n # 'id': str(i),\n # 'name': 'somebody',\n # 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=Smr99uEAAAAJ',\n # 'affiliation': 'Professor of some university',\n # 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n # 'name': 'this is the second best paper in the world, for sure, no doubt. 
really',\n # 'year': 2019}],\n # 'score': random.random() * 10,\n # }\n #\n # recommendedAuthors = list(map(\n # lambda i: mock_object(i), range(20)\n # ))\n #\n # return json.dumps(recommendedAuthors)\n\n\n@app.route('/authors_suggestions', methods=['POST'])\ndef get_author_suggestions():\n # POST request\n if request.method == 'POST':\n query = request.get_json()['query']\n\n authors = db_handler.get_authors_names_regex(query, limit = 30)\n\n # authors = cf.filter_authors(authors)\n\n return json.dumps(authors)\n\n # # TODO: fetch authors beginning with string...\n # def mock_object(i):\n # if random.random() > 0.5:\n # return {'id': str(i),\n # 'name': 'Shaul Markovitch',\n # 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=bYcqNlgAAAAJ',\n # 'affiliation': 'Professor of Computer Science, Technion - Israel Institute of Technology',\n # 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n # 'name': 'this is the second best paper in the world, for sure, no doubt. really',\n # 'year': 2019}],\n # 'score': random.random() * 10,\n # }\n # else:\n # return {\n # 'id': str(i),\n # 'name': 'somebody',\n # 'img': 'https://scholar.google.com/citations?view_op=medium_photo&user=Smr99uEAAAAJ',\n # 'affiliation': 'Professor of some university',\n # 'papers': [{'id': 0, 'name': 'the best paper in the world', 'year': 1986}, {'id': 1,\n # 'name': 'this is the second best paper in the world, for sure, no doubt. really',\n # 'year': 2019}],\n # 'score': random.random() * 10,\n # }\n #\n # suggestedAuthors = list(map(\n # lambda i: mock_object(i), range(5)\n # ))\n # return json.dumps(suggestedAuthors)\n # # return jsonify(recommendedAuthors)\n #\n # # return 'OK', 220\n\n\n# @app.route('/hello', methods=['GET', 'POST'])\n# def hello():\n\n# # POST request\n# if request.method == 'POST':\n# print('Incoming..')\n# print(request.get_json()) # parse as JSON\n# return 'OK', 200\n\n# # GET request\n# else:\n# message = {'greeting':'Hello from Flask!'}\n# return jsonify(message) # serialize and use JSON headers\n\n# @app.route('/test')\n# def test_page():\n# # look inside `templates` and serve `index.html`\n# return render_template('index.html')\n\n\n# @app.route('/hello') # take note of this decorator syntax, it's a common pattern\n# def hello():\n# # It is good practice to only call a function in your route end-point,\n# # rather than have actual implementation code here.\n# # This allows for easier unit and integration testing of your functions.\n# return get_hello()\n\n\n# def get_hello():\n# greeting_list = ['Ciao', 'Hei', 'Salut', 'Hola', 'Hallo', 'Hej']\n# return random.choice(greeting_list)\n\n\nif __name__ == '__main__':\n # app.run()\n app.debug = True\n app.run(host='0.0.0.0', port=5000, threaded=True)\n","repo_name":"itaiyesh/AcademicPapers","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74483487894","text":"import pylast, json, requests, glob\nfrom lastfmcache import LastfmCache\nfrom PIL import Image\nimport os\n\ndef crop_center(pil_img, crop_width, crop_height):\n img_width, img_height = pil_img.size\n return pil_img.crop(((img_width - crop_width) // 2,\n (img_height - crop_height) // 2,\n (img_width + crop_width) // 2,\n (img_height + crop_height) // 2))\n\ndef crop_max_square(pil_img):\n return crop_center(pil_img, min(pil_img.size), min(pil_img.size))\n\nwith open(\"config.json\", 
\"r+\") as f:\n config = json.load(f)\n\nnetwork = pylast.LastFMNetwork(api_key=config['apikey'], api_secret=config['secret'], username=config['username'], password_hash=pylast.md5(config['password']))\ncache = LastfmCache(config['apikey'], config['secret'])\ncache.enable_file_cache()\n\n\ntry:\n artists = network.get_authenticated_user().get_top_artists(limit=6, period=pylast.PERIOD_7DAYS)\nexcept Exception as e:\n print(e)\n\nartist_dict = {}\n\nfor a in artists:\n artist = cache.get_artist(a.item.name)\n artist_dict.update({ a.item.name : artist.cover_image })\n\nfor k, v in artist_dict.items():\n if not v:\n v = \"https://cdn.pixabay.com/photo/2015/10/05/22/37/blank-profile-picture-973460_960_720.png\"\n res = requests.get(v).content\n with open(\"artist_images\\\\\" + v.split('/')[-1], \"wb\") as f:\n f.write(res)\n artist_dict[k] = \"artist_images\\\\\" + v.split('/')[-1]\n\nnew_height, new_width = (250, 250)\nfor a in glob.glob(\"artist_images\\\\*.jpg\"):\n im = Image.open(a)\n im_thumb = crop_max_square(im).resize((500, 500), Image.LANCZOS)\n im_thumb.save(a)\n\nurl_temp = \"https://raw.githubusercontent.com/M4cs/M4cs/master/\"\n\ntemplate = \"\"\"\\\n## Who I've Been Listening To This Week\n\n\"\"\"\n\nfor image in artist_dict.values():\n template = template + \"| \"\ntemplate = template + \" |\\n| :---: | :---: | :---: | :---: | :---: | :---: |\\n\"\nfor artist in artist_dict.keys():\n template = template + \"| \" + \"\" + artist + \" \"\ntemplate = template + \" |\\n\"\n\n\nreadme = open(\"READMECOPY.md\", \"r\").read()\nwith open(\"README.md\", \"w\") as f:\n f.write(readme.format(template=template))\n\n\nos.system(\"git add . && git commit -m \\\"Update Artists\\\" && git push\")","repo_name":"M4cs/M4cs","sub_path":"myartists.py","file_name":"myartists.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"1245937067","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 10 11:40:21 2022\n\n@author: nathan, quentin, iannis\n\"\"\"\nimport random\nimport numpy as np # pour absolument tout :)\nimport math as math # pour inf\n\n\ndef BellmanFord(m, s0):\n \n #Initialisation\n M = np.matrix(m) # Cast de la matrice d'entrée\n n = len(M) # Variable de taile de la matrice\n d = np.full(n, math.inf) # Liste pour stoker les longueures des chemin de s0 à u (u correspondant a la variable d'ittération)\n pred = [None] * n # Liste pour stoker les prédécéseurs de chaque arréte pour plus court chemin de s0 à u\n d[s0], pred[s0] = 0, s0 # La longueure du sommet de départ a lui méme est manuellement mise à 0 de plus il est son propre prédécéseur\n sommets= {}\n for i in range(n):\n sommets[i] = chr(ord('a')+i)\n \n \n \n for i in range(1, n-1): # On ittére uniquement le nombre de fois nécéssaire (soit n)\n for u in range(n): \n for v in range(n):\n if d[v] > d[u]+M[u,v]:\n d[v] = d[u]+M[u,v] \n pred[v] = u\n \n for u in range(n): \n for v in range(n):\n if d[v] > d[u]+ M[u,v]:\n print(\"Il existe un cycle de poids négatif entre:\")\n print(sommets[v], sommets[u])\n raise SystemExit()\n \n# prettyPrint(s0, d, pred, sommets)\n return s0, d, pred, sommets\n \n","repo_name":"shiroling/Shortest-Path","sub_path":"BellmanFord.py","file_name":"BellmanFord.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7857564387","text":"import pandas as pd\nimport hotspot.modules\nimport loompy\nimport 
hotspot\nimport hotspot.modules\n\nfrom ete3 import Tree\n\n\nloom_file = snakemake.input[\"loom\"]\ntree_file = snakemake.input['tree_file']\nmodule_file = snakemake.input[\"modules\"]\n\nn_neighbors = snakemake.params['n_neighbors']\nmodel = snakemake.params['model']\n\ntry:\n use_umi = bool(snakemake.params['use_umi'])\nexcept AttributeError:\n use_umi = True\n\nout_file = snakemake.output['scores']\n\nwith loompy.connect(loom_file, 'r') as ds:\n barcodes = ds.ca['Barcode'][:]\n counts = ds[:, :]\n gene_info = ds.ra['EnsID', 'Symbol']\n num_umi = ds.ca['NumUmi'][:]\n\nt = Tree(tree_file, format=1)\nmodules = pd.read_table(module_file, index_col=0).Cluster\n\n# Have to do this because data_slideseq makes it a numpy array\ngene_info = pd.DataFrame(\n gene_info, columns=['EnsID', 'Symbol']).set_index('EnsID')\ncounts = pd.DataFrame(counts, index=gene_info.index, columns=barcodes)\n\nif use_umi:\n num_umi = pd.Series(num_umi, index=barcodes)\nelse:\n num_umi = pd.Series(1.0, index=barcodes)\n\nvalid_cells = set()\nfor x in t:\n if x.is_leaf():\n valid_cells.add(x.name)\nvalid_cells = pd.Index(valid_cells)\n\n# Align to latent space\ncounts = counts.loc[:, valid_cells]\nnum_umi = num_umi[valid_cells]\n\n# need counts, distances, and num_umi\n\nlatent = pd.DataFrame(0, index=counts.columns, columns=range(10))\nhs = hotspot.Hotspot(counts, latent=latent, umi_counts=num_umi)\n\nneighbors, weights = hotspot.knn.tree_neighbors_and_weights(\n t, n_neighbors, counts)\n\nweights = hotspot.knn.make_weights_non_redundant(\n neighbors.values, weights.values\n)\nweights = pd.DataFrame(\n weights, index=neighbors.index, columns=neighbors.columns)\n\nhs.weights = weights\nhs.neighbors = neighbors\n\n# %% Plot scores for all modules\nmodules_to_compute = sorted([x for x in modules.unique() if x != -1])\n\n# Get the scores\nmodule_scores = {}\nfor module in modules_to_compute:\n module_genes = modules.index[modules == module]\n\n scores = hotspot.modules.compute_scores(\n counts.loc[module_genes].values, model, num_umi.values,\n hs.neighbors.values, hs.weights.values\n )\n\n module_scores[module] = scores\n\nmodule_scores = pd.DataFrame(module_scores)\nmodule_scores.index = counts.columns\n\nmodule_scores.to_csv(out_file, sep=\"\\t\")\n","repo_name":"deto/Hotspot_Analysis","sub_path":"pipelineScripts/hotspot/computeModuleScores_tree.py","file_name":"computeModuleScores_tree.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22648179689","text":"import pygame\nimport sprites\nimport object\nimport math\n\nrespawnpoint_list = []\n\n\nclass Respawnpoint(object.Object):\n def __init__(self, x=None, y=None, tile_size=None, ud_list=None):\n if x == None:\n return\n self.x = x\n self.y = y\n\n self.ud_list = ud_list\n respawnpoint_list.append(self)\n ud_list.append(self)\n\n self.x_hit = 0.5\n self.y_hit = 0.5\n self.hitbox = object.RectangularHitbox(self.x_hit/2, self.y_hit/2, 0.2)\n\n\n def update(self, grid, ud_list):\n return\n\n def load(self, tile_size, bigSprite):\n bigSprite.load_sprite(\"./data/img/respawnpoint.jpg\", 0.5, 0.5, tile_size, \"respawnpoint\")\n\n def draw(self, screen, camera, bigSprite):\n bigSprite[\"respawnpoint\"].draw(screen, camera.coords_to_screen(self.x-self.x_hit/2, self.y+self.y_hit/2, 
screen))","repo_name":"kamelfanger83/valid-point","sub_path":"respawnpoint.py","file_name":"respawnpoint.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
{"seq_id":"41956682269","text":"## tree structure with diff. color for files and directories and additional parameter for level of tree\n\nfrom pathlib import Path\nfrom itertools import islice\nfrom colorama import Fore, Back, Style\nimport argparse\n\nspace =  '    '\nbranch = '│   '\ntee =    '├── '\nlast =   '└── '\n\ndef tree(dir_path: Path, level: int=-1, limit_to_directories: bool=False,\n         length_limit: int=1000):\n    \"\"\"Given a directory Path object print a visual tree structure\"\"\"\n    dir_path = Path(dir_path)  # accept string coerceable to Path\n    files = 0\n    directories = 0\n    def inner(dir_path: Path, prefix: str='', level=-1):\n        nonlocal files, directories\n        if not level:\n            return  # 0, stop iterating\n        if limit_to_directories:\n            contents = [d for d in dir_path.iterdir() if d.is_dir()]\n        else:\n            contents = list(dir_path.iterdir())\n        pointers = [tee] * (len(contents) - 1) + [last]\n        for pointer, path in zip(pointers, contents):\n            if path.is_dir():\n                yield (Fore.YELLOW + prefix + pointer) + (Fore.RED + path.name)\n                directories += 1\n                extension = branch if pointer == tee else space\n                yield from inner(path, prefix=prefix+extension, level=level-1)\n            elif not limit_to_directories:\n                yield (Fore.BLUE + prefix + pointer) + (Fore.GREEN + path.name)\n                files += 1\n    print(dir_path.name)\n    iterator = inner(dir_path, level=level)\n    for line in islice(iterator, length_limit):\n        print(line)\n    if next(iterator, None):\n        print(f'... length_limit, {length_limit}, reached, counted:')\n    print(f'\\n{directories} directories' + (f', {files} files' if files else ''))\n\nif __name__ == '__main__':\n\n\t# construct the argument parser and parse command line arguments\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-d\", \"--dir_path\", type=str, help=\"base directory path for the tree\")\n\tap.add_argument(\"-l\", \"--level\", type=int, help=\"level of the tree\")\n\tap.add_argument(\"-ld\", \"--limit_to_directories\", type=bool, default=False,\n\t\thelp=\"limit_to_directories\")\n\tap.add_argument(\"-ll\", \"--length_limit\", type=int, default=1000, help=\"length_limit\")\n\t# args = vars(ap.parse_args())\n\targs = ap.parse_args()\n\n\tif not args.dir_path:\n\t\tprint('Directory path not found')\n\telse:\n\t\ttree(dir_path=args.dir_path, level=args.level, limit_to_directories=args.limit_to_directories,\n            length_limit=args.length_limit)\n","repo_name":"Avnishthedeveloper/Directory_tree","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"7715309054","text":"from getAllPmidsByTerm import getAllPmidsByTerm\nfrom getOneArticle import getOneArticleByPmid\nfrom MySQLProvider import MySQLProvider\ndef crawl(test=False, incremental=False):\n    # database handler instance\n    mysqlProvider = MySQLProvider()\n    # fetch all PMIDs related to the search term\n    pmids = getAllPmidsByTerm(test=test, incremental = incremental)\n\n    # deduplicate against what is already stored, to avoid duplicate inserts\n    old_articles = mysqlProvider.getAllArticles()\n    for pmid in old_articles:\n        if pmid in pmids:\n            pmids.remove(pmid)\n    articles = {}\n    bugPmid = []\n    for pmid in pmids:\n        # crawl the data for each pmid and store it in an Article object\n        article = getOneArticleByPmid(pmid)\n        if isinstance(article, str):\n            bugPmid.append(pmid)\n        else:\n            # articles[article.pmid] = article\n            cur_article = 
{}\n            cur_article[article.pmid] = article\n            try:\n                mysqlProvider.saveArticles(cur_article)\n            except BaseException:\n                bugPmid.append(pmid)\n    print(bugPmid)\n# crawl(test=True)\n\n# mysqlProvider.saveArticles(articles)\n# for pmid in ['31494566', '20301487', '31478758', '31478755', '31474663', '31468394', '31456437']:\n#     try:\n#         getOneArticleByPmid(pmid)\n#     except BaseException:\n#         print(pmid)\n","repo_name":"muhualing/spiders","sub_path":"medicalArticles/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1344635783","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions to read data files and load them into the various\nstructures and classes defined in this package.\n\n@author: fpalacio\n\"\"\"\nimport datos\nimport filtros\nimport os\nimport numpy as np\nimport datetime\nimport copy\nimport math as m\nimport scipy.signal as signal\nimport pickle\nimport gzip\n\n##############################################################################\n\nRUTA_DATOS = '../data/'\nTS_MIN = 60\n\n##############################################################################\n\ndef fechaNumtoDateTime(dt_num):\n    dt = []\n    for i in range(len(dt_num)):\n        num = dt_num[i]\n        dt_datetime = NumtoDateTime(num)\n        dt.append(dt_datetime)\n    return dt\n\n##############################################################################\ndef NMuestrasTSEntreDts(dt1,dt2):\n    dif_dtini = dt2 - dt1\n    return int(m.ceil(dif_dtini/datetime.timedelta(minutes=TS_MIN)))\n\n##############################################################################\ndef NumtoDateTime(num):\n    dtini = datetime.datetime(1900, 1, 1)\n    dt_datetime = dtini + datetime.timedelta(days=num-2)\n    return dt_datetime
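\n\n# The '-2' above compensates for Excel-style 1900-system serial dates, which the\n# source files appear to use (day 1 = 1900-01-01, plus the spurious 1900 leap day).\n# For example: NumtoDateTime(43831.0) == datetime.datetime(2020, 1, 1), and 43831\n# is exactly the Excel serial number for 2020-01-01.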
\n\n##############################################################################\n\ndef fechaInitoDateTime(dt_ini,ndias,cant_min):\n    dt = []\n    muestras_por_dia = m.trunc((60*24) / cant_min + 0.00001)\n    for dia in range(ndias):\n        for muestra in range(muestras_por_dia):\n            seg = dia*24*3600 + muestra * cant_min * 60\n            dt_datetime = dt_ini + datetime.timedelta(seconds=seg)\n            dt.append(dt_datetime)\n    return dt\n\n##############################################################################\n\ndef fechaInitoDateTimeN(dt_ini,NmuestrasTS):\n    dt = []\n    for k in range(NmuestrasTS):\n        dt_k = dt_ini + k * datetime.timedelta(minutes=TS_MIN)\n        dt.append(dt_k)\n    return dt\n\n##############################################################################\n    \ndef dt_to_dtTS(dt):\n    dtdia = datetime.datetime(dt.year, dt.month, dt.day)\n    dtTS = NMuestrasTSEntreDts(dtdia,dt)*datetime.timedelta(minutes=TS_MIN)\n    dtTS = dtTS + dtdia\n    return dtTS\n\n##############################################################################\n\ndef archiPICKLE(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) +'/c'+str(ncentral)+'.pkl.gz'\n\n##############################################################################\n\ndef archiFILTROS(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) +'/c'+str(ncentral)+'_filtros.pkl.gz'\n\n##############################################################################\n\ndef archiSCADA(ncentral):\n    return os.path.join(RUTA_DATOS,f'c{ncentral}/c{ncentral}_series10min.sas')\n\n##############################################################################\n\ndef archiGEN(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) +'/c'+str(ncentral)+'_series10minGen.sas'\n\n##############################################################################\n\ndef archiPRONOS(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) +'/c'+str(ncentral)+'_series60min_pronos.sas'\n\n##############################################################################\n\ndef archiSMEC(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) + '/medidasSMEC.txt'\n\n##############################################################################\n\ndef path(ncentral):\n    return RUTA_DATOS +'/c'+ str(ncentral) + '/'\n\n##############################################################################\n    \ndef leerCampo(file):\n    line = file.readline().strip()\n    cols = line.split()\n    return cols[0]\n\n##############################################################################\n\n\ndef leerArchi(nidCentral,tipoArchi): \n    if tipoArchi == 'scada':\n        archi = archiSCADA(nidCentral)\n    elif tipoArchi == 'gen':\n        archi = archiGEN(nidCentral)\n    else:\n        print(\"ERROR: unknown file type\")\n        return None\n\n    print(f\"READING {tipoArchi} FILE FOR PLANT {nidCentral}: {archi}\")\n\n    if not os.path.exists(archi):\n        print(\"ERROR: file does not exist.\")\n        return None\n\n    f = open(archi, 'r')\n    print('READING HEADER:')\n    line=f.readline().strip()\n    cols = line.split()\n    nSeries = int(cols[0])\n    print('\tnumber of series',nSeries)\n    \n    line=f.readline().strip()\n    cols = line.split()\n    meteo_utm_zona = cols[0]\n    \n    line=f.readline().strip()\n    cols = line.split()\n    meteo_utm_huso = int(cols[0]) \n    \n    line=f.readline().strip()\n    cols = line.split()\n    meteo_utm_xe = float(cols[0])\n\n    line=f.readline().strip()\n    cols = line.split()\n    meteo_utm_yn = float(cols[0])\n    print('\tUTM location:',meteo_utm_zona,meteo_utm_huso,meteo_utm_xe,meteo_utm_yn)\n    \n    line=f.readline().strip()\n    cols = line.split()\n    ident = cols[0]\n\n    ubicacion = datos.Ubicacion(meteo_utm_zona,meteo_utm_huso,meteo_utm_xe,meteo_utm_yn,ident)\n    \n    line=f.readline().strip()\n    cols = line.split()\n    PAutorizada = float(cols[0])\n    print('\tauthorized power:',PAutorizada)\n    \n    line=f.readline().strip()\n    tipos = line.split()\n    seg = np.arange(1,nSeries+1,1,dtype=int)\n    print('\ttypes',tipos)\n    f.close() \n    \n    print('READING DATA')\n    # read the time labels shared by all series\n    data=np.loadtxt(archi,skiprows=8)\n    dt_num=data[:,0]\n    tiempo = fechaNumtoDateTime(dt_num)\n    #\n    # check that there are no repeated timestamps\n    #\n    print('\tChecking for repeated timestamps')\n    dt = list()\n    for i in range(len(tiempo)-1):\n        dt.append(tiempo[i+1]-tiempo[i])\n    dtmin,dtmed,dtmax = np.min(dt),np.median(dt),np.max(dt)\n    print(f\"\\tdt: min{dtmin} med={dtmed} max={dtmax}\")\n    dt.append(dt[-1])\n    dtposta = datetime.timedelta(minutes=TS_MIN)\n    dtcero = datetime.timedelta(0)\n    if dtmin == dtcero:\n        trep = [t for t, delta in zip(tiempo, dt) if delta == dtcero]  # tiempo is a list, so boolean indexing would fail here\n        print(f\"ERROR: repeated timestamps {trep}\")\n        return None\n    elif dtmax > 1.1*dtposta:\n        print(\"ERROR: missing timestamps!\")\n        return None\n    \n    print('\tconverting time labels to DateTime')\n\n    print('\tReading measurements')\n    pot = None\n    cgm = None\n    medidas = []\n    for i in range(nSeries):\n        tipoDato = filtros.str_to_tipo(tipos[i])\n        if tipoDato is None:\n            continue\n        meds = data[:, i+1]\n        if TS_MIN != 10:\n            # wind direction is circular: resample its sine and cosine separately and\n            # recombine with atan2, since resampling raw degrees would smear across the 359->0 wraparound\n            if tipoDato == 'dir':\n                meds_sin = [m.sin(m.radians(k)) for k in meds]\n                meds_cos = [m.cos(m.radians(k)) for k in meds]\n\n                meds_sin_m = signal.resample_poly(meds_sin, up=10, down=TS_MIN)\n                meds_cos_m = signal.resample_poly(meds_cos, up=10, down=TS_MIN)
\n\n                meds_m = [m.atan2(s, c) for s, c in zip(meds_sin_m, meds_cos_m)]\n                meds_m = [m.degrees(k) for k in meds_m]\n                for k in range(len(meds_m)):\n                    if meds_m[k] < 0:\n                        meds_m[k] = meds_m[k] + 360\n\n                meds = np.asarray(meds_m)\n            else:\n                meds = signal.resample_poly(meds, up=10, down=TS_MIN)\n\n            dtini_TS = dt_to_dtTS(tiempo[0])\n            tiempo = fechaInitoDateTimeN(dtini_TS, len(meds))\n        nombre = tipoDato + ident\n        minmax = filtros.min_max(tipoDato,PAutorizada)\n        nrep = filtros.Nrep(tipoDato)\n        med = datos.Medida(tipoArchi,meds,tiempo,tipoDato,nombre,minmax[0],minmax[1],nrep)\n        \n        if tipoDato != 'pot' and tipoDato != 'cgm' and tipoDato != 'dis':\n            medidas.append(med)\n        elif tipoDato == 'pot':\n            pot=copy.copy(med) \n        elif tipoDato == 'cgm':\n            cgm=copy.copy(med) \n        elif tipoDato == 'dis':\n            dis=copy.copy(med) \n    \n    print('CREATING Medidor')\n    Medidor = datos.Medidor(ident,medidas,ubicacion)\n    print('CREATING Parque')\n    parque = datos.Parque(nidCentral,Medidor,cgm,pot)\n    print('READING FINISHED\\n')\n    return parque\n\n##############################################################################\n\ndef leerArchiSMEC(nidCentral):\n\n    archi_SMEC = archiSMEC(nidCentral)\n\n    if not os.path.exists(archi_SMEC):\n        return None\n\n    print(f\"Reading SMEC file for plant {nidCentral}\")\n\n    # read the samples (every cell must contain a value)\n    f = open(archi_SMEC, 'r')\n    line=f.readline()\n    line=f.readline() \n    lines=f.readlines()\n    result=[]\n    for x in lines:\n        result.append(x.split()[1:-1])\n    \n    f.close() \n\n    muestras_mat = np.array(result)\n    ndias,n15min = muestras_mat.shape\n    muestras15min = muestras_mat.flatten().astype(float)*4  # scaling by 4 presumably converts 15-minute energies into mean power per interval\n    muestrasTS = signal.resample_poly(muestras15min,up=15,down=TS_MIN)\n\n    # read the start date\n    f = open(archi_SMEC, 'r')\n    line=f.readline()\n    line=f.readline()\n    line=f.readline()\n    f.close() \n    cols = line.split() \n    dtini_str = cols[0]\n    dtini = datetime.datetime.strptime(dtini_str, '%d/%m/%Y') \n\n    dt_ini_corr = dtini + datetime.timedelta(minutes=30)  # shift by 30 min so the series is in phase with the SCADA data\n    \n    dt_TS = fechaInitoDateTime(dt_ini_corr,ndias,TS_MIN)\n    tipoDato = 'pot'\n    minmax = filtros.min_max(tipoDato,50)\n    nrep = filtros.Nrep(tipoDato)\n    \n    med_TS = datos.Medida('smec',muestrasTS,dt_TS,'pot','potSMEC',minmax[0],minmax[1],nrep)\n\n    return med_TS
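\n\n# Note on the resampling above: signal.resample_poly(x, up=15, down=60) polyphase-filters\n# the quarter-hour series onto the hourly grid, so with TS_MIN = 60 a day of 96\n# fifteen-minute samples becomes 96 * 15 / 60 = 24 hourly samples, with the\n# anti-aliasing low-pass filtering built in.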
\n\n##############################################################################\n\ndef leerArchiPRONOS(nidCentral): \n    archi_pronos = archiPRONOS(nidCentral) \n\n    if not os.path.exists(archi_pronos):\n        print(f\"WARNING: no forecasts available for this plant. File {archi_pronos} not found.\")\n        return None\n\n    print(f\"Reading forecast file for plant {nidCentral}: {archi_pronos}\")\n    \n    f = open(archi_pronos, 'r')\n    \n    # read the station metadata\n    \n    line=f.readline()\n    cols = line.split()\n    nSeries = int(cols[0])\n    \n    line=f.readline()\n    cols = line.split()\n    meteo_utm_zona = cols[0]\n    \n    line=f.readline()\n    cols = line.split()\n    meteo_utm_huso = int(cols[0]) \n    \n    line=f.readline()\n    cols = line.split()\n    meteo_utm_xe = float(cols[0])\n\n    line=f.readline()\n    cols = line.split()\n    meteo_utm_yn = float(cols[0])\n    \n    line=f.readline()\n    cols = line.split()\n    ident = cols[0]\n\n    ubicacion = datos.Ubicacion(meteo_utm_zona,meteo_utm_huso,meteo_utm_xe,meteo_utm_yn,ident)\n    \n    line=f.readline()\n    cols = line.split()\n    PAutorizada = float(cols[0])\n\n    line=f.readline()\n    tipos = line.split()\n    tipos = [ tipos[i] for i in range(nSeries)]\n    \n    data=np.loadtxt(archi_pronos,skiprows=8)\n\n    line=f.readline()\n    cols = line.split() \n    f.close() \n    dt_ini_str = cols[0]\n    dt_ini = NumtoDateTime(float(dt_ini_str))\n\n    # read the measurements\n    medidas = []\n    for i in range(nSeries):\n\n        tipoDato = filtros.str_to_tipo(tipos[i])\n        if tipoDato is None:\n            break\n        meds = data[:,i+1]\n        nombre = tipoDato + ident\n        minmax = filtros.min_max(tipoDato,PAutorizada)\n        nrep = filtros.Nrep(tipoDato)\n        \n        if TS_MIN != 60: \n            # same circular treatment of wind direction as in leerArchi above\n            if tipoDato == 'dir':\n                meds_sin = [m.sin(m.radians(k)) for k in meds ]\n                meds_cos = [m.cos(m.radians(k)) for k in meds ]\n                \n                meds_sin_m = signal.resample_poly(meds_sin,up=60,down=TS_MIN)\n                meds_cos_m = signal.resample_poly(meds_cos,up=60,down=TS_MIN)\n                \n                meds_m = [m.atan2(s,c) for s,c in zip(meds_sin_m,meds_cos_m)]\n                meds_m = [m.degrees(k) for k in meds_m]\n                for k in range(len(meds_m)):\n                    if meds_m[k] < 0 :\n                        meds_m[k] = meds_m[k] + 360\n                \n                meds = np.asarray(meds_m) \n            else: \n                meds = signal.resample_poly(meds,up=60,down=TS_MIN) \n        \n        dt_TS = fechaInitoDateTimeN(dt_ini, len(meds))\n        med = datos.Medida('pronos',meds,dt_TS,tipoDato,nombre,minmax[0],minmax[1],nrep)\n        medidas.append(med)\n\n    Medidor = datos.Medidor(ident,medidas,ubicacion)\n    \n    return Medidor\n\n##############################################################################\n    \ndef leerArchivosCentral(nidCentral):\n    #\n    # if a cached object exists, load it\n    #\n    archip = archiPICKLE(nidCentral)\n    if os.path.exists(archip):\n        print(f'INFO: loading plant data from {archip}')\n        return cargarCentral(nidCentral)\n    #\n    # otherwise build everything from scratch out of the various\n    # text files that make up the plant's data.\n    #\n    parque = leerArchi(nidCentral,'scada')\n    \n    parqueGen = leerArchi(nidCentral,'gen')\n    if parqueGen is not None:\n        parque.medidores[0].agregar_meds(parqueGen.medidores[0]._medidas)\n    else:\n        print(\"WARNING: no GEN file for this plant.\")\n    \n    med_TS = leerArchiSMEC(nidCentral)\n    if med_TS is not None:\n        parque.pot_SMEC = med_TS.desfasar(-1)  # why was this being shifted?\n\n    else:\n        print(\"WARNING: no SMEC file for this plant.\")\n\n    medidor_pronosTS = leerArchiPRONOS(nidCentral)\n    if medidor_pronosTS is not None:\n        parque.medidores[0].agregar_meds(medidor_pronosTS._medidas)\n    else:\n        print(\"WARNING: no PRONOS file for this plant.\")\n    return parque
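\n\n# Typical round-trip with these helpers (illustrative; plant id 5 is hypothetical):\n#     parque = leerArchivosCentral(5)  # first run: builds everything from the text files\n#     guardarCentral(parque)           # caches the Parque as a gzipped pickle\n#     parque = cargarCentral(5)        # later runs load the cache directly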
\n    \n##############################################################################\n\ndef guardarCentral(parque):\n    archi_central = archiPICKLE(parque.id)\n    with gzip.open(archi_central,'wb') as output:\n        pickle.dump(parque,output,pickle.HIGHEST_PROTOCOL)\n\n##############################################################################\n\ndef cargarCentral(id):\n    archi_central = archiPICKLE(id)\n    with gzip.open(archi_central,'r') as input:\n        parque = pickle.load(input)\n    return parque\n","repo_name":"nacho-pancho/darset","sub_path":"code/archivos.py","file_name":"archivos.py","file_ext":"py","file_size_in_byte":14074,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"4527661814","text":"## @ingroup Methods-Noise-Frequency_Domain_Buildup-Rotor \n# RCAIDE/Methods/Noise/Frequency_Domain_Buildup/Rotor/LBL_VS_broadband_noise.py\n# \n# \n# Created:  Jul 2023, M. Clarke  \n\n# ----------------------------------------------------------------------------------------------------------------------\n#  IMPORT\n# ---------------------------------------------------------------------------------------------------------------------- \n\n# Python Package imports  \nimport numpy as np  \n\n# ----------------------------------------------------------------------------------------------------------------------\n# Compute LBL-VS Broadband Noise \n# ----------------------------------------------------------------------------------------------------------------------\n## @ingroup Methods-Noise-Frequency_Domain_Buildup-Rotor \ndef LBL_VS_broadband_noise(R_c,alpha_star,delta_p,r_e,L,M,Dbar_h,f,U):\n    '''This computes the laminar boundary layer component of broadband noise using the method outlined by the \n    Brooks, Pope and Marcolini (BPM) Model\n    \n    Assumptions:\n        The BPM model assumes a NACA 0012 airfoil \n        \n    Source:  \n        BPM Model:  Brooks, Thomas F., D. Stuart Pope, and Michael A.\n        Marcolini. Airfoil self-noise and prediction. No. L-16528. 1989.\n    \n    Inputs:  \n       R_c        - Reynolds number                                 [-]\n       alpha_star - angle of attack of the blade section            [deg]\n       delta_p    - boundary layer thickness, pressure side         [m]\n       L          - length of the blade section                     [m]\n       U          - velocity of the blade section                   [m/s]\n       M          - Mach number                                     [-]\n       c          - airfoil section chord                           [m] \n       f          - frequency spectrum                              [Hz]\n       Dbar_h     - high frequency directivity term                 [-]\n       r_e        - distance from noise source to observer          [m] \n    \n    Outputs \n       SPL_LBL_VS - sound pressure level of the laminar boundary layer [dB]\n    \n    Properties Used:\n        N/A \n    ''' \n    \n    St_prime       = f*delta_p/U  # eqn 54 \n    St_prime_1     = 0.001756*(R_c**0.3931)  # eqn 55 \n    St_prime_1[R_c<1.3E5] = 0.18  # eqn 55 \n    St_prime_1[R_c>4E5]   = 0.28  # eqn 55 \n    \n    St_prime_peak  = St_prime_1*(10**(-0.04*alpha_star))\n    \n    G_1   = compute_G_1(St_prime/St_prime_peak)\n    R_c_0 = compute_R_c_0(alpha_star)\n    G_2   = compute_G_2(R_c/R_c_0)\n    G_3   = compute_G_3(alpha_star)\n    \n    SPL_LBL_VS = 10*np.log10((delta_p*(M**5)*L*Dbar_h)/(r_e**2)) + G_1 + G_2 + G_3  # eqn 53\n\n    return SPL_LBL_VS
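\n\n# Quick numeric check of eqn 55 above (illustrative): at R_c = 2e5 the power law\n# gives St_prime_1 = 0.001756 * (2e5)**0.3931 ≈ 0.21, which falls between the\n# clamp values 0.18 (applied when R_c < 1.3e5) and 0.28 (applied when R_c > 4e5).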
\n\ndef compute_G_1(e): \n    '''This computes the G_1 function using the BPM model\n    \n    Assumptions:\n        The BPM model assumes a NACA 0012 airfoil \n        Corrections made to match experimental results \n    \n    Source:  \n        BPM Model:  Brooks, Thomas F., D. Stuart Pope, and Michael A.\n        Marcolini. Airfoil self-noise and prediction. No. L-16528. 1989.\n    \n    Inputs:  \n        e    [-]\n    \n    Outputs \n        G_1  [-]\n    \n    Properties Used:\n        N/A \n    ''' \n    e = e * 0.3 \n    num_1 = 39.8 * 0.5 \n    num_2 = 98.409 * 0.5 \n    \n    G_1 = -num_1*np.log10(e) - 11.2  # eqn 57\n    G_1[e<1.64] = -num_2*np.log10(e[e<1.64]) + 2  # eqn 57\n    G_1[e<1.17] = -5.076 + np.sqrt( 2.484 - 506.25*(np.log10(e[e<1.17]))**2)  # eqn 57\n    G_1[e<0.8545] = num_2*np.log10(e[e<0.8545]) + 2  # eqn 57\n    G_1[e<0.5974] = num_1*np.log10(e[e<0.5974]) - 11.2  # eqn 57\n    \n    return G_1\n\ndef compute_G_2(d):\n    '''This computes the G_2 function using the BPM model\n    \n    Assumptions:\n        The BPM model assumes a NACA 0012 airfoil \n    \n    Source:  \n        BPM Model:  Brooks, Thomas F., D. Stuart Pope, and Michael A.\n        Marcolini. Airfoil self-noise and prediction. No. L-16528. 1989.\n    \n    Inputs:  \n        d    [-]\n    \n    Outputs \n        G_2  [-]\n    \n    Properties Used:\n        N/A \n    ''' \n    \n    G_2 = -77.852*np.log10(d) + 15.328  # eqn 58 \n    G_2[d<3.0889] = -65.188*np.log10(d[d<3.0889]) + 9.125  # eqn 58 \n    G_2[d<1.7579] = -114.052*((np.log10(d[d<1.7579]))**2 )  # eqn 58\n    G_2[d<0.5689] = 65.188*np.log10(d[d<0.5689]) + 9.125  # eqn 58\n    G_2[d<0.3237] = 77.852*np.log10(d[d<0.3237]) + 15.328  # eqn 58 \n    \n    return G_2\n\ndef compute_G_3(alpha_star): \n    '''This computes the G_3 function using the BPM model\n    \n    Assumptions:\n        The BPM model assumes a NACA 0012 airfoil \n    \n    Source:  \n        BPM Model:  Brooks, Thomas F., D. Stuart Pope, and Michael A.\n        Marcolini. Airfoil self-noise and prediction. No. L-16528. 1989.\n    \n    Inputs:  \n        alpha_star  [deg]\n    \n    Outputs \n        G_3         [-]\n    \n    Properties Used:\n        N/A \n    ''' \n    G_3 = 171.04 - 3.03*alpha_star\n    return G_3\n\ndef compute_R_c_0(alpha_star):\n    '''This computes the R_c_0 function using the BPM model\n    \n    Assumptions:\n        The BPM model assumes a NACA 0012 airfoil \n    \n    Source:  \n        BPM Model:  Brooks, Thomas F., D. Stuart Pope, and Michael A.\n        Marcolini. Airfoil self-noise and prediction. No. L-16528. 1989.\n    \n    Inputs:  \n        alpha_star  [deg]\n    \n    Outputs \n        R_c_0       [-]\n    \n    Properties Used:\n        N/A \n    ''' \n    R_c_0 = 10**(0.215*alpha_star + 4.978)  # eqn 59\n    R_c_0[3 2):\n                host = argvs[2]\n            tp36 = Tp36(slot, host)\n        except Exception as e:\n            tpUtils.stderr(str(e.args))\n            sys.exit(0)\n\n        while True:\n            try:\n                data = input()\n                recv_data = tp36.get_data()\n                tpUtils.nodeOut(json.dumps(recv_data))\n            except KeyboardInterrupt:\n                sys.exit(0)\n            except Exception as e:\n                tpUtils.stderr(str(e.args))\n                tpUtils.nodeOut(\"\")\n","repo_name":"cw-tpdev/node-red-contrib-tibbo-pi-p4","sub_path":"py/tp36.py","file_name":"tp36.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"37292082325","text":"\"\"\"add foreign keys\n\nRevision ID: 6347e96e55a9\nRevises: 989b68ead7df\nCreate Date: 2022-10-24 22:06:13.942056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6347e96e55a9'\ndown_revision = '989b68ead7df'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n
###\n op.add_column('test_address', sa.Column('customer_id', sa.Integer(), nullable=False))\n op.create_foreign_key('fk_address_customer_id', 'test_address', 'test_customers',\n ['customer_id'], ['id'],\n onupdate='CASCADE', ondelete='CASCADE')\n op.add_column('test_taxinfo', sa.Column('customer_id', sa.Integer(), nullable=False))\n op.create_foreign_key('fk_taxinfo_customer_id', 'test_taxinfo', 'test_customers',\n ['customer_id'], ['id'],\n onupdate='CASCADE', ondelete='CASCADE')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('fk_taxinfo_customer_id', 'test_taxinfo', type_='foreignkey')\n op.drop_column('test_taxinfo', 'customer_id')\n op.drop_constraint('fk_address_customer_id', 'test_address', type_='foreignkey')\n op.drop_column('test_address', 'customer_id')\n # ### end Alembic commands ###\n","repo_name":"shyamsrinivasan/flaskapp","sub_path":"migrations/versions/6347e96e55a9_add_foreign_keys.py","file_name":"6347e96e55a9_add_foreign_keys.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5421591786","text":"# Baekjoon Online Judge\n\n# 20210126\n\n# 7-1 (11654)\nS = input()\n\nprint(ord(S))\n\n# 7-2 (11720)\nN = input()\nN = int(N)\n\nM = input()\nM = list(map(int,M))\n\nS = sum(M)\n\nprint(int(S))\n\n# 7-3 (10809)\nS = input()\nS = list(S)\n\nA = 'abcdefghijklmnopqrstuvwxyz'\nA = list(A)\n\nfor i in (A):\n \n if (S.count(i) == 0):\n print('-1', end=' ')\n else: print(S.index(i), end=' ')\n \n# 7-4 (2675)\nN = input()\nN = int(N) \n\nfor i in range(N):\n \n R, S = input().split()\n R = int(R)\n S = list(S)\n \n for j in (S):\n \n print((j)*R, end='')\n \n print('')\n\n# 7-5 (1157) [RE]\nD = input().upper()\nD = list(D)\n\nU = set(D)\nU = list(U)\n\nnum = 0\nK = ''\nfor i in U:\n \n if (D.count(i) > num):\n num = D.count(i)\n K = i\n elif (D.count(i) == num):\n num = num\n K = '?'\n else:\n num = num\n K = K\n\nprint(K)\n ","repo_name":"ongeee/study","sub_path":"B_OJ_20210126.py","file_name":"B_OJ_20210126.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33583042995","text":"import arrow\n\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\n\nfrom ..cases.models import Case\nfrom ..persons.models import Profile, Contact\nfrom ..creme_core.models.auth import Account\nfrom ..events.models import Event\nfrom ..invoices.models import Invoice\nfrom ..leads.models import Lead\nfrom ..opportunities.models import Opportunity\nfrom ..tasks.models import Task\n\n\nclass Comment(models.Model):\n\n author = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name=\"comments\", on_delete=models.CASCADE)\n\n name = models.CharField(max_length=100)\n email = models.CharField(max_length=255, blank=True)\n website = models.CharField(max_length=255, blank=True)\n\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.IntegerField()\n content_object = GenericForeignKey()\n\n comment = models.TextField()\n\n submit_date = models.DateTimeField(default=datetime.now)\n ip_address = models.GenericIPAddressField(null=True)\n public = models.BooleanField(default=True)\n\n case = 
models.ForeignKey(\n Case,\n blank=True,\n null=True,\n related_name=\"cases\",\n on_delete=models.CASCADE,\n )\n comment = models.CharField(max_length=255)\n commented_on = models.DateTimeField(auto_now=True)\n commented_by = models.ForeignKey(settings.PERSONS_PROFILE_MODEL, on_delete=models.CASCADE, blank=True, null=True )\n account = models.ForeignKey(\n Account,\n blank=True,\n null=True,\n related_name=\"accounts_comments\",\n on_delete=models.CASCADE,\n )\n lead = models.ForeignKey(\n Lead,\n blank=True,\n null=True,\n related_name=\"leads_comments\",\n on_delete=models.CASCADE,\n )\n opportunity = models.ForeignKey(\n Opportunity,\n blank=True,\n null=True,\n related_name=\"opportunity_comments\",\n on_delete=models.CASCADE,\n )\n contact = models.ForeignKey(\n Contact,\n blank=True,\n null=True,\n related_name=\"contact_comments\",\n on_delete=models.CASCADE,\n )\n profile = models.ForeignKey(\n settings.PERSONS_PROFILE_MODEL,\n blank=True,\n null=True,\n related_name=\"user_comments\",\n on_delete=models.CASCADE,\n )\n\n task = models.ForeignKey(\n Task,\n blank=True,\n null=True,\n related_name=\"tasks_comments\",\n on_delete=models.CASCADE,\n )\n\n invoice = models.ForeignKey(\n Invoice,\n blank=True,\n null=True,\n related_name=\"invoice_comments\",\n on_delete=models.CASCADE,\n )\n\n event = models.ForeignKey(\n Event,\n blank=True,\n null=True,\n related_name=\"events_comments\",\n on_delete=models.CASCADE,\n )\n\n class Meta:\n app_label = \"comments\"\n\n @property\n def data(self):\n return {\n \"pk\": self.pk,\n \"comment\": self.comment,\n \"author\": self.author.username if self.author else \"\",\n \"name\": self.name,\n \"email\": self.email,\n \"website\": self.website,\n \"submit_date\": str(self.submit_date)\n }\n\n def __str__(self):\n return \"pk=%d\" % self.pk # pragma: no cover\n\n\n def get_files(self):\n return Comment_Files.objects.filter(comment_id=self)\n\n @property\n def commented_on_arrow(self):\n return arrow.get(self.commented_on).humanize()\n\n\nclass Comment_Files(models.Model):\n comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n updated_on = models.DateTimeField(auto_now=True)\n comment_file = models.FileField(\n \"File\", upload_to=\"comment_files\", null=True, blank=True\n )\n\n def get_file_name(self):\n if self.comment_file:\n return self.comment_file.path.split(\"/\")[-1]\n\n return None","repo_name":"ithoanghai/CmsEcommerce","sub_path":"SPS/creme/comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"17415618521","text":"import aiohttp\nimport asyncio\nfrom super_spider import SuperSpider\nip=SuperSpider(host='192.168.0.172')\nsession=aiohttp.ClientSession()\nasync def ip_check(sem,proxies):\n\tasync with sem:\n\t\ttry:\n\t\t\tkey='http' if not proxies.startswith('https') else 'https'\n\t\t\turl=f'{key}://www.baidu.com'\n\t\t\tasync with session.get(url,headers=ip.random_headers(),proxy=proxies,timeout=3) as response:\n\t\t\t\tstatus_code=response.status\n\t\t\t\tif status_code != 200:\n\t\t\t\t\tip.sql_search(f'delete from ip_pool where ip=\"{proxies}\"')\n\t\t\t\t\tprint(f'{proxies} - unavailable, removed')\n\t\t\t\telse:\n\t\t\t\t\tprint(f'{proxies} - available')\n\t\texcept:\n\t\t\tip.sql_search(f'delete from ip_pool where ip=\"{proxies}\"')\n\t\t\tprint(f'{proxies} - unavailable, removed')\nproxies_list=ip.sql_search('select ip from ip_pool')\nasync def 
split_task():\n\tsem=asyncio.Semaphore(500)\n\ttasks=[ip_check(sem,proxies[0]) for proxies in proxies_list]\n\tawait asyncio.wait(tasks)\n\nloop=asyncio.get_event_loop()\nloop.run_until_complete(split_task())\nloop.close()\nip.spider_end()\n\n\t\t\t\t\n\n","repo_name":"cwy1019120542/MySpiders","sub_path":"MySpiders/phone_library/ip_check.py","file_name":"ip_check.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"38641171118","text":"from collections import Counter\n\ndef to_inttuple(bitstr):\n \"\"\"Convert from a bit string like '01011' to an int tuple like (0, 1, 0, 1, 1)\n\n Args:\n bitstr (str, Counter, dict): String which is written in \"0\" or \"1\".\n If all keys are bit strings, a Counter or dict can also be converted by this function.\n\n Returns:\n tuple of int, Counter, dict: Converted bits.\n If bitstr is Counter or dict, returns the Counter or dict\n which contains {converted key: original value}.\n\n Raises:\n ValueError: If bitstr type is unexpected or bitstr contains an illegal character.\n \"\"\"\n if isinstance(bitstr, str):\n return tuple(int(b) for b in bitstr)\n if isinstance(bitstr, Counter):\n return Counter({tuple(int(b) for b in k): v for k, v in bitstr.items()})\n if isinstance(bitstr, dict):\n return {tuple(int(b) for b in k): v for k, v in bitstr.items()}\n raise ValueError(\"bitstr type shall be `str`, `Counter` or `dict`\")\n\ndef ignore_global_phase(statevec):\n \"\"\"Multiply e^-iθ into `statevec`, where θ is the phase of the first non-zero element.\n\n Args:\n statevec np.array: Statevector.\n\n Returns:\n np.array: `statevec` is returned.\n \"\"\"\n for q in statevec:\n if abs(q) > 0.0000001:\n ang = abs(q) / q\n statevec *= ang\n break\n return statevec\n","repo_name":"johnmelodyme/Quantum","sub_path":"venv/Lib/site-packages/blueqat/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"23685212353","text":"def fleiss_kappa(ratings, n):\n '''\n Computes the Fleiss' kappa measure for assessing the reliability of\n agreement between a fixed number n of raters when assigning categorical\n ratings to a number of items.\n\n Args:\n ratings: a list of (item, category)-ratings\n n: number of raters\n Returns:\n the Fleiss' kappa score\n\n Refactored implementation from:\n https://gist.github.com/ShinNoNoir/4749548\n\n See also:\n http://en.wikipedia.org/wiki/Fleiss'_kappa\n '''\n items = set()\n categories = set()\n n_ij = {}\n\n for i, c in ratings:\n items.add(i)\n categories.add(c)\n n_ij[(i, c)] = n_ij.get((i, c), 0) + 1\n\n N = len(items)\n\n p_j = {}\n for c in categories:\n p_j[c] = sum(n_ij.get((i, c), 0) for i in items) / (1.0 * n * N)\n\n P_i = {}\n for i in items:\n P_i[i] = (sum(n_ij.get((i, c), 0) ** 2 for c in categories) - n) / (n * (n - 1.0))\n\n P_bar = sum(iter(P_i.values())) / (1.0 * N)\n P_e_bar = sum(p_j[c] ** 2 for c in categories)\n\n kappa = (P_bar - P_e_bar) / (1 - P_e_bar)\n\n return kappa\n\n\nexample = ([(1, 5)] * 14 +\n [(2, 2)] * 2 + [(2, 3)] * 6 + [(2, 4)] * 4 + [(2, 5)] * 2 +\n [(3, 3)] * 3 + [(3, 4)] * 5 + [(3, 5)] * 6 +\n [(4, 2)] * 3 + [(4, 3)] * 9 + [(4, 4)] * 2 +\n [(5, 1)] * 2 + [(5, 2)] * 2 + [(5, 3)] * 8 + [(5, 4)] * 1 + [(5, 5)] * 1 +\n [(6, 1)] * 7 + [(6, 2)] * 7 +\n [(7, 1)] * 3 + [(7, 2)] * 2 + [(7, 3)] * 6 + [(7, 4)] * 3 +\n [(8, 1)] * 2 + [(8, 2)] * 5 + [(8, 3)] * 3 + [(8, 4)] * 2 + [(8, 5)] * 2 +\n 
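# The ratings above and below appear to be the worked example from the Fleiss' kappa Wikipedia article:\n # 10 items, 14 raters, 5 categories; fleiss_kappa(example, 14) should evaluate to roughly 0.21.\n 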
[(9, 1)] * 6 + [(9, 2)] * 5 + [(9, 3)] * 2 + [(9, 4)] * 1 +\n [(10, 2)] * 2 + [(10, 3)] * 2 + [(10, 4)] * 3 + [(10, 5)] * 7)\n\n","repo_name":"Alex92rus/ErrorDetectionProject","sub_path":"fleiss_kappa.py","file_name":"fleiss_kappa.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"12349603393","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport sys\n\nargs1 = sys.argv[1] # first command-line argument\nargs2 = sys.argv[2] # second command-line argument\n\nprint('args1: %s' % args1)\nprint('args2: %s' % args2)\n","repo_name":"niya1123/Python_Security_Programimg","sub_path":"pysec101-master/pysec101-master/chap1/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"40157580683","text":"\n\nimport numpy as np\nfrom scipy import signal\nfrom .base import FittingModelBase\n\n\nclass RabiOscillation(FittingModelBase):\n \"\"\"Fitting model for rabi oscillation\n \"\"\"\n # override\n def __init__(self):\n \"\"\"Initializer of rabi oscillation\n \"\"\"\n param_names = [\n 'amplitude',\n 'rabi_frequency',\n 'phase_offset',\n 'decay_rate',\n 'amplitude_offset',\n ]\n FittingModelBase.__init__(self, param_names)\n \n # override\n def _model_function(self, time, amplitude, rabi_frequency, phase_offset, decay_rate, amplitude_offset):\n \"\"\"Response function for rabi oscillation\n\n Args:\n time (float or np.ndarray): driving time of qubit\n amplitude (float): amplitude of rabi oscillation\n rabi_frequency (float): rabi frequency\n phase_offset (float): phase offset of rabi oscillation\n decay_rate (float): decaying rate of rabi oscillation\n amplitude_offset (float): amplitude offset of rabi oscillation\n \n Returns:\n float or np.ndarray: rabi oscillation signal\n \"\"\"\n oscillation = np.cos(phase_offset + 2 * np.pi * rabi_frequency * time)\n decay_term = np.exp( - time * decay_rate)\n response = amplitude_offset + amplitude * oscillation * decay_term\n return response\n\n # override\n def _initial_guess(self,x,y):\n \"\"\"Guess initial fitting parameters\n\n Args:\n x (np.ndarray): driving time of qubit\n y (np.ndarray): amplitude of measured response\n \n Returns:\n dict: Fitting parameters\n \"\"\"\n # amplitude of sine-wave is (max-min)/2\n amplitude = (np.max(y) - np.min(y))/2\n\n # take two absolute peaks and their times, and compute the decay rate.\n # exp(-decay_rate * time)\n half_data_count = len(x)//2\n peak = np.max(np.abs(y))\n peak_index = np.argmax(np.abs(y))\n peak_after_half = np.max(np.abs(y[half_data_count:]))\n peak_after_half_index = half_data_count + np.argmax(np.abs(y[half_data_count:]))\n half_time = x[peak_after_half_index] - x[peak_index]\n\n if peak>peak_after_half:\n decay_rate = np.log(peak/peak_after_half) / half_time\n else:\n decay_rate = 0.1\n\n # apply a low-pass filter and find local minima\n b,a = signal.butter(N=10,Wn=0.3,output='ba')\n y_lpf = signal.filtfilt(b,a,y)\n minimum_index_list = signal.argrelmin(y_lpf, order=5)[0]\n first_minimum_time = x[minimum_index_list[0]]\n second_minimum_time = x[minimum_index_list[1]]\n rabi_frequency = 1. 
/ (second_minimum_time - first_minimum_time)\n param_dict = {\n 'amplitude' : amplitude,\n 'rabi_frequency' : rabi_frequency,\n 'phase_offset' : 0.,\n 'decay_rate' : decay_rate,\n 'amplitude_offset' : 0.,\n }\n return param_dict\n","repo_name":"qipe-nlab/measurement_codes_ut","sub_path":"measurement_codes_ut/fitting/rabi_oscillation.py","file_name":"rabi_oscillation.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"35307265817","text":"#coding=utf-8\n\"\"\"\n Version: 5.0\n Author: 高建帅\n Purpose: air quality calculation\n Date: 10/04/2019\n New feature: fetch real-time data with a web crawler\n New feature: save the data to a csv file\n Version: 6.0\n\"\"\"\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\ndef get_city_aqi(city_pinyin):\n \"\"\"\n Return the AQI readings for a city\n \"\"\"\n url = 'http://www.pm25.in/' + city_pinyin\n r = requests.get(url, timeout=30)\n soup = BeautifulSoup(r.text, 'lxml')\n div_list = soup.find_all('div', {'class' : 'span1'}) #('div', {'class' : 'span1'})\n city_aqi = []\n for i in range(8):\n div_content = div_list[i]\n #caption = div_content.find('div', {'class': 'caption'}).text.strip()\n value = div_content.find('div', {'class': 'value'}).text.strip()\n city_aqi.append(value)\n return city_aqi\ndef get_all_cities():\n \"\"\"\n Get all cities\n \"\"\"\n url = 'http://www.pm25.in/'\n city_list = []\n r = requests.get(url, timeout=30)\n soup = BeautifulSoup(r.text, 'lxml')\n city_div = soup.find_all('div', {'class': 'bottom'})[1]\n city_link_list = city_div.find_all('a')\n for city_link in city_link_list:\n city_name = city_link.text\n city_pinyin = city_link['href'][1:]\n city_list.append((city_name, city_pinyin))\n return city_list\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n city_list = get_all_cities()\n header = ['City', 'AQI', 'PM2.5/1h', 'CO/1h', 'NO2/1h', 'O3/1h', 'O3/8h', 'SO2/1h']\n\n with open('china_city_aqi.csv', 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for i, city in enumerate(city_list):\n if (i + 1) % 10 == 0:\n print('Processed {} records. ({} records in total)'.format(i + 1, len(city_list)))\n city_name = city[0]\n city_pinyin = city[1]\n city_aqi = get_city_aqi(city_pinyin)\n row = [city_name] + city_aqi\n writer.writerow(row)\n\n\n # for city in city_list:\n # city_name = city[0]\n # city_pinyin = city[1]\n # city_aqi = get_city_aqi(city_pinyin)\n # print(city_name, city_aqi)\n\n # aqi_div = '''\n #
\n #
\n #
\n # '''\n # index = url_text.find(aqi_div)\n # begin_index = len(aqi_div) + index\n # end_index = begin_index + 2\n # aqi_val = url_text[begin_index: end_index]\n # print('空气质量为:{}'.format(aqi_val))\n\n\nif __name__ == '__main__':\n main()","repo_name":"gaojianshuai/vpc-ui","sub_path":"pycharm_practice/pycharm_practice_air_quality/pycharm_practice_quality_pachong4.py","file_name":"pycharm_practice_quality_pachong4.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23774939992","text":"import pandas as pd\nimport numpy as np\nfrom app.db import cache, db\nfrom app.db.users import Group, UserRole\nSGL_ROLE_ID = 2\n\"\"\"\n一些零碎数据的cache,写在这个文件。\n\"\"\"\n\n\n@cache.memoize(timeout=3600 * 12) # 12小时\ndef get_group_df():\n \"\"\"获取组信息\n :return: Group Data Frame\n \"\"\"\n print('*****************Reload Group Info.****************\\n')\n q = (db.session.query(Group).filter(Group.group_id != 1).order_by(Group.group_name))\n group_df = pd.read_sql(q.statement, db.session.bind)\n return group_df\n\n\n@cache.memoize(timeout=3600 * 12) # 12小时\ndef get_gl_sgl_df(proj_id):\n \"\"\"\n 获取所有的GL组的父级关系信息信息\n :return:\n \"\"\"\n print('*****************Reload GL Group Info.****************\\n')\n sqlcmd = \"\"\"\n SELECT t1.group_id, t1.group_name, \n t2.group_id as parent_group_id, \n t2.group_name as parent_group_name\n FROM public.\"group\" as t1 left join public.\"group\" as t2\n on t1.parent_group_id = t2.group_id\n WHERE t1.group_id in (\t\n SELECT public.\"group\".group_id FROM public.\"group\"\n LEFT JOIN public.user_role\n ON public.\"group\".group_id = public.user_role.group_id\n WHERE proj_id = {proj_id} AND public.\"group\".group_id not in( \n select parent_group_id FROM public.\"group\"\n )AND parent_group_id != 1\n )\n \"\"\".format(proj_id=proj_id)\n gl_sgl_df = pd.read_sql(sqlcmd, db.session.bind)\n gl_sgl_df[Group.group_id.name] = gl_sgl_df[Group.group_id.name].astype(np.float)\n return gl_sgl_df\n\n\ndef refresh_group_df():\n \"\"\"刷新组信息的cache\n \"\"\"\n cache.delete_memoized(get_group_df)\n cache.delete_memoized(get_gl_sgl_df)\n group_df = get_group_df()\n # gl_sgl_df = get_gl_sgl_df()\n return group_df\n\n\n\n","repo_name":"clearloveyin/Cararote","sub_path":"koala/koala_server/app/data_server/ds_pieces.py","file_name":"ds_pieces.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2989304465","text":"\r\nimport pygame\r\nimport time\r\nimport csv\r\n\r\nclass Imagen(object):\r\n def __init__(self,imagen,nombre=None,x=0,y=0):\r\n self.fhoto=imagen\r\n self.nombre=nombre\r\n self.x=x\r\n self.y=y\r\n self.rect = self.fhoto.get_rect()\r\n self.rect.left,self.rect.top = (self.x,self.y) \r\n\r\n def dibujar(self,superficie): \r\n superficie.blit(self.fhoto,self.rect)\r\n \r\nclass Imagen2(object):\r\n\r\n def __init__(self,imagen,x=0,y=0):\r\n self.fhoto=imagen\r\n self.x=x\r\n self.y=y \r\n\r\n def dibujar(self,superficie): \r\n superficie.blit(self.fhoto,(self.x,self.y))\r\nclass Cursor(pygame.Rect):\r\n def __init__(self):\r\n pygame.Rect.__init__(self,0,0,1,1,)\r\n def update(self):\r\n (self.left,self.top)= pygame.mouse.get_pos()\r\n \r\nclass Boton(pygame.sprite.Sprite):\r\n def __init__(self,imagen1,imagen2,x=200,y=200):\r\n self.imagen_normal=imagen1\r\n self.imagen_seleccion=imagen2\r\n self.imagen_actual=self.imagen_normal\r\n self.rect = self.imagen_actual.get_rect()\r\n 
self.rect.left,self.rect.top = (x,y)\r\n \r\n def update(self,pantalla,cursor):\r\n \r\n if(cursor.colliderect(self.rect)):\r\n self.imagen_actual = self.imagen_seleccion\r\n\r\n else:self.imagen_actual=self.imagen_normal\r\n pantalla.blit(self.imagen_actual,self.rect)\r\n\r\nclass Preguntados():\r\n def __init__(self):\r\n \r\n pygame.init()\r\n self.ventana = pygame.display.set_mode([800,600])\r\n self.titulo = pygame.display.set_caption(\"Preguntados\")\r\n self.reloj = pygame.time.Clock()\r\n self.salir =True\r\n self.derecho_menu = True\r\n self.derecho_tematica=False\r\n self.derecho_credito=False\r\n self.mouse= Cursor()\r\n \r\n #Musica General\r\n pygame.mixer.music.load(\"Sonidos\\Main.mid\")\r\n self.s_bien = pygame.mixer.Sound(\"Sonidos\\Good.wav\")\r\n self.s_mal= pygame.mixer.Sound(\"Sonidos\\Fail.wav\")\r\n self.s_toque= pygame.mixer.Sound(\"Sonidos\\Toque.wav\")\r\n\r\n \r\n #Imagenes del boton de regreso al menu DEL MENU PRINCIPAL Y TEMATICA E INSTANCIADOS\r\n self.b_return1=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Return1.png\")\r\n self.b_return2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Return2.png\")\r\n \r\n self.b_Return_menu=Boton(self.b_return1,self.b_return2,15,550)\r\n self.b_Return_menu2=Boton(self.b_return1,self.b_return2,15,550)\r\n \r\n \r\n self.imagen1 = pygame.image.load(\"Imagenes_preguntados\\Fondo_menu.png\")\r\n \r\n #Imagenes de botones del menu principal\r\n self.b_jugar1= pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\jugar1.png\")\r\n self.b_jugar2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\jugar2.png\")\r\n self.b_credito1=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\credito1.png\")\r\n self.b_credito2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\credito2.png\")\r\n self.b_salir1=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\salir1.png\")\r\n self.b_salir2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\salir2.png\")\r\n \r\n \r\n #INSTANCIAS DE LOS BOTONES DEL MENU PRINCIPAL\r\n self.b_jugar= Boton(self.b_jugar1,self.b_jugar2,293,240)\r\n self.b_creditos= Boton(self.b_credito1,self.b_credito2,270,330)\r\n self.b_salir= Boton(self.b_salir1,self.b_salir2,307,414)\r\n \r\n \r\n #IMAGENES E INSTANCIAS DE LOS B0TONES TEMATICOS \r\n self.imagen2 = pygame.image.load(\"Imagenes_preguntados\\Fondo2.png\")\r\n self.fondo_tematico= Imagen2(self.imagen2,0,0)\r\n \r\n #fondos de los botones tematicos\r\n self.f_geo=pygame.image.load(\"Imagenes_preguntados\\Fondo_geo.png\")\r\n self.f_astro=pygame.image.load(\"Imagenes_preguntados\\Fondo_astro.png\")\r\n self.f_histo=pygame.image.load(\"Imagenes_preguntados\\Fondo_Histo.png\")\r\n \r\n #Imagenes botones de las tematicas agregados a las instancias\r\n self.b_a= pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Astronomia1.png\")\r\n self.b_a2= pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Astronomia2.png\")\r\n self.b_g=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\geo1.png\")\r\n self.b_g2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\geo2.png\")\r\n self.b_h=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\histo1.png\")\r\n self.b_h2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\histo2.png\")\r\n \r\n \r\n #instancias de los botones tematicos\r\n self.b_astro = Boton(self.b_a,self.b_a2,293,240)\r\n self.b_geo = Boton(self.b_g,self.b_g2,304,300)\r\n self.b_histo= Boton(self.b_h,self.b_h2,313,361)\r\n \r\n \r\n # CREDITOS\r\n 
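# assets for the credits screen; credito() further below draws them each frame\r\n 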
self.imagen3 = pygame.image.load(\"Imagenes_preguntados\\Fondo_credito.png\")\r\n self.Fondo_credito= Imagen2(self.imagen3,0,0)\r\n\r\n Preguntados.Motor(self)\r\n \r\n def Play(self,answer,reply,list,number,n):\r\n \r\n #variables numericas del juego\r\n self.pulsaciones=0\r\n self.puntaje=0\r\n self.cont=0\r\n self.cont2=0\r\n \r\n self.manager1 = csv.reader(answer[number])\r\n self.manager2= csv.reader(reply[number])\r\n \r\n #Fondo\r\n self.fondo_juego = pygame.image.load(\"Imagenes_preguntados\\Fondo_juego.png\")\r\n self.final_fondo=pygame.image.load(\"Imagenes_preguntados\\Final_Puntaje.png\")\r\n \r\n #Fuentes del juego\r\n self.fuente= pygame.font.SysFont(\"Harrington\",25)\r\n self.fuente1=pygame.font.SysFont(\"Harrington\",17)\r\n self.fuente2= pygame.font.SysFont(\"Harrington\",40)\r\n self.fuente3= pygame.font.SysFont(\"Harrington\",40)\r\n \r\n #BARRA DE RESPUESTAS\r\n self.cuadro1= pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Cuadro.png\")\r\n self.cuadro2=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Cuadro2.png\")\r\n self.cuadro3=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\Cuadro3.png\")\r\n \r\n \r\n #IMAGENES DE LOS BOTONES AL LADO DE LA BARRA\r\n self.a= pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\B_A.png\")\r\n self.b=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\B_B.png\")\r\n self.c=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\B_C.png\")\r\n self.d=pygame.image.load(\"Imagenes_preguntados\\Botones_cuadros\\B_D.png\")\r\n \r\n #fuentes de los carteles en cuadros\r\n self.cadena_tiempo= self.fuente.render(\"Tiempo\",False,(0,0,0))\r\n self.cadena_aciertos= self.fuente.render(\"Aciertos\",False,(0,0,0))\r\n self.cadena_puntaje= self.fuente.render(\"Puntos\",False,(0,0,0))\r\n self.loser=self.fuente2.render(\"Defeat\",False,(255,255,255),(0,0,0))\r\n self.win=self.fuente2.render(\"Winner\",False,(255,255,255),(0,0,0))\r\n \r\n #INSTANCIAS de las barras\r\n self.c_a= Imagen(self.cuadro1,\"c_a\",185,310)\r\n self.c_b=Imagen(self.cuadro1,\"c_b\",455,310)\r\n self.c_c=Imagen(self.cuadro1,\"c_c\",185,400)\r\n self.c_d=Imagen(self.cuadro1,\"c_d\",455,400)\r\n \r\n mouse= Cursor() \r\n self.limpiar=self.ventana.copy()\r\n \r\n try:\r\n for x in self.manager1:# lee las preguntas\r\n for y in self.manager2:#lee las respuestas\r\n while(True):\r\n self.ventana.blit(self.limpiar,(0,0))\r\n self.ventana.blit(self.fondo_juego,(0,0))\r\n \r\n self.temp=[]\r\n self.lista_cuadros =[self.c_a,self.c_b,self.c_c,self.c_d]\r\n \r\n self.cont+=1\r\n \r\n #Cuadros rectangulares\r\n pygame.draw.rect(self.ventana,(0,200,0),(718,20,90,80),0)\r\n pygame.draw.rect(self.ventana,(0,200,0),(718,110,90,60),0)\r\n \r\n pygame.draw.rect(self.ventana,(255,255,255),(721,22,77,76),0)\r\n pygame.draw.rect(self.ventana,(255,255,255),(721,112,77,56),0)\r\n \r\n #Fuente puestas en marcha de imprecion sobre los cuadros\r\n self.tiempo= self.fuente2.render(str(self.cont/17),True,(0,0,0),(255,255,255)) \r\n self.puntos=self.fuente.render(str(self.puntaje),False,(0,0,0))\r\n self.puntaje_final=self.fuente2.render(str(self.puntaje),True,(255,255,255),(0,0,0))\r\n \r\n #cadenas numericas impresas en los cuadros\r\n self.ventana.blit(self.tiempo,(745,50))\r\n self.ventana.blit(self.puntos,(750,140))\r\n \r\n #cadenas impresas en los cuadros \r\n self.ventana.blit(self.cadena_tiempo,(720,25))\r\n self.ventana.blit(self.cadena_puntaje,(720,113))\r\n \r\n self.c_a.dibujar(self.ventana)\r\n self.c_b.dibujar(self.ventana)\r\n 
self.c_c.dibujar(self.ventana)\r\n self.c_d.dibujar(self.ventana)\r\n \r\n self.ventana.blit(self.a,(150,300))\r\n self.ventana.blit(self.b,(420,300))\r\n self.ventana.blit(self.c,(150,390))\r\n self.ventana.blit(self.d,(420,390))\r\n \r\n #Fuentes e Imprecion e la pregunta\r\n pregunta = self.fuente3.render(str(x[self.cont2]),False,(0,255,0))\r\n self.ventana.blit(pregunta,(8,80))\r\n \r\n #FUENtes e imprecion de las opciones\r\n p_a=self.fuente1.render(str(list[n][self.cont2][0]),True,(0,0,0))\r\n p_b=self.fuente1.render(str(list[n][self.cont2][1]),True,(0,0,0))\r\n p_c=self.fuente1.render(str(list[n][self.cont2][2]),True,(0,0,0))\r\n p_d=self.fuente1.render(str(list[n][self.cont2][3]),True,(0,0,0))\r\n \r\n self.ventana.blit(p_a,(223,310))\r\n self.ventana.blit(p_b,(493,310))\r\n self.ventana.blit(p_c,(225,400))\r\n self.ventana.blit(p_d,(493,400))\r\n mouse.update()\r\n \r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n \r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if((self.pulsaciones%2)==0):\r\n \r\n if(self.c_a.nombre == y[self.cont2] and mouse.colliderect(self.c_a)):\r\n self.c_a.fhoto= self.cuadro2\r\n self.puntaje+=10\r\n self.s_bien.play()\r\n self.lista_cuadros.remove(self.c_a)\r\n \r\n elif(self.c_b.nombre == y[self.cont2]and mouse.colliderect(self.c_b)):\r\n self.c_b.fhoto= self.cuadro2\r\n self.puntaje+=10\r\n self.s_bien.play()\r\n self.lista_cuadros.remove(self.c_b)\r\n \r\n elif(self.c_c.nombre == y[self.cont2]and mouse.colliderect(self.c_c)):\r\n self.c_c.fhoto= self.cuadro2\r\n self.puntaje+=10\r\n self.s_bien.play()\r\n self.lista_cuadros.remove(self.c_c)\r\n \r\n elif(self.c_d.nombre == y[self.cont2]and mouse.colliderect(self.c_d)):\r\n self.c_d.fhoto= self.cuadro2\r\n self.puntaje+=10\r\n self.s_bien.play()\r\n self.lista_cuadros.remove(self.c_d)\r\n \r\n else:\r\n for z in range(0,len(self.lista_cuadros)):\r\n if(self.lista_cuadros[z].nombre == y[self.cont2]): \r\n self.temp.append(self.lista_cuadros[z])\r\n self.lista_cuadros[z].fhoto=self.cuadro3 \r\n self.temp[0].fhoto= self.cuadro2\r\n self.s_mal.play() \r\n self.pulsaciones+=1\r\n \r\n if event.type == pygame.MOUSEBUTTONUP: \r\n #Si se cliquea dos veces se cambia de pregunta y cuadros\r\n if((self.pulsaciones%2)==0):\r\n \r\n for z in self.lista_cuadros: \r\n z.fhoto= self.cuadro1\r\n self.cont2+=1 \r\n \r\n if(self.pulsaciones >=20):\r\n if(self.puntaje >=100):\r\n self.ventana.blit(self.final_fondo,(0,0))\r\n self.ventana.blit(self.win,(340,330))\r\n self.ventana.blit(self.puntaje_final,(381,370))\r\n else:\r\n self.ventana.blit(self.final_fondo,(0,0))\r\n self.ventana.blit(self.loser,(340,330))\r\n self.ventana.blit(self.puntaje_final,(381,370)) \r\n self.reloj.tick(20)\r\n pygame.display.update()\r\n pygame.quit() \r\n \r\n \r\n except(IndexError):#Antes de regresar a las tematicas muestra puntaje y cierra archivo\r\n \r\n time.sleep(3)\r\n return \r\n answer.close()\r\n reply.close()\r\n \r\n def juego(self,pos1,pos2):\r\n\r\n #abriendo archivos\r\n self.p_astronomia = open(\"P_Astronomia.csv\")\r\n self.p_geografia=open(\"P_geografia.csv\")\r\n self.p_historia=open(\"P_Historia.csv\")\r\n \r\n \r\n #abriendo respuestas\r\n self.r_astronomia=open(\"R_Astronomia.csv\")\r\n self.r_geografia= open(\"R_geografria.csv\")\r\n self.r_historia=open(\"R_Historia.csv\")\r\n \r\n #opciones de astronomia\r\n self.m1=[\"Priamo\",\"Sol\",\"Pinta\",\"Santa Maria\"]\r\n self.m2=[\"Triton\",\"Kerberos\",\"Andromeda\",\"Luna\"]\r\n 
self.m3=[\"Ulices\",\"Halley\",\"Antrogenia\",\"H23G\"]\r\n self.m4=[\"Via Lactea\",\"Ganimides\",\"Apolo\",\"Andromeda\"]\r\n self.m5=[\" 3\",\" 6\",\" 10\",\" 7\"]\r\n self.m6=[\"Galaxia\",\"Estrella\",\"Cometa\",\"Agujero Negro\"]\r\n self.m7=[\"Big Bang\",\"Colicion Planetaria\",\"Impacto espacial\",\"Bomba Nuclear\"]\r\n self.m8=[\"Luna\",\"Tierra\",\"Saturno\",\"Jupiter\"]\r\n self.m9=[\"Mercurio\",\"H23G\",\"Marte\",\"Arquimides\"]\r\n self.m10=[\"Gonz4 rg\",\"Astrolavio\",\"Alienigena\",\"Mutante\"]\r\n \r\n #opciones de geografia\r\n self.g1=[\"Mongolia\",\"Nueva Zelanda\",\"Tibet\",\"Thailandia\"]\r\n self.g2=[\"peru\",\"Bolivia\",\"argentina\",\"chile\"]\r\n self.g3=[\"Africa\",\"Rusia\",\"Belgica\",\"Australia\"]\r\n self.g4=[\"Buenos Aires\",\"Ottawa\",\"Lima\",\"La plata\"]\r\n self.g5=[\"chile\",\"Rusia\",\"Brazil\",\"Canada\"]\r\n self.g6=[\"Uruguay\",\"New York\",\"Roma\",\"Chacarita\"]\r\n self.g7=[\"Inglaterra\",\"Thailandia\",\"Argentina\",\"EE.UU\"]\r\n self.g8=[\"Holanda\",\"Rusia\",\"Belgica\",\"Escocia\"]\r\n self.g9=[\"New jersey\",\"Rio Gallegos\",\"Finlandia\",\"Santa Cruz\"]\r\n self.g10=[\"Africa\",\"Oceania\",\"America\",\"Europa\"]\r\n \r\n #opciones de historia\r\n self.h1=[\" 2010 D.C\",\" 444 A.C\",\" 1608 D.C\",\" 778 D.C\"]\r\n self.h2=[\"Leonardo Davinchi\",\"Miguel Angel\",\"Isacc Newton\",\"Napoleon\"]\r\n self.h3=[\"Argentina\",\"Roma\",\"Chile\",\"Belgica\"]\r\n self.h4=[\"Steve Jobs\",\"Fhill Collin\",\"Antonio Stradivari\",\"Ash Kernel\"]\r\n self.h5=[\"El Destripador\",\"Harold Shipman\",\"Ricardo iorio\",\"Henry Bogard\"]\r\n self.h6=[\"Steban kito\",\"James Stolder\",\"Tenenbaum\",\"James Smithson\"]\r\n self.h7=[\" 1.7 mil A.C\",\" 500 D.C\",\" 2011 D.C\",\" 1.5 mil A.C\"]\r\n self.h8=[\"Steve Wozniak\",\"Steve Jobs\",\"Bill Gates\",\"Kevin mitnik\"]\r\n self.h9=[\" Musulmana\",\" Judia\",\" Catolica\",\" Profetica\"]\r\n self.h10=[\"Musico Italiano\",\" Mercenario\",\"10 de Boca\",\" Hacker\"]\r\n \r\n self.answer_major=[[self.m1,self.m2,self.m3,self.m4,self.m5,self.m6,self.m7,self.m8,self.m9,self.m10],[self.g1,self.g2,self.g3,self.g4,self.g5,self.g6,self.g7,self.g8,self.g9,self.g10],[self.h1,self.h2,self.h3,self.h4,self.h5,self.h6,self.h7,self.h8,self.h9,self.h10]]\r\n \r\n self.list_registry= [self.p_astronomia,self.p_geografia,self.p_historia,\"No Interrumpir\"]#+1 = no Fail in range\r\n self.list_reply=[self.r_astronomia,self.r_geografia,self.r_historia,\"No Interrumpir\"]#+1 = no Fail in range\r\n \r\n self.Play(self.list_registry,self.list_reply,self.answer_major,pos1,pos2)\r\n \r\n def menu(self):\r\n \r\n \r\n #INSTANCIAS PUESTAS EN PANTALLA\r\n self.ventana.blit(self.imagen1,(0,0))\r\n self.b_jugar.update(self.ventana, self.mouse)\r\n self.b_creditos.update(self.ventana, self.mouse)\r\n self.b_salir.update(self.ventana, self.mouse)\r\n \r\n pygame.display.update()\r\n\r\n \r\n def tematicas(self):\r\n \r\n self.fondo_tematico.dibujar(self.ventana)\r\n \r\n #Cindicion sobre el cambio de imagen\r\n if(self.mouse.colliderect(self.b_astro)):\r\n self.ventana.blit(self.f_astro,(0,0))\r\n if(self.mouse.colliderect(self.b_geo)):\r\n self.ventana.blit(self.f_geo,(0,0))\r\n if(self.mouse.colliderect(self.b_histo)):\r\n self.ventana.blit(self.f_histo,(0,0))\r\n \r\n self.b_astro.update(self.ventana, self.mouse)\r\n self.b_geo.update(self.ventana, self.mouse)\r\n self.b_histo.update(self.ventana, self.mouse)\r\n \r\n self.b_Return_menu.update(self.ventana,self.mouse)\r\n\r\n def credito(self):\r\n \r\n 
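# credits screen: draw the full-screen credits image and the return-to-menu button\r\n 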
self.Fondo_credito.dibujar(self.ventana)\r\n self.b_Return_menu2.update(self.ventana, self.mouse) \r\n\r\n \r\n def Motor(self):\r\n \r\n self.derecho_menu= True\r\n self.derecho_credito=False\r\n self.derecho_tematica=False\r\n self.clear= self.ventana.copy()\r\n pygame.mixer.music.play(3)\r\n while(True):\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n \r\n elif(event.type == pygame.MOUSEBUTTONUP):\r\n \r\n if(self.mouse.colliderect(self.b_jugar)):\r\n self.s_toque.play()\r\n self.derecho_menu = False\r\n self.derecho_tematica=True \r\n \r\n elif(self.mouse.colliderect(self.b_creditos)):\r\n self.s_toque.play()\r\n self.derecho_menu = False\r\n self.derecho_tematica=False\r\n self.derecho_credito= True\r\n \r\n elif(self.mouse.colliderect(self.b_salir)):\r\n self.s_toque.play()\r\n exit()\r\n \r\n elif(self.mouse.colliderect(self.b_Return_menu)):\r\n self.s_toque.play()\r\n Preguntados.Motor(self)\r\n \r\n elif(self.mouse.colliderect(self.b_Return_menu2)):\r\n self.s_toque.play()\r\n Preguntados.Motor()\r\n \r\n elif(self.mouse.colliderect(self.b_astro)):\r\n self.s_toque.play()\r\n Preguntados.juego(self,0,0)\r\n \r\n elif(self.mouse.colliderect(self.b_geo)):\r\n self.s_toque.play()\r\n Preguntados.juego(self,1,1)\r\n \r\n elif(self.mouse.colliderect(self.b_histo)):\r\n self.s_toque.play()\r\n Preguntados.juego(self,2,2) \r\n \r\n if(self.derecho_menu == True):\r\n \r\n \r\n (self.b_jugar.rect.left,self.b_jugar.rect.top)=(293,240)\r\n (self.b_creditos.rect.left,self.b_creditos.rect.top)=(270,330)\r\n (self.b_salir.rect.left,self.b_salir.rect.top)=(307,414)\r\n Preguntados.menu(self)\r\n \r\n elif(self.derecho_credito == True):\r\n \r\n #(b_jugar.rect.left,b_jugar.rect.top)=(0,0)\r\n #(b_creditos.rect.left,b_creditos.rect.top)=(0,0)\r\n (self.b_salir.rect.left,self.b_salir.rect.top)=(-100,0)\r\n \r\n (self.b_astro.rect.left,self.b_astro.rect.top)=(-100,0)\r\n (self.b_geo.rect.left,self.b_geo.rect.top)= (-100,0)\r\n (self.b_histo.rect.left,self.b_histo.rect.top)=(-100,0) \r\n \r\n Preguntados.credito(self)\r\n \r\n elif(self.derecho_tematica == True):\r\n \r\n (self.b_astro.rect.left,self.b_astro.rect.top)=(293,240)\r\n (self.b_geo.rect.left,self.b_geo.rect.top)= (304,300)\r\n (self.b_histo.rect.left,self.b_histo.rect.top)=(313,361)\r\n \r\n (self.b_jugar.rect.left,self.b_jugar.rect.top)=(-100,0)\r\n (self.b_creditos.rect.left,self.b_creditos.rect.top)=(-100,0)\r\n (self.b_salir.rect.left,self.b_salir.rect.top)=(-100,0)\r\n \r\n Preguntados.tematicas(self)\r\n \r\n self.mouse.update()\r\n self.reloj.tick(15)\r\n pygame.display.update()\r\n pygame.quit() \r\n\r\n \r\njuego = Preguntados()\r\n\r\n \r\n ","repo_name":"Gonz4Man/ensayos-complejidad","sub_path":"Gonz4PYTHON/TP(algoritmosyprogramas)/Integrador.py","file_name":"Integrador.py","file_ext":"py","file_size_in_byte":23120,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38648539032","text":"# -----------------------------------------------------------\n# Demonstrates how to plot a well path in 3D \n#\n# x is east, y is north, and z is total vertical depth (TVD)\n#\n# (C) 2020 Irene Wallis, Auckland, New Zealand \n# Email: irene@cubicearth.nz\n# Released under a permissive open source licence Apache 2.0 \n# https://choosealicense.com/licenses/apache-2.0/\n# -----------------------------------------------------------\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pandas as pd\nimport math\n\n# 
https://github.com/ICWallis/fractoolbox\nimport fractoolbox as ftb\n\n#\n# Import well survey data as a pandas dataframe\n#\n\ndfsurvey = pd.read_csv('testdata-survey.csv')\n#print(dfsurvey.head(2))\n\n#\n# Define the casing shoe pandas dataframe\n#\n\ndata = {\n 'shoe': ['production casing shoe'],\n 'depth_mMDRF': [1000],\n }\ndfcasingshoe = pd.DataFrame(data)\n\n# Interpolate casing shoe xyz using fractoolbox.xyzinterp\nmDdat = dfcasingshoe['depth_mMDRF']\nmDsur = dfsurvey['depth_mMDRF']\nxsur = dfsurvey['easting_m']\nysur = dfsurvey['northing_m']\nzsur = dfsurvey['TVD_mRF']\ndfxyz = ftb.xyzinterp(mDdat, mDsur, xsur, ysur, zsur) \ndfxyz.columns = ['depth_mMDRFx','easting_m','northing_m','TVD_mRF']\ndfcasingshoe = pd.concat([dfcasingshoe,dfxyz], axis=1, join='inner')\ndfcasingshoe = dfcasingshoe.drop(['depth_mMDRFx'],1)\n#print(dfsurvey.head(2))\n\n#\n# Plot the data\n#\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\n\n# Plot well path\nax.plot(\n dfsurvey['easting_m'],\n dfsurvey['northing_m'],\n dfsurvey['TVD_mRF'],\n color='k',\n linewidth=1,\n )\n\n# Plot casing shoe\nax.scatter(\n dfcasingshoe.iloc[0]['easting_m'],\n dfcasingshoe.iloc[0]['northing_m'],\n dfcasingshoe.iloc[0]['TVD_mRF'],\n s=40, color='k'\n )\n\n#\n# Set z axis limits\n#\n# This well plot method uses TVD so the Z axis arguments are plotted\n# in the reverse of the cartesian coordinate system, which means that\n# the values get larger in the downward direction\n\n# Set the top of the plot: \n# use 0 if you want the rig floor at the top of the plot\nZshallowest = 0\n\n# Set the bottom of the plot: \n# set here to the well terminal depth but could be set deeper\nZdeepest = dfsurvey['TVD_mRF'].max() \n\nax.set_zlim(Zdeepest,Zshallowest)\n\n#\n# Set the xy axis limits\n#\n# Two methods are provided for setting xy lims\n# Just comment out the one you don't want to use\n\n# XYlims method 1: \n# Plot centred on the well but not 1:1 scale\n# This method also forces rounding so the axis numbers plot nicely\n'''\nXmin = dfsurvey['easting_m'].min()\nXmax = dfsurvey['easting_m'].max()\nprint('\\n','Xmin =', Xmin,'Xmax =', Xmax,'\\n')\n\nYmin = dfsurvey['northing_m'].min()\nYmax = dfsurvey['northing_m'].max()\nprint('Ymin =', Ymin,'Ymax =', Ymax,'\\n')\n\ndef roundup(x):\n return int(math.ceil(x / 1000.0)) * 1000\ndef rounddown(x):\n return int(math.floor(x / 1000.0)) * 1000\n\nYminR = rounddown(Ymin)\nYmaxR = roundup(Ymax)\n\nXminR = rounddown(Xmin)\nXmaxR = roundup(Xmax)\n\nax.set_xlim(XminR,XmaxR)\nax.set_ylim(YminR,YmaxR)\n'''\n\n# XYlims method 2: \n# Set x,y axis to make the plot scale 1:1 (horizontal to vertical)\n# \n# To make the scale 1:1 for a single well:\n# Take the min X and Y values and add the well terminal depth (mVD)\n#\n# To make the scale 1:1 for multiple wells:\n# Take the min X and Y values that capture all the wells and then add\n# either the plot depth or width, whichever is greater.\n# Calculate the maximum horizontal distance including all wells \n# If it is greater that the terminal depth (mVD) of the deepest well, \n# then use the maximum horizontal distance\n# Otherwise, use the terminal depth (mVD) of the deepest well\n#\n# This method also arranges the figure so it is centered on the casing shoe\n# Replacing dfcasingshoe with dfsurvey will arrange the figure so it's \n# centred on the rig floor\n\nax.set_xlim(\n dfcasingshoe.iloc[0]['easting_m'] - Zdeepest / 2,\n dfcasingshoe.iloc[0]['easting_m'] + Zdeepest / 2\n )\n\nax.set_ylim(\n dfcasingshoe.iloc[0]['northing_m'] - Zdeepest / 2,\n 
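# the y span equals Zdeepest, matching the x span and the plotted z range, which keeps the scale 1:1\n 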
dfcasingshoe.iloc[0]['northing_m'] + Zdeepest / 2\n )\n\n#\n# Format plot\n# \n# A rage of options for formatting the plot\n# Just comment out the ones you don't want to use\n\n# Get rid of axis tick labels\nax.xaxis.set_ticklabels([])\nax.yaxis.set_ticklabels([])\n#ax.zaxis.set_ticklabels([])\n\n# Set axes as three lines\n[t.set_va('center') for t in ax.get_yticklabels()]\n[t.set_ha('left') for t in ax.get_yticklabels()]\n[t.set_va('center') for t in ax.get_xticklabels()]\n[t.set_ha('right') for t in ax.get_xticklabels()]\n[t.set_va('center') for t in ax.get_zticklabels()]\n[t.set_ha('left') for t in ax.get_zticklabels()]\n\n# Make the panes transparent\nax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\nax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\nax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n# Make all the grid lines transparent\n#ax.grid(False)\n\n# Make one set of grid lines transparent\nax.xaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\nax.yaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n#ax.zaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n\n# Turn the grid and fill off\nax.xaxis.pane.set_edgecolor('black')\nax.yaxis.pane.set_edgecolor('black')\nax.xaxis.pane.fill = False\nax.yaxis.pane.fill = False\nax.zaxis.pane.fill = False\n\n#\n# Set the start view for plt.show() or the exported view for plt.savefig()\n#\n\n#ax.view_init(elev=0.,azim=-90.) # elevation view standing in the south and facing north \n#ax.view_init(elev=0.,azim=0.) # elevation view standing in the west and facing east\nax.view_init(elev=0.,azim=-180.) # elevation view standing in the east and facing west\n#ax.view_init(elev=90.,azim=-90.) # plan view oriented north \n#ax.view_init(elev=20,azim=-120.) # oblique view \n\nplt.show()\n","repo_name":"ICWallis/geothermal-cookbook","sub_path":"3D-well-plot.py","file_name":"3D-well-plot.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"67"} +{"seq_id":"71867495253","text":"import sys\nimport torch\nimport torch.nn as nn\nimport torchvision.ops\nfrom torch.nn.utils import weight_norm\n\n\n# TCN\nclass tcn(nn.Module):\n def __init__(self, tcn_size):\n super(tcn, self).__init__()\n self.tcn_size = tcn_size\n\n def forward(self, x):\n x_new = x[:, :, :-self.tcn_size]\n return x_new.contiguous()\n\n\n# Deformable\nclass DeformableConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, padding, dilation, pad_mode):\n super(DeformableConv2d, self).__init__()\n\n self.padding = (padding, 0)\n self.dilation = (dilation, 1)\n self.ks = (kernel_size, 1)\n\n self.offset_conv = nn.Conv2d(in_channels, 2 * kernel_size, self.ks, padding=self.padding, dilation=self.dilation, padding_mode=pad_mode, bias=True)\n nn.init.constant_(self.offset_conv.weight, 0.)\n nn.init.constant_(self.offset_conv.bias, 0.)\n\n self.modulator_conv = nn.Conv2d(in_channels, kernel_size, self.ks, padding=self.padding, dilation=self.dilation, padding_mode=pad_mode, bias=True)\n nn.init.constant_(self.modulator_conv.weight, 0.)\n nn.init.constant_(self.modulator_conv.bias, 0.)\n\n self.regular_conv = nn.Conv2d(in_channels, out_channels, self.ks, padding=self.padding, dilation=self.dilation, padding_mode=pad_mode, bias=False)\n\n def forward(self, x):\n h, w = x.shape[2:]\n max_offset = max(h, w) / 4.\n offset = self.offset_conv(x).clamp(-max_offset, max_offset)\n modulator = 2. 
* torch.sigmoid(self.modulator_conv(x))\n\n x = torchvision.ops.deform_conv2d(input=x, offset=offset, weight=self.regular_conv.weight, bias=self.regular_conv.bias, padding=self.padding, dilation=self.dilation, mask=modulator)\n return x\n\n\n# One Conv. block\nclass Block(nn.Module):\n def __init__(self, model, c_in, c_out, ks, pad, dil, deformable):\n super(Block, self).__init__()\n self.model = model\n self.deform = deformable\n\n if model == 'CDIL':\n pad_mode = 'circular'\n else:\n pad_mode = 'zeros'\n\n if self.deform:\n self.conv = DeformableConv2d(c_in, c_out, ks, pad, dil, pad_mode)\n else:\n self.conv = weight_norm(nn.Conv1d(c_in, c_out, ks, padding=pad, dilation=dil, padding_mode=pad_mode))\n self.conv.weight.data.normal_(0, 0.01)\n self.conv.bias.data.normal_(0, 0.01)\n\n if model == 'TCN':\n self.cut = tcn(pad)\n self.tcn = nn.Sequential(self.conv, self.cut)\n\n self.res = nn.Conv1d(c_in, c_out, kernel_size=(1,)) if c_in != c_out else None\n if self.res is not None:\n self.res.weight.data.normal_(0, 0.01)\n self.res.bias.data.normal_(0, 0.01)\n\n self.nonlinear = nn.ReLU()\n\n def forward(self, x):\n if self.model == 'TCN':\n net = self.tcn\n else:\n net = self.conv\n\n if self.deform:\n x_2d = x.unsqueeze(-1)\n out = net(x_2d)\n res = x if self.res is None else self.res(x)\n y = self.nonlinear(out) + res.unsqueeze(-1)\n return y.squeeze(-1)\n else:\n out = net(x)\n res = x if self.res is None else self.res(x)\n return self.nonlinear(out) + res\n\n\n# Conv. blocks\nclass ConvPart(nn.Module):\n def __init__(self, model, dim_in, hidden_channels, ks, deformable, dynamic):\n super(ConvPart, self).__init__()\n layers = []\n num_layer = len(hidden_channels)\n begin = 1 if dynamic else 0\n for i in range(begin, num_layer):\n this_in = dim_in if i == 0 else hidden_channels[i - 1]\n this_out = hidden_channels[i]\n if model == 'CNN':\n this_dilation = 1\n this_padding = int((ks - 1) / 2)\n else:\n this_dilation = 2 ** i\n if model == 'TCN':\n this_padding = this_dilation * (ks - 1)\n elif model == 'CDIL' or model == 'DIL':\n this_padding = int(this_dilation*(ks-1)/2)\n else:\n print('no this model.')\n sys.exit()\n if i < (num_layer-3):\n layers += [Block(model, this_in, this_out, ks, this_padding, this_dilation, False)]\n else:\n layers += [Block(model, this_in, this_out, ks, this_padding, this_dilation, deformable)]\n self.conv_net = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.conv_net(x)\n\n\n# Conv. 
+ classifier\nclass CONV(nn.Module):\n def __init__(self, task, model, input_size, output_size, num_channels, kernel_size, deformable=False, dynamic=False, use_embed=False, char_vocab=None, fix_length=True):\n super(CONV, self).__init__()\n self.task = task\n self.model = model\n self.dynamic = dynamic\n self.use_embed = use_embed\n self.fix_lengh = fix_length\n\n if self.use_embed:\n self.embedding = nn.Embedding(char_vocab, input_size)\n\n self.conv = ConvPart(model, input_size, num_channels, kernel_size, deformable, dynamic)\n\n if self.task != 'retrieval_4000':\n self.linear = nn.Linear(num_channels[-1], output_size)\n\n def forward(self, x, mask=None):\n if self.use_embed:\n x = self.embedding(x)\n if not self.dynamic:\n x = x.permute(0, 2, 1).to(dtype=torch.float) # out: num, dim, length\n # print(x.shape)\n # sys.exit()\n y_conv = self.conv(x)\n\n if self.model == 'TCN':\n if self.fix_lengh:\n y_class = y_conv[:, :, -1]\n else:\n P = mask.unsqueeze(1).expand(y_conv.size(0), y_conv.size(1)).unsqueeze(2)\n y_class = y_conv.gather(2, P).squeeze(2)\n else:\n y_class = torch.mean(y_conv, dim=2)\n\n if self.task == 'retrieval_4000':\n return y_class\n else:\n y = self.linear(y_class)\n return y\n","repo_name":"LeiCheng-no/CDIL-CNN","sub_path":"experiments/Models/net_conv.py","file_name":"net_conv.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"67"} +{"seq_id":"71812706135","text":"import cv2\nimport numpy as np\nimport os\nfrom datetime import date, datetime\nfrom tkinter import Tk, filedialog\nimport pickle\nimport Pose_stimation_functions as Ps\n\ndef pixel_dist(point1,point2):\n dist=np.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)\n return dist\n\n# Definir propiedades de los Aruco\naruco_dict=cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_1000)\narucoParameters = cv2.aruco.DetectorParameters_create()\naruco_marker_side_length=0.065 # Tamaño del lado del Aruco en m\n\nwith open('calibration_basler.pckl', 'rb') as f:\n mtx, dist = pickle.load(f)\n\n\n# Definir carpeta de origen y Crear carpeta en la que colocar las nuevas imagenes\nPath_Origen = filedialog.askdirectory(initialdir = r'D:\\CoMAr Data\\BBDD\\Bag Images') # Returns opened path as str\nPath_Destino_Img_Filtradas=Path_Origen+\"_Filtradas_Ext\"\nPath_Destino_Etiquetas=Path_Origen+\"_Etiquetas\"\n\n# Crear carpeta de destino si no existe\nif not os.path.exists(Path_Destino_Etiquetas): os.mkdir(Path_Destino_Etiquetas)\nif not os.path.exists(Path_Destino_Img_Filtradas): os.mkdir(Path_Destino_Img_Filtradas)\n\nimages = []\n\nfor filename in os.listdir(Path_Origen):\n img = cv2.imread(os.path.join(Path_Origen,filename))\n img_copy=np.copy(img)\n corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(img, aruco_dict, \n parameters=arucoParameters)\n mainImgName, file_extension = os.path.splitext(filename) \n if img is not None and corners!=[]:\n for i in range(ids.shape[0]):\n #images.append(img)\n\n # extract Aruco pose\n _centerY = int((corners[i][0][0][1] + corners[i][0][2][1]) / 2)\n _centerX = int((corners[i][0][0][0] + corners[i][0][2][0]) / 2)\n _cornerX= corners[i][0][0][0] \n _cornerY= corners[i][0][0][1]\n rvecs, tvecs, obj_points = cv2.aruco.estimatePoseSingleMarkers(\n corners,\n aruco_marker_side_length,\n mtx,\n dist)\n\n _, axis_values =Ps.pose_stimation(img,ids,mtx,dist,rvecs,tvecs)\n pos_x=tvecs[i][0][0]\n pos_y=tvecs[i][0][1]\n pos_z=tvecs[i][0][2]\n\n px_x=_centerX #Pos del cel centro del Aruco\n px_y=_centerY 
#Pos del cel centro del Aruco\n\n px_Cx=_cornerX #Pos de la esquina caracteristica del Aruco\n px_Cy=_cornerY #Pos de la esquina caracteristica del Aruco\n\n roll_x=axis_values[i][0] # Eje verde\n pitch_y=axis_values[i][1]\n yaw_z=axis_values[i][2]\n\n id_ar=ids[i][0] \n\n\n # Save labeled Image\n Id_tag=\"_ID_{}\".format(id_ar)\n imgName=mainImgName+Id_tag+file_extension\n cv2.imwrite(os.path.join(Path_Destino_Img_Filtradas,imgName),img_copy)\n\n # Save Label Data\n label_name=mainImgName+Id_tag+'.txt'\n f= open(os.path.join(Path_Destino_Etiquetas,label_name),\"a+\")\n\n f.write(\"{},{},{},{:.2f},{:.2f},{:.2f},{},{}\\n\".format( \n id_ar,\n int(px_x),int(px_y),\n roll_x,pitch_y,yaw_z,\n int(px_Cx),int(px_Cy) \n ))\n f.close()\n\n\n\nprint('Done!')","repo_name":"AnderSanchezUPV/Aruco_Testing","sub_path":"Aruco_Image_filter_and_Labeling.py","file_name":"Aruco_Image_filter_and_Labeling.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27415480939","text":"import typer\nfrom cryptoml_core.services.model_service import ModelService\nimport json, time\n\n\n# app = typer.Typer()\n\n\n# @app.command()\n# def clear_parameters(queryfile: str):\n# with open(queryfile, 'r') as f:\n# query = json.load(f)\n# service = ModelService()\n# service.clear_parameters(query)\n#\n#\n# @app.command()\n# def clear_features(queryfile: str):\n# with open(queryfile, 'r') as f:\n# query = json.load(f)\n# service = ModelService()\n# service.clear_features(query)\n\n\n# @app.command()\ndef main(queryfile: str):\n with open(queryfile, 'r') as f:\n query = json.load(f)\n service = ModelService()\n print(f\"Clearing {query}\")\n time.sleep(5)\n service.clear_tests(query)\n\n\nif __name__ == '__main__':\n # app()\n typer.run(main)\n","repo_name":"RedLicorice/CryptoML-API","sub_path":"app/manage_model.py","file_name":"manage_model.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"365244442","text":"import logging\n\nfrom nfe_scanner.models import Nfe\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef console_report(nfes: list[Nfe]):\n LOGGER.info(\"%s\", \"=\" * 25 + \"RESULT\" + \"=\" * 25)\n for nfe in nfes:\n LOGGER.info(nfe)\n LOGGER.info(\"-\" * 50)\n","repo_name":"jonathadv/nfe-scanner","sub_path":"nfe_scanner/reports/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42805108130","text":"class Student:\n def __init__(self):\n self.name = \"Manzoor\"\n self.age = 22\n def update(self):\n self.name = \"Ali\"\n def compare(self,other): # compare(who is calling,whom to compare)\n if self.age == other.age:\n return True\n else:\n return False\ns1 = Student()\ns2 = Student()\ns2.name = \"Mazhar\"\ns2.age = 18\n\ns2.update()\nprint(s1.name)\nprint(s1.age)\nprint(s2.name)\nprint(s2.age)\n\nif s1.compare(s2):\n print(\"They are in same age\")\nelse:\n print(\"They have different age\")\n\nclass Demo:\n pass\n\ndemo = Demo()\nprint(id(demo))","repo_name":"manzoorHusain/Python-OOP","sub_path":"OOP python/oop2.py","file_name":"oop2.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32144269672","text":"import math\nfrom enum import Enum\nfrom collections import namedtuple\n\nSize = 
namedtuple('Size', ['width', 'height'])\n\n# CONSTANTS\nBALL_SIZE = Size(35, 35)\nPADDLE_SIZE = Size(200, 25)\nBONUS_SIZE = Size(35, 35)\nBRICK_SIZE = Size(100, 35)\n\nBALL_VELOCITY = 12\nPADDLE_VELOCITY = 25\nBONUS_VELOCITY = 10\n\nBALL_DIRECTION = (-1, 1)\nPADDLE_DIRECTION = (0, 0)\nBONUS_DIRECTION = (0, 1)\n\n\nclass BallState(Enum):\n Caught = 0\n Free = 1\n Powerful = 2\n\n\nclass Bonuses(Enum):\n DecreaseBonus = 0\n ExpandBonus = 1\n FireBallBonus = 2\n FastBallBonus = 3\n LifeBonus = 4\n DeathBonus = 5\n\n","repo_name":"PejicM/BreakoutGame","sub_path":"BreakoutProject/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40513911463","text":"from bs4 import BeautifulSoup\n\nmonths_list = [\"december\", \"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"july\"]\n\nfor month in months_list:\n with open(\"{}.html\".format(month), \"r\") as html_file:\n html = BeautifulSoup(html_file, \"html.parser\")\n\n raw_schedule = html.find(id=\"div_schedule\")\n \n# tags of interest \"toi\"\nurls = [tag.find(\"a\")[\"href\"] for tag in html.find_all(attrs={\"data-stat\": \"box_score_text\"}, scope=False) if len(tag.contents) == 1]\n\nwith open(\"urls.txt\", \"a+\") as url_file:\n url_file.write(\"\\n\".join(urls))\n \n","repo_name":"achenchen1/bball-stats-playground","sub_path":"importer/schedule_scraper/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29089042989","text":"import argparse\nimport gc\nimport os\n\nimport torch\n\nimport data_doc3D\nimport data_UVDoc\nimport model\nimport utils\nfrom data_mixDataset import mixDataset\n\ntrain_mse = 0.0\nlosscount = 0\ngamma_w = 0.0\n\n\ndef setup_data(args):\n \"\"\"\n Returns train and validation dataloader.\n \"\"\"\n doc3D = data_doc3D.doc3DDataset\n UVDoc = data_UVDoc.UVDocDataset\n traindata = \"train\"\n valdata = \"val\"\n\n # Training data\n t_doc3D_data = doc3D(\n data_path=args.data_path_doc3D,\n split=traindata,\n appearance_augmentation=args.appearance_augmentation,\n )\n t_UVDoc_data = UVDoc(\n data_path=args.data_path_UVDoc,\n appearance_augmentation=args.appearance_augmentation,\n geometric_augmentations=args.geometric_augmentationsUVDoc,\n )\n t_mix_data = mixDataset(t_doc3D_data, t_UVDoc_data)\n if args.data_to_use == \"both\":\n trainloader = torch.utils.data.DataLoader(\n t_mix_data, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, pin_memory=True\n )\n elif args.data_to_use == \"doc3d\":\n trainloader = torch.utils.data.DataLoader(\n t_doc3D_data, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, pin_memory=True\n )\n else:\n raise ValueError(f\"data_to_use should be either doc3d or both, provided {args.data_to_use}.\")\n\n # Validation data (doc3D only)\n v_doc3D_data = doc3D(data_path=args.data_path_doc3D, split=valdata, appearance_augmentation=[])\n valloader = torch.utils.data.DataLoader(\n v_doc3D_data, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, pin_memory=True\n )\n\n return trainloader, valloader\n\n\ndef get_scheduler(optimizer, args, epoch_start):\n \"\"\"Return a learning rate scheduler\n Parameters:\n optimizer -- the optimizer of the network\n args -- stores all the experiment flags\n epoch_start -- the epoch number we started/continued from\n We keep the same 
learning rate for the first <args.n_epochs> epochs\n    and linearly decay the rate to zero over the next <args.n_epochs_decay> epochs.\n    \"\"\"\n\n    def lambda_rule(epoch):\n        lr_l = 1.0 - max(0, epoch + epoch_start - args.n_epochs) / float(args.n_epochs_decay + 1)\n        return lr_l\n\n    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n\n    return scheduler\n\n\ndef update_learning_rate(scheduler, optimizer):\n    \"\"\"Update learning rates; called at the end of every epoch\"\"\"\n    old_lr = optimizer.param_groups[0][\"lr\"]\n    scheduler.step()\n    lr = optimizer.param_groups[0][\"lr\"]\n    print(\"learning rate update from %.7f -> %.7f\" % (old_lr, lr))\n    return lr\n\n\ndef write_log_file(log_file_name, loss, epoch, lrate, phase):\n    with open(log_file_name, \"a\") as f:\n        f.write(\"\\n{} LRate: {} Epoch: {} MSE: {:.5f} \".format(phase, lrate, epoch, loss))\n\n\ndef main_worker(args):\n    # setup training data\n    trainloader, valloader = setup_data(args)\n\n    device = torch.device(\"cuda:0\")\n    UVDocnet = model.UVDocnet(num_filter=32, kernel_size=5)\n    UVDocnet.to(device)\n\n    # define loss functions\n    criterionL1 = torch.nn.L1Loss()\n    criterionMSE = torch.nn.MSELoss()\n\n    # initialize optimizers\n    optimizer = torch.optim.Adam(UVDocnet.parameters(), lr=args.lr, betas=(0.9, 0.999))\n\n    global gamma_w\n    epoch_start = 0\n\n    if args.resume is not None:\n        if os.path.isfile(args.resume):\n            print(\"Loading model and optimizer from checkpoint '{}'\".format(args.resume))\n            checkpoint = torch.load(args.resume)\n\n            UVDocnet.load_state_dict(checkpoint[\"model_state\"])\n            optimizer.load_state_dict(checkpoint[\"optimizer_state\"])\n            print(\"Loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint[\"epoch\"]))\n            epoch_start = checkpoint[\"epoch\"]\n            if epoch_start >= args.ep_gamma_start:\n                gamma_w = args.gamma_w\n        else:\n            print(\"No checkpoint found at '{}'\".format(args.resume))\n\n    # initialize learning rate schedulers\n    scheduler = get_scheduler(optimizer, args, epoch_start)\n\n    # Log file:\n    if not os.path.exists(args.logdir):\n        os.makedirs(args.logdir)\n\n    experiment_name = (\n        \"params\"\n        + str(args.batch_size)\n        + \"_lr=\"\n        + str(args.lr)\n        + \"_nepochs\"\n        + str(args.n_epochs)\n        + \"_nepochsdecay\"\n        + str(args.n_epochs_decay)\n        + \"_alpha\"\n        + str(args.alpha_w)\n        + \"_beta\"\n        + str(args.beta_w)\n        + \"_gamma=\"\n        + str(args.gamma_w)\n        + \"_gammastartep\"\n        + str(args.ep_gamma_start)\n        + \"_data\"\n        + args.data_to_use\n    )\n    if args.resume:\n        experiment_name = \"RESUME\" + experiment_name\n\n    log_file_name = os.path.join(args.logdir, experiment_name + \".txt\")\n    if os.path.isfile(log_file_name):\n        log_file = open(log_file_name, \"a\")\n    else:\n        log_file = open(log_file_name, \"w+\")\n\n    log_file.write(\"\\n--------------- \" + experiment_name + \" ---------------\\n\")\n    log_file.close()\n\n    exp_log_dir = os.path.join(args.logdir, experiment_name, \"\")\n    if not os.path.exists(exp_log_dir):\n        os.makedirs(exp_log_dir)\n\n    global losscount\n    global train_mse\n\n    # track the best validation MSE across epochs (initializing it inside the loop would reset it every epoch)\n    best_val_mse = 99999.0\n\n    # Run training\n    for epoch in range(epoch_start, args.n_epochs + args.n_epochs_decay + 1):\n        print(f\"\\n----- Epoch {epoch} -----\")\n        if epoch >= args.ep_gamma_start:\n            gamma_w = args.gamma_w\n            print(\"epoch \", epoch, \"gamma_w is now\", gamma_w)\n\n        train_mse = 0.0\n        losscount = 0\n\n        # Train\n        UVDocnet.train()\n\n        for batch in trainloader:\n            if args.data_to_use == \"both\":\n                (\n                    imgs_doc3D_,\n                    imgs_unwarped_doc3D_,\n                    grid2D_doc3D_,\n                    grid3D_doc3D_,\n                ) = batch[0]\n                (\n                    imgs_UVDoc_,\n                    imgs_unwarped_UVDoc_,\n                    grid2D_UVDoc_,\n                    
grid3D_UVDoc_,\n ) = batch[1]\n elif args.data_to_use == \"doc3d\":\n (\n imgs_doc3D_,\n imgs_unwarped_doc3D_,\n grid2D_doc3D_,\n grid3D_doc3D_,\n ) = batch\n\n # Train Doc3D step\n imgs_doc3D = imgs_doc3D_.to(device, non_blocking=True)\n unwarped_GT_doc3D = imgs_unwarped_doc3D_.to(device, non_blocking=True)\n grid2D_GT_doc3D = grid2D_doc3D_.to(device, non_blocking=True)\n grid3D_GT_doc3D = grid3D_doc3D_.to(device, non_blocking=True)\n\n grid2D_pred_doc3D, grid3D_pred_doc3D = UVDocnet(imgs_doc3D)\n unwarped_pred_doc3D = utils.bilinear_unwarping(imgs_doc3D, grid2D_pred_doc3D, utils.IMG_SIZE)\n\n optimizer.zero_grad(set_to_none=True)\n\n recon_loss = criterionL1(unwarped_pred_doc3D, unwarped_GT_doc3D)\n loss_grid2D = criterionL1(grid2D_pred_doc3D, grid2D_GT_doc3D)\n loss_grid3D = criterionL1(grid3D_pred_doc3D, grid3D_GT_doc3D)\n\n netLoss = args.alpha_w * loss_grid2D + args.beta_w * loss_grid3D + gamma_w * recon_loss\n netLoss.backward()\n optimizer.step()\n\n tmp_mse = criterionMSE(unwarped_pred_doc3D, unwarped_GT_doc3D)\n train_mse += float(tmp_mse)\n losscount += 1\n\n # Train UVDoc step\n if args.data_to_use == \"both\":\n imgs_UVDoc = imgs_UVDoc_.to(device, non_blocking=True)\n unwarped_GT_UVDoc = imgs_unwarped_UVDoc_.to(device, non_blocking=True)\n grid2D_GT_UVDoc = grid2D_UVDoc_.to(device, non_blocking=True)\n grid3D_GT_UVDoc = grid3D_UVDoc_.to(device, non_blocking=True)\n\n grid2D_pred_UVDoc, grid3D_pred_UVDoc = UVDocnet(imgs_UVDoc)\n unwarped_pred_UVDoc = utils.bilinear_unwarping(imgs_UVDoc, grid2D_pred_UVDoc, utils.IMG_SIZE)\n\n optimizer.zero_grad(set_to_none=True)\n\n recon_loss = criterionL1(unwarped_pred_UVDoc, unwarped_GT_UVDoc)\n loss_grid2D = criterionL1(grid2D_pred_UVDoc, grid2D_GT_UVDoc)\n loss_grid3D = criterionL1(grid3D_pred_UVDoc, grid3D_GT_UVDoc)\n\n netLoss = args.alpha_w * loss_grid2D + args.beta_w * loss_grid3D + gamma_w * recon_loss\n netLoss.backward()\n optimizer.step()\n\n tmp_mse = criterionMSE(unwarped_pred_UVDoc, unwarped_GT_UVDoc)\n train_mse += float(tmp_mse)\n losscount += 1\n gc.collect()\n\n train_mse = train_mse / max(1, losscount)\n curr_lr = update_learning_rate(scheduler, optimizer)\n write_log_file(log_file_name, train_mse, epoch + 1, curr_lr, \"Train\")\n\n # Evaluation\n UVDocnet.eval()\n\n with torch.no_grad():\n mse_loss_val = 0.0\n for imgs_val_, imgs_unwarped_val_, _, _ in valloader:\n imgs_val = imgs_val_.to(device)\n unwarped_GT_val = imgs_unwarped_val_.to(device)\n\n grid2D_pred_val, grid3D_pred_val = UVDocnet(imgs_val)\n unwarped_pred_val = utils.bilinear_unwarping(imgs_val, grid2D_pred_val, utils.IMG_SIZE)\n\n loss_img_val = criterionMSE(unwarped_pred_val, unwarped_GT_val)\n mse_loss_val += float(loss_img_val)\n\n val_mse = mse_loss_val / len(valloader)\n write_log_file(log_file_name, val_mse, epoch + 1, curr_lr, \"Val\")\n\n # save best models\n if val_mse < best_val_mse or epoch == args.n_epochs + args.n_epochs_decay:\n best_val_mse = val_mse\n state = {\n \"epoch\": epoch + 1,\n \"model_state\": UVDocnet.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }\n model_path = exp_log_dir + f\"ep_{epoch + 1}_{val_mse:.5f}_{train_mse:.5f}_best_model.pkl\"\n torch.save(state, model_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Hyperparams\")\n\n parser.add_argument(\n \"--data_path_doc3D\", nargs=\"?\", type=str, default=\"./data/doc3D/\", help=\"Data path to load Doc3D data.\"\n )\n parser.add_argument(\n \"--data_path_UVDoc\", nargs=\"?\", type=str, default=\"./data/UVDoc/\", help=\"Data 
path to load UVDoc data.\"\n )\n parser.add_argument(\n \"--data_to_use\",\n type=str,\n default=\"both\",\n choices=[\"both\", \"doc3d\"],\n help=\"Dataset to use for training, either 'both' for Doc3D and UVDoc, or 'doc3d' for Doc3D only.\",\n )\n parser.add_argument(\"--batch_size\", nargs=\"?\", type=int, default=8, help=\"Batch size.\")\n parser.add_argument(\n \"--n_epochs\",\n nargs=\"?\",\n type=int,\n default=10,\n help=\"Number of epochs with initial (constant) learning rate.\",\n )\n parser.add_argument(\n \"--n_epochs_decay\",\n nargs=\"?\",\n type=int,\n default=10,\n help=\"Number of epochs to linearly decay learning rate to zero.\",\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, default=0.0002, help=\"Initial learning rate.\")\n parser.add_argument(\"--alpha_w\", nargs=\"?\", type=float, default=5.0, help=\"Weight for the 2D grid L1 loss.\")\n parser.add_argument(\"--beta_w\", nargs=\"?\", type=float, default=5.0, help=\"Weight for the 3D grid L1 loss.\")\n parser.add_argument(\n \"--gamma_w\", nargs=\"?\", type=float, default=1.0, help=\"Weight for the image reconstruction loss.\"\n )\n parser.add_argument(\n \"--ep_gamma_start\",\n nargs=\"?\",\n type=int,\n default=10,\n help=\"Epoch from which to start using image reconstruction loss.\",\n )\n parser.add_argument(\n \"--resume\",\n nargs=\"?\",\n type=str,\n default=None,\n help=\"Path to previous saved model to restart from.\",\n )\n parser.add_argument(\"--logdir\", nargs=\"?\", type=str, default=\"./log/default\", help=\"Path to store the logs.\")\n parser.add_argument(\n \"-a\",\n \"--appearance_augmentation\",\n nargs=\"*\",\n type=str,\n default=[\"visual\", \"noise\", \"color\"],\n choices=[\"shadow\", \"blur\", \"visual\", \"noise\", \"color\"],\n help=\"Appearance augmentations to use.\",\n )\n parser.add_argument(\n \"-gUVDoc\",\n \"--geometric_augmentationsUVDoc\",\n nargs=\"*\",\n type=str,\n default=[\"rotate\"],\n choices=[\"rotate\", \"flip\", \"perspective\"],\n help=\"Geometric augmentations to use for the UVDoc dataset.\",\n )\n parser.add_argument(\"--num_workers\", type=int, default=8, help=\"Number of workers to use for the dataloaders.\")\n\n args = parser.parse_args()\n main_worker(args)\n","repo_name":"tanguymagne/UVDoc","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12849,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"23303928918","text":"test_input_raw = \"\"\"\nabc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb\n\"\"\"\n\ntest_expected_result = 11\n\nproblem_input = None\n\nwith open('./data/day_six_part_one.txt', 'r') as file:\n problem_input = file.read()\n\ndef group_answers(group_answers_raw):\n results = {}\n\n for answers in group_answers_raw.split('\\n'):\n if answers == '':\n continue\n\n for answer in answers.strip():\n previous_result = results.get(answer) or 0\n results[answer] = previous_result + 1\n\n return results\n\ndef parse_input(input_str):\n return [ group_answers(answers) for answers in input_str.split('\\n\\n')]\n\nif __name__ == '__main__':\n test_answers = parse_input(test_input_raw)\n\n test_result = 0\n\n for answers in test_answers:\n test_result += len(answers)\n\n if test_result != test_expected_result:\n print(\"TEST FAILURE!\")\n exit()\n\n problem_answers = parse_input(problem_input)\n\n problem_result = 0\n\n for answers in problem_answers:\n problem_result += len(answers)\n\n 
print(problem_result)\n","repo_name":"DSmedley1989/adventofcode2020","sub_path":"src/python/day_six/part_one.py","file_name":"part_one.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25891674088","text":"\"\"\"This script creates a unified Chukchi + Beaufort dataset for cases where one region has data from a single raster, but the other region does not have any data. The output is a merged raster using the data and the mask from the region without data. This is considered Case B: single raster, single region.\"\"\"\n\nimport argparse\nimport pickle\nimport rasterio as rio\nimport pandas as pd\nfrom pathlib import Path\nfrom rasterio.merge import merge\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Execute raster unification Case B.\")\n    parser.add_argument(\n        \"-dt\",\n        \"--datetime_dict\",\n        action=\"store\",\n        dest=\"dt\",\n    )\n    parser.add_argument(\n        \"-ts\",\n        \"--timestamp\",\n        action=\"store\",\n        dest=\"ts\",\n    )\n    parser.add_argument(\n        \"-m\",\n        \"--mask-dir\",\n        action=\"store\",\n        dest=\"mask_dir\",\n        type=str,\n    )\n    parser.add_argument(\n        \"-src\",\n        \"--src-dir\",\n        action=\"store\",\n        dest=\"src_dir\",\n        type=str,\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--out-dir\",\n        action=\"store\",\n        dest=\"out_dir\",\n        type=str,\n    )\n\n    args = parser.parse_args()\n    k = pd.Timestamp(args.ts)\n    mask_dir = Path(args.mask_dir)\n    src_dir = Path(args.src_dir)\n    out_dir = Path(args.out_dir)\n\n    with open(args.dt, \"rb\") as handle:\n        dt_di = pickle.load(handle)\n\n    if dt_di[k][\"chukchi_count\"] > dt_di[k][\"beaufort_count\"]:\n        mask_fp = mask_dir.joinpath(\"beaufort_mask.tif\")\n    else:\n        mask_fp = mask_dir.joinpath(\"chukchi_mask.tif\")\n\n    data_match = dt_di[k][\"matching data\"][0]\n    data_file = f\"arrfix_{data_match}\"\n    fp = src_dir.joinpath(data_file)\n\n    data_src = rio.open(fp)\n    mask_src = rio.open(mask_fp)\n    out_arr, out_aff = merge([data_src, mask_src])\n    data_src.close()\n    mask_src.close()\n\n    out_merged_fp = out_dir.joinpath(\"ak_landfast_ice_\" + str(k).split(\" \")[0].replace(\"-\", \"_\") + \".tif\")\n\n    with open(mask_dir / \"both_region_profile.pickle\", \"rb\") as handle:\n        new_profile = pickle.load(handle)\n\n    new_profile[\"nodata\"] = 0\n    with rio.open(out_merged_fp, \"w\", **new_profile) as dst:\n        dst.write(out_arr[0], 1)\n\n    print(f\"Results written to {out_merged_fp}\")\n","repo_name":"ua-snap/ardac-curation","sub_path":"seaice/pipeline/single_raster_single_region.py","file_name":"single_raster_single_region.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20876655841","text":"'''\nRoom Assignment (방 배정)\nhttps://www.acmicpc.net/problem/13300\nBaekjoon Bronze 2, problem 13300\n\nJeongbo Elementary School is going on a 3-day, 2-night school trip. Students from several grades are all traveling to the same place, and rooms must be assigned for students from grade 1 through grade 6. Boys must room with boys, girls with girls, and a room may only hold students of the same grade. Of course, assigning only one student to a room is also allowed.\n\nGiven K, the maximum number of students that can be assigned to one room, write a program that finds the minimum number of rooms needed to assign every student according to these rules.\n'''\n\nN, K = map(int, input().split())\ngirl = [0] * 7 # number of girls in each grade\nboy = [0] * 7 # number of boys in each grade\nfor _ in range(N):\n    S, Y = map(int, input().split())\n    if S > 0: # boy\n        boy[Y] += 1\n    else: # girl\n        girl[Y] += 1\nall = girl + boy # everyone\nroom = 0 # rooms needed\nfor a in all:\n    room += a//K # count the full rooms\n    if a%K: # add one more room if there is a remainder\n        room += 1\nprint(room)","repo_name":"seoda0000/TIL","sub_path":"AlgorithmProblemSolving/04_백준/Bronze/13300_방_배정.py","file_name":"13300_방_배정.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"23466243515","text":"import rospy\n# Numpy\nimport numpy as np\n# Msgs\nfrom fssim_common.msg import State\n# Geometry import\nfrom shapely.geometry import Point\n# YAML\nimport yaml\n# System import\nimport os\n\n\ndef ccw(A, B, C):\n    \"\"\"Tests whether the turn formed by A, B, and C is ccw\"\"\"\n    return (B.x - A.x) * (C.y - A.y) > (B.y - A.y) * (C.x - A.x)\n\n\n# Return true if line segments AB and CD intersect\ndef intersect(A, B, C, D):\n    return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)\n\n\ndef to_point(odom):\n    return Point(odom.x, odom.y)\n\n\nclass LapStatistic:\n\n    def __init__(self, folder):\n        \n        self.mission = \"\"\n        self.mission_finished = False\n\n        self.last_state = State()\n\n        self.start_A = Point(0.0, 2.0)\n        self.start_B = Point(0.0, -2.0)\n\n        self.end_A = Point(0.0,0.0)\n        self.end_B = Point(0.0,0.0)\n\n        self.lap_count = 0\n        self.vel_avg = 0\n\n        self.rosbag_name = \"\"\n        self.state_received = False\n\n        self.starting_time = 0.0\n        self.res_go_time = 0.0\n        self.lap_time = []\n        self.position_rmse = [0]\n        self.cones_rmse = 0.0\n        self.cones_hit = []\n        self.centerline_rmse = []\n\n        self.fst_laps = 0\n\n\n        if folder is None:\n            self.report_file_name = None\n        else:\n            if not os.path.isdir(folder):\n                os.makedirs(folder)\n            self.report_file_name = folder + '/fst_output.yaml'\n        \n\n    def is_mission_finnished(self):\n        if self.mission == 'trackdrive':\n            rospy.logwarn(\"Lap Count: %i, speed: %f\", self.lap_count, self.last_state.vx)\n            return self.lap_count == 11 and self.last_state.vx <= 1.5\n        elif self.mission == 'acceleration':\n            rospy.logwarn(\"State x: %f\", self.last_state.x)\n            return self.last_state.x > 76 and self.last_state.x < 120 and len(self.lap_time) != 0\n        elif self.mission == 'skidpad':\n            rospy.logwarn(\"State x: %f, Laps: %i\", self.last_state.x, self.lap_count)\n            return self.last_state.x > 4 and self.last_state.x < 20 and self.lap_count == 5\n        elif self.mission == 'autocross':\n            rospy.logwarn(\"Lap Count: %i, speed: %f\", self.lap_count, self.last_state.vx)\n            return self.lap_count == 2 and self.last_state.vx <= 1.5\n\n    def update_state(self, state):\n        self.state_received = True\n        flag = False\n        if self.mission == 'trackdrive' or self.mission == 'skidpad' or self.mission == 'autocross':\n            cross_line = intersect(self.start_A, self.start_B, to_point(self.last_state), to_point(state))\n            if cross_line:\n                self.lap_count = self.lap_count + 1\n                if self.lap_count == 1:\n                    self.starting_time = rospy.get_rostime().to_sec()\n                    self.res_go_time = rospy.get_rostime().to_sec()\n\n                else:\n                    current_time = rospy.get_rostime().to_sec()\n                    self.lap_time.append(current_time - self.starting_time)\n                    self.starting_time = current_time\n                    rospy.logwarn(\"LAP Time: %f\", self.lap_time[-1])\n                rospy.logwarn(\"LAP: %i\", self.lap_count)\n                flag = True\n        elif 
self.mission == 'acceleration':\n            cross_line_start = intersect(self.start_A, self.start_B, to_point(self.last_state), to_point(state))\n            cross_line_end = intersect(self.end_A, self.end_B, to_point(self.last_state), to_point(state))\n            if cross_line_start:\n                rospy.logwarn(\"Starting to measure\")\n                self.starting_time = rospy.get_rostime().to_sec()\n                self.res_go_time = rospy.get_rostime().to_sec()\n            if cross_line_end:\n                self.lap_time.append(rospy.get_rostime().to_sec() - self.starting_time)\n                rospy.logwarn(\"STOP stopwatch with time: %f\", self.lap_time[-1])\n            vel = np.sqrt(state.vx ** 2 + state.vy ** 2)\n            if self.vel_avg == 0:\n                self.vel_avg = vel\n            else:\n                self.vel_avg = (vel + self.vel_avg) / 2.0\n\n        self.last_state = state\n\n        return flag\n\n    def get_rosbag_name(self, folder, sim_id):\n        if folder is None:\n            return None\n        else:\n            self.rosbag_name = str(sim_id) + '.bag'\n            return folder + '/' + self.rosbag_name\n\n    def get_duration(self):\n        return 0.0 if self.res_go_time == 0.0 else rospy.get_rostime().to_sec() - self.res_go_time\n\n    def get_statistics(self, id, repetition_info):\n        name = 'Run ' + str(id)\n        repetition_parameters = repetition_info['parameters'] if repetition_info['parameters'] is not None else []\n\n        rospy.logwarn(\"Discipline successful: %i, mission_finished: %i\", self.is_mission_finnished(), self.mission_finished)\n        repetition = {name: {'duration': self.get_duration(),\n                             'pass': self.is_mission_finnished() and self.mission_finished,\n                             'bag': self.rosbag_name,\n                             'parameters': repetition_parameters,\n                             'results': {\n                                 'laps FSSIM': len(self.lap_time), \n                                 'laps FST': self.fst_laps, \n                                 'lap time': self.lap_time,\n                                 'event time': sum(self.lap_time),\n                                 'mission': self.mission,\n                                 'position_rmse': self.average(self.position_rmse),\n                                 'cones_rmse': self.cones_rmse,\n                                 'cones_hit': self.cones_hit,\n                                 'centerline_rmse': self.average(self.centerline_rmse)\n                             },\n                             }\n                      }\n        return repetition\n\n    def get_best_configuration(self, results, metrics_to_check):\n        best_results = {}\n        for metric in metrics_to_check:\n            best_repetition = []\n            best_value = float(\"inf\")\n            for repetition_id, repetition in results[\"repetitions\"].iteritems():\n                metric_value = self.get_metric_value(repetition['results'][metric], metric)\n                if metric_value != None and repetition['pass'] and metric_value <= best_value:\n                    if metric_value < best_value:\n                        best_repetition = [] \n                    best_repetition.append(repetition_id)\n                    best_value = metric_value\n            best_results[metric] = best_repetition\n        results[\"best configuration\"] = best_results\n        return results\n    \n    def average(self, list):\n        return sum(list)/len(list)\n    \n    def get_metric_value(self, metric_value, metric):\n        if metric == 'cones_hit':\n            return sum(metric_value)\n        elif metric == 'lap time':\n            return None if metric_value == [] else min(metric_value)\n        else:\n            return metric_value\n    \n    def write_report(self, id, repetitions_info):\n        if self.report_file_name is not None:\n            with open(self.report_file_name, 'r+') as yamlfile:\n                report_yaml = yaml.load(yamlfile)\n\n                if report_yaml is None:\n                    report_yaml = {\"name\": \"default_name\", \"repetitions\": {}}\n\n                repetitions = report_yaml['repetitions']\n                if repetitions is None:\n                    report_yaml['repetitions'] = self.get_statistics(id, repetitions_info[id])\n                else:\n                    report_yaml[\"repetitions\"].update(self.get_statistics(id, repetitions_info[id]))\n            \n                #Check if this is the last repetition\n                if id == len(repetitions_info) - 1 and repetitions_info[id][\"parameters\"] is not None:\n                    report_yaml = self.get_best_configuration(report_yaml, ['position_rmse', 'cones_rmse', 
'cones_hit', 'lap time', 'event time','centerline_rmse'])\n            \n            with open(self.report_file_name, 'w+') as yamlfile:\n                yaml.dump(report_yaml, yamlfile, default_flow_style = False) # Also note the safe_dump","repo_name":"hugopereira-eng/FST-2022-Release","sub_path":"src/simulation/fst_interface/scripts/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25634593484","text":"import numpy as np\nimport math\n\n# making the x points\ndef making_x_points(x1_domain, x2_domain):\n    x = []\n    \n    for i in range(x1_domain[0], x1_domain[1]+1):\n        for j in range(x2_domain[0], x2_domain[1]+1):\n            x.append((i, j))\n    \n    return x\n\n\n# making the y points \ndef making_y_points1(x):\n    y = []\n    for i in x:\n        r = my_function(i[0], i[1])\n        f.write(f\"{i[0]}, {i[1]} = {r}\\n\")\n        y.append(r)\n    \n    return y\n\n\ndef my_function(x1, x2):\n    # return (2*x1)+(3*x2)\n    # return 3*(x1**2) - math.sin(x2)\n    return math.sin(x1) * (x2**3)\n\n\nif __name__ == \"__main__\":\n    \n    f = open('2D_in_out3.txt', 'w')\n\n    f.write(\"our function is: sin(x1)*(x2**3) \\n\")\n\n    x1_domain = (1, 10)\n    x2_domain = (1, 10)\n    x = making_x_points(x1_domain, x2_domain)\n    y = making_y_points1(x)\n    \n    f.close() ","repo_name":"niusha-yaghini/Genetic_Function_Approximation-GFA-","sub_path":"make_2D_input_output.py","file_name":"make_2D_input_output.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13968829990","text":"import cv2\r\nimport numpy as np\r\nimport sys\r\nimport pytesseract\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport operator\r\nfrom collections import Counter\r\nnp.set_printoptions(threshold=np.inf)\r\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Users\\\\divya.malhotra\\\\AppData\\\\Local\\\\Tesseract-OCR\\\\tesseract'\r\nTESSDATA_PREFIX = 'C:\\\\Users\\\\divya.malhotra\\\\AppData\\\\Local\\\\Tesseract-OCR\\\\tessdata'\r\nconfig_amount = (\"-l eng --oem 1 --psm 7 -c tessedit_char_whitelist=0123456789.,\")\r\nconfig = (\"-l eng --oem 1 --psm 7\")\r\nconfig_full = (\"-l eng --oem 1 --psm 11\")\r\nNORM_FONT= (\"Verdana\", 10)\r\n\r\ndef popupmsg():\r\n    msg = \"Please click at the right area to get text\"\r\n    popup = tk.Tk()\r\n    popup.wm_title(\"!\")\r\n    label = ttk.Label(popup, text=msg, font=NORM_FONT)\r\n    label.pack(side=\"top\", fill=\"x\", pady=10)\r\n    B1 = ttk.Button(popup, text=\"Okay\", command = popup.destroy)\r\n    B1.pack()\r\n    popup.mainloop()\r\n\r\ndef find_remove_inner_contours(mask,img_orig,img_thresh,y_top_val,y_bottom_val,x_left_val,x_right_val):\r\n    # i = img_orig.copy()\r\n    # print(\"shape is \", mask.shape)\r\n    # contours, hierarchy = cv2.findContours(mask[y_top_val:y_bottom_val,x_left_val:x_right_val], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE,offset=(x_left_val,y_top_val))\r\n    # for ind,evry_cnt in enumerate(contours):\r\n    #     # if hierarchy[0, ind, 3] == -1:\r\n    #     x, y, w, h = cv2.boundingRect(evry_cnt)\r\n    #     cv2.rectangle(img_orig, (x, y), (x + w, y + h), (255, 0, 255), 2)\r\n    #     cv2.imshow(\"mmask_rect\",img_orig)\r\n    #     if ((h > (mask.shape[0]/3)) & (h != mask.shape[0] & w != mask.shape[1])):\r\n    #         cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255, 255), -1)\r\n    # cv2.imshow(\"masked_im\", mask)\r\n    # np.savetxt('masked.csv',mask,delimiter=',')\r\n\r\n    for cl in range(img_thresh.shape[1]):\r\n        start = 0\r\n        end = img_thresh.shape[0]\r\n        first_black_pixel_mask = 
0\r\n        cnt = 1\r\n        while(start<end):\r\n            colm = np.where(mask[start:end,cl] == 0)[0]\r\n            if len(colm) > 0:\r\n                first_black_pixel_mask= colm[0] + start\r\n                if mask[first_black_pixel_mask,cl] == mask[first_black_pixel_mask-1,cl]:\r\n                    cnt=cnt+1\r\n                    start = first_black_pixel_mask+1\r\n                else:\r\n                    start= first_black_pixel_mask+1\r\n                    cnt=1\r\n                if cnt>end/20:\r\n                    mask[:,cl] =255\r\n                    break\r\n            else:\r\n                break\r\n        continue\r\n    # compute_connected_components(mask)\r\n    return mask\r\n\r\ndef find_bill_content(edges,r,c,img_thresh,img):\r\n    img_orig = img.copy()\r\n    final_contours = []\r\n    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n    for ind, cn in enumerate(contours):\r\n        if hierarchy[0,ind,3] == -1:\r\n            x, y, w, h = cv2.boundingRect(cn)\r\n            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n            final_contours.append(cn)\r\n    # cv2.imshow(\"all_contours\", img)\r\n    x_left = []\r\n    y_top = []\r\n    x_right = []\r\n    y_bottom = []\r\n    contours_tobe_removed = []\r\n    for evry_cnt in final_contours:\r\n        x,y,w,h= cv2.boundingRect(evry_cnt)\r\n        if ((h>r/2) and (w>=.4*c)):\r\n            print(\"dimension::\",x,y,w,h)\r\n            print(\"area::\", cv2.contourArea(evry_cnt))\r\n            cv2.rectangle(img, (x,y), (x + w, y + h), (0, 255, 0), 2)\r\n            x_left.append(x)\r\n            y_top.append(y)\r\n            x_right.append(x+w)\r\n            y_bottom.append(y+h)\r\n        elif ((h>r/13) and (w<=.08*c)):\r\n            img_thresh = cv2.rectangle(img_thresh,(x,y),(x+w,y+h),(255,255,255),-1)\r\n\r\n    isBillLargestContour = True if ((len(x_left)>0) & (len(x_right)>0) & (len(y_top)>0) & (len(y_bottom)>0)) else False\r\n    # cv2.imshow(\"image_bill_contour\", img)\r\n    if isBillLargestContour:\r\n        x_left_val = min(x_left)\r\n        x_right_val = max(x_right)\r\n        y_top_val = min(y_top)\r\n        y_bottom_val = max(y_bottom)\r\n        mask = np.zeros(img_thresh.shape, dtype='uint8')\r\n        mask.fill(255)\r\n        mask[y_top_val:y_bottom_val, x_left_val:x_right_val] = img_thresh[y_top_val:y_bottom_val,x_left_val:x_right_val]\r\n        # cv2.imshow(\"masked image\", mask)\r\n        img_thresh = find_remove_inner_contours(mask,img_orig,img_thresh,y_top_val,y_bottom_val,x_left_val,x_right_val)\r\n    # cv2.imshow(\"final_im\", img_thresh)\r\n    return img_thresh\r\n\r\ndef check_if_amount(img_thresh, mouseY, first_black_pixel, last_black_pixel):\r\n    index_zero = np.where(img_thresh[mouseY,first_black_pixel:last_black_pixel])[0]\r\n    index_zero[:] = index_zero[::-1]\r\n    cnt = 0\r\n    start_pt = 0\r\n    for indx, elm in enumerate(index_zero[start_pt:]):\r\n        start_pt_backup = start_pt\r\n        if elm == index_zero[indx-1]-1:\r\n            cnt = cnt + 1\r\n            if cnt >= .1 * img_thresh.shape[1]:\r\n                break\r\n        else:\r\n            cnt = 0\r\n            start_pt = indx+1\r\n    return index_zero[start_pt_backup]+first_black_pixel-10\r\n\r\ndef pre_process_image(img):\r\n    rgb_planes = cv2.split(img)\r\n    result_planes = []\r\n    result_norm_planes = []\r\n    for plane in rgb_planes:\r\n        dilated_img = cv2.dilate(plane, np.ones((7, 7), np.uint8))\r\n        bg_img = cv2.medianBlur(dilated_img, 21)\r\n        diff_img = 255 - cv2.absdiff(plane, bg_img)\r\n        norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\r\n        result_planes.append(diff_img)\r\n        result_norm_planes.append(norm_img)\r\n    result = cv2.merge(result_planes)\r\n    result_norm = cv2.merge(result_norm_planes)\r\n    # Convert image to gray scale\r\n    img_gray = cv2.cvtColor(result_norm, cv2.COLOR_BGR2GRAY)\r\n    # Apply normal thresholding to get only black and white pixels in image\r\n    thresh, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)\r\n    thresh, img_thresh = cv2.threshold(img_gray, thresh, 255, 
cv2.THRESH_BINARY)\r\n # additional changes\r\n blur_img = cv2.GaussianBlur(img_thresh, (5, 5), 0)\r\n edges = cv2.Canny(blur_img, 70, 110, -1)\r\n return img_thresh, img_gray, edges, result_norm\r\n\r\ndef check_retailer(img_thresh,mouseY,retailer_x):\r\n index_left_x = np.where(img_thresh[mouseY, :mouseX])[0]\r\n index_right_x = np.where(img_thresh[mouseY, mouseX:])[0]\r\n index_right_x = [elm+retailer_x for elm in index_right_x]\r\n index_left_x[:] = index_left_x[::-1]\r\n cnt = 0\r\n start_pt = 0\r\n for indx, pt in enumerate(index_left_x[start_pt:]):\r\n if index_left_x[indx]> -1:\r\n if pt == index_left_x[indx - 1] - 1:\r\n cnt = cnt + 1\r\n if cnt >= .1 * img_thresh.shape[1]:\r\n break\r\n else:\r\n cnt = 0\r\n start_pt = indx + 1\r\n start_pt_backup = start_pt\r\n x_left = index_left_x[start_pt_backup] - 10\r\n\r\n cnt = 0\r\n start_pt = 0\r\n for indx, pt in enumerate(index_right_x[start_pt:]):\r\n if index_right_x[indx] < img_thresh.shape[1]-1:\r\n if pt == index_right_x[indx + 1] - 1:\r\n cnt = cnt + 1\r\n if cnt >= .1 * img_thresh.shape[1]:\r\n break\r\n else:\r\n cnt = 0\r\n start_pt = indx + 1\r\n start_pt_backup = start_pt\r\n x_right = index_right_x[start_pt_backup] + 10\r\n return x_left, x_right\r\n\r\ndef check_amount(img_thresh,mouseY,amount_x):\r\n index_left_x = np.where(img_thresh[mouseY, :mouseX])[0]\r\n index_right_x = np.where(img_thresh[mouseY, mouseX:])[0]\r\n index_right_x = [elm+amount_x for elm in index_right_x]\r\n index_left_x[:] = index_left_x[::-1]\r\n cnt = 0\r\n start_pt = 0\r\n for indx, pt in enumerate(index_left_x[start_pt:]):\r\n if index_left_x[indx]> -1:\r\n if pt == index_left_x[indx - 1] - 1:\r\n cnt = cnt + 1\r\n if cnt >= .05 * img_thresh.shape[1]:\r\n break\r\n else:\r\n cnt = 0\r\n start_pt = indx + 1\r\n start_pt_backup = start_pt\r\n x_left = index_left_x[start_pt_backup] - 10\r\n\r\n cnt = 0\r\n start_pt = 0\r\n for indx, pt in enumerate(index_right_x[start_pt:]):\r\n if index_right_x[indx] < img_thresh.shape[1]-1:\r\n if pt == index_right_x[indx + 1] - 1:\r\n cnt = cnt + 1\r\n if cnt >= .05 * img_thresh.shape[1]:\r\n break\r\n else:\r\n cnt = 0\r\n start_pt = indx + 1\r\n start_pt_backup = start_pt\r\n x_right = index_right_x[start_pt_backup] + 10\r\n return x_left, x_right\r\n\r\n\r\n\r\ndef cal_text_height(img_thresh,img,mouseY, bottom_y,retailer_x,amount_x, top_y=0):\r\n mouseY_backup = mouseY\r\n row,col,color= img.shape\r\n # np.savetxt('thresholded.csv', img_thresh, delimiter=',')\r\n # np.savetxt('mousePointer.csv',img_thresh[mouseY,:], delimiter=\",\")\r\n only_white_char = np.where(img_thresh[mouseY, :] == 255)\r\n # print(only_white_char)\r\n first_white_pixel = only_white_char[0][0]\r\n last_white_pixel = only_white_char[0][-1]\r\n only_black_chars = np.where(img_thresh[mouseY,first_white_pixel:last_white_pixel]==0)\r\n if len(only_black_chars[0])==0:\r\n popupmsg()\r\n sys.exit()\r\n first_black_pixel = only_black_chars[0][0] + first_white_pixel\r\n last_black_pixel = only_black_chars[0][-1] + first_white_pixel\r\n if is_amount:\r\n # first_black_pixel = check_if_amount(img_thresh, mouseY, first_black_pixel, last_black_pixel)\r\n first_black_pixel, last_black_pixel = check_amount(img_thresh, mouseY, amount_x)\r\n else:\r\n first_black_pixel, last_black_pixel = check_retailer(img_thresh,mouseY, retailer_x)\r\n list_above_min = []\r\n list_below_max = []\r\n list_right_max = []\r\n list_left_min = []\r\n\r\n for elm in range(first_black_pixel,last_black_pixel+1):\r\n mouseY = mouseY - 1\r\n while (mouseY>top_y):\r\n if 
is_amount:\r\n                # first_pixel = check_if_amount(img_thresh, mouseY, first_black_pixel, last_black_pixel)\r\n                # last_pixel = last_black_pixel\r\n                first_pixel, last_pixel = check_amount(img_thresh, mouseY,amount_x)\r\n            else:\r\n                first_pixel, last_pixel = check_retailer(img_thresh, mouseY,retailer_x)\r\n            list_left_min.append(first_pixel)\r\n            list_right_max.append(last_pixel)\r\n            if ((img_thresh[mouseY, elm] == 255)):\r\n                list_above_min.append(mouseY)\r\n                break\r\n            else:\r\n                mouseY = mouseY - 1\r\n        mouseY = mouseY_backup\r\n    mouseY = mouseY_backup\r\n\r\n    for elm in range(first_black_pixel,last_black_pixel+1):\r\n        mouseY = mouseY+1\r\n        while (mouseY<bottom_y):\r\n            if is_amount:\r\n                first_pixel, last_pixel = check_amount(img_thresh, mouseY,amount_x)\r\n            else:\r\n                first_pixel, last_pixel = check_retailer(img_thresh, mouseY,retailer_x)\r\n            list_left_min.append(first_pixel)\r\n            list_right_max.append(last_pixel)\r\n            if ((img_thresh[mouseY, elm] == 255)):\r\n                list_below_max.append(mouseY)\r\n                break\r\n            else:\r\n                mouseY = mouseY + 1\r\n        mouseY = mouseY_backup\n\nfrom heapq import heappush, heappop\n\n\ndef sum_of_numbers_between(arr, k1, k2):\n    if k1 > len(arr) or k2 > len(arr):\n        return 0\n    min_heap = []\n    elements_sum = 0\n    for i in range(len(arr)):\n        heappush(min_heap, arr[i])\n\n    for i in range(k1):\n        heappop(min_heap)\n\n    for i in range(k2-k1-1):\n        elements_sum += heappop(min_heap)\n\n    return elements_sum\n\n\nif __name__ == '__main__':\n    print(sum_of_numbers_between([1, 3, 12, 5, 15, 11], 3, 6))\n    print(sum_of_numbers_between([3, 5, 8, 7], 1, 4))","repo_name":"smartinsert/CodingProblem","sub_path":"algorithmic_patterns/top_k_elements/sum_of_elements.py","file_name":"sum_of_elements.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4030159621","text":"# DP motivation, it is recomputing values repeatedly\n\n# Normally, DP without memoization is known as backtracking\n# The intuition behind dynamic programming is that we trade space for time\n\n# One can think of dynamic programming as a table-filling algorithm: you know the calculations you have to do,\n# so you pick the best order to do them in and ignore the ones you don't have to fill in.\n\n# 1. Optimization problems\n# 2. 
Combinatorial problems\n\n# Space complexity: O(n)\ndef fib(n):\n    if n <= 1: return n\n\n    return fib(n-1) + fib(n-2)\n\n# Using memo\n# Space complexity: O(n); time is O(n) with the memo (O(2^n) without it)\ndef fib_top_down(n, memo):\n    if n <= 1: return n\n\n    if not n in memo: memo[n] = fib_top_down(n-1, memo) + fib_top_down(n-2, memo)\n\n    return memo[n]\n\n# Iterative # O(n * m) with m the time complexity of solving a subproblem, in general, 1\ndef fib_bottom_up(n):\n    if n <= 1: return n\n    \n    fib = [0] * (n + 1)\n    fib[1] = 1\n\n    for i in range(2, n + 1):\n        fib[i] = fib[i - 1] + fib[i - 2]\n\n    return fib[n]\n\n# def counting_paths(grid, r, c): , using DP: O(n^2), using common recursion: O(2^n^2)\n\ndef climbStairs(n):\n    if n == 1 or n == 2: return n\n\n    dp = [0] * (n + 1)\n    dp[1] = 1; dp[2] = 2\n\n    for i in range(3, n + 1):\n        dp[i] = dp[i - 1] + dp[i - 2]\n\n    return dp[n]\n","repo_name":"FedeLochbaum/google-cloud-interview","sub_path":"data-structures-and-algorithms/recursion_&_dp/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41391916705","text":"import msgpack\nimport ipaddress\nimport six\n\nfrom .hashing import hash_function\nfrom .const import Message, MinMax\nfrom .helper import sixunicode\nfrom .excepions import MaxSizeException\nfrom .log import l\n\n\nclass Peer(object):\n    ''' DHT Peer Information'''\n    def __init__(\n            self,\n            port,\n            id_,\n            hostv4 = None,\n            hostv6 = None,\n            well_connected = False,\n            is_bytes = False\n    ):\n        if hostv4:\n            self.hostv4 = ipaddress.ip_address(\n                sixunicode(hostv4, is_bytes)\n            )\n        else:\n            self.hostv4 = None\n        if hostv6:\n            self.hostv6 = ipaddress.ip_address(\n                sixunicode(hostv6, is_bytes)\n            )\n        else:\n            self.hostv6 = None\n        self.port = port\n        self.id = id_\n        self.well_connected = well_connected\n\n    def astuple(self, for_export=False):\n        if self.hostv4:\n            hostv4 = self.hostv4.packed\n        else:\n            hostv4 = None\n        if self.hostv6:\n            hostv6 = self.hostv6.packed\n        else:\n            hostv6 = None\n        if for_export:\n            return (\n                self.port,\n                self.id,\n                hostv4,\n                hostv6,\n            )\n        else:\n            return (\n                self.port,\n                self.id,\n                hostv4,\n                hostv6,\n                self.well_connected,\n            )\n\n    def addressv4(self):\n        return (str(self.hostv4), self.port)\n\n    def addressv6(self):\n        return (str(self.hostv6), self.port)\n\n    def __repr__(self):\n        return repr(self.astuple())\n\n    def _sendmessage(self, message, dht, peer_id):\n        message[Message.PEER_ID] = peer_id  # more like sender_id\n        message[Message.NETWORK_ID] = hash_function(\n            peer_id + dht.network_id\n        )\n        encoded = msgpack.dumps(message)\n        if len(encoded) > MinMax.MAX_MSG_SIZE:\n            raise MaxSizeException(\n                \"Message size must not exceed %d bytes\" % MinMax.MAX_MSG_SIZE\n            )\n        if self.hostv4 and dht.server4:\n            try:\n                dht.server4.socket.sendto(\n                    encoded,\n                    (str(self.hostv4), self.port)\n                )\n            except OSError:\n                l.info(\"Could not send to %s\", self.hostv4)\n        if self.hostv6 and dht.server6:\n            try:\n                dht.server6.socket.sendto(\n                    encoded,\n                    (str(self.hostv6), self.port)\n                )\n            except OSError:\n                l.info(\"Could not send to %s\", self.hostv6)\n\n    def _fw_sendmessage(self, message, dht, peer_id):\n        message[Message.PEER_ID] = peer_id  # more like sender_id\n        message[Message.NETWORK_ID] = hash_function(\n            peer_id + dht.network_id\n        )\n        encoded = msgpack.dumps(message)\n        if len(encoded) > MinMax.MAX_MSG_SIZE:\n            raise MaxSizeException(\n                \"Message size must not exceed %d bytes\" % MinMax.MAX_MSG_SIZE\n            )\n        if self.hostv4 and dht.server4:\n            dht.fw_sock4.sendto(\n                encoded,\n                
(str(self.hostv4), self.port)\n )\n if self.hostv6 and dht.server6:\n dht.fw_sock6.sendto(\n encoded,\n (str(self.hostv6), self.port)\n )\n\n def ping(self, dht, peer_id, rpc_id=None):\n message = {\n Message.MESSAGE_TYPE: Message.PING,\n Message.ALL_ADDR: self.astuple(\n for_export=True\n ),\n }\n if rpc_id:\n message[Message.RPC_ID] = rpc_id\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def fw_ping(self, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FW_PING,\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def pong(self, dht, peer_id, cpeer, rpc_id=None):\n message = {\n Message.MESSAGE_TYPE: Message.PONG,\n Message.ALL_ADDR: dht.peer.astuple(\n for_export=True\n ),\n Message.CLI_ADDR: cpeer.astuple(\n for_export=True\n ),\n }\n if rpc_id:\n message[Message.RPC_ID] = rpc_id\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def fw_pong(self, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FW_PONG,\n Message.ID: self.id,\n }\n self._fw_sendmessage(message, dht, peer_id=peer_id)\n\n def store(self, key, value, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.STORE,\n Message.ID: key,\n Message.VALUE: value\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def find_node(self, id_, rpc_id, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FIND_NODE,\n Message.ID: id_,\n Message.RPC_ID: rpc_id\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def found_nodes(self, id_, nearest_nodes, rpc_id, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FOUND_NODES,\n Message.VALUE: id_,\n Message.NEAREST_NODES: nearest_nodes,\n Message.RPC_ID: rpc_id\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def find_value(self, id_, rpc_id, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FIND_VALUE,\n Message.ID: id_,\n Message.RPC_ID: rpc_id\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n\n def found_value(self, id_, value, rpc_id, dht, peer_id):\n message = {\n Message.MESSAGE_TYPE: Message.FOUND_VALUE,\n Message.ID: id_,\n Message.VALUE: value,\n Message.RPC_ID: rpc_id\n }\n self._sendmessage(message, dht, peer_id=peer_id)\n","repo_name":"pombreda/dht3k","sub_path":"dht3k/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72804091094","text":"from waiter import settings\n\nSITE_URL=settings.SITE_URL\nADMIN_URL=SITE_URL+'admin/'\nMEDIA_URL=settings.MEDIA_URL\nSTATIC_URL=settings.STATIC_URL\nMEDIA_ROOT=settings.MEDIA_ROOT\nTIME_ZONE=settings.TIME_ZONE\nSTATIC_ROOT=settings.STATIC_ROOT\nPUSHER_IS_ENABLE=settings.PUSHER_IS_ENABLE\nDEBUG=settings.DEBUG\nYEAR_ADDED=settings.YEAR_ADDED\nQRCODE_ROOT=settings.QRCODE_ROOT\nQRCODE_URL=settings.QRCODE_URL\nUPLOAD_ROOT=settings.UPLOAD_ROOT\n\nSITE_FULL_BASE_ADDRESS=settings.SITE_FULL_BASE_ADDRESS\n\n","repo_name":"hosseinmoghimi/waiter","sub_path":"core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36216276071","text":"# ! 
pip install python-math\r\n\r\nimport math\r\n\r\ndef sumPrimes(maxPrime):\r\n try:\r\n sumPrime = 0\r\n for x in range(2,maxPrime):\r\n for y in range(2,int(math.sqrt(x))+1):\r\n if (x % y == 0):\r\n break\r\n else:\r\n # print(x)\r\n sumPrime += x\r\n \r\n except Exception as e:\r\n print(type(e))\r\n \r\n return sumPrime\r\n \r\ndef main():\r\n maxPrime = 2* 10**6\r\n sumPrime = sumPrimes(maxPrime)\r\n print(\"The sum of all the primes below {} is {}\".format(maxPrime, sumPrime))\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"EthanLaity/Project_Euler","sub_path":"Python 3/Problem_010.py","file_name":"Problem_010.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71877258132","text":"from time import time\n\n\nn = int(input())\ntimes = [list(map(int, input().split())) for _ in range(n)]\n\ntimes.sort(key= lambda x:(x[1], x[0]))\n\nanswer = 0\nlast = 0\n\nfor time in times:\n start, end = time\n if start >= last:\n answer += 1\n last = end\n\nprint(answer)","repo_name":"icarusw-code/Algorithm_solve","sub_path":"백준/Greedy(그리디)/1931_회의실배정.py","file_name":"1931_회의실배정.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37994871722","text":"import os\nimport numpy as np\nimport argparse\nimport torch\nimport time\nimport librosa\nimport pickle\n\nimport preprocess\nfrom trainingDataset import trainingDataset\nfrom model_VC2 import Generator, Discriminator\n\nclass CycleGANTest:\n def __init__(self,\n logf0s_normalization,\n mcep_normalization,\n model_checkpoint,\n validation_A_dir,\n output_A_dir):\n\n logf0s_normalization = np.load(logf0s_normalization)\n self.log_f0s_mean_A = logf0s_normalization['mean_A']\n self.log_f0s_std_A = logf0s_normalization['std_A']\n self.log_f0s_mean_B = logf0s_normalization['mean_B']\n self.log_f0s_std_B = logf0s_normalization['std_B']\n\n mcep_normalization = np.load(mcep_normalization)\n self.coded_sps_A_mean = mcep_normalization['mean_A']\n self.coded_sps_A_std = mcep_normalization['std_A']\n self.coded_sps_B_mean = mcep_normalization['mean_B']\n self.coded_sps_B_std = mcep_normalization['std_B']\n\n self.validation_A_dir = validation_A_dir\n self.output_A_dir = output_A_dir\n\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n self.generator_A2B = Generator().to(self.device)\n self.generator_A2B.eval()\n\n checkPoint = torch.load(model_checkpoint)\n self.generator_A2B.load_state_dict(\n state_dict=checkPoint['model_genA2B_state_dict'])\n print(\"load model checkpoint finish!\")\n\n\n def validation_for_A_dir(self):\n num_mcep = 24\n sampling_rate = 16000\n frame_period = 5.0\n n_frames = 128\n validation_A_dir = self.validation_A_dir\n output_A_dir = self.output_A_dir\n\n print(\"Generating Test Data B from A...\")\n for file in os.listdir(validation_A_dir):\n filePath = os.path.join(validation_A_dir, file)\n wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)\n wav = preprocess.wav_padding(wav=wav,\n sr=sampling_rate,\n frame_period=frame_period,\n multiple=4)\n f0, timeaxis, sp, ap = preprocess.world_decompose(\n wav=wav, fs=sampling_rate, frame_period=frame_period)\n f0_converted = preprocess.pitch_conversion(f0=f0,\n mean_log_src=self.log_f0s_mean_A,\n std_log_src=self.log_f0s_std_A,\n mean_log_target=self.log_f0s_mean_B,\n std_log_target=self.log_f0s_std_B)\n coded_sp = 
preprocess.world_encode_spectral_envelop(\n sp=sp, fs=sampling_rate, dim=num_mcep)\n coded_sp_transposed = coded_sp.T\n coded_sp_norm = (coded_sp_transposed -\n self.coded_sps_A_mean) / self.coded_sps_A_std\n coded_sp_norm = np.array([coded_sp_norm])\n\n if torch.cuda.is_available():\n coded_sp_norm = torch.from_numpy(coded_sp_norm).cuda().float()\n else:\n coded_sp_norm = torch.from_numpy(coded_sp_norm).float()\n\n coded_sp_converted_norm = self.generator_A2B(coded_sp_norm)\n coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()\n coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)\n coded_sp_converted = coded_sp_converted_norm * \\\n self.coded_sps_B_std + self.coded_sps_B_mean\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = preprocess.world_decode_spectral_envelop(\n coded_sp=coded_sp_converted, fs=sampling_rate)\n wav_transformed = preprocess.world_speech_synthesis(f0=f0_converted,\n decoded_sp=decoded_sp_converted,\n ap=ap,\n fs=sampling_rate,\n frame_period=frame_period)\n librosa.output.write_wav(path=os.path.join(output_A_dir, os.path.basename(file)),\n y=wav_transformed,\n sr=sampling_rate)\n print(\"finish!\")\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Test CycleGAN\")\n\n logf0s_normalization_default = '../cache/logf0s_normalization.npz'\n mcep_normalization_default = '../cache/mcep_normalization.npz'\n coded_sps_A_norm = '../cache/coded_sps_A_norm.pickle'\n coded_sps_B_norm = '../cache/coded_sps_B_norm.pickle'\n model_checkpoint = '../cache/model_checkpoint/'\n resume_training_at = '../cache/model_checkpoint/_CycleGAN_CheckPoint'\n resume_training_at = None\n\n validation_A_dir_default = '../data/vcc2016_training/evaluation_all/SF1/'\n output_A_dir_default = '../data/vcc2016_training/converted_sound/SF1'\n\n validation_B_dir_default = '../data/vcc2016_training/evaluation_all/TF2/'\n output_B_dir_default = '../data/vcc2016_training/converted_sound/TF2/'\n\n parser.add_argument('--logf0s_normalization', type=str,\n help=\"Cached location for log f0s normalized\", default=logf0s_normalization_default)\n parser.add_argument('--mcep_normalization', type=str,\n help=\"Cached location for mcep normalization\", default=mcep_normalization_default)\n parser.add_argument('--model_checkpoint', type=str,\n help=\"location where your model\", default=model_checkpoint)\n parser.add_argument('--test_A_dir', type=str,\n help=\"test set for sound source A\", default=validation_A_dir_default)\n parser.add_argument('--output_A_dir', type=str,\n help=\"output for converted Sound Source A\", default=output_A_dir_default)\n\n argv = parser.parse_args()\n\n logf0s_normalization = argv.logf0s_normalization\n mcep_normalization = argv.mcep_normalization\n model_checkpoint = argv.model_checkpoint\n\n validation_A_dir = argv.test_A_dir\n output_A_dir = argv.output_A_dir\n\n cycleGAN = CycleGANTest(logf0s_normalization=logf0s_normalization,\n mcep_normalization=mcep_normalization,\n model_checkpoint=model_checkpoint,\n validation_A_dir=validation_A_dir,\n output_A_dir=output_A_dir)\n\n\n cycleGAN.validation_for_A_dir()\n\n\n\n \n","repo_name":"TaiChunYen/Pytorch-CycleGAN-VC2","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6827,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"67"} +{"seq_id":"22875411923","text":"\"\"\"Script to convert all Python UI files in the View folder 
to .py files\"\"\"\nimport os\nfrom PyQt5 import uic\n\n\nif __name__ == '__main__':\n FILES_PATH = os.path.join(os.getcwd(), \"Evolution\\\\View\")\n FILE_LIST = os.listdir(FILES_PATH)\n\n for file in FILE_LIST:\n filename, file_extension = os.path.splitext(file)\n if file_extension == \".ui\":\n full_path_ui = os.path.join(FILES_PATH, filename + \".ui\")\n full_path_py = os.path.join(FILES_PATH, \"Ui_\" + filename + \".py\")\n print(\"Converting: \" + full_path_ui)\n with open(full_path_py, 'w') as output_file:\n uic.compileUi(full_path_ui, output_file)\n ","repo_name":"ghosta0815/Evolution","sub_path":"Evolution/Scripts/convert_ui_script.py","file_name":"convert_ui_script.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18946099406","text":"import logging\nimport math\nimport os\nimport regex\nimport sys\nimport time\nimport tkrzw\nimport tkrzw_dict\nimport tkrzw_tokenizer\n\n\nlogger = tkrzw_dict.GetLogger()\n\n\nclass ExtractKeysBatch:\n def __init__(self, input_path, output_path, rev_prob_path):\n self.input_path = input_path\n self.rev_prob_path = rev_prob_path\n self.output_path = output_path\n self.tokenizer = tkrzw_tokenizer.Tokenizer()\n\n def Run(self):\n start_time = time.time()\n logger.info(\"Process started: input_path={}, output_path={}\".format(\n self.input_path, self.output_path))\n input_dbm = tkrzw.DBM()\n input_dbm.Open(self.input_path, False, dbm=\"HashDBM\").OrDie()\n rev_prob_dbm = None\n if self.rev_prob_path:\n rev_prob_dbm = tkrzw.DBM()\n rev_prob_dbm.Open(self.rev_prob_path, False, dbm=\"HashDBM\").OrDie()\n it = input_dbm.MakeIterator()\n it.First()\n num_entries = 0\n scores = []\n while True:\n record = it.GetStr()\n if not record: break\n key, expr = record\n if key.startswith(\" \"):\n it.Next()\n continue\n num_items = len(expr.split(\"\\t\"))\n rev_prob = 1.0\n if rev_prob_dbm:\n rev_prob = self.GetRevProb(rev_prob_dbm, key)\n score = (num_items * rev_prob) ** 0.5\n score -= len(key) * 0.0000001\n if regex.fullmatch(r\"[\\p{Hiragana}]+\", key):\n score *= 0.2\n if len(key) == 1:\n score *= 0.5\n scores.append((key, score))\n num_entries += 1\n if num_entries % 10000 == 0:\n logger.info(\"Reading: entries={}\".format(num_entries))\n it.Next()\n if rev_prob_dbm:\n rev_prob_dbm.Close().OrDie()\n input_dbm.Close().OrDie()\n logger.info(\"Reading done: entries={}\".format(num_entries))\n scores = sorted(scores, key=lambda x: x[1], reverse=True)\n with open(self.output_path, \"w\") as out_file:\n num_entries = 0\n for key, score in scores:\n print(key, file=out_file)\n num_entries += 1\n if num_entries % 10000 == 0:\n logger.info(\"Writing: entries={}\".format(num_entries))\n logger.info(\"Writing done: entries={}\".format(num_entries))\n logger.info(\"Process done: elapsed_time={:.2f}s\".format(time.time() - start_time))\n\n def GetRevProb(self, rev_prob_dbm, phrase):\n base_prob = 0.000000001\n tokens = self.tokenizer.Tokenize(\"ja\", phrase, False, True)\n if not tokens: return base_prob\n max_ngram = min(3, len(tokens))\n fallback_penalty = 1.0\n for ngram in range(max_ngram, 0, -1):\n if len(tokens) <= ngram:\n cur_phrase = \" \".join(tokens)\n prob = float(rev_prob_dbm.GetStr(cur_phrase) or 0.0)\n if prob:\n return max(prob, base_prob)\n fallback_penalty *= 0.1\n else:\n probs = []\n index = 0\n miss = False\n while index <= len(tokens) - ngram:\n cur_phrase = \" \".join(tokens[index:index + ngram])\n cur_prob = float(rev_prob_dbm.GetStr(cur_phrase) 
or 0.0)\n          if not cur_prob:\n            miss = True\n            break\n          probs.append(cur_prob)\n          index += 1\n        if not miss:\n          inv_sum = 0\n          for cur_prob in probs:\n            inv_sum += 1 / cur_prob\n          prob = len(probs) / inv_sum\n          prob *= 0.3 ** (len(tokens) - ngram)\n          prob *= fallback_penalty\n          return max(prob, base_prob)\n      fallback_penalty *= 0.1\n    return base_prob\n\n\ndef main():\n  args = sys.argv[1:]\n  input_path = tkrzw_dict.GetCommandFlag(args, \"--input\", 1) or \"union-body.tkh\"\n  output_path = tkrzw_dict.GetCommandFlag(args, \"--output\", 1) or \"union-tran-keys.txt\"\n  rev_prob_path = tkrzw_dict.GetCommandFlag(args, \"--rev_prob\", 1) or \"\"\n  if tkrzw_dict.GetCommandFlag(args, \"--quiet\", 0):\n    logger.setLevel(logging.ERROR)\n  if args:\n    raise RuntimeError(\"unknown arguments: {}\".format(str(args)))\n  ExtractKeysBatch(input_path, output_path, rev_prob_path).Run()\n\n\nif __name__==\"__main__\":\n  main()\n","repo_name":"estraier/tkrzw-dict","sub_path":"extract_union_tran_keys.py","file_name":"extract_union_tran_keys.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"31703747130","text":"\n# Emails in django are a pain, use allauth to send it.\nfrom allauth.account.adapter import get_adapter\nfrom allauth.utils import build_absolute_uri\n\ndef emailBuildFailure(owner, upload):\n    \"\"\"Report a failure to build an agent to its owner\"\"\"\n    adapter = get_adapter()\n\n    context = {\n        \"owner\": owner,\n        \"upload\": upload,\n        \"url\": build_absolute_uri(None, upload.get_absolute_url())\n    }\n\n    adapter.send_mail('fg_competitions/email/failure', owner.email, context)\n\n","repo_name":"fossgalaxy/comet","sub_path":"fg_competitions/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35848257025","text":"import torch\nimport random\nimport numpy as np\nimport argparse\nimport collections\nimport wandb\nimport data.data_loaders as module_data\nimport model.loss as module_loss\nimport model.model as module_arch\nimport trainer as module_trainer\nfrom parse_config import ConfigParser\nfrom util import prepare_device\n\n\ndef main(config, train_mode):\n\n    # fix the random seed\n    random_seed = 21\n    torch.manual_seed(random_seed)\n    torch.cuda.manual_seed(random_seed)\n    torch.cuda.manual_seed_all(random_seed) # if use multi-GPU\n    torch.backends.cudnn.deterministic = True\n    torch.backends.cudnn.benchmark = False\n    np.random.seed(random_seed)\n    random.seed(random_seed)\n\n    # data_path\n    dataset_path = config['path']['dataset']\n    decription_path = config['path']['data_description']\n    train_path = decription_path + '/train.json'\n    train_all_path = decription_path + '/train_all.json'\n    val_path = decription_path + '/val.json'\n\n    # setup data_loader instances\n    batch_size = config['dataloader']['args']['batch_size']\n    num_workers = config['dataloader']['args']['num_workers']\n    data_loader = config.init_obj('data_loader', module_data)\n\n    if train_mode == 'experiment':\n        train_dataset = data_loader(data_dir=train_path, dataset_path = dataset_path ,mode='train')\n        val_dataset = data_loader(data_dir=val_path, dataset_path = dataset_path, mode='train')\n        \n        train_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n                                                   
batch_size=batch_size,\n                                                   shuffle=True,\n                                                   num_workers=num_workers,\n                                                   collate_fn=module_data.collate_fn)\n\n        val_loader = torch.utils.data.DataLoader(dataset=val_dataset, \n                                                 batch_size=batch_size,\n                                                 shuffle=False,\n                                                 num_workers=num_workers,\n                                                 collate_fn=module_data.collate_fn)\n    else:\n        train_dataset = data_loader(data_dir=train_all_path, dataset_path = dataset_path, mode='train')\n        train_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n                                                   batch_size=batch_size,\n                                                   shuffle=True,\n                                                   num_workers=4,\n                                                   collate_fn=module_data.collate_fn)\n\n    # build model architecture, then print to console\n    model = config.init_obj('arch', module_arch)\n\n    # prepare for (multi-device) GPU training\n    device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n    device_ids = prepare_device(config['n_gpu'])\n    model = model.to(device)\n    if len(device_ids) > 1:\n        model = torch.nn.DataParallel(model, device_ids=device_ids)\n\n    # get function handles of loss and metrics\n    criterion = getattr(module_loss, config['loss'])\n\n    # build optimizer, learning rate scheduler. delete every line containing lr_scheduler for disabling the scheduler\n    trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)\n\n    # train\n    pretrained_weight_path = config['path']['pretrainedweight']\n    if pretrained_weight_path:\n        model.load_state_dict(torch.load(pretrained_weight_path), strict=False)\n    \n    N = config['num_epoch']\n    saved_dir = config['path']['save_checkpoint']['dir']\n    file_name = config['path']['save_checkpoint']['file_name']\n    if train_mode == 'experiment':\n        train = module_trainer.experiment_trainer(num_epochs = N, model = model,\n                                                  train_loader = train_loader, val_loader = val_loader,\n                                                  criterion = criterion, optimizer = optimizer,\n                                                  saved_dir = saved_dir, file_name = file_name,\n                                                  device = device)\n    else:\n        train = module_trainer.all_trainer(num_epochs = N, model = model,\n                                           train_loader = train_loader,\n                                           criterion = criterion, optimizer = optimizer,\n                                           saved_dir = saved_dir, file_name = file_name,\n                                           device = device)\n    \n    wandb.init(project=config['project'], entity=config['entity'])\n    wandb.watch(model)\n    train()\n\n\nif __name__ == '__main__':\n    args = argparse.ArgumentParser(description='PyTorch Template')\n    args.add_argument('-c', '--config', default=None, type=str,\n                      help='config file path (default: None)')\n\n    # custom cli options to modify configuration from default values given in json file.\n    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n    options = [\n        CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),\n        CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size')\n    ]\n    config = ConfigParser.from_args(args, options)\n    \n    # choose train mode (ArgumentParser has no pop(); parse the added -m/--mode flag instead)\n    args.add_argument('-m', '--mode', default='experiment', type=str,\n                      help='choose train mode from two types(experiment/all)')\n    train_mode = args.parse_args().mode\n    \n    main(config, train_mode)\n","repo_name":"boostcampaitech2/semantic-segmentation-level2-cv-03","sub_path":"DeepLabv3/Train_Experiment/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"22887336834","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls import url\nurlpatterns = [\n path('login/', views.login,name='login'),\n path('logout/',views.user_logout,name='logout'),\n path('register/',views.register,name='register'),\n path('dash/', views.dashboard, name=\"dash\"),\n url(r'^chart/$', views.HomeView.as_view(), name='home'),\n url(r'^api/data/$', views.get_data, name='api-data'),\n url(r'^api/chart/data/$', views.ChartData.as_view()),\n url(r'^api/pichart/data/$', views.PieChartData.as_view()),\n path('counts/', views.PostList.as_view(), name='post'),\n path('',views.index,name='index'),\n url(r'^piechart/$', views.PieView.as_view(), name='piechart'),\n\n]\n","repo_name":"Pratham-Shah/WhatsApp-Activity-Monitor","sub_path":"se/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22601481291","text":"from json import load\nfrom pendulum import from_timestamp\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import StructType, StructField, StringType, LongType\nfrom requests import get\nfrom typing import Generator, Union\n\n# from src.spark.dependencies.utils import start_spark, get_spark_logger\n\n\ndef get_data(url: str) -> dict:\n if 'https' in url:\n return get(url).json()\n else:\n return load(open(url))\n\n\ndef get_insert_time(data: dict, logger) -> int:\n if '_metadata' in data:\n val = from_timestamp(data['_metadata']['generated']).format('YYYYMMDDHHmmss')\n try:\n val = int(val)\n except (ValueError, TypeError):\n if logger:\n logger.info('metadata.generated is not int type')\n else:\n print('metadata.generated is not int type')\n return val\n return -1\n\n\ndef iterate_dict_pairs(dict_obj: dict, depth: int) -> Generator:\n depth -= 1\n for key, value in dict_obj.items():\n if key == '_metadata':\n continue\n if depth == 0:\n yield key, str(value)\n elif not isinstance(value, dict):\n yield (key, *[None for _ in range(depth)], str(value) if value else None,)\n else:\n for pair in iterate_dict_pairs(value, depth):\n yield (key, *pair,)\n\n\ndef get_list_of_rows(data: dict, schema: StructType, last_field: Union[str, int, float]) -> list:\n rows = []\n for line in iterate_dict_pairs(dict_obj=data, depth=len(schema) - 2):\n row = list(line)\n row.append(last_field)\n rows.append(row)\n return rows\n\n\ndef get_table(path: str, schema: StructType, logger=None) -> list:\n data = get_data(path)\n insert_time = get_insert_time(data, logger)\n table = get_list_of_rows(data, schema, insert_time)\n return table\n\n\ndef main():\n # SOURCE_JSON_PATH = 'https://data.pr.eglobal.app/instrument-data.json'\n SOURCE_JSON_PATH = '../ignore/nested_stuff/tickers_test.json'\n TARGET_PARQUET_PATH = '/data/raw/api/instrument_data'\n\n schema = StructType([\n StructField('server', StringType(), False),\n StructField('market', StringType(), False),\n StructField('symbol_type', StringType(), True),\n StructField('symbol', StringType(), True),\n StructField('param', StringType(), True),\n StructField('indicator', StringType(), True),\n StructField('value', StringType(), True),\n StructField('insert_time', LongType(), True),\n ])\n\n # with start_spark(app_name='instrument_data') as spark:\n # logger = get_spark_logger(spark)\n\n table = get_table(SOURCE_JSON_PATH, schema)\n for i in table:\n print(i)\n # df = spark.createDataFrame(table).repartition(1)\n # df.write.format('delta').mode('overwrite').save(TARGET_PARQUET_PATH)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"Megacinder/spot","sub_path":"spark_stuff/local_spark.py","file_name":"local_spark.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38038818251","text":"import asyncio\nimport psutil\n\nfrom typing import Optional, List, NamedTuple\n\nfrom .logger import logger\n\n\nclass ProcessInfo(NamedTuple):\n name: str\n memory_usage: int\n\n\nclass MemoryWatcher:\n BYTES_TO_MB_DIVIDER = 1024 ** 2\n\n def __init__(self, per_process_limit_mb: Optional[int]=None) -> None:\n self.per_process_limit_mb = per_process_limit_mb\n\n\n @classmethod\n def __get_memory_usage_percent(cls) -> int: \n return int(psutil.virtual_memory().percent)\n\n @classmethod\n async def get_memory_usage_percent(cls):\n return await asyncio.to_thread(cls.__get_memory_usage_percent)\n\n @classmethod\n def get_total_system_memory_mb(cls) -> int:\n return psutil.virtual_memory().total // cls.BYTES_TO_MB_DIVIDER\n\n\n def __get_fat_processes(self) -> List[ProcessInfo]:\n fat_processes = []\n if not self.per_process_limit_mb:\n return fat_processes\n for proc in psutil.process_iter():\n try:\n name = proc.name()\n usage_mb = proc.memory_info().rss // self.__class__.BYTES_TO_MB_DIVIDER\n if usage_mb >= self.per_process_limit_mb:\n fat_processes.append(\n ProcessInfo(name=name, memory_usage=usage_mb)\n )\n except (psutil.NoSuchProcess) as excn:\n logger.error(f\"Failed to find process {proc}. Details: {excn}\")\n except (psutil.AccessDenied) as exca:\n logger.error(f\"Failed to access process {proc}. Details: {exca}\")\n return fat_processes\n\n async def get_fat_processes(self):\n return await asyncio.to_thread(self.__get_fat_processes)\n","repo_name":"still-coding/memory_consumption_tool","sub_path":"monitored/mem_cons_tool/memory_watcher.py","file_name":"memory_watcher.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70718829974","text":"import requests\n\n\ndef getImageContents(img_url):\n url = \"https://us-central1-theinternetofthings.cloudfunctions.net/ProcessImage\"\n query_string = \"?\"\n query_string += \"image=\"+img_url\n r = requests.get(url + query_string)\n\n return eval(r.text)\n\ndef getImageText(img_url):\n url = \"https://us-central1-theinternetofthings.cloudfunctions.net/DefineCard\"\n query_string = \"?\"\n query_string += \"image=\"+img_url\n r = requests.get(url + query_string)\n\n return r.text\n\ndef translateText(text, lang):\n url = \"https://us-central1-theinternetofthings.cloudfunctions.net/translateLabel\"\n query_string = \"?\"\n query_string += \"text=\" + text\n query_string += \"&target=\" + lang\n r = requests.get(url + query_string)\n\n return r.text\n\n\nif __name__ == '__main__':\n print(\"Testing mode....\")\n while (True):\n img_url = input(\"Enter URL: \")\n if img_url == '':\n break\n img_url = img_url.split(\"?\")[0]\n #img_url = \"http://edge.rit.edu/edge/P15482/public/Photo Gallery/RIT_logo.jpg\"\n print(\"Getting image: \" + img_url + \"...\")\n resp = getImageText(img_url)\n print(\"Response received!\")\n print(resp)\n # for entry in resp:\n # print(entry)\n","repo_name":"jzaia18/TheInternetOfThings","sub_path":"theinternetofthings/utils/cloudFunctions.py","file_name":"cloudFunctions.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} 
+{"seq_id":"23878198185","text":"class Solution:\n def restoreIpAddresses(self, s: str) -> List[str]:\n def dfs(s, path, res):\n if len(path) == 4 and not s:\n res.append('.'.join(path))\n return\n for i in range(1, 4):\n if i <= len(s):\n if i == 1:\n dfs(s[i:], path + [s[:i]], res)\n elif i == 2 and s[0] != '0':\n dfs(s[i:], path + [s[:i]], res)\n elif i == 3 and s[0] != '0' and int(s[:3]) <= 255:\n dfs(s[i:], path + [s[:i]], res)\n res = []\n dfs(s, [], res)\n return res","repo_name":"vishrutkmr7/MyLeetCodeSubmissions","sub_path":"problems/restore_ip_addresses/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"74827652374","text":"import pandas as pd\nimport numpy as np\nimport os\n\n# Current dir path\ncur_path = os.path.curdir\n\n# Folder Path\nyobs_folder_path = r\"D:\\OneDrive - Old Dominion University\\2023 Spring\\cs620\\HW2\\hw2-b-data\"\n\n# Mergered file target path\ntarget_path = cur_path\n\n# blank ready-to-merge data file\nyobs_df = pd.DataFrame()\n\n# iterate through all file\nfor file in os.listdir(yobs_folder_path):\n # Check whether file is in text format or not\n if file.endswith(\".txt\"):\n file_path = f\"{yobs_folder_path}\\{file}\"\n \n # call read_file file function\n # read_text_file(file_path)\n read_file = pd.read_csv(file_path, header=None)\n \n yob_str = file.strip(\".txt\").strip(\"yob\")\n read_file[\"year\"] = yob_str\n \n # merge all yob data for each year\n yobs_df = pd.concat([yobs_df, read_file], axis=0)\n\n#1:1 mapping to new names\nnew_names = {\n 0: 'name',\n 1: 'sex',\n 2: 'frequency',\n 'year': 'year',\n}\n\n# #do rename\nyobs_df.rename(\n columns = new_names, \n inplace = True\n)\n\n# print(yobs_df.columns)\n# print(yobs_df.head(3))\n\n# store the merged yob data into csv file, and reorder yob column names.\n# yobs_df.to_csv(r\"yob-names\", index=None, \n# columns=['year','name','sex','frequency'])\n\n\n\n\n","repo_name":"YoungYao2022/HW2","sub_path":"hw2-b.py","file_name":"hw2-b.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7542807425","text":"import uvicorn\nimport os\n\n# import dotenv\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel\nfrom fastapi import FastAPI, UploadFile, HTTPException\nimport pandas as pd\nfrom fastapi import File\nimport httpx\nfrom module.agent import get_patient_match_result\nfrom module.helpers import get_top_5_trials\n\n# dotenv.load_dotenv(\".env\")\n# openai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nclass RandRequest(BaseModel):\n query: str\n\nclass PatientRequest(BaseModel):\n patient: str\n\n@app.post(\"/\")\n@app.get(\"/\")\ndef root():\n return \"hi\"\n\n\n@app.post(\"/uploadcsv/\")\nasync def create_upload_file(file: UploadFile = File(...)):\n if file.filename.endswith(\".csv\"):\n dataframe = pd.read_csv(file.file)\n\n modelResponse = httpx.post(\"https://api.huggingface.com/\", data=dataframe)\n return dataframe.to_dict(\"records\")\n else:\n raise HTTPException(\n status_code=400, detail=\"Invalid file type. 
Please upload a CSV file.\"\n )\n\n@app.post(\"/get_patient_match_result/\")\nasync def get_patient_match_result_endpoint(\n request: PatientRequest,\n):\n \"\"\"\n Matches a patient report against the top clinical trials retrieved by embedding search\n @params:\n patient: str\n @returns:\n result: list\n \"\"\"\n try:\n patient = request.patient\n\n if not patient:\n raise ValueError(\"patient is required\")\n\n # Get the top 5 clinical trials through embedding search\n top_5_trials = get_top_5_trials(patient_report=patient)\n \n results = []\n for document, metadata in zip(top_5_trials[\"documents\"], top_5_trials[\"metadata\"]):\n results.append( {\n \"patient_match_result\": get_patient_match_result(patient_report=patient, clinical_trial=document),\n \"metadata\": metadata\n })\n \n response = JSONResponse(\n status_code=200,\n content={\n \"status\": \"success\",\n \"data\": {\n \"result\": results\n },\n },\n )\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n except Exception as e:\n return error_handler(request, e)\n\n\n\n# This is gonna get data from the embeddings database\n@app.post(\"/get_data/\")\nasync def get_data(file: UploadFile = File(...)):\n data = pd.read_csv(file.file)\n return \"This is a test\"\n\n\n@app.post(\"/rand_request\")\nasync def rand_request_endpoint(\n request: RandRequest,\n):\n \"\"\"\n Simple test endpoint that validates the query and returns a fixed payload\n @params:\n query: str\n @returns:\n result: str\n \"\"\"\n try:\n query = request.query\n\n if not query:\n raise ValueError(\"query is required\")\n\n response = JSONResponse(\n status_code=200,\n content={\n \"status\": \"success\",\n \"data\": {\n \"result\": \"hi guys!\",\n },\n },\n )\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n except Exception as e:\n return error_handler(request, e)\n\n\ndef error_handler(request, exc):\n return JSONResponse(\n status_code=500,\n content={\n \"status\": \"error\",\n \"data\": {\n \"error_message\": str(exc),\n },\n },\n )\n\n\nif __name__ == \"__main__\":\n port = int(os.getenv(\"PORT\", 8000))\n uvicorn.run(app, host=\"0.0.0.0\", port=port)\n","repo_name":"degtrdg/clinical-matching","sub_path":"docker_deploy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4175102342","text":"# exercises/src/nebenlaufigkeit02.py\n# Solution to the exercise\n\nimport threading\nimport time\n\n\nclass MyThread(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n\n def run(self):\n for i in range(10):\n print(self.name, str(i + 1))\n time.sleep(1)\n\n\nfor i in range(100):\n thread = MyThread(\"Thread\" + str(i))\n thread.start()\n","repo_name":"MBrill/Python-Tutorial","sub_path":"exercises/src/nebenlaufigkeit02.py","file_name":"nebenlaufigkeit02.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"8355472560","text":"\"\"\" Read in the annovar file and the otherinfo headers and then filter based on columns \"\"\"\n\nimport sys\n\nannovar_output_file = sys.argv[1]\notherinfo_headers_file = sys.argv[2]\n\notherinfo_headers = []\n# read in the otherinfo headers\nwith open(otherinfo_headers_file) as fh:\n otherinfo_headers = fh.readline().strip().split(\"\\t\")\n\nfh = open(annovar_output_file)\nannovar_headers = 
fh.readline().strip().split(\"\\t\")\n\n# remove otherinfo (last element) from annovar_headers \n# and merge in the real otherheaders\n# NOTE: There are three extra headers in otherinfo before the \n# real otherinfo starts\nextra_headers = [\"zygosity_status\", \"genotype_quality\", \"read_depth\"]\nall_headers = annovar_headers[:-1] + extra_headers + otherinfo_headers\n\n\n# Everything after the FORMAT column is a sample\nsample_headers = all_headers[(all_headers.index(\"FORMAT\")+1):]\n\n# list output headers and set their default filter function to identity\noutput_headers = ['#CHROM', 'POS', 'ID', 'REF', 'ALT',\n 'wgEncodeRegTfbsClusteredV3', 'tfbsConsSites',\n 'wgEncodeRegDnaseClusteredV3', 'RegulomeDB_dbSNP141_Score',\n 'gerp++gt2', 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene',\n 'ExonicFunc.refGene', 'AAChange.refGene', 'genomicSuperDups',\n 'esp6500siv2_all', 'gnomAD_exome_ALL', 'gnomAD_genome_ALL',\n 'SIFT_score', 'Polyphen2_HDIV_score', 'CADD13_RawScore',\n 'CADD13_PHRED'] \noutput_headers_formatters = [lambda x: x for o in output_headers]\n\n# Add the sample headers and a function to extract the genotype\noutput_headers += sample_headers\noutput_headers_formatters += [lambda x : x.split(\":\")[0] for s in sample_headers]\n\n# find out all the indices\noutput_headers_indices = [all_headers.index(o) for o in output_headers]\n\n# Filter functions should return True if a line is to be kept\n# and false if a line is to be rejected\n# line is a list of strings in an annotated line\n\ndef maf_filter_functor(column_name, maf_cutoff):\n idx = all_headers.index(column_name)\n def maf_filter(line):\n val = line[idx]\n return (val == \".\") or (float(val) <= maf_cutoff)\n return maf_filter\n\ndef cadd_filter_functor(cutoff):\n idx = all_headers.index(\"CADD13_PHRED\")\n def cadd_filter(line):\n val = line[idx]\n #return (float(val) >= cutoff)\n return (val == \".\") or (float(val) >= cutoff)\n return cadd_filter\n\ndef regulomedb_filter_functor():\n idx = all_headers.index(\"RegulomeDB_dbSNP141_Score\")\n def regulomedb_filter(line):\n val = line[idx] # value of regulomedb score\n # We want to eliminate 6's or 7's\n return (val == \".\") or (val not in (\"6\", \"7\"))\n return (regulomedb_filter)\n\ndef superdups_filter_functor():\n idx = all_headers.index(\"genomicSuperDups\")\n def superdups_filter(line):\n val = line[idx]\n return val == \".\"\n return (superdups_filter)\n\ndef exonic_and_regulatory_filter_functor():\n func_refgene_idx = all_headers.index(\"Func.refGene\")\n exonicfunc_refgene_idx = all_headers.index(\"ExonicFunc.refGene\")\n tbfs_clustered_idx = all_headers.index('wgEncodeRegTfbsClusteredV3')\n tbfs_cons_idx = all_headers.index('tfbsConsSites') \n dnase_clustered_idx = all_headers.index('wgEncodeRegDnaseClusteredV3') \n gerp_idx = all_headers.index('gerp++gt2')\n def exonic_and_regulatory_filter(line):\n func_refgene = line[func_refgene_idx]\n exonicfunc_refgene = line[exonicfunc_refgene_idx]\n # if \"Func.refGene\" is in exonic or splicing region\n if func_refgene in [\"exonic\", \"exonic;splicing\", \"splicing\"]:\n # We don't want synonymous variants except in the \n # exonic;splicing region\n if exonicfunc_refgene != \"synonymous SNV\":\n return True\n else: # This is a synonymous SNV so let's keep it only\n # when we are in the exonic;splicing region\n return func_refgene == \"exonic;splicing\"\n else: # now we are probably in a region where we need to filter on \n # the regulatory elements\n tbfs_clustered = line[tbfs_clustered_idx]\n tbfs_cons 
= line[tbfs_cons_idx]\n dnase_clustered = line[dnase_clustered_idx]\n gerp_val = line[gerp_idx] \n # We keep the line if there is something in tbfs_clustered or tbfs_cons\n # and dnase_clustered is non empty\n return (((tbfs_clustered != \".\" or tbfs_cons != \".\") and (dnase_clustered != \".\"))\n and (gerp_val != \".\") and (float(gerp_val) > 2.0))\n return (exonic_and_regulatory_filter)\n\n\n# put all the filters into a list\n# not all the filters are guaranteed to run. As soon as one of them returns\n# false we stop looking at the next filters and move onto the next line\n# We should put the filters that eliminate the most lines first\nall_filters = [ \n maf_filter_functor(\"gnomAD_genome_ALL\", 0.05),\n maf_filter_functor(\"gnomAD_exome_ALL\", 0.05),\n superdups_filter_functor(),\n regulomedb_filter_functor(),\n cadd_filter_functor(12.37),\n exonic_and_regulatory_filter_functor()\n ]\n\n# Print out the header of the output file\nprint(\"\\t\".join(output_headers))\n\nfor line in fh:\n line = line.strip().split(\"\\t\")\n output_line = []\n # Pass line in the filter pipeline\n line_ok = all(filter_func(line) for filter_func in all_filters)\n if line_ok is True: # print out\n # Build an output_line\n output_line = [fmt_func(line[idx]) for idx, fmt_func in\n zip(output_headers_indices, output_headers_formatters)]\n # and print it out\n print(\"\\t\".join(output_line))\n\nfh.close()\n\n","repo_name":"sameerd/LubbeLab","sub_path":"projects/family/scripts/filter_annovar_output.py","file_name":"filter_annovar_output.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9712289800","text":"from flask import Flask\nfrom common import cache\nimport torch\n\n#def user_input():\n #get_audio()\n\n # Initialize recognizer class (for recognizing the speech)\n #r = sr.Recognizer()\n\n # Reading Audio file as source\n # listening the audio file and store in audio_text variable\n\n# with sr.AudioFile('tmp.wav') as source:\n\n# audio_text = r.listen(source)\n\n# # recoginize_() method will throw a request error if the API is unreachable, hence using exception handling\n# try:\n# #, language=\"pt-PT\"\n# # using google speech recognition\n# text = r.recognize_google(audio_text)\n# print('Converting audio transcripts into text ...')\n# print(text)\n# return text\n\n# except:\n# print('Sorry.. 
run again...')\n# text = user_input()\n# return text\n\n\ndef chat(model, tokenizer, history):\n\n for i, sentence in enumerate(history):\n new_user_input_ids = tokenizer.encode(sentence + tokenizer.eos_token, return_tensors='pt')\n # append the new user input tokens to the chat history\n bot_input_ids = torch.cat([bot_input_ids, new_user_input_ids], dim=-1) if i > 0 else new_user_input_ids\n\n # generate response\n chat_history_ids = model.generate(\n bot_input_ids,\n max_length=150,\n do_sample=True,\n temperature = 0.7,\n top_k=50, # the K most likely next words are filtered and the probability mass is redistributed among only those K next words\n top_p=0.92, # chooses from the smallest possible set of words whose cumulative probability exceeds the probability p\n no_repeat_ngram_size=3,\n pad_token_id=tokenizer.eos_token_id\n )\n\n # pretty print last output tokens from bot\n output = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)\n\n return output","repo_name":"marianafidalgo/GrandPal","sub_path":"GrandPal-Model/raspberry/server/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72951480852","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n if root is None:\n return []\n\n x = []\n x += self.inorderTraversal(root.left) # Add left subtree results\n x.append(root.val) # Add current node value\n x += self.inorderTraversal(root.right) # Add right subtree results\n return x\n","repo_name":"y938/LeetcodeSolutions","sub_path":"0094-binary-tree-inorder-traversal/0094-binary-tree-inorder-traversal.py","file_name":"0094-binary-tree-inorder-traversal.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37859051002","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Load the \"Account.csv\" dataset\ndata = pd.read_csv('Account.csv')\n\n# Display basic statistics of numerical columns\nprint(\"Basic Statistics:\")\nprint(data.describe())\n\n# Calculate the correlation matrix for numerical columns\ncorrelation_matrix = data.corr()\n\n# Create a heatmap for numerical correlations\nplt.figure(figsize=(12, 10))\nsns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', linewidths=0.5)\nplt.title('Correlation Matrix for Numerical Columns')\nplt.show()\n\n# Visualize relationships between specific columns\nsns.pairplot(data, diag_kind='kde')\nplt.suptitle('Pairplot for Selected Columns')\nplt.show()\n\n# Explore categorical columns\ncategorical_columns = data.select_dtypes(include=['object'])\nfor column in categorical_columns:\n print(f\"Unique values in {column}:\")\n print(data[column].value_counts())\n print()\n\n# Explore relationships between categorical and numerical columns\nfor categorical_column in categorical_columns:\n if categorical_column != 'crm_Account_Account': # Replace with the appropriate column name\n sns.boxplot(x=categorical_column, y='crm_Account_Account', data=data)\n plt.xticks(rotation=45)\n plt.title(f'Boxplot for {categorical_column} vs. 
crm_Account_Account')\n plt.show()\n\n# Example: use hypothesis tests or regression models for in-depth analysis\n\n","repo_name":"Flor-DeBruyne/DataEngineerProj2","sub_path":"Analyse/CorrelationAccountEx.py","file_name":"CorrelationAccountEx.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17843640564","text":"import matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport os, argparse\r\nimport pandas as pd\r\nfrom math import floor, log\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.layers import Dense, Input\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger\r\nfrom Utilities import mae, rmse, extract_fn, dataset_input\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\ntf.keras.backend.set_floatx('float64')\r\n\r\ndef layer_creator(n, x):\r\n\tif n == 512:\r\n\t\tx = Dense(512, activation = \"relu\")(x)\r\n\telse:\r\n\t\tx = Dense(n, activation = \"relu\")(x)\r\n\t\tx = layer_creator(n/2, x)\r\n\t\tx = Dense(n, activation = \"relu\")(x)\r\n\treturn x\r\n\r\ndef model(train_ds, val_ds, bdir, movie_num, epochs, batch_size, learning_rate, patience):\r\n\tos.system(\"rm \" + bdir + \"log.csv\")\r\n\tfilelendf = pd.read_csv(bdir + 'Dataset/file_length.dat', engine = 'python', sep = ':', index_col = 0)\r\n\tprint (\"--> Starting Training with learning rate = {learning_rate} for epochs = {epochs} with patience = {patience}\".format(learning_rate = learning_rate, epochs = epochs, patience = patience))\r\n\tadam = Adam(learning_rate = learning_rate)\r\n\r\n\tIp = Input(shape = (movie_num, ), name = \"Input\")\r\n\tOp = Input(shape = (movie_num, ), name = \"Target\")\r\n\tWeight = Input(shape = (movie_num, ), name = \"Weight\")\r\n\tCount = Input(shape = (1, ), name = \"Count\")\r\n\r\n\tn = pow(2, floor(log(movie_num)/log(2)))\r\n\tif n < 512:\r\n\t\tprint (\"Insufficient number of movies for a good model\")\r\n\t\texit()\r\n\telse:\r\n\t\tx = layer_creator(n, Ip)\r\n\r\n\tOutput = Dense(movie_num, activation = \"relu\", name = \"Output\")(x)\r\n\r\n\tmodel = Model(inputs = [Ip, Op, Weight, Count], outputs = Output)\r\n\tmodel.add_loss(rmse(Op, Output, Weight, Count))\r\n\tmodel.add_metric(mae(Op, Output, Weight, Count), aggregation = 'mean', name = 'mae')\r\n\tmodel.compile(optimizer = adam, loss = None, metrics = None)\r\n\t#print (model.summary())\r\n\r\n\tes = EarlyStopping(monitor = 'val_loss', mode = 'min', verbose = 1, patience = patience)\r\n\tcl = CSVLogger(bdir + 'log.csv', append = True, separator = ',')\r\n\tmc = ModelCheckpoint(bdir + 'model.h5', monitor = 'val_loss', verbose = 1, save_best_only = True)\r\n\r\n\thistory = model.fit(train_ds, epochs = epochs, steps_per_epoch = (filelendf.loc['Train']['Length'] // batch_size), validation_data = val_ds, callbacks = [es, cl, mc])\r\n\r\n\tprint (\"--> Plotting Loss\")\r\n\t#print(history.history.keys())\r\n\tplt.plot(history.history['loss'])\r\n\tplt.plot(history.history['val_loss'])\r\n\tplt.title('model loss')\r\n\tplt.ylabel('loss')\r\n\tplt.xlabel('epoch')\r\n\tplt.legend(['train', 'val'], loc = 'upper left')\r\n\tplt.savefig(bdir + 'loss.png', bbox_inches = 'tight')\r\n\r\ndef get_dataset(bdir, movie_num, batch_size):\r\n\tprint (\"--> Getting TfRecords\")\r\n\ttrain_ds = dataset_input(bdir, 'Train', movie_num, batch_size)\r\n\tval_ds = dataset_input(bdir, 'Validation', 
movie_num, batch_size)\r\n\treturn train_ds, val_ds\r\n\r\ndef main(bdir, epochs, batch_size, learning_rate, patience):\r\n\tprint (\"***Model.py***\")\r\n\tinp = open(bdir + 'Dataset/movie_num.txt', 'r')\r\n\tmovie_num = int(inp.read())\r\n\tinp.close()\r\n\ttrain_ds, val_ds = get_dataset(bdir, movie_num, batch_size)\r\n\tmodel(train_ds, val_ds, bdir, movie_num, epochs, batch_size, learning_rate, patience)\r\n\r\nif __name__ == \"__main__\":\r\n\tparser = argparse.ArgumentParser(description = \"Options\")\r\n\tparser.add_argument(\"-jb\", \"--job_dir\", type = str, default = \"\", help = \"Base Directory\")\r\n\tparser.add_argument(\"-e\", \"--epochs\", type = int, default = 1000, help = \"Number of epochs\")\r\n\tparser.add_argument(\"-bs\", \"--batch_size\", type = int, default = 256, help = \"Batch Size\")\r\n\tparser.add_argument(\"-lr\", \"--learning_rate\", type = float, default = 0.001, help = \"Learning Rate for Adam\")\r\n\tparser.add_argument(\"-p\", \"--patience\", type = int, default = 50, help = \"Early Stopping Patience\")\r\n\targs = parser.parse_args()\r\n\tmain(args.job_dir, args.epochs, args.batch_size, args.learning_rate, args.patience)","repo_name":"SauravSJK/Movie-Recommendation","sub_path":"Codes/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23630532722","text":"# troch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport os\nimport numpy as np\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nimport torchvision.transforms as transforms\nimport time\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\ndef eval(model, data_loader, device):\n print('Start test..')\n model.eval()\n correct = 0\n total = 0\n loss = 0\n\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(data_loader):\n inputs, targets = inputs.to(device), targets.to(device)\n total += targets.size(0)\n\n outputs = model(inputs)\n\n loss += criterion(outputs, targets).item()\n\n _, predicted = outputs.max(1)\n\n correct += predicted.eq(targets).sum().item()\n\n print('\\nTotal average test acc : ', correct / total)\n print('total average test_loss :', loss / total)\n\n # save model\n\n state = {\n 'net': model.state_dict()\n }\n\n# batch size\nbatch_size=128\n\n# dataset 구축\n# args='C:/Users/User/AI/ch/q1/t_v_re2'\n# traindir = os.path.join(args, 'train')\n# valdir = os.path.join(args, 'val')\ntrain_path='C:/Users/User/AI/ch/q1/t_v_re2/train/'\ntest_path='C:/Users/User/AI/ch/q1/1_test/'\n\ntrain_dataset = datasets.ImageFolder(\n train_path,\n transforms.Compose([\n transforms.ToTensor()\n ]))\n\ntest_dataset = datasets.ImageFolder(\n test_path,\n transforms.Compose([\n transforms.ToTensor()\n ]))\n\n# loader\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\ndevice=torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n#net = net.to(device)\n\n# loss\ncriterion = nn.CrossEntropyLoss().to(device)\n\n\n\n#checkpoint = torch.load(\"C:/Users/User/AI/ch/q1\")\n#net.load_state_dict(checkpoint[\"hyeon_model_best.pth\"])\n\nmodel=models.resnet50()\n#model = model.to(device)\nmodel = torch.nn.DataParallel(model).cuda()\n# opt\nlr_val = 0.1\noptimizer = 
optim.SGD(model.parameters(),lr=lr_val,momentum=0.9,weight_decay=0.0002)\nmodel_load_path = \"C:/Users/User/AI/ch/q1/model_best.pth.tar\"\n# model_load_path = \"C:/Users/User/AI/examples/imagenet/model_best.pth.tar\"\ncheckpoint=torch.load(model_load_path)\nmodel.load_state_dict(checkpoint['state_dict'])\n\n\neval(model,test_loader,device)\n\n#\n# model_load_path = \"./weights/SimpleCNN/best_model.pt\"\n# model = SimpleCNN().to(device)\n# model.load_state_dict(torch.load(model_load_path))\n","repo_name":"erain120/AI","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27395696060","text":"###########################################################\n# GAME: A 2D tile-based game made with Pygame #\n# Gaspard WIERZBINSKI, 2023 #\n# Licensed under MIT #\n###########################################################\n\n\"\"\"\nThis file contains the Entity class, which represents an object\nthat can be rendered on the screen\n\"\"\"\n\nimport pygame\nfrom pos import Coords, Vector2\nfrom variables import MAX_PLAYER_VELOCITY\n\n\nclass Entity(pygame.sprite.Sprite):\n \"\"\"An entity is an object that can be rendered on the screen\"\"\"\n\n coords: Coords\n velocity: float\n size: Vector2\n image: pygame.Surface\n rect: pygame.Rect\n dead: bool\n\n def __init__(\n self, coords: Coords, size: Vector2, velocity: Vector2 = Vector2(0, 0)\n ):\n super().__init__()\n self.coords = coords\n self.size = size\n self.velocity = velocity.x\n self.image = pygame.Surface(size.to_int_tuple())\n self.rect = self.image.get_rect()\n self.rect.x = coords.pos.x\n self.rect.y = coords.pos.y\n self.dead = False\n\n def update(self):\n \"\"\"Called every frame, at 60 frames a second\"\"\"\n self.rect.x = self.coords.pos.x\n self.rect.y = self.coords.pos.y\n\n def render(self):\n \"\"\"Render the entity on the screen\"\"\"\n surface = pygame.Surface(self.size.to_int_tuple())\n surface.blit(self.image, (0, 0))\n\n def move(self, pos: Vector2):\n \"\"\"Move the entity by the given position\"\"\"\n self.coords.pos += pos\n\n\nclass Player(Entity):\n \"\"\"The player entity\"\"\"\n\n throttle_on: bool\n frame: int\n timer: int\n sprites: list[pygame.Surface]\n\n def __init__(self, coords: Coords):\n super().__init__(coords, Vector2(43, 47))\n\n # Load animation sprites from a single image of 16x32 sprites stitched together\n spritesheet = pygame.image.load(\n \"assets/spaceship/greenships.png\"\n ).convert_alpha()\n self.sprites = [spritesheet.subsurface(pygame.Rect(0, 0, 43, 47))]\n\n self.image = self.sprites[0]\n self.frame = 0\n self.timer = 0\n self.throttle_on = False\n\n def shoot(self):\n \"\"\"Shoot a bullet\"\"\"\n OFFSET_RIGHT = 10\n # OFFSET_LEFT = 6\n\n right_side_pos = self.coords.pos # + self.coords.right() * OFFSET_RIGHT\n # left_side_pos = self.coords.pos # + self.coords.left() * OFFSET_LEFT\n\n # Play shooting sound\n sound = pygame.mixer.Sound(\"assets/sounds/laser.wav\")\n sound.set_volume(0.4)\n sound.play()\n\n return [\n Bullet(Coords(right_side_pos, self.coords.rotation), self.velocity),\n # Bullet(Coords(left_side_pos, self.coords.rotation)),\n ]\n\n def update(self):\n \"\"\"Called every frame, at 60 frames a second\"\"\"\n # Clamp velocity\n # self.velocity.x = max(\n # -MAX_PLAYER_VELOCITY, min(MAX_PLAYER_VELOCITY, self.velocity.x)\n # )\n # self.velocity.y = max(\n # -MAX_PLAYER_VELOCITY, min(MAX_PLAYER_VELOCITY, self.velocity.y)\n # )\n\n self.velocity = 
max(\n -MAX_PLAYER_VELOCITY, min(MAX_PLAYER_VELOCITY, self.velocity)\n )\n\n self.coords.pos += self.coords.forward() * self.velocity\n\n # Decrease velocity gradually if player is not pushing throttle\n if not self.throttle_on:\n self.velocity *= 0.95\n if abs(self.velocity) < 0.1:\n self.velocity = 0\n\n super().update()\n\n def render(self):\n \"\"\"Render the entity on the screen\"\"\"\n image = pygame.transform.rotate(self.image, self.coords.rotation.to_degrees())\n return image.copy()\n\n\nclass Bullet(Entity):\n \"\"\"\n A Bullet entity that is shot by the player, leaving\n behind a bullet trail and moving forwards for 10 secs\n \"\"\"\n\n coords: Coords\n sprite: pygame.Surface\n time_created: float\n\n def __init__(self, coords: Coords, velocity: float):\n super().__init__(coords, Vector2(1, 1))\n self.sprite = pygame.image.load(\"assets/ammo/ammo.png\").convert_alpha()\n self.time_created = pygame.time.get_ticks()\n self.velocity = velocity\n\n def update(self):\n \"\"\"Called every frame, at 60 frames a second\"\"\"\n # Destroy bullets after 5 seconds\n if pygame.time.get_ticks() - self.time_created > 5000:\n self.dead = True\n return\n\n BULLET_SPEED = -1.1\n self.coords.pos += self.coords.forward() * (self.velocity + BULLET_SPEED)\n super().update()\n\n def render(self):\n \"\"\"Render the entity on the screen\"\"\"\n # Image is a small red rectangle with a bullet trail\n image = pygame.transform.scale(self.sprite, (3, 8))\n\n # Draw bullet trail\n # pygame.draw.line(image, (255, 0, 0), (1, 0), (1, 50))\n\n # Draw bullet\n # pygame.draw.rect(ima ge, (255, 0, 0), pygame.Rect(0, 0, 3, 8))\n\n # Rotate bullet\n image = pygame.transform.rotate(image, self.coords.rotation.to_degrees())\n return image.copy()\n","repo_name":"CPlusPatch/adventure-tiles","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4042993612","text":"import unittest\nfrom pyredux.store import create\n\n\nclass StoreTest(unittest.TestCase):\n\n @staticmethod\n def reducer(state, action):\n return state + 1\n\n def testShouldHandleCreate(self):\n state = 1\n store = create(self.reducer, state)\n\n self.assertEqual(store.get_state(), state + 1)\n\n def testShouldRaiseTypeExceptionOnInvalidActionType(self):\n action = {}\n state = 1\n store = create(self.reducer, state)\n\n with self.assertRaises(TypeError):\n store.dispatch('foo')\n\n with self.assertRaises(TypeError):\n store.dispatch(action)\n\n def testShouldRaiseTypeExceptionOnInvalidReducerType(self):\n state = 1\n with self.assertRaises(TypeError):\n create(\"foo\", state)\n\n def testShouldRaiseExceptionWhenAlreadyDispatching(self):\n _state = 1\n _store = {\"store\": create(self.reducer, _state)}\n store = _store[\"store\"]\n\n def reducer(state, action):\n store.dispatch(action)\n return state\n\n with self.assertRaises(RuntimeError):\n store.replace_reducer(reducer)\n\n def listener(self, dispatch, get_state):\n self.assertTrue(callable(dispatch))\n self.assertTrue(callable(get_state))\n\n def testShouldFireListenerOnDispatch(self):\n state = 0\n\n store = create(self.reducer, state)\n unsubscribe = store.subscribe(self.listener)\n self.assertTrue(callable(unsubscribe))\n\n store.dispatch({\"type\": \"foo\"})\n\n self.assertTrue(unsubscribe())\n 
self.assertFalse(unsubscribe())\n","repo_name":"rikbruil/pyredux","sub_path":"test/store_test.py","file_name":"store_test.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72220933972","text":"from typing import List, Tuple\n\nimport numpy as np\n\n\ndef read_input():\n with open('d3.txt') as f:\n eins, zwei = [i.strip() for i in f.readlines()]\n\n return eins, zwei\n\n\nclass Solution:\n def __init__(self, first: str, second: str, size=20000):\n self.first_path = self.form_path(first)\n self.second_path = self.form_path(second)\n self.start = size // 2, size // 2\n\n self.first_grid = self.form_grid(self.first_path, size)\n self.second_grid = self.form_grid(self.second_path, size)\n\n def solve(self):\n indices = np.indices(self.first_grid.shape)\n total = self.first_grid * self.second_grid\n return np.abs(indices[..., total == 1].T - self.start).sum(1).min()\n\n @staticmethod\n def form_path(path: str):\n return [(p[0], int(p[1:])) for p in path.split(\",\")]\n\n def form_grid(self, path: List[Tuple[str, int]], size: int):\n x, y = self.start\n grid = np.zeros([size, size])\n for direction, steps in path:\n if direction == \"L\":\n grid[x, y - steps:y + 1] = 1\n y -= steps\n elif direction == \"R\":\n grid[x, y: y + steps + 1] = 1\n y += steps\n elif direction == \"U\":\n grid[x - steps: x + 1, y] = 1\n x -= steps\n elif direction == \"D\":\n grid[x: x + steps + 1, y] = 1\n x += steps\n else:\n raise ValueError(f\"Dir: {direction}\")\n\n grid[self.start] = 0\n return grid\n\n\ndef test1():\n for left, right, grid_size, exp in [\n (\"R8,U5,L5,D3\", \"U7,R6,D4,L4\", 20, 6),\n (\"R75,D30,R83,U83,L12,D49,R71,U7,L72\", \"U62,R66,U55,R34,D71,R55,D58,R83\", 500, 159),\n (\"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\", \"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7\", 500, 135)\n ]:\n assert Solution(left, right, grid_size).solve() == exp\n\n\ndef solve1():\n return Solution(*read_input(), size=20000).solve()\n\n\nclass Solution2:\n def __init__(self, first: str, second: str, size=20000):\n self.first_path = self.form_path(first)\n self.second_path = self.form_path(second)\n self.start = size // 2, size // 2\n\n self.first_time_grid = self.form_grid(self.first_path, size)\n self.second_time_grid = self.form_grid(self.second_path, size)\n\n def solve(self):\n mask = (self.first_time_grid > 0) & (self.second_time_grid > 0)\n return min((self.first_time_grid + self.second_time_grid)[mask])\n\n @staticmethod\n def form_path(path: str):\n return [(p[0], int(p[1:])) for p in path.split(\",\")]\n\n def form_grid(self, path: List[Tuple[str, int]], size: int):\n x, y = self.start\n time_grid = np.zeros([size, size], int)\n\n time = 0\n for direction, steps in path:\n for _ in range(steps):\n time += 1\n if direction == \"L\":\n y -= 1\n elif direction == \"R\":\n y += 1\n elif direction == \"U\":\n x -= 1\n elif direction == \"D\":\n x += 1\n\n if time_grid[x, y] == 0:\n time_grid[x, y] = time\n\n return time_grid\n\n\ndef test2():\n for left, right, grid_size, exp in [\n (\"R8,U5,L5,D3\", \"U7,R6,D4,L4\", 20, 30),\n (\"R75,D30,R83,U83,L12,D49,R71,U7,L72\", \"U62,R66,U55,R34,D71,R55,D58,R83\", 500, 610),\n (\"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\", \"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7\", 500, 410)\n ]:\n assert Solution2(left, right, grid_size).solve() == exp\n\n\ndef solve2():\n return Solution2(*read_input(), 
size=20000).solve()\n","repo_name":"DanielBok/aoc","sub_path":"2019/d3.py","file_name":"d3.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23975307328","text":"# Sample Private Key: E9873D79C6D87DC0FB6A5778633389F4453213303DA61F20BD67FC233AA33262\n# Sample DES3 Key: This is sample DES3 key.\n# Sample Blowfish Key: This is fifty-six byte Blowfish key used for encryption.\n# Sample RC2 Key: A very long and confidential key for implementing the RC2 cipher\n\nfrom Crypto.Cipher import AES, DES3, Blowfish, ARC2\nfrom Crypto.Hash import SHA256\nfrom Crypto import Random\nfrom tss import share_secret, reconstruct_secret, Hash, TSSError\nimport base64\nimport tss\nimport sys\n\n\ndef padtext1(user_input):\n while len(user_input) % 16 != 0:\n user_input += ' '\n return user_input\n\n\ndef padtext2(user_input):\n while len(user_input) % 8 != 0:\n user_input += b' '\n return user_input\n\n\ndef sha256(user_input):\n hasher = SHA256.new(user_input.encode('utf-8'))\n return hasher.digest()\n\n\n\n# AES\ndef aes():\n plain_inputAES = input(\"Enter private key to be encrypted:\\n\")\n plain_length = len(plain_inputAES)\n print('Entered private key is ' + str(plain_length) + ' characters.')\n plainAES = padtext1(plain_inputAES)\n print('\\nPrivate key after padding:')\n print(plainAES)\n print(len(plainAES))\n\n hasherAES = sha256(plainAES)\n print('\\nHash of private key to be used as AES256 encryption key: ')\n print(hasherAES)\n print(len(hasherAES))\n\n global cipherAES\n cipherAES = AES.new(hasherAES)\n\n global ciphertextAES\n ciphertextAES = cipherAES.encrypt(plainAES)\n print('\\nPrivate key after encrypting with AES256:')\n print(ciphertextAES)\n print(len(ciphertextAES))\n\n global executedAES\n executedAES = True\n\n\n\n# Shamir's Secret Sharing\ndef shamir():\n secret = ciphertextAES\n print(\"Input to Shamir's Secret:\")\n print(secret)\n print(len(secret))\n global t\n t=2\n s=3\n\n if len(sys.argv)>1:\n secret=str(sys.argv[1])\n if len(sys.argv)>2:\n t=int(sys.argv[2])\n if len(sys.argv)>3:\n s=int(sys.argv[3])\n\n global shares\n shares = tss.share_secret(t, s, secret, 'my-id', Hash.NONE)\n print(\"\\n~~~~~ Key Split in 3 Shares ~~~~~\")\n for x in range(0, s):\n print(shares[x])\n\n global constructedShamir\n constructedShamir = True\n\n\n\n# DES3\ndef des3():\n print('Input to DES3:')\n plain_inputDES3 = shares[0]\n print(plain_inputDES3)\n print(len(plain_inputDES3))\n\n plainDES3 = padtext2(plain_inputDES3)\n print(\"\\nShamir's secret 1 after padding:\")\n print(plainDES3)\n print(len(plainDES3))\n\n while True:\n global DES3keyEn\n DES3keyEn = input('\\nEnter a 24 characters DES3 encryption Key:\\n')\n\n if len(DES3keyEn) < 24:\n print('Error: Entered Key is not 24 characters!')\n elif len(DES3keyEn) >= 25:\n print('Error: Entered Key is not 24 characters!')\n else:\n break\n\n print('\\nEntered DES3 Key:\\n' + DES3keyEn)\n\n cipherDES3 = DES3.new(DES3keyEn)\n\n global ciphertextDES3\n ciphertextDES3 = cipherDES3.encrypt(plainDES3)\n print(\"\\nShamir's secret 1 after encrypting with DES3:\")\n print(ciphertextDES3)\n print(len(ciphertextDES3))\n\n global executedDES3\n executedDES3 = True\n\n\n\n# Blowfish\ndef blow():\n print('Input to Blowfish:')\n plain_inputBLOW = shares[1]\n print(plain_inputBLOW)\n print(len(plain_inputBLOW))\n\n plainBLOW = padtext2(plain_inputBLOW)\n print(\"\\nShamir's secret 2 after padding:\")\n print(plainBLOW)\n print(len(plainBLOW))\n\n while 
True:\n global BLOWkeyEn\n BLOWkeyEn = input('\\nEnter a 56 characters Blowfish encryption Key:\\n')\n\n if len(BLOWkeyEn) < 56:\n print('Error: Entered Key is not 56 characters!')\n elif len(BLOWkeyEn) >= 57:\n print('Error: Entered Key is not 56 characters!')\n else:\n break\n\n print('\\nEntered Blowfish Key:\\n' + BLOWkeyEn)\n\n cipherBLOW = Blowfish.new(BLOWkeyEn)\n\n global ciphertextBLOW\n ciphertextBLOW = cipherBLOW.encrypt(plainBLOW)\n print(\"\\nShamir's secret 2 after encrypting with Blowfish:\")\n print(ciphertextBLOW)\n print(len(ciphertextBLOW))\n\n global executedBLOW\n executedBLOW = True\n\n\n\n# RC2\ndef rc2():\n print('Input to RC2:')\n plain_inputRC2 = shares[2]\n print(plain_inputRC2)\n print(len(plain_inputRC2))\n\n plainRC2 = padtext2(plain_inputRC2)\n print(\"\\nShamir's secret 3 after padding:\")\n print(plainRC2)\n print(len(plainRC2))\n\n while True:\n global RC2keyEn\n RC2keyEn = input('\\nEnter a 64 characters RC2 encryption Key:\\n')\n\n if len(RC2keyEn) < 64:\n print('Error: Entered Key is not 64 characters!')\n elif len(RC2keyEn) >= 65:\n print('Error: Entered Key is not 64 characters!')\n else:\n break\n\n print('\\nEntered RC2 Key:\\n' + RC2keyEn)\n\n cipherRC2 = ARC2.new(RC2keyEn)\n\n global ciphertextRC2\n ciphertextRC2 = cipherRC2.encrypt(plainRC2)\n print(\"\\nShamir's secret 3 after encrypting with RC2:\")\n print(ciphertextRC2)\n print(len(ciphertextRC2))\n\n global executedRC2\n executedRC2 = True\n\n\n\n# Decrypting AES\ndef dcrpt_aes():\n plaintextAES = cipherAES.decrypt(ciphertextAES)\n stringAES = plaintextAES.replace(b' ', b'')\n print('Original private key after decrypting AES:')\n pvtkey = stringAES.decode()\n print(pvtkey)\n print(len(pvtkey))\n\n global decryptedAES\n decryptedAES = True\n\n\n\n# Decrypting DES3\ndef dcrpt_des3():\n while True:\n DES3keyDe = input('Enter the 24 characters DES3 decryption Key:\\n')\n\n if len(DES3keyDe) < 24:\n print('Error: Entered Key is not 24 characters!\\n')\n elif len(DES3keyDe) >= 25:\n print('Error: Entered Key is not 24 characters!\\n')\n else:\n if DES3keyDe == DES3keyEn:\n cipherDES3 = DES3.new(DES3keyDe)\n break\n else:\n print('Error: Wrong key was entered. Please try again!\\n')\n\n plaintextDES3 = cipherDES3.decrypt(ciphertextDES3)\n stringDES3 = plaintextDES3.replace(b' ', b'')\n print(\"\\nShamir's secret 1 after decrypting DES3:\")\n print(stringDES3)\n print(len(stringDES3))\n\n global decryptedDES3\n decryptedDES3 = True\n\n\n\n# Decrypting Blowfish\ndef dcrpt_blow():\n while True:\n BLOWkeyDe = input('Enter the 56 characters Blowfish decryption Key:\\n')\n\n if len(BLOWkeyDe) < 56:\n print('Error: Entered Key is not 56 characters!\\n')\n elif len(BLOWkeyDe) >= 57:\n print('Error: Entered Key is not 56 characters!\\n')\n else:\n if BLOWkeyDe == BLOWkeyEn:\n cipherBLOW = Blowfish.new(BLOWkeyDe)\n break\n else:\n print('Error: Wrong key was entered. 
Please try again!\\n')\n\n plaintextBLOW = cipherBLOW.decrypt(ciphertextBLOW)\n stringBLOW = plaintextBLOW.replace(b' ', b'')\n print(\"Shamir's secret 2 after decrypting Blowfish:\")\n print(stringBLOW)\n print(len(stringBLOW))\n\n global decryptedBLOW\n decryptedBLOW = True\n\n\n\n# Decrypting RC2\ndef dcrpt_rc2():\n while True:\n RC2keyDe = input('Enter the 64 characters RC2 decryption Key:\\n')\n\n if len(RC2keyDe) < 64:\n print('Error: Entered Key is not 64 characters!\\n')\n elif len(RC2keyDe) >= 65:\n print('Error: Entered Key is not 64 characters!\\n')\n else:\n if RC2keyDe == RC2keyEn:\n cipherRC2 = ARC2.new(RC2keyDe)\n break\n else:\n print('Error: Wrong key was entered. Please try again!\\n')\n\n plaintextRC2 = cipherRC2.decrypt(ciphertextRC2)\n stringRC2 = plaintextRC2.replace(b' ', b'')\n print(\"Shamir's secret 3 after decrypting RC2:\")\n print(stringRC2)\n print(len(stringRC2))\n\n global decryptedRC2\n decryptedRC2 = True\n\n\n\n# Decrypting Shamir's Secret Sharing\ndef dcrpt_shamir():\n reconstructed_secret1 = tss.reconstruct_secret(shares[0:t])\n print(\"Reconstructed by User and Provider:\")\n print(reconstructed_secret1)\n\n reconstructed_secret2 = tss.reconstruct_secret(shares[0:t+1])\n print(\"\\nReconstructed by All:\")\n print(reconstructed_secret2)\n\n global reconstructShamir\n reconstructShamir = True\n\n\n\n# Main function\nif __name__ == \"__main__\":\n executedAES = False\n constructedShamir = False\n executedDES3 = False\n executedBLOW = False\n executedRC2 = False\n decryptedAES = False\n reconstructShamir = False\n decryptedDES3 = False\n decryptedBLOW = False\n decryptedRC2 = False\n\n while True:\n print(\"\"\"\n ***** MENU *****\n 1. Enter Private key for encryption.\n 2. Split the Private key with Shamir's Secret Sharing.\n 3. Encrypt Secret 1 with DES3.\n 4. Encrypt Secret 2 with Blowfish.\n 5. Encrypt Secret 3 with RC2.\n 6. Decrypt Secret 1.\n 7. Decrypt Secret 2.\n 8. Decrypt Secret 3.\n 9. Reconstruct Shamir's Secret.\n 10. Reconstruct Private Key.\n 11. 
Exit/Quit\n \"\"\")\n ans = input(\"What would you like to do?\\n\")\n if ans == \"1\":\n if executedAES:\n print(\"Already executed!\\nGo to Step 2.\")\n else:\n print(\"\\nHashing with SHA256 & encrypting with AES256:-\")\n aes()\n elif ans == \"2\":\n if constructedShamir:\n print(\"Already executed!\\nGo to Step 3.\")\n elif not executedAES:\n print(\"First complete Step 1!\")\n else:\n print(\"\\nSplitting the key:-\")\n shamir()\n elif ans == \"3\":\n if executedDES3:\n print(\"Already executed!\\nGo to Step 4.\")\n elif not constructedShamir:\n print(\"First complete Step 2!\")\n else:\n print(\"\\nEncrypting with DES3:-\")\n des3()\n elif ans == \"4\":\n if executedBLOW:\n print(\"Already executed!\\nGo to Step 5.\")\n elif not constructedShamir:\n print(\"First complete Step 2!\")\n else:\n print(\"\\nEncrypting with Blowfish:-\")\n blow()\n elif ans == \"5\":\n if executedRC2:\n print(\"Already executed!\\nGo to Step 6.\")\n elif not constructedShamir:\n print(\"First complete Step 2!\")\n else:\n print(\"\\nEncrypting with RC2:-\")\n rc2()\n elif ans == \"6\":\n if decryptedDES3:\n print(\"Already executed!\\nGo to Step 7.\")\n elif not executedDES3:\n print(\"First complete Step 3!\")\n else:\n print(\"\\nDecrypting Secret 1:-\")\n dcrpt_des3()\n elif ans == \"7\":\n if decryptedBLOW:\n print(\"Already executed!\\nGo to Step 8.\")\n elif not executedBLOW:\n print(\"First complete Step 4!\")\n else:\n print(\"\\nDecrypting Secret 2:-\")\n dcrpt_blow()\n elif ans == \"8\":\n if decryptedRC2:\n print(\"Already executed!\\nGo to Step 9.\")\n elif not executedRC2:\n print(\"First complete Step 5!\")\n else:\n print(\"\\nDecrypting Secret 3:-\")\n dcrpt_rc2()\n elif ans == \"9\":\n if reconstructShamir:\n print(\"Already executed!\\nGo to Step 10.\")\n elif not decryptedDES3 or not decryptedBLOW or not decryptedRC2:\n print(\"First complete Steps 6,7, and 8!\")\n else:\n print(\"\\nReconstructing Secret:-\")\n dcrpt_shamir()\n elif ans == \"10\":\n if decryptedAES:\n print(\"Already executed!\\nYou can Exit/Quit the program.\")\n elif not reconstructShamir:\n print(\"First complete Step 9!\")\n else:\n print(\"\\nReconstructing Private Key:-\")\n dcrpt_aes()\n elif ans == \"11\":\n print(\"Goodbye!\")\n break\n elif ans != \"\":\n print(\"Enter a valid choice & try again!\")","repo_name":"Sanmesh3/Key-Management-System","sub_path":"KeyMgmt.py","file_name":"KeyMgmt.py","file_ext":"py","file_size_in_byte":12104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1753620692","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport requests\nimport bs4 as bs\nimport pandas as pd\nimport time\n\n# Write a single function to:\n# 1. Get the list of topics from the topics page\n# 2. Get list of top repos from the individual topic pages\n# 3. 
For each topic create a csv of the top repos for the topic \n\n# In[12]:\n\n\ndef get_topic_title(doc):\n selection_class = 'f3 lh-condensed mb-0 mt-1 Link--primary'\n topic_title_tag = doc.find_all('p',{'class': selection_class })\n \n topic_title = []\n for tag in topic_title_tag:\n topic_title.append(tag.text)\n return topic_title\n\ndef get_topic_desc(doc):\n desc_selector = 'f5 color-fg-muted mb-0 mt-1'\n topic_desc_tag = doc.find_all('p',{'class':desc_selector})\n topic_desc = []\n for desc in topic_desc_tag:\n topic_desc.append(desc.text.strip())\n return topic_desc\n \n \ndef get_topic_url(doc):\n base_url = \"https://github.com\"\n url_selector = 'no-underline flex-1 d-flex flex-column'\n topic_link_tag = doc.find_all('a',{'class':url_selector}) \n topic_urls = []\n \n for url in topic_link_tag:\n topic_urls.append(base_url+url['href'])\n return topic_urls\n \ndef scrape_topics(num):\n topic_url = 'https://github.com/topics?page={}'.format(num)\n response = requests.get(topic_url)\n if response.status_code !=200:\n print(\"Process Stops restarting..........\")\n time.sleep(20)\n os.system(\"ScraperCode.py 1\")\n# raise Exception('Failed to load the page {}'.format(topic_url))\n \n doc = bs.BeautifulSoup(response.text,'html.parser')\n topics_dict = {\n 'title' : get_topic_title(doc),\n 'description' : get_topic_desc(doc),\n 'url' : get_topic_url(doc),\n \n }\n# print(topics_dict)\n return pd.DataFrame(topics_dict)\n\n\n# In[3]:\n\n\ndef get_topic_page(topic_url):\n response = requests.get(topic_url)\n if response.status_code !=200:\n print(\"Process Stops restarting..........\")\n time.sleep(20)\n os.system(\"ScraperCode.py 1\")\n # raise Exception('Failed to load the page {}'.format(topic_url))\n topic_doc = bs.BeautifulSoup(response.text,'html.parser')\n return topic_doc\n\ndef get_repo_info(h3_tag, star_tags):\n #gives all info about repository\n base_url = \"https://github.com\"\n a_tags = h3_tag.find_all('a')\n username = a_tags[0].text.strip()\n repo_name = a_tags[1].text.strip()\n repo_url = repo_url = base_url + a_tags[1]['href']\n star_count = parse_star_count(star_tags.text)\n return username, repo_name, star_count, repo_url\n\ndef parse_star_count(stars_count):\n stars_str = stars_count.strip()\n if stars_str[-1] == 'k':\n return int(float(stars_str[:-1])*1000)\n return (int(stars_str))\n \n\ndef get_topic_repos(topic_doc):\n \n# get h3 tag for repo name, url, etc.\n h3_selection_class = 'f3 color-fg-muted text-normal lh-condensed'\n repo_tags = topic_doc.find_all('h3',{'class': h3_selection_class})\n \n# get star tags\n star_selector = 'Counter js-social-count'\n star_tags = topic_doc.find_all('span',{'class': star_selector})\n \n# get repo info\n topic_repos_dict = {\n 'username':[],\n 'repo_name':[],\n 'stars': [],\n 'repo_url':[]\n }\n for i in range (len(repo_tags)):\n repo_info = get_repo_info(repo_tags[i],star_tags[i])\n topic_repos_dict['username'].append(repo_info[0])\n topic_repos_dict['repo_name'].append(repo_info[1])\n topic_repos_dict['stars'].append(repo_info[2])\n topic_repos_dict['repo_url'].append(repo_info[3])\n \n# put all in dataframe\n topic_repos = pd.DataFrame(topic_repos_dict)\n return topic_repos\n\ndef scrape_topic(topic_url,path):\n\n if os.path.exists(path):\n print(\"File {} already exits.\".format(path))\n return\n topic_df = get_topic_repos(get_topic_page(topic_url))\n topic_df.to_csv(path,index=None)\n \n\n\n# In[4]:\n\n\ndef scrape_topics_repos():\n for i in range(1,7):\n topics_df = scrape_topics(i)\n\n os.makedirs('Resultant Data 
Files',exist_ok = True)\n\n for index,row in topics_df.iterrows():\n print(\"Scrapping topic {}.\".format(row['title']))\n scrape_topic(row['url'],'Resultant Data Files/{}.csv'.format(row['title']))\n \n\n\n# In[18]:\n\nif __name__ == '__main__':\n scrape_topics_repos()\n\n\n# ###### Helper site\n\n# https://jovian.ai/aakashns-6l3/scraping-github-topics-repositories\n\n# In[ ]:\n\n\n\n\n","repo_name":"mayankg6453/Git-Scraper","sub_path":"ScraperCode.py","file_name":"ScraperCode.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2075581484","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\nimport os\nimport sys\n\nthis_py = os.path.split(sys.argv[0])[-1][:-3]\n\n\ndef F0(t):\n if t < 0:\n return .0\n elif 0 <= t <= 120:\n return 1 - (1 - t / 120) ** (1 / 6)\n else:\n return 1.\n\n\ndef S0(t):\n return 1 - F0(t)\n\n\ndef pxt(x, t):\n if t <= 0:\n return 1\n try:\n return S0(x + t) / S0(x)\n except ZeroDivisionError:\n return .0\n\n\ndef qxt(x, t):\n return 1 - pxt(x, t)\n\n\ndef mu(x):\n if x < 0:\n return .0\n elif x >= 125:\n return np.inf\n else:\n return 1 / (720-6 * x)\n\n\n'''\nThe probability of $(25)$ dying before complete 50 year old.\n'''\nprint(round(qxt(25, 25), 10))\n\n'''\n$\\px[5]{45}$\n'''\nprint(round(pxt(45, 5), 10))\n\n'''\n$\\qx[5|2]{45}=\\px[5]{45}\\:\\qx[2]{47}\n'''\nprint(round(pxt(45, 5) * qxt(50, 2), 10))\n\n'''\nE(K_{118})\n'''\nprobs = [pxt(118, t) for t in range(1, 3)]\nprint(round(sum(probs), 10))\n\n'''some graphs'''\nx_s = np.linspace(0, 120, 1000)\n\n''' Ln force of mortality'''\nforce_of_motality_lst = [mu(t) for t in x_s]\nfig, axes = plt.subplots()\nplt.plot(x_s, np.log(force_of_motality_lst), label=f'Mortality Force({0}, {120})')\nplt.xlabel(r'$x$')\nplt.ylabel(r'$\\mu_{x}$')\nplt.title(r'Force of Mortality')\nplt.grid(b=True, which='both', axis='both', color='grey', linestyle='-', linewidth=.1)\nplt.legend()\nplt.savefig(this_py + '_force_of_mortality' + '.eps', format='eps', dpi=3600)\nplt.show()\n\n''' Survival Function '''\nprob_survival_lst = [S0(t) for t in x_s]\nfig, axes = plt.subplots()\nplt.plot(x_s, prob_survival_lst, label=f'Survival Function({0}, {120})')\nplt.xlabel(r'$x$')\nplt.ylabel(r'$S_{0}(x)$')\nplt.title(r'Survival Function')\nplt.grid(b=True, which='both', axis='both', color='grey', linestyle='-', linewidth=.1)\nplt.legend()\nplt.savefig(this_py + '_Survival_Function' + '.eps', format='eps', dpi=3600)\nplt.show()","repo_name":"parcr/lifeActuary_dev","sub_path":"exercisesLifeContingencies/test1_2021_22/survivalFunction_power6_age120.py","file_name":"survivalFunction_power6_age120.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"5235091391","text":"from typing import TYPE_CHECKING\n\nimport kerasltisubmission as klti\nfrom kerasltisubmission.exceptions import KerasLTISubmissionBadResponseException\n\nif TYPE_CHECKING: # pragma: no cover\n import keras.models\n\n\nclass Submission:\n def __init__(\n self,\n user_token: klti.AnyIDType,\n assignment_id: klti.AnyIDType,\n model: \"keras.models.Model\",\n ):\n self.user_token = user_token\n self.assignment_id = assignment_id\n self.model = model\n\n def submit(self, verbose: bool = True, strict: bool = False) -> None:\n provider = klti.LTIProvider(\n input_api_endpoint=\"https://neuralnet.xopic.de/ltiprovider\",\n 
submission_api_endpoint=\"https://neuralnet.xopic.de/ltiprovider/submit\",\n user_token=self.user_token,\n )\n\n submission = klti.Submission(assignment_id=self.assignment_id, model=self.model)\n\n try:\n print(f\"Model wird validiert...\")\n results = provider.submit(submission, verbose=verbose, strict=strict)\n for assignment_id, result in results.items():\n print(f\"Assignment {assignment_id} erfolgreich abgegeben!\")\n print(\n f\"Dein Model hat eine Accuracy von {round(result.get('accuracy') * 100, ndigits=1)}% auf unseren Validierungsdaten.\"\n )\n print(\n f\"Du erhältst {round(result.get('grade') * 100, ndigits=1)}% der Punkte auf dieses Assignment.\"\n )\n print(\n f\"Falls du bereits eine Abgabe mit höherer Bewertung abgegeben hast, wird automatisch das bessere Ergebnis gewählt.\"\n )\n except KerasLTISubmissionBadResponseException as e:\n print(e.message)\n except Exception as e:\n print(str(e))\n","repo_name":"into-ai/deeplearning2020","sub_path":"deeplearning2020/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"de","doc_type":"code","stars":110,"dataset":"github-code","pt":"67"} +{"seq_id":"26390884289","text":"#!/usr/bin/env python3\n\nimport logging\nfrom .baseClassifier import Classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils import shuffle\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom IPython import embed\nlogging.basicConfig(level=logging.DEBUG)\n\nclass Mlp(Classifier):\n def init(self):\n self.score = 0\n self.confusion_matrix = 0\n self.y_pred = 0\n self.model = Sequential()\n self.model.add(Dense(200, activation='relu', input_dim=self.x_train[0].shape[1]))\n self.model.add(Dropout(0.5))\n self.model.add(Dense(2, activation='softmax'))\n self.model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n self.x_train, self.y_train = shuffle(self.x_train, self.y_train, random_state=0)\n self.x_test, self.y_test = shuffle(self.x_test, self.y_test, random_state=0)\n #self.x_train = self.x_train.todense()\n #self.x_test = self.x_test.todense()\n self.cat_y_train = to_categorical(self.y_train, num_classes=2)\n self.cat_y_test = to_categorical(self.y_test, num_classes=2)\n\n def run(self):\n self.model.fit(self.x_train, self.cat_y_train, validation_split=0.25, epochs=100, batch_size=128)\n self.score = self.model.evaluate(self.x_test, self.cat_y_test, batch_size=128)\n self.y_pred = self.model.predict_classes(self.x_test)\n self.confusion_matrix = confusion_matrix(self.y_test, self.y_pred)\n\n def get_score(self):\n return self.score[-1]\n\n def get_y_pred(self):\n return self.y_pred.astype(float)\n\n def get_confusion_matrix(self):\n return self.confusion_matrix\n","repo_name":"mro15/text-classification","sub_path":"classifiers/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26419548864","text":"import numpy as np\n\n\nclass SVM:\n def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000):\n self.lr = learning_rate\n self.lambda_param = lambda_param\n self.n_iters = n_iters\n self.w = None\n self.b = None\n\n def _init_weights_bias(self, X):\n n_features = X.shape[1]\n print(\"Features\")\n print(n_features)\n self.w = np.zeros(n_features)\n self.b = 0\n\n def 
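The mlp.py record just above pairs a two-unit softmax output with `binary_crossentropy`; the more conventional pairings are `categorical_crossentropy` with one-hot targets, or a single sigmoid unit with `binary_crossentropy`. Independent of that choice, the headline metrics can be read straight off the confusion matrix the class already computes; a self-contained sketch with toy labels (the arrays are illustrative, not from the dataset):

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# Toy labels standing in for self.y_test / self.y_pred in the class above.
y_true = np.array([0, 1, 1, 0, 1, 0, 1, 1])
y_pred = np.array([0, 1, 0, 0, 1, 1, 1, 1])

# For binary labels the 2x2 matrix ravels to tn, fp, fn, tp.
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
accuracy = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(f"acc={accuracy:.2f} prec={precision:.2f} rec={recall:.2f}")
```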
_satisfy_constraint(self, x, idx):\n linear_model = np.dot(x, self.w) + self.b\n return self.cls_map[idx] * linear_model >= 1\n\n def _get_gradients(self, constrain, x, idx):\n # if data point lies on the correct side\n if constrain:\n dw = self.lambda_param * self.w\n db = 0\n return dw, db\n # if data point is on the wrong side\n dw = self.lambda_param * self.w - np.dot(self.cls_map[idx], x)\n db = - self.cls_map[idx]\n return dw, db\n\n def _update_weights_bias(self, dw, db):\n self.w -= self.lr * dw\n self.b -= self.lr * db\n\n def fit(self, X, y):\n # init weights & biases\n n_features = X.shape[1]\n self.w = np.zeros(n_features)\n self.b = 0\n # map binary class to {-1, 1}\n self.cls_map = np.where(y <= 0, -1, 1)\n\n for _ in range(self.n_iters):\n for idx, x in enumerate(X):\n # check if data point satisfies the constraint\n constrain = self._satisfy_constraint(x, idx)\n # compute the gradients accordingly\n dw, db = self._get_gradients(constrain, x, idx)\n # update weights & biases\n self._update_weights_bias(dw, db)\n\n def predict(self, X):\n estimate = np.dot(X, self.w) + self.b\n # compute the sign\n prediction = np.sign(estimate)\n # map class from {-1, 1} to original values {0, 1}\n return np.where(prediction == -1, 0, 1)\n","repo_name":"Clementine2829/honors-project","sub_path":"my_models/SupportVectorMachineClass/SupportVectorMachine.py","file_name":"SupportVectorMachine.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8017638157","text":"from . import CGAL\nfrom . import Surface_mesh\nfrom . import Polygon_mesh_processing\nfrom . import geom\nfrom . import Aff_transformation_3\nfrom . import Vector_3\nfrom . import Point_2\nfrom . import Partition_traits_2_Polygon_2\nfrom . import Polygon_2\nfrom . import Polygon_with_holes_2\nfrom . import Polyhedron_3\nfrom . import Triangle_3\nfrom . import Vector_3\nfrom . import CGAL\nfrom . 
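The SupportVectorMachine.py record above runs subgradient descent on the soft-margin objective λ‖w‖² + Σ max(0, 1 − yᵢ(w·xᵢ + b)), mapping {0, 1} labels to {−1, +1} internally via `np.where`. A usage sketch on a tiny separable set, assuming the `SVM` class above is in scope (the data points are mine):

```python
import numpy as np

# Tiny linearly separable toy set; labels in {0, 1} as fit() expects.
X = np.array([[2.0, 3.0], [1.0, 1.5], [8.0, 9.0], [9.0, 7.5]])
y = np.array([0, 0, 1, 1])

clf = SVM(learning_rate=0.01, lambda_param=0.01, n_iters=500)
clf.fit(X, y)
print(clf.predict(X))  # expected on this toy data: [0 0 1 1]
print(clf.w, clf.b)    # learned hyperplane parameters
```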
import pythonHelpers\n\nimport numpy as _np\n\n\nclass CSG:\n def __init__(self):\n self.sm = Surface_mesh.Surface_mesh_EPECK()\n\n @classmethod\n def fromPolygons(cls, polygons, **kwargs):\n csg = CSG()\n csg.sm = Surface_mesh.Surface_mesh_EPECK()\n Surface_mesh.toCGALSurfaceMesh(csg.sm, polygons)\n Polygon_mesh_processing.triangulate_faces(csg.sm)\n return csg\n\n def toVerticesAndPolygons(self):\n return Surface_mesh.toVerticesAndPolygons(self.sm)\n\n def clone(self):\n csg = CSG()\n csg.sm = self.sm.clone()\n return csg\n\n def rotate(self, axisIn, angleDeg):\n rot = _np.zeros((3, 3))\n\n axis = geom.Vector(axisIn)\n\n normAxis = axis / axis.length()\n\n cosAngle = _np.cos(-angleDeg / 180.0 * _np.pi)\n sinAngle = _np.sin(-angleDeg / 180.0 * _np.pi)\n verSin = 1 - cosAngle\n\n x = normAxis.x\n y = normAxis.y\n z = normAxis.z\n\n rot[0][0] = (verSin * x * x) + cosAngle\n rot[0][1] = (verSin * x * y) - (z * sinAngle)\n rot[0][2] = (verSin * x * z) + (y * sinAngle)\n\n rot[1][0] = (verSin * y * x) + (z * sinAngle)\n rot[1][1] = (verSin * y * y) + cosAngle\n rot[1][2] = (verSin * y * z) - (x * sinAngle)\n\n rot[2][0] = (verSin * z * x) - (y * sinAngle)\n rot[2][1] = (verSin * z * y) + (x * sinAngle)\n rot[2][2] = (verSin * z * z) + cosAngle\n\n rotn = Aff_transformation_3.Aff_transformation_3_EPECK(\n rot[0][0],\n rot[0][1],\n rot[0][2],\n rot[1][0],\n rot[1][1],\n rot[1][2],\n rot[2][0],\n rot[2][1],\n rot[2][2],\n 1,\n )\n Polygon_mesh_processing.transform(rotn, self.sm)\n\n def translate(self, disp):\n vIn = geom.Vector(disp)\n # TODO tidy vector usage (i.e conversion in geom?)\n v = Vector_3.Vector_3_EPECK(vIn[0], vIn[1], vIn[2])\n transl = Aff_transformation_3.Aff_transformation_3_EPECK(CGAL.Translation(), v)\n Polygon_mesh_processing.transform(transl, self.sm)\n\n # TODO need to finish and check signatures\n def scale(self, *args):\n if len(args) == 3: # x,y,z\n x = args[0]\n y = args[1]\n z = args[2]\n elif len(args) == 1:\n if type(args[0]) is list:\n x = args[0][0]\n y = args[0][1]\n z = args[0][2]\n else: # Vector\n x = args[0][0]\n y = args[0][1]\n z = args[0][2]\n else:\n x = 1\n y = 1\n z = 1\n scal = Aff_transformation_3.Aff_transformation_3_EPECK(x, 0, 0, 0, y, 0, 0, 0, z, 1)\n Polygon_mesh_processing.transform(scal, self.sm)\n\n def getNumberPolys(self):\n return self.sm.number_of_faces()\n\n def getNumberVertices(self):\n return self.sm.number_of_vertices()\n\n def vertexCount(self):\n return self.sm.number_of_vertices()\n\n def polygonCount(self):\n return self.sm.number_of_faces()\n\n def intersect(self, csg2):\n out = Surface_mesh.Surface_mesh_EPECK()\n Polygon_mesh_processing.corefine_and_compute_intersection(self.sm, csg2.sm, out)\n csg = CSG()\n csg.sm = out\n return csg\n\n def union(self, csg2):\n out = Surface_mesh.Surface_mesh_EPECK()\n Polygon_mesh_processing.corefine_and_compute_union(self.sm, csg2.sm, out)\n csg = CSG()\n csg.sm = out\n return csg\n\n def subtract(self, csg2):\n out = Surface_mesh.Surface_mesh_EPECK()\n Polygon_mesh_processing.corefine_and_compute_difference(self.sm, csg2.sm, out)\n csg = CSG()\n csg.sm = out\n return csg\n\n def inverse(self):\n CGAL.reverse_face_orientations(self.sm)\n return self\n\n # TODO finish coplanar intersection\n def coplanarIntersection(self, csg):\n \"\"\"\n Compute the coplanar surfaces between self and csg\n\n \"\"\"\n\n sm1 = self.sm\n sm2 = csg.sm\n\n #######################################\n # triangle planes\n #######################################\n\n def makePlaneList(sm):\n # triangle plane list\n tplanel 
= []\n\n # loop over sm1 faces and make planes\n for f in sm.faces():\n he = Surface_mesh.Halfedge_index()\n sm.halfedge(f, he)\n\n tpl = []\n\n for he1 in CGAL.halfedges_around_face(he, sm):\n vi = sm.source(he1)\n p = sm.point(vi)\n tpl.append(p)\n\n t = Triangle_3.Triangle_3_EPECK(tpl[0], tpl[1], tpl[2])\n if t.is_degenerate():\n print(\"degenerate triangle\")\n\n pl = t.supporting_plane()\n tplanel.append([pl, t])\n\n return tplanel\n\n #######################################\n # Are two planes close?\n #######################################\n def close(p1, p2):\n p1dir = p1[0].orthogonal_direction()\n p1poi = p1[0].point()\n\n p2dir = p2[0].orthogonal_direction()\n p2poi = p2[0].point()\n\n # print(p1dir,p2dir,p1poi,p2poi)\n pd0 = Vector_3.Vector_3_EPECK(p2poi, p1poi)\n dd0 = p2dir.vector() - p1dir.vector()\n dd1 = p2dir.vector() + p1dir.vector()\n\n if CGAL.to_double(pd0.squared_length()) < 0.0001 and (\n CGAL.to_double(dd0.squared_length()) < 0.0001\n or CGAL.to_double(dd1.squared_length()) < 0.0001\n ):\n return True\n else:\n return False\n\n tpl1 = makePlaneList(sm1)\n tpl2 = makePlaneList(sm2)\n\n # return surface mesh\n c = CSG()\n out = c.sm\n\n # close planes\n for tpl1i in tpl1:\n for tpl2i in tpl2:\n # check if planes are close\n bClose = close(tpl1i, tpl2i)\n\n # if close compute 2d intersection\n if bClose:\n t1td0 = tpl1i[0].to_2d(tpl1i[1][0])\n t1td1 = tpl1i[0].to_2d(tpl1i[1][1])\n t1td2 = tpl1i[0].to_2d(tpl1i[1][2])\n\n t2td0 = tpl1i[0].to_2d(tpl2i[1][0])\n t2td1 = tpl1i[0].to_2d(tpl2i[1][1])\n t2td2 = tpl1i[0].to_2d(tpl2i[1][2])\n\n pgon1 = Polygon_2.Polygon_2_EPECK()\n pgon1.push_back(t1td0)\n pgon1.push_back(t1td1)\n pgon1.push_back(t1td2)\n\n pgon2 = Polygon_2.Polygon_2_EPECK()\n pgon2.push_back(t2td0)\n pgon2.push_back(t2td1)\n pgon2.push_back(t2td2)\n\n pgon3 = Polygon_with_holes_2.List_Polygon_with_holes_2_EPECK()\n CGAL.intersection(pgon1, pgon2, pgon3)\n\n if len(pgon3) != 0:\n v10 = out.add_vertex(tpl1i[1][0])\n v11 = out.add_vertex(tpl1i[1][1])\n v12 = out.add_vertex(tpl1i[1][2])\n\n v20 = out.add_vertex(tpl2i[1][0])\n v21 = out.add_vertex(tpl2i[1][1])\n v22 = out.add_vertex(tpl2i[1][2])\n\n out.add_face(v10, v11, v12)\n out.add_face(v20, v21, v22)\n\n \"\"\"\n for pwh in pgon3 :\n if pwh.outer_boundary().size() == 3 :\n obp = pwh.outer_boundary()\n\n # convert back to 3d\n v0 = tpl1i[0].to_3d(obp.vertex(0))\n v1 = tpl1i[0].to_3d(obp.vertex(1))\n v2 = tpl1i[0].to_3d(obp.vertex(2))\n\n v0i = out.add_vertex(v0)\n v1i = out.add_vertex(v1)\n v2i = out.add_vertex(v2)\n\n out.add_face(v0i,v1i,v2i)\n elif pwh.outer_boundary().size() == 4 :\n obp = pwh.outer_boundary()\n\n # convert back to 3d\n v0 = tpl1i[0].to_3d(obp.vertex(0))\n v1 = tpl1i[0].to_3d(obp.vertex(1))\n v2 = tpl1i[0].to_3d(obp.vertex(2))\n v3 = tpl1i[0].to_3d(obp.vertex(3))\n\n v0i = out.add_vertex(v0)\n v1i = out.add_vertex(v1)\n v2i = out.add_vertex(v2)\n v3i = out.add_vertex(v3)\n\n out.add_face(v0i,v1i,v2i)\n out.add_face(v0i,v2i,v3i)\n elif pwh.outer_boundary().size() == 6 :\n obp = pwh.outer_boundary()\n\n # convert back to 3d\n v0 = tpl1i[0].to_3d(obp.vertex(0))\n v1 = tpl1i[0].to_3d(obp.vertex(1))\n v2 = tpl1i[0].to_3d(obp.vertex(2))\n v3 = tpl1i[0].to_3d(obp.vertex(3))\n v4 = tpl1i[0].to_3d(obp.vertex(4))\n v5 = tpl1i[0].to_3d(obp.vertex(5))\n\n\n v0i = out.add_vertex(v0)\n v1i = out.add_vertex(v1)\n v2i = out.add_vertex(v2)\n v3i = out.add_vertex(v3)\n v4i = out.add_vertex(v4)\n v5i = out.add_vertex(v5)\n\n out.add_face(v0i,v1i,v2i)\n out.add_face(v0i,v2i,v3i)\n out.add_face(v0i,v3i,v4i)\n 
out.add_face(v0i,v4i,v5i)\n\n else :\n print(pwh.outer_boundary().size())\n \"\"\"\n\n return c\n\n @classmethod\n def cube(cls, center=[0, 0, 0], radius=[1, 1, 1]):\n \"\"\"\n Construct an axis-aligned solid cuboid. Optional parameters are `center` and\n `radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be\n specified using a single number or a list of three numbers, one for each axis.\n\n Example code::\n\n cube = CSG.cube(\n center=[0, 0, 0],\n radius=1\n )\n \"\"\"\n c = geom.Vector(0, 0, 0)\n r = [1, 1, 1]\n if isinstance(center, list):\n c = geom.Vector(center)\n if isinstance(radius, list):\n r = radius\n else:\n r = [radius, radius, radius]\n\n polygons = [\n geom.Polygon(\n [\n geom.Vertex(\n geom.Vector(\n c.x + r[0] * (2 * bool(i & 1) - 1),\n c.y + r[1] * (2 * bool(i & 2) - 1),\n c.z + r[2] * (2 * bool(i & 4) - 1),\n )\n )\n for i in v[0]\n ]\n )\n for v in [\n [[0, 4, 6, 2], [-1, 0, 0]],\n [[1, 3, 7, 5], [+1, 0, 0]],\n [[0, 1, 5, 4], [0, -1, 0]],\n [[2, 6, 7, 3], [0, +1, 0]],\n [[0, 2, 3, 1], [0, 0, -1]],\n [[4, 5, 7, 6], [0, 0, +1]],\n ]\n ]\n return CSG.fromPolygons(polygons)\n\n def volume(self):\n return Polygon_mesh_processing.volume(self.sm)\n\n def area(self):\n return Polygon_mesh_processing.area(self.sm)\n\n def isNull(self):\n return self.sm.number_of_faces() == 0\n\n def isClosed(self):\n return CGAL.is_closed(self.sm)\n\n def isTriangleMesh(self):\n return CGAL.is_triangle_mesh(self.sm)\n\n def isOutwardOriented(self):\n return CGAL.is_outward_oriented(self.sm)\n\n def info(self):\n vAp = self.toVerticesAndPolygons()\n\n v = vAp[0]\n p = vAp[1]\n n = vAp[2]\n\n minEdge = 1e9\n maxEdge = -1e9\n\n for i, tri in enumerate(p):\n for j, vertInd in enumerate(tri):\n v1 = _np.array(v[j])\n v2 = _np.array(v[(j + 1) % 3])\n dv = v2 - v1\n mdv = _np.sqrt((dv * dv).sum())\n # print(j,(j+1)%3,v1,v2,dv, mdv)\n\n if mdv < minEdge:\n minEdge = mdv\n if mdv > maxEdge:\n maxEdge = mdv\n\n return {\n \"null\": self.isNull(),\n \"closed\": self.isClosed(),\n \"triangle\": self.isTriangleMesh(),\n \"outward\": self.isOutwardOriented(),\n \"volume\": self.volume(),\n \"area\": self.area(),\n \"numberfaces\": self.getNumberPolys(),\n \"numbervertices\": self.getNumberVertices(),\n \"minEdge\": minEdge,\n \"maxEdge\": maxEdge,\n }\n\n\ndef do_intersect(csg1, csg2):\n return Polygon_mesh_processing.do_intersect(csg1.sm, csg2.sm)\n\n\ndef intersecting_meshes(csgList):\n smList = [c.sm for c in csgList]\n print(smList)\n\n\nclass PolygonProcessing:\n @classmethod\n def windingNumber(cls, pgon):\n \"\"\"return the winding number of pgon\n :param pgon: list of points [[x1,y1], [x2,y2], ... ]\n :type pgon: List[List[x1,y1], ...]\n returns: Integer winding number\n \"\"\"\n\n # winding angle\n wa = 0\n\n def mag(v):\n return (v**2).sum() ** 0.5\n\n def norm(v):\n return v / mag(v)\n\n pgon = list(pgon)\n pgon = [[p[0], p[1], 0] for p in pgon]\n pgon = _np.array(pgon)\n\n for pi in range(len(pgon)):\n mpi = pi % len(pgon)\n mpj = (pi + 1) % len(pgon)\n mpk = (pi + 2) % len(pgon)\n d1 = norm(pgon[mpk] - pgon[mpj])\n d0 = norm(pgon[mpj] - pgon[mpi])\n\n xp = _np.cross(d1, d0)\n a = _np.arcsin(mag(xp)) * _np.sign(xp[2])\n wa += a\n\n wa /= 2 * _np.pi\n\n return wa\n\n @classmethod\n def reversePolygon(cls, pgon):\n \"\"\"return reversed polygon\n :param pgon: list of points [[x1,y1], [x2,y2], ... 
]\n :type pgon: List[List[x1,y1], ...]\n returns: List[List[x1,y1], ...]\n \"\"\"\n\n pgon = _np.array(pgon)\n return pgon[::-1]\n\n @classmethod\n def makePolygonFromList(cls, pgon, type=\"\"):\n \"\"\"Convert list of points [[x1,y1], [x2,y2], ... ] to cgal Polygon_2\n\n :param pgon: list of points [[x1,y1], [x2,y2], ... ]\n :type pgon: List[List[x,y], ..]\n :param type: Class of polygon (Polygon_2_EPICK, Polygon_2_EPECK, Partition_traits_2_Polygon_2_EPECK)\n :param type: str\n returns: Polygon_2\n \"\"\"\n\n if type == \"Partition_traits_2_Polygon_2_EPECK\":\n poly2 = Partition_traits_2_Polygon_2.Partition_traits_2_Polygon_2_EPECK()\n elif type == \"Polygon_2_EPECK\":\n poly2 = Polygon_2.Polygon_2_EPECK()\n elif type == \"Polygon_2_EPICK\":\n poly2 = Polygon_2.Polygon_2_EPICK()\n else:\n poly2 = Polygon_2.Polygon_2_EPECK()\n\n for p in pgon:\n poly2.push_back(Point_2.Point_2_EPECK(p[0], p[1]))\n\n return poly2\n\n @classmethod\n def makeListFromPolygon(selfclas, pgon):\n \"\"\"Convert 2D polygon to list of points [[x1,y1], [x2,y2], ... ]\n\n :param pgon: cgal Polygon_2 input\n :type pgon: Polygon_2_EPECK or Polygon_2_EPICK\n returns: [[x1,y1], [x2,y2], ...]\n \"\"\"\n\n polyCoords = []\n for ppi in range(pgon.size()):\n pnt = pgon.vertex(ppi)\n polyCoords.append([pnt.x(), pnt.y()])\n\n return polyCoords\n\n @classmethod\n def decomposePolygon2d(cls, pgon):\n \"\"\"Decompose general 2D polygon (pgon) to convex 2D polygons\n\n :param pgon: list of pgon points (which are lists) [[x1,y1], [x2,y2], ...]\n :type pgon: List(List[2])\n returns: List of polgons [pgon1, pgon2, ...]\n\n \"\"\"\n\n poly2 = Partition_traits_2_Polygon_2.Partition_traits_2_Polygon_2_EPECK()\n\n for p in pgon:\n poly2.push_back(Point_2.Point_2_EPECK(p[0], p[1]))\n\n # pythonHelpers.draw_polygon_2(poly2)\n\n partPoly = Partition_traits_2_Polygon_2.List_Polygon_2_EPECK()\n\n Partition_traits_2_Polygon_2.optimal_convex_partition_2(poly2, partPoly)\n\n partPolyList = []\n\n for pp in partPoly:\n partPolyCoords = []\n for ppi in range(pp.size()):\n pnt = pp.vertex(ppi)\n partPolyCoords.append([pnt.x(), pnt.y()])\n\n # TODO check if needed\n # pnt = pp.vertex(0)\n # partPolyCoords.append([pnt.x(),pnt.y()])\n\n partPolyList.append(partPolyCoords)\n\n # pythonHelpers.draw_polygon_2_list(partPolyList)\n\n return partPolyList\n\n @classmethod\n def decomposePolygon2dWithHoles(cls, pgonOuter, pgonHoles):\n \"\"\"Decompose general 2D polygon with holes (pgon) to convex 2D polygons\n\n :param pgonOuter: list of pgon points (which are lists) [[x1,y1], [x2,y2], ...]\n :type pgon: List(List[2])\n :param pgonHoles: List of polgons [pgon1, pgon2, ...]\n returns: List of polgons [pgon1, pgon2, ...]\n \"\"\"\n\n poly2Boundary = cls.makePolygonFromList(pgonOuter)\n\n poly2WithHoles = Polygon_with_holes_2.Polygon_with_holes_2_EPECK(poly2Boundary)\n\n for hole in pgonHoles:\n holePoly = cls.makePolygonFromList(hole)\n poly2WithHoles.add_hole(holePoly)\n\n decomp = CGAL.PolygonWithHolesConvexDecomposition_2_wrapped(poly2WithHoles)\n\n decomPolyListList = []\n for decompPoly in decomp:\n decomPolyList = cls.makeListFromPolygon(decompPoly)\n decomPolyListList.append(decomPolyList)\n\n return decomPolyListList\n\n @classmethod\n def triangulatePolygon2d(cls, pgon):\n \"\"\"Triangulate general 2D polygon\n\n :param pgonOuter: list of pgon points (which are lists) [[x1,y1], [x2,y2], ...]\n :type pgon: List(List[2])\n returns: List of triangles [ [[x1,y1], [x2,y2], [x3,y3]], [[x1,y1], [x2,y2], [x3,y3]], ...]\n \"\"\"\n\n # first decompose as 
triangulation only works on convex hulls\n partPolyList = cls.decomposePolygon2d(pgon)\n triList = []\n\n # print('triangulatePolygon2d ndecom={}'.format(len(partPolyList)))\n\n # Loop over convex polygons and triangulate\n for partPoly in partPolyList:\n cdt = CGAL.CDT2_EPECK()\n\n for vert in partPoly:\n cdt.push_back(Point_2.Point_2_EPECK(vert[0], vert[1]))\n\n for f in cdt.all_face_handles():\n if not cdt.is_infinite(f):\n t = cdt.triangle(f)\n tvl = []\n for i in [0, 1, 2]:\n v = [t.vertex(i).x(), t.vertex(i).y()]\n tvl.append(v)\n triList.append(tvl)\n\n return triList\n\n\nclass PolyhedronProcessing:\n @classmethod\n def surfaceMesh_to_Polyhedron(cls, sm):\n vf = Surface_mesh.toVerticesAndPolygons(sm)\n\n p = Polyhedron_3.Polyhedron_3_EPECK()\n p.buildFromVertsAndFaces(vf[0], vf[1])\n\n return p\n\n @classmethod\n def nefPolyhedron_to_convexPolyhedra(cls, np):\n CGAL.convex_decomposition_3(np)\n vi = np.volume_begin()\n ve = np.volume_end()\n pList = []\n while vi != ve:\n si = vi.shells_begin()\n se = vi.shells_end()\n if vi.mark():\n while si != se:\n p = Polyhedron_3.Polyhedron_3_EPECK()\n np.convert_inner_shell_to_polyhedron(si, p)\n pList.append(p)\n si.next()\n vi.next()\n\n return pList\n\n @classmethod\n def polyhedron_to_numpyArrayPlanes(cls, p):\n return _np.array(p.convertToPlanes())\n\n # Following does not work, maybe because not triangles\n planes = []\n fi = p.facets_begin()\n fe = p.facets_end()\n while fi != fe:\n plane = fi.plane()\n # print(plane)\n # point = plane.point()\n print(plane.a(), plane.b(), plane.c(), plane.d())\n orthvec = plane.orthogonal_vector()\n\n # print(plane.point(), plane.orthogonal_vector())\n # planes.append([point.x(),point.y(),point.z(), orthvec.x(), orthvec.y(), orthvec.z()])\n fi.next()\n\n return _np.array(planes)\n","repo_name":"g4edge/pyg4ometry","sub_path":"src/pyg4ometry/pycgal/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":21067,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"127244139","text":"'''\nPractice asynchronous code\n\nCreate a separate asynchronous code to calculate Fibonacci, factorial, squares\nand cubic for an input n. Schedule the execution of this code using\nasyncio.gather for a list of integers from 1 to 10. You need to get four lists\nof results from corresponding functions.\n\nRewrite the code to use simple functions to get the same results but using a\nmultiprocessing library. 
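`CSG.rotate` in the pyg4ometry record above assembles an axis-angle rotation matrix element by element (with a negated angle, i.e. a clockwise convention). The same matrix comes from the Rodrigues formula R = cos θ·I + sin θ·[k]ₓ + (1 − cos θ)·kkᵀ; a standalone numpy check of mine using the standard right-handed sign, so it differs from the class by the sign of the angle:

```python
import numpy as np

def axis_angle_matrix(axis, angle_deg):
    """Rodrigues rotation matrix for a rotation of angle_deg about axis.

    Standalone restatement of what CSG.rotate builds element by element:
    R = cos(t) I + sin(t) [k]_x + (1 - cos(t)) k k^T.
    """
    k = np.asarray(axis, dtype=float)
    k /= np.linalg.norm(k)
    t = np.radians(angle_deg)
    K = np.array([[0.0, -k[2], k[1]],   # cross-product (skew) matrix [k]_x
                  [k[2], 0.0, -k[0]],
                  [-k[1], k[0], 0.0]])
    return np.cos(t) * np.eye(3) + np.sin(t) * K + (1 - np.cos(t)) * np.outer(k, k)

R = axis_angle_matrix([0, 0, 1], 90)
print(np.round(R @ np.array([1, 0, 0]), 6))  # 90 deg about z maps x -> y: [0. 1. 0.]
```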
Time the execution of both realizations, explore the results,\nwhat realization is more effective, why did you get a result like this.\n'''\n\nimport asyncio\nfrom multiprocessing import Process\nimport time\n\nasync def fibonacci(n):\n index = n - 2\n fib1 = fib2 = 1\n\n while index > 0:\n fib1, fib2 = fib2, fib1 + fib2\n index -= 1\n\n return fib2\n\n\nasync def factorial(n):\n factorial = 1\n while n > 1:\n factorial *= n\n n -= 1\n\n return factorial\n\n\nasync def squares(n):\n return n**2\n\n\nasync def cubic(n):\n return n**3\n\n\ndef fibonacci_math(n):\n result = []\n n1 = 0\n n2 = 1\n for i in range(1, n + 1):\n result.append(n1)\n n1, n2 = n2, n1 + n2\n return result\n\n\ndef factorial_math(n):\n result = []\n a = 1\n for i in range(1, n + 1):\n a *= i\n result.append(a)\n\n return result\n\n\ndef square_math(n):\n result = []\n for i in range(1, n + 1):\n a = i ** 2\n result.append(a)\n\n return result\n\n\ndef cubic_math(n):\n result = []\n for i in range(1, n + 1):\n a = i ** 3\n result.append(a)\n\n return result\n\n\nasync def gather_all(n):\n await asyncio.gather(\n factorial(n),\n fibonacci(n),\n squares(n),\n cubic(n))\n\n\nif __name__ == '__main__':\n math_functions = [cubic_math, square_math, fibonacci_math, factorial_math]\n n = 10\n \n start_time = time.time()\n loop = asyncio.get_event_loop()\n for i in range(n):\n asyncio.run(gather_all(i))\n end_time = time.time()\n print(f'Total time: {(end_time - start_time):.5f}ms.')\n\n \n start_time = time.time()\n for func in math_functions:\n new_process = Process(target=func, args=(n, ))\n new_process.start()\n new_process.join()\n end_time = time.time()\n print(f'Total time: {(end_time - start_time):.5f}ms.')\n","repo_name":"mila-orishchuk/pythoncourse","sub_path":"Lesson35/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33748703957","text":"# import some_maths\nimport numpy\nfrom scipy import stats\n\nordinal = lambda n: \"%d%s\" % (n, \"tsnrhtdd\"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])\n\ndata_length = 10\neffects = [12, -5, 77]\nvar_x1 = numpy.random.normal(0, 1, data_length)\nvar_x2 = numpy.random.normal(0, 1, data_length)\nerror_term = numpy.random.normal(0, 5, data_length)\nval_y = effects[0] + (effects[1] * var_x1) + (effects[2] * var_x2) + error_term\nintercept = ([1] * data_length)\ndesign_matrix = numpy.transpose([intercept, var_x1, var_x2])\ny_matrix = numpy.transpose([val_y])\n\n\n# some_maths.matrix_multiplication(some_maths.transpose(design_matrix), y_matrix)\n# print(type(design_matrix))\n\n\ndef ols(x, y):\n covariance = numpy.linalg.inv(numpy.matmul(numpy.transpose(x), x))\n projection = numpy.matmul(numpy.transpose(x), y)\n return numpy.matmul(covariance, projection)\n\n\ntest_beta = ols(design_matrix, y_matrix)\n\n\n# print([design_matrix[2]])\n# print(type(numpy.mean(val_y) + 0))\n# print(some_maths.matrix_multiplication(some_maths.transpose(beta), some_maths.transpose([design_matrix[2]])))\n# print(numpy.matmul(some_maths.transpose(beta), some_maths.transpose([design_matrix[2]])))\n# print(numpy.matmul(numpy.transpose(beta), numpy.transpose([design_matrix[2]])))\n# print([y_matrix[1]] - numpy.matmul(numpy.transpose(beta), numpy.transpose([design_matrix[2]])))\n\n\ndef statistics(x, y):\n beta = ols(x, y)\n predictors = len(beta)\n sample_size = len(y)\n y_hat = numpy.mean(y)\n sum_squares_residual = 0\n sum_squares_total = 0\n sum_squares_corrected = 0\n for 
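In the task1.py record above, the four coroutines contain no `await`, so `asyncio.gather` runs them back-to-back on one thread — the exercise's finding that multiprocessing wins for this CPU-bound math follows directly from that. The record also discards `gather`'s return value, although the exercise asks for four lists of results; a minimal sketch of capturing it (the `square` coroutine here is illustrative):

```python
import asyncio

async def square(n):
    return n ** 2  # pure CPU work: no await point, so no real concurrency

async def gather_all(n):
    # gather preserves argument order, so results line up with the calls
    return await asyncio.gather(square(n), square(n + 1), square(n + 2))

print(asyncio.run(gather_all(3)))  # -> [9, 16, 25]
```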
i in range(0, data_length):\n sum_squares_residual += pow(y[i] - numpy.matmul(numpy.transpose(beta), numpy.transpose([x[i]])), 2)\n sum_squares_total += pow(y[i] - y_hat, 2)\n sum_squares_corrected += pow(numpy.matmul(numpy.transpose(beta), numpy.transpose([x[i]])) - y_hat, 2)\n coef_of_det = 1 - sum_squares_residual / sum_squares_total\n helpful_fraction = (sample_size - 1) / (sample_size - predictors - 1)\n adj_coef_det = 1 - ((1 - coef_of_det) * helpful_fraction)\n model_degrees_freedom = predictors - 1 # df1\n error_degrees_freedom = sample_size - predictors # df2\n mean_square_model = sum_squares_corrected / model_degrees_freedom\n mean_square_error = sum_squares_residual / error_degrees_freedom\n f_statistic = mean_square_model / mean_square_error # following a F(p-1,n-p)\n p_value_f_test = 1 - stats.f.cdf(f_statistic, model_degrees_freedom, error_degrees_freedom)\n covariance_diagonal = numpy.diagonal(numpy.linalg.inv(numpy.matmul(numpy.transpose(x), x)))\n standard_error = pow(covariance_diagonal, 0.5)\n t_vector = numpy.transpose(beta) / standard_error\n p_vector_t_test = 1 - stats.t.cdf(numpy.abs(t_vector), sample_size - predictors)\n return [coef_of_det, adj_coef_det, f_statistic, model_degrees_freedom, error_degrees_freedom, p_value_f_test, beta,\n standard_error, t_vector, p_vector_t_test]\n\n\nresults = statistics(design_matrix, y_matrix)\n\n\ndef summary(stat_output):\n coeff_line = f\"The R^2 of the regression was {numpy.round(stat_output[0][0][0], 3)}, \" \\\n f\"adjusted {numpy.round(stat_output[1][0][0], 3)}.\"\n f_line = f\"The overall F-Test gave an F-Statistc of {numpy.round(stat_output[2][0][0], 3)} on\" \\\n f\" {stat_output[3]} and {stat_output[4]} degrees of freedom, with a p-value \" \\\n f\"of {numpy.format_float_scientific(stat_output[5][0][0], 3)}.\"\n predictors = stat_output[3] + 1\n string_list = [\"a\"] * predictors\n for i in range(0, predictors):\n string_list[\n i] = f\"The {ordinal(i + 1)} predictor had a coefficient of {round(stat_output[6][i][0], 3)}. 
Together \" \\\n f\"with a standard error of {round(stat_output[7][i], 3)}, this gave a t-value \" \\\n f\"of {round(stat_output[8][0][i], 3)} and a one-sided p-value \" \\\n f\"of {numpy.format_float_scientific(stat_output[9][0][i], 3)}\"\n\n print(coeff_line)\n print(f_line)\n for i in range(0, predictors):\n print(string_list[i])\n\n\nsummary(results)\n","repo_name":"darkshoxx/PYTHON","sub_path":"21_02-02_Full_Stat.py","file_name":"21_02-02_Full_Stat.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38913105842","text":"from modsandbox.models import User, Post, Log, Config\nimport re\n\n\ndef update_log(log_item, code):\n word_reg = r\"\\'([^']*)\\'\"\n check_reg = r\"(.*)\\:\"\n mod_reg = r\"\\((.*)\\)\"\n sections = [section for section in re.split(\"^---\", code, flags=re.MULTILINE) if section]\n\n string_count = len(re.findall(word_reg, code))\n check_count = len(re.findall(check_reg, code))\n rule_count = len(sections)\n mod_count = len(re.findall(mod_reg, code))\n\n log_item.string_count = string_count\n log_item.check_count = check_count\n log_item.rule_count = rule_count\n log_item.mod_count = mod_count\n\n log_item.save()\n\n\nautosave_logs = Log.objects.filter(info='autosave config')\napply_config_logs = Log.objects.filter(info='apply config')\nsubmit_config_logs = Log.objects.filter(info='submit config')\nprint('autosave number: ', autosave_logs.count())\nprint('apply_config number: ', apply_config_logs.count())\nprint('submit_config number: ', submit_config_logs.count())\n\nfor (count, log) in enumerate(apply_config_logs):\n print('apply_config_count', count)\n if log.config is not None and log.test_tp is not None:\n update_log(log, log.config.code)\n\nfor (count, log) in enumerate(autosave_logs):\n print('auto_save_count', count)\n if log.content is not None and log.test_tp is not None:\n update_log(log, log.content)\n\nfor (count, log) in enumerate(submit_config_logs):\n print('submit_config_count', count)\n if log.config is not None and log.test_tp is not None:\n update_log(log, log.config.code)\n","repo_name":"wooogler/reddit_virtual_sandbox","sub_path":"back/process_code.py","file_name":"process_code.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"520885145","text":"from datetime import datetime\nfrom src.infra.database import DBConnectionHandler\nfrom src.auth.models import ValidationCode\nfrom src.users.models import User\n\nclass AuthRepository:\n def __init__(self) -> None:\n pass\n\n def insert(self, valid_model: ValidationCode):\n with DBConnectionHandler() as database:\n try:\n email = valid_model.email\n code = valid_model.code\n updated_at = valid_model.updated_at\n\n query = '''insert into codes (email, code, updated_at) values ('{}', '{}', '{}')'''\n database.cursor.execute(query.format(email, code, updated_at))\n database.connection.commit()\n except:\n database.connection.rollback()\n raise\n\n def read(self, email):\n with DBConnectionHandler() as database:\n try:\n query = '''select * from codes where email = %s'''\n database.cursor.execute(query, (email,))\n data = database.cursor.fetchone()\n if data:\n return ValidationCode(email=data[0], code=data[1], updated_at=data[2])\n return None\n except:\n database.connection.rollback()\n raise\n\n def update_code(self, user: User, code: int):\n email = user.email\n time_now = datetime.now()\n\n with 
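The 21_02-02_Full_Stat.py record above solves the normal equations β = (XᵀX)⁻¹Xᵀy directly in `ols`. A quick cross-check of mine against `numpy.linalg.lstsq`, which goes through a more numerically stable factorization; the generated data mirrors the record's effects [12, −5, 77], but the seed and noise draw are assumptions:

```python
import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(10), rng.normal(size=10), rng.normal(size=10)])
y = 12 - 5 * X[:, 1] + 77 * X[:, 2] + rng.normal(scale=5, size=10)

# Normal equations, as in ols() above: beta = (X^T X)^{-1} X^T y
beta_ne = np.linalg.inv(X.T @ X) @ X.T @ y
# Least-squares solver via a stable factorization:
beta_ls, *_ = np.linalg.lstsq(X, y, rcond=None)
print(np.allclose(beta_ne, beta_ls))  # -> True for well-conditioned X
```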
DBConnectionHandler() as database:\n try:\n query = '''update codes set code=%s, updated_at=%s where email=%s'''\n database.cursor.execute(query, (code, time_now, email))\n database.connection.commit()\n return None\n except:\n database.connection.rollback()\n raise\n","repo_name":"jpfcabral/user-registration-api","sub_path":"src/auth/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42142674108","text":"from django.shortcuts import render, redirect, get_object_or_404, HttpResponse\nfrom reportlab.pdfgen import canvas\nfrom .models import User\nfrom .models import Articles\nfrom .forms import LoginForm, RegisterForm, AddArticleform, GuestinfoForm\n\n\ndef login(request):\n if request.session.get('email'):\n return redirect('submit')\n form = LoginForm(request.POST or None)\n if form.is_valid():\n email = form.cleaned_data.get('email')\n password = form.cleaned_data.get('password')\n\n abc = User.objects.filter(email=email, password=password)\n\n if abc:\n request.session['email'] = email\n return redirect('submit')\n else:\n print('not match')\n\n context = {\n 'form': form\n }\n return render(request, 'Users/login.html', context)\n\n\ndef signup(request):\n form = RegisterForm(request.POST or None)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'Users/signup.html', context)\n\n\ndef submit(request):\n if not request.session.get('email'):\n return redirect('login')\n se_email = request.session.get('email')\n\n login_user = User.objects.get(email=se_email)\n\n form = AddArticleform(request.POST or None, request.FILES or None)\n if form.is_valid():\n Section = form.cleaned_data.get('Section')\n SubSection = form.cleaned_data.get('SubSection')\n title = form.cleaned_data.get('title')\n article = form.cleaned_data.get('article')\n\n submissions = Articles(user=login_user, Section=Section, SubSection=SubSection, title=title, article=article)\n submissions.save()\n\n context = {\n 'form': form,\n 'login_user': login_user,\n\n }\n return render(request, 'Users/submit.html', context)\n\n\ndef logout(request):\n del request.session['email']\n return redirect('allarticles')\n\n\ndef all_articles(request):\n user = None\n\n posts = Articles.objects\n\n if request.session.get('email'):\n user = request.session.get('email')\n\n context = {\n 'posts': posts,\n 'user': user\n }\n return render(request, 'Users/wikipage.html', context)\n\n\ndef detail(request, post_id):\n detailarticle = get_object_or_404(Articles, pk=post_id)\n return render(request, 'Users/wikipage.html', {'post': detailarticle})\n\n\ndef Guestinfo(request):\n form = GuestinfoForm(request.POST or None)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'Users/Guestinfo.html', context)\n","repo_name":"shubhangisrivastava06/WIKIPAGE-Articles-assignment","sub_path":"Users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28015291761","text":"import tkinter as tk\nfrom tkinter import messagebox as msg\n\n# dictionary with keys ints nums and values string equals\nNUMBERS = {\n 1: ('один', 'одна'),\n 2: ('два', 'две'),\n 3: 'три',\n 4: 'четыре',\n 5: 'пять',\n 6: 'шесть',\n 7: 'семь',\n 8: 'восем',\n 9: 'девять',\n 10: 'десять',\n 11: 'одинадцать',\n 12: 'двенадцать',\n 13: 
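In the repository.py record above, `insert` builds its SQL with `str.format`, which breaks on values containing quotes and is injectable, while `read` and `update_code` already bind parameters. A hedged rewrite of `insert` in the same parameterized style, assuming the record's `DBConnectionHandler` context manager:

```python
def insert(self, valid_model):
    # Parameterized variant: pymysql escapes the bound values, so quotes
    # in the email or code cannot alter the statement.
    with DBConnectionHandler() as database:
        try:
            query = "insert into codes (email, code, updated_at) values (%s, %s, %s)"
            database.cursor.execute(
                query, (valid_model.email, valid_model.code, valid_model.updated_at)
            )
            database.connection.commit()
        except Exception:
            database.connection.rollback()
            raise
```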
'тринадцать',\n 14: 'четырнадцать',\n 15: 'пятнадцать',\n 16: 'шестнадцать',\n 17: 'семнадцать',\n 18: 'восемнадцать',\n 19: 'девятнадцать',\n 20: 'двадцать',\n 30: 'тридцать',\n 40: 'сорок',\n 50: 'пятьдесят',\n 60: 'шестьдесят',\n 70: 'семьдесят',\n 80: 'восемьдесят',\n 90: 'девяносто',\n 100: 'сто',\n 200: 'двести',\n 300: 'триста',\n 400: 'четыреста',\n 500: 'пятсот',\n 600: 'шестьсот',\n 700: 'семьсот',\n 800: 'восемьсот',\n 900: 'девятьсот',\n}\n\n# store cases for thousands name\nTHOUSANDS = ('тысяча', 'тысячи', 'тысяч')\n\n# store cases for millions name\nMILLIONS = ('миллион', 'миллиона', 'миллионов')\n\n\nclass NumToString:\n \"\"\"\n Create a class NumToString\n \"\"\"\n\n def __init__(self, num):\n self.num = num\n\n # method to return string name of a number\n @staticmethod\n def get_name(val, case=None):\n\n div = 100\n string_lst = []\n\n while val:\n cur = val - (val % div)\n if cur <= 0:\n div /= 10\n cur = val - (val % div)\n if case == 'thousand':\n if cur > 0 and val > 19:\n string_lst.append(NUMBERS[cur][1] if cur <= 2 else NUMBERS[cur])\n elif cur > 0 and val < 20:\n string_lst.append(NUMBERS[val][1] if cur <= 2 else NUMBERS[val])\n break\n else:\n if cur > 0 and val > 19:\n string_lst.append(NUMBERS[cur][0] if cur <= 2 else NUMBERS[cur])\n elif cur > 0 and val < 20:\n string_lst.append(NUMBERS[val][0] if cur <= 2 else NUMBERS[val])\n break\n else:\n string_lst.append(NUMBERS[cur])\n\n val = val % div\n\n return ' '.join(string_lst)\n\n # method to get proper name for millions and thousands\n @property\n def get_proper_name(self):\n\n val = abs(self.num)\n thousands = str(int(val // 1000))\n millions = str(int(val // 1000000))\n\n if thousands[-1] == '1':\n name_t = THOUSANDS[0]\n elif thousands[-2:] not in ['12', '13', '14'] and thousands[-1] in '234':\n name_t = THOUSANDS[1]\n else:\n name_t = THOUSANDS[2]\n\n if int(millions) > 0:\n\n thousands = str(int(val % 1000000 // 1000))\n\n if millions[-1] == '1':\n name_m = MILLIONS[0]\n elif millions[-2:] not in ['12', '13', '14'] and millions[-1] in '234':\n name_m = MILLIONS[1]\n else:\n name_m = MILLIONS[2]\n\n return (name_t if int(thousands) > 0 else ''), name_m\n return name_t if int(thousands) > 0 else ''\n\n # final method to stick all of the stings to create the name of a number\n def get_string(self):\n val = abs(self.num)\n string_lst = []\n\n if self.num < 0:\n string_lst.append('минус')\n\n if val < 1000000:\n thousands = int(val // 1000)\n numbers = int(val % 1000)\n else:\n millions = int(val // 1000000)\n thousands = int(val % 1000000 // 1000)\n numbers = int(val % 1000000 % 1000)\n\n string_lst.extend([self.get_name(millions), self.get_proper_name[1],\n self.get_name(thousands, 'thousand'), self.get_proper_name[0], self.get_name(numbers)])\n\n return ' '.join(string_lst)\n\n if val == 0:\n return 'Ноль'\n elif thousands > 0:\n string_lst.extend([self.get_name(thousands, 'thousand'), self.get_proper_name, self.get_name(numbers)])\n else:\n string_lst.append(self.get_name(numbers))\n\n return ' '.join(string_lst)\n\n\nclass GUI:\n \"\"\"\n Create class GUI to initialize interface representation\n \"\"\"\n\n def __init__(self, root):\n self.root = root\n self.inputs = tk.Entry(self.root, width=30, justify='center', font=20, bd=5)\n self.inputs.pack()\n self.btn_get_string = tk.Button(text='Get info', width=15, height=3, command=self.instructions)\n self.btn_get_string.pack()\n self.btn_get_string = tk.Button(text='Get string', width=15, height=3, command=self.show_string)\n self.btn_get_string.pack()\n 
self.results = tk.Label(self.root, text='string:', height=5, font=5)\n self.results.pack()\n\n # method for button click representation and makes a string name of a given number\n def show_string(self):\n try:\n nts = NumToString(int(self.inputs.get()))\n text = nts.get_string()\n self.results.config(text=text)\n except ValueError:\n self.error_msg_value()\n except KeyError:\n self.error_msg_key()\n\n # error message representation in GUI\n @staticmethod\n def error_msg_value():\n msg.showerror('Error', 'Use integers')\n\n # error message representation in GUI\n @staticmethod\n def error_msg_key():\n msg.showerror('Error', 'this program works till one billion')\n\n # show message with instructions for our program\n @staticmethod\n def instructions():\n msg.showinfo('Info', 'This program can convert you integer input '\n 'to string output, you can use negative numbers as well')\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n gui = GUI(root=root)\n root.mainloop()\n","repo_name":"DupliiStanislav/eight_tasks","sub_path":"task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":6146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19770883060","text":"from django.urls import path\n\nfrom logic import views\n\nurlpatterns = [\n path('index/', views.index, name=\"index\"),\n path('', views.index, name=\"landing\"),\n path('login_service/', views.login_service, name=\"login\"),\n path('logout_service/', views.logout_service, name='logout'),\n path('signup_service/', views.signup_service, name='signup'),\n path('counter_service/', views.counter_service, name='counter'),\n path('create_game_service/', views.create_game_service,\n name='create_game'),\n # path('join_game_service/', views.join_game_service, name='join_game'),\n path('select_game_service//', views.select_game_service,\n name='select_game'),\n path('select_game_service/', views.select_game_service,\n name='select_game'),\n path('show_game_service/', views.show_game_service, name='show_game'),\n path('move_service/', views.move_service, name='move'),\n path('ajax_make_move//', views.ajax_make_move,\n name=\"ajax_make_move\"),\n path('get_possible_moves_from_position/',\n views.get_possible_moves_from_position, name='get_possible_moves'),\n path('ajax_is_it_my_turn', views.ajax_is_it_my_turn,\n name='ajax_is_it_my_turn'),\n path('how_to_play', views.how_to_play, name='manual'),\n path('replay_move//', views.replay_move,\n name='replay_move'),\n path('next_move/', views.next_move, name='next_move')\n]\n","repo_name":"dieortin/PSI_cat_mouse_final","sub_path":"logic/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74794897492","text":"import math\nimport random\n\nimport utils\n\n\n\n\ndef Minmax(board, depth, alpha, beta, maxPlayer):\n freeSlots = utils.FreeSlots(board)\n endGame = utils.EndGame(board)\n if depth == 0 or endGame:\n if endGame:\n if utils.CheckWin(board, utils.AI_SYMBOL):\n return (None, 999999999)\n elif utils.CheckWin(board, utils.PLAYER_SYMBOL):\n return (None, -999999999)\n else:\n return(None, 0)\n\n else:\n return (None, utils.PositionScore(board, utils.AI_SYMBOL))\n\n if maxPlayer:\n value = -math.inf\n column = random.choice(freeSlots)\n for col in freeSlots:\n row = utils.FindRow(board, col)\n help = board.copy()\n utils.Move(help, row, col, utils.AI_SYMBOL)\n newScore = Minmax(help, depth - 1, alpha, 
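The task_5.py record above converts integers to their Russian names; the converter itself is independent of the tkinter GUI and can be exercised headlessly. A short usage sketch, assuming the `NumToString` class above is in scope:

```python
nts = NumToString(2021)
# 2021 -> "две тысячи двадцать один": the feminine form "две" and the
# "тысячи" case are both selected by get_proper_name.
print(nts.get_string())
```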
beta, False)[1]\n if newScore > value:\n value = newScore\n column = col\n alpha = max(alpha, value)\n if alpha >= beta:\n break\n return [column, value]\n\n else:\n value = math.inf\n column = random.choice(freeSlots)\n for col in freeSlots:\n row = utils.FindRow(board, col)\n help = board.copy()\n utils.Move(help, row, col, utils.PLAYER_SYMBOL)\n newScore = Minmax(help, depth-1, alpha, beta, True)[1]\n if newScore < value:\n value = newScore\n column = col\n beta = min(beta, value)\n if(alpha >= beta):\n break\n return [column, value]\n","repo_name":"dam1508/Connect4-minmax","sub_path":"minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"75338148693","text":"import json\nimport os\nimport sys\n\n_CONFIG = {\n \"server\": \"irc.quakenet.org\",\n \"port\": 6667,\n \"prefixes\": \".\",\n \"channel\": \"#channel\",\n \"nick\": \"basebot\",\n \"owners\": [\"owner\"]\n}\n\n_DEFAULT_PATH = \"~/.basebot/\"\n\n\ndef try_create(path):\n print(\"Trying to create a config.json in \" + path)\n\n try:\n configFile = open(path + \"/config.json\", \"w\")\n except:\n print(\"Unable to write to config.json in \" + path)\n return False\n\n configFile.write(json.dumps(_CONFIG, indent=4))\n print(\"Wrote default config to config.json in \" + path)\n return True\n\n\ndef try_basepath(path):\n fullPath = os.path.abspath(os.path.expanduser(path))\n print(\"Trying to load config from\" + fullPath)\n\n try:\n configFile = open(fullPath + \"/config.json\")\n except FileNotFoundError:\n if try_create(fullPath):\n configFile = open(fullPath + \"/config.json\")\n else:\n return None\n except PermissionError:\n if fullPath == os.path.abspath(os.path.expanduser(_DEFAULT_PATH)):\n print(\"Cannot read from \" + fullPath + \", exiting\")\n return None\n else:\n return try_basepath(_DEFAULT_PATH)\n\n try:\n config = json.loads(configFile.read())\n config[\"path\"] = fullPath\n print(\"Config loaded from \" + fullPath)\n return config\n except:\n print(\"Invalid JSON in config file\")\n return None\n\n\ndef load_config():\n if len(sys.argv) < 2:\n print(\"No runtime directory specified\")\n tryPath = _DEFAULT_PATH\n else:\n tryPath = sys.argv[1]\n\n return try_basepath(tryPath)\n","repo_name":"anthonynguyen/basebot","sub_path":"basebot/configloader.py","file_name":"configloader.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"69983946773","text":"N, S = map(int, input(\"\").split(\" \"))\r\na = []\r\nfor l in range(N):\r\n row = []\r\n for j in range(N):\r\n row.append(0)\r\n a.append(row)\r\n\r\nfor k in range(N):\r\n for t in range(k+1):\r\n a[t][k] = S\r\n S += 1\r\n if S == 10: S = 1\r\n\r\nfor line in a:\r\n for char in line:\r\n if char != 0: print(f\"{char}\", end = \" \")\r\n else: print(' ', end = \" \")\r\n print('')\r\n","repo_name":"someguy599/AlphaStarBronzeA","sub_path":"Class 9/Generating Triangles/generatingtriangles.py","file_name":"generatingtriangles.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31486879398","text":"import datetime\nimport time\n\nimport pymysql\nimport requests\nimport re\n\nfrom conf import Conf\nfrom mylogger import Logger\n\n\n# 中图分类查询\n# 营销分类查询\nclass CategoryServer():\n # 授权链接\n auth_url = ''\n _logger = Logger().getLogger()\n proxies = 
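The minmax.py record above implements depth-limited minimax with alpha-beta pruning over a Connect-4 `utils` module that is not part of the record. The pruning skeleton in isolation, over an explicit game tree (the tree and its leaf values are illustrative):

```python
import math

def alphabeta(node, depth, alpha, beta, maximizing):
    """Plain alpha-beta over a nested-list game tree.

    Same pruning structure as Minmax above, minus the Connect-4 specifics:
    leaves are ints, internal nodes are lists of children.
    """
    if depth == 0 or isinstance(node, int):
        return node
    if maximizing:
        value = -math.inf
        for child in node:
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cutoff: the minimizer never allows this branch
        return value
    value = math.inf
    for child in node:
        value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, value)
        if alpha >= beta:
            break  # alpha cutoff
    return value

tree = [[3, 5], [6, [9, 1]], [1, 2]]
print(alphabeta(tree, 4, -math.inf, math.inf, True))  # -> 6
```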
{\n \"http\": '' # 代理ip\n }\n\n def __init__(self):\n # self.reload_authURL()\n config = Conf.config\n #初始化数据库连接\n self.db = pymysql.connect(host=config['mysql']['host'], port=config['mysql']['port'],user=config['mysql']['username'],\n passwd=config['mysql']['password'], db=config['mysql']['dbname_cate'])\n self.cursor = self.db.cursor()\n self.db_sale = pymysql.connect(host=config['mysql']['host'], port=config['mysql']['port'],user=config['mysql']['username'],\n passwd=config['mysql']['password'], db=config['mysql']['dbname_sale_cate'])\n self.cursor_sale = self.db_sale.cursor()\n\n\n def reload_authURL(self):\n \"\"\"\n 更换中图网站的授权链接\n :return:\n \"\"\"\n try:\n proxy_ip = requests.get('http://api.ip.data5u.com/dynamic/get.html?order=f6d9a18f02f520f2aaac6b249fd8689e').content.decode().strip()\n self.proxies['http'] = proxy_ip\n url = 'http://opac.nlc.cn/F?RN=989462048'\n response = requests.get(url,timeout=20,proxies=self.proxies)\n html = response.text\n self.auth_url = re.findall('tmp=\"([^\"]+)\"',html)[0]\n except:\n self._logger.error('更换中图授权链接的时候出错')\n self.auth_url = 'http://opac.nlc.cn:80/F/IYKXX91A5NCBPEQP1DQHLF471L8ANIEHXUMSUTI2HLRRXI77MF-10964'\n\n\n\n def query_cate_server(self,isbn):\n \"\"\"\n 中图查询入口:先查book_isbn_cate表,有则return,无则再查中图网站,查到的中图分类再存进mysql\n :param isbn:\n :return:\n \"\"\"\n # 先查询mysql是否有此isbn\n cate_code = self.query_cate_mysql(isbn)\n if cate_code:\n return cate_code\n # 更换授权链接\n try:\n self.reload_authURL()\n except Exception as e:\n self._logger.error(e)\n url = self.auth_url+'?func=find-b&find_code=ISB&request=%s&local_base=NLC01&filter_code_1=WLN&filter_request_1=&filter_code_2=WYR&filter_request_2=&filter_code_3=WYR&filter_request_3=&filter_code_4=WFM&filter_request_4=&filter_code_5=WSL&filter_request_5=' %isbn\n try:\n # 请求中图网站获取isbn对应的网页,进行解析\n response = requests.get(url,timeout=10,proxies=self.proxies)\n html = response.text\n except Exception as e:\n self._logger.error(e)\n html = ''\n cate_code = re.findall('CALL-NO:\\s*?([^\\r\\n]*)',html)\n if not cate_code :\n self._logger.info('中图服务器查询查无此isbn:' + isbn)\n return ''\n cate_code = cate_code[0].strip()\n if not cate_code:\n self._logger.info('中图服务器查询查无此isbn:' + isbn)\n return ''\n self._logger.info('中图服务器查询========>isbn:'+isbn+' 分类为:'+cate_code)\n # 往数据库中插入新的中图分类\n self.insert_cate_mysql(isbn,cate_code)\n return cate_code\n\n\n def query_cate_mysql(self, isbn):\n \"\"\"\n 从mysql中查询中图分类\n :param isbn:\n :return:\n \"\"\"\n sql = 'select category from book_isbn_cate where isbn = \"%s\" ' %isbn\n self.cursor.execute(sql)\n result = self.cursor.fetchone()\n if not result:\n self._logger.info('中图数据库查无此isbn:' + isbn +'转为中图服务器查询')\n return None\n self._logger.info('中图数据库查询========>isbn:' + isbn + ' 分类为:' + result[0])\n return result[0]\n\n def insert_cate_mysql(self,isbn,cate_code):\n \"\"\"\n 往数据库中插入中图分类\n :param isbn:\n :param cate_code: 中图分类号\n :return:\n \"\"\"\n sql = 'insert into book_isbn_cate(isbn,category,savetime) values(%s,%s,%s)'\n\n now = datetime.datetime.now()\n params = (isbn,cate_code,now)\n self.cursor.execute(sql,params)\n self.db.commit()\n pass\n\n def query_sale_category(self,salecategory_name):\n \"\"\"\n 从mysql中查询营销分类\n :param salecategory_name:\n :return:\n \"\"\"\n sql = 'select id from book_category_cate where name like \"%'+salecategory_name+'%\"'\n self.cursor_sale.execute(sql)\n result = self.cursor_sale.fetchone()\n if not result:\n self._logger.info('查无此营销分类========>salecategory_name:'+salecategory_name)\n return ''\n self._logger.info('查询营销分类========>salecategory_name:' + 
salecategory_name + ' ID为:' + result[0])\n return result[0]\n\n\n\nif __name__ == '__main__':\n cate = CategoryServer()\n # isbn = '978754842791'\n # cate_code = cate.query_cate_mysql(isbn)\n # # 如果结果集为空,则去中图服务器查询\n # if not cate_code:\n # # 查询到cate_code,并往数据库中添加此分类\n # cate_code = cate.query_cate_server(isbn)\n resu = cate.query_cate_server('9787115403179')\n # print(resu)\n\n\n\n\n","repo_name":"floydScript/spider","sub_path":"pipelines/categoryServer.py","file_name":"categoryServer.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24364364649","text":"from binascii import hexlify\nclass Present():\n '''\n key (hex str): Key used for encryption. The length of key string must be 20\\n\n message (str): Plaintext to encrypt. The length of message must be less or equal to 8\n '''\n sbox = [12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2] # sbox\n permute = [0]*64 # permutation layer\n\n subkeys = []\n rounds = 32 # 31 rounds in present cipher\n\n masterKey = 0 # 80/128 bit key # hexadecimal string\n m = 0 # 64 bit message\n\n def __init__(self):\n self.initPLayer()\n\n def setKey(self, key):\n if(len(key)*4 == 80 or len(key)*4 == 128): # verify that size of key is 80 bits\n temp_key = bytes.fromhex(key) # convert key from hex to bytes\n self.masterKey = int.from_bytes(temp_key, byteorder='big') # convert bytes to integer\n if((len(key)*4) == 80):\n self.subKeys80() # generate subkeys using 80 bit masterKey\n else:\n self.subKeys128()\n else:\n print('Length of key must be either 80 bits or 128 bits')\n exit()\n\n def setMessage(self, message):\n self.m = int(message, 16)\n\n # permutation layer is initialized\n def initPLayer(self):\n c = -1\n for i in range(64):\n if ((16*i) % 64) == 0:\n c += 1\n self.permute[i] = (16*i) % 64 + c\n\n def subKeys80(self):\n for i in range(1, self.rounds+1): # for each round\n self.subkeys.append(self.masterKey >> 16) # last 64 bits of masterKey is used as subkey\n\n # rotate the masterKey by 61 positions to left\n self.masterKey = ((self.masterKey & (2**19 - 1)) << 61) | (self.masterKey >> 19)\n\n # pass the leftmost 4 bits to sbox and update masterKey\n self.masterKey = ((self.sbox[self.masterKey >> 76] << 76) | self.masterKey & (2**76 - 1))\n\n # xor k[19],k[18],k[17],k[16],k[15] with round counter and update masterKey\n self.masterKey = (self.masterKey ^ (i << 15))\n\n def subKeys128(self):\n for i in range(1, self.rounds+1): # for each round\n self.subkeys.append(self.masterKey >> 64) # last 64 bits of masterKey is used as subkey\n\n # rotate the masterKey by 61 positions to left\n self.masterKey = (((self.masterKey & (2**67 - 1)) << 61) | (self.masterKey >> 67))\n\n # pass the leftmost 8 bits to sbox and update masterKey\n out1 = (self.sbox[self.masterKey >> 124] << 124) # sbox of bits from 124 to 127\n out2 = (self.sbox[(self.masterKey >> 120) & 15] << 120) # sbox of bits from 120 to 123\n out3 = (self.masterKey & (2**120 - 1)) # first 120 bits of masterkey\n self.masterKey = (out1 | out2 | out3)\n\n # xor k[66],k[65],k[64],k[63],k[62] with round counter and update masterKey\n self.masterKey = (self.masterKey ^ (i << 62))\n\n def pLayer(self, state):\n res = 0\n for i in range(64): # for each bit of the state\n bit = ((state >> i) & 1) # get the ith bit\n res = (res | (bit << self.permute[i]))\n return res\n\n def addRoundKey(self, state, subkey):\n return (state ^ subkey)\n\n def sBoxLayer(self, state):\n res = 0\n for i in range(16): # 4 
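`query_sale_category` in the categoryServer.py record above splices the user-supplied name into a LIKE clause by string concatenation. The wildcard pattern can instead be built in Python and bound as one parameter, matching the placeholder style pymysql already supports; a hedged rework assuming a pymysql cursor like the one in the class:

```python
def query_sale_category(cursor, salecategory_name):
    # Bind the wildcard pattern as a single value so quotes or '%' in the
    # user-supplied name cannot alter the SQL.
    pattern = f"%{salecategory_name}%"
    cursor.execute("select id from book_category_cate where name like %s", (pattern,))
    row = cursor.fetchone()
    return row[0] if row else ""
```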
bits at a time of the state\n bits = ((state >> (i*4)) & (2**4 - 1))\n res += (self.sbox[bits] << (i*4))\n return res\n\n def encryption(self):\n state = self.m\n for i in range(self.rounds-1):\n state = self.addRoundKey(state, self.subkeys[i])\n state = self.sBoxLayer(state)\n state = self.pLayer(state)\n # last round\n state = self.addRoundKey(state, self.subkeys[-1])\n\n # convert number of hex stringH\n return hex(state).replace('0x', '')\n\ndef main():\n print(\"Enter 8 Characters\")\n msg_str = input()\n print(\"Enter key in hex, 20 hex characters for 80bit or 32 hex characters for 128bit\")\n key_hex_str = input()\n msg_hex = hexlify(msg_str.encode()).decode()\n cipher = Present()\n cipher.setKey(key_hex_str)\n cipher.setMessage(msg_hex)\n print(\"Encryption: \", cipher.encryption())\n\nmain()\n","repo_name":"Code-Blooded-Human/Present-Cipher","sub_path":"present.py","file_name":"present.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5922553098","text":"import aiohttp\nfrom aiohttp import web\n\napis = {\n 'api_1': {\n 'url': \"https://numbersapi.p.rapidapi.com/6/21/date\",\n 'querystring': {\"fragment\": \"true\", \"json\": \"true\"},\n 'headers': {\n 'x-rapidapi-key': \"f7a97a4b1cmsheca736213eec59fp10fda2jsn486833907246\",\n 'x-rapidapi-host': \"numbersapi.p.rapidapi.com\"\n }\n },\n 'api_2': {\n 'url': \"https://covid-19-coronavirus-statistics.p.rapidapi.com/v1/total\",\n 'querystring': {\"country\": \"Canada\"},\n 'headers': {\n 'x-rapidapi-key': \"f7a97a4b1cmsheca736213eec59fp10fda2jsn486833907246\",\n 'x-rapidapi-host': \"covid-19-coronavirus-statistics.p.rapidapi.com\"\n }\n }\n}\n\n\nasync def handler(request):\n async with aiohttp.ClientSession() as session: # создаем сессию для запроса на сервера\n web_data = []\n for api in apis.values(): # проходим в цикле по apis, делаем get запросы на url, с заголовками и параметрами\n async with session.get(api['url'], headers=api['headers'], params=api['querystring']) as responce:\n data = await responce.json() # получаем ответ - json данные\n web_data.append(data)\n return web.json_response(web_data)\n\n\napp = web.Application()\napp.add_routes([web.get('/collect_info', handler)])\n\nif __name__ == '__main__':\n web.run_app(app)\n","repo_name":"DeeStroKeR/hiller","sub_path":"HW10_Multithreading/hw10_aiohttp.py","file_name":"hw10_aiohttp.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1205382627","text":"def wrap(cls, settings):\n cls._ss_message_splitter = MessageSplitter(settings)\n cls._ss_orig_send = cls.send\n cls.send = send\n\n\ndef unwrap(cls):\n cls.send = cls._ss_orig_send\n del cls._ss_orig_send\n del cls._ss_message_splitter\n\n\nasync def send(self, content=None, *args, **kwargs):\n results = []\n\n for piece in self._ss_message_splitter.split(content):\n results.append(await self._ss_orig_send(piece, *args, **kwargs))\n\n if len(results) == 1: # this will usually be the case\n return results[0]\n\n return results\n\n\nclass MessageSplitter:\n def __init__(self, settings):\n self.max_message_len = settings.max_message_len\n self.newline_search_len = settings.newline_search_len\n self.space_search_len = settings.space_search_len\n\n def split(self, string):\n if not string:\n return ('',)\n\n return self.flatten(self.get_pieces(str(string)))\n\n def get_pieces(self, string):\n if len(string) <= 
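For the present.py record above, the bit permutation that `initPLayer` builds with its wrap counter has the standard PRESENT closed form P(i) = 16·i mod 63 for i < 63, with bit 63 fixed. A standalone spot-check of mine for both layers, with values taken from the record's own tables:

```python
# S-box lookup table as listed in the record above.
SBOX = [0xC, 0x5, 0x6, 0xB, 0x9, 0x0, 0xA, 0xD,
        0x3, 0xE, 0xF, 0x8, 0x4, 0x7, 0x1, 0x2]

# Nibble substitution: 0x0 -> 0xC and 0xF -> 0x2, matching sBoxLayer.
assert SBOX[0x0] == 0xC and SBOX[0xF] == 0x2

# Bit permutation: bit i moves to (16*i) mod 63, bit 63 stays put --
# the same mapping initPLayer constructs incrementally.
def p(i):
    return 63 if i == 63 else (16 * i) % 63

assert p(0) == 0 and p(1) == 16 and p(4) == 1 and p(62) == 47
```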
self.max_message_len:\n return (string, )\n\n piece = string[:self.max_message_len]\n if '\\n' in piece[-self.newline_search_len:]:\n piece = piece.rsplit('\\n', 1)[0]\n\n elif ' ' in piece[-self.space_search_len:]:\n piece = piece.rsplit(' ', 1)[0]\n\n return (piece, self.get_pieces(string[len(piece):]))\n\n def flatten(self, tpl):\n return tuple(self.flattengen(tpl))\n\n def flattengen(self, tpl):\n for item in tpl:\n if isinstance(item, tuple):\n yield from self.flattengen(item)\n\n else:\n yield item\n","repo_name":"Levtastic/HypocrisyBot","sub_path":"src/levbot/send_splitter.py","file_name":"send_splitter.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72230888213","text":"import random\r\n\r\ni = 0\r\nwhile i <= 100:\r\n print(\"JETBRAINS\")\r\n i += 1\r\nprint(\"循环结束\")\r\nprint(\"计算1累加到100的和\")\r\nnum = 1\r\nsum = 0\r\nwhile num<=100:\r\n sum+=num\r\n num += 1\r\nprint(\"结果为:%d\"%sum)\r\n\r\nprint(\"----------------------------\")\r\nprint('猜数字')\r\nr1 = random.randint(1, 50)\r\nflag = True\r\nwhile flag:\r\n r2 = int(input(\"请输入你的猜出的数字:\"))\r\n if r2 > r1:\r\n print(\"猜中了\")\r\n flag = False\r\n else:\r\n print(\"猜错了,小了\")\r\n continue\r\nprint('游戏结束')\r\n","repo_name":"CStrive/Party2","sub_path":"4.While循环.py","file_name":"4.While循环.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32000427298","text":"# 99266 Luis Fonseca\n\ndef eh_tabuleiro(var):\n # eh_tabuleiro: universal -> booleano\n \"\"\"Devolve se o seu argumento corresponde a um tabuleiro.\"\"\"\n\n if not (type(var) == tuple and len(var) == 3):\n return False\n\n for linha in var:\n if not (type(linha) == tuple and len(linha) == 3):\n return False\n\n for cell in linha:\n if not (type(cell) == int and -1 <= cell <= 1):\n return False\n\n return True\n\n\ndef eh_posicao(var):\n # eh_posicao: universal -> booleano\n \"\"\"Devolve se o seu argumento corresponde a uma posicao.\"\"\"\n\n return type(var) == int and 1 <= var <= 9\n\n\ndef obter_coluna(tab, n):\n # obter_coluna: tabuleiro x inteiro -> tuplo\n \"\"\"Devolve um tuplo com os valores da coluna correspondente ao inteiro\n dado.\"\"\"\n\n if not (eh_tabuleiro(tab) and type(n) == int and 1 <= n <= 3):\n raise ValueError('obter_coluna: algum dos argumentos e invalido')\n\n vect = ()\n for linha in tab:\n vect += (linha[n-1],)\n\n return vect\n\n\ndef obter_linha(tab, n):\n # obter_linha: tabuleiro x inteiro -> tuplo\n \"\"\"Devolve um tuplo com os valores da linha correspondente ao inteiro\n dado.\"\"\"\n\n if not (eh_tabuleiro(tab) and type(n) == int and 1 <= n <= 3):\n raise ValueError('obter_linha: algum dos argumentos e invalido')\n\n return tab[n-1]\n\n\ndef obter_diagonal(tab, n):\n # obter_diagonal: tabuleiro x inteiro -> tuplo\n \"\"\"Devolve um tuplo com os valores da diagonal correspondente ao inteiro\n dado.\"\"\"\n\n if not (eh_tabuleiro(tab) and type(n) == int and 1 <= n <= 2):\n raise ValueError('obter_diagonal: algum dos argumentos e invalido')\n\n if n == 1:\n step = 1\n elif n == 2:\n n = 3\n step = -1\n\n vect = ()\n for c in range(3):\n vect += ( obter_coluna(tab, c + 1)[n-1], )\n n += step\n\n return vect\n\n\ndef tabuleiro_str(tab):\n # tabuleiro_str: tabuleiro -> cad. 
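The send_splitter.py record above monkey-patches a messaging client's `send` so oversized messages are split at a newline or space near the limit. Exercising `MessageSplitter` directly, assuming the class is in scope; the 2000-character cap is illustrative (it matches a common chat-platform limit), and the settings object only needs the three fields the class reads:

```python
from types import SimpleNamespace

# Stand-in settings with the three attributes MessageSplitter reads.
settings = SimpleNamespace(max_message_len=2000,
                           newline_search_len=200,
                           space_search_len=100)

splitter = MessageSplitter(settings)
pieces = splitter.split("word " * 1000)  # ~5000 characters of text
# Every piece fits the cap; splits land on a space near the boundary.
print(len(pieces), all(len(p) <= 2000 for p in pieces))
```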
caracteres\n \"\"\"Devolve a cadeia de caracteres que representa o tabuleiro dado.\"\"\"\n\n if not eh_tabuleiro(tab):\n raise ValueError('tabuleiro_str: o argumento e invalido')\n\n tab_str = ''\n for n in range(3):\n tab_str += linha_str(obter_linha(tab, n + 1))\n if n <= 1:\n tab_str += '\\n-----------\\n'\n\n return tab_str\n\n\ndef linha_str(linha):\n # lin_str: tuplo -> cad. caracteres\n \"\"\"Devolve a cadeia de caracteres que representa a linha dada.\"\"\"\n\n if not (type(linha) == tuple and len(linha) == 3):\n raise ValueError('linha_str: o argumento e invalido')\n\n symbols = { -1: 'O', 0: ' ', 1: 'X'}\n\n lin_str = ''\n for n in range(3):\n lin_str += ' ' + symbols[linha[n]] + ' '\n if n <= 1:\n lin_str += '|'\n\n return lin_str\n\n\ndef obter_valor_posicao(tab, pos):\n # obter_valor_posicao: tabuleiro x posicao -> inteiro\n \"\"\"Devolve um inteiro com o valor da marca na posicao dada.\"\"\"\n\n if not (eh_tabuleiro(tab) and eh_posicao(pos)):\n raise ValueError('obter_valor_posicao: algum dos argumentos e invalido')\n\n pos -= 1\n\n return obter_linha(tab, (pos//3)+1)[(pos%3)]\n\n\ndef eh_posicao_livre(tab, pos):\n # eh_posicao_livre: tabuleiro x posicao -> booleano\n \"\"\"Devolve se a dada posicao se encontra livre.\"\"\"\n\n if not (eh_tabuleiro(tab) and eh_posicao(pos)):\n raise ValueError('eh_posicao_livre: algum dos argumentos e invalido')\n\n return obter_valor_posicao(tab,pos) == 0\n\n\ndef obter_posicoes_livres(tab):\n # obter_posicoes_livres: tabuleiro -> tuplo\n \"\"\"Devolve o tuplo ordenado com todas as posicoes livres do tabuleiro\n dado.\"\"\"\n\n if not eh_tabuleiro(tab):\n raise ValueError('obter_posicoes_livres: o argumento e invalido')\n\n vect = ()\n for n in range(1,10):\n if eh_posicao_livre(tab,n):\n vect += (n,)\n\n return vect\n\n\ndef obter_linhas_colunas_diagonais(tab, pos):\n # obter_linhas_colunas_diagonais: tabuleiro x posicao -> tuplo\n \"\"\"Devolve um tuplo com os tuplos correspondentes a todas as linhas,\n colunas e diagonais a que essa posicao pertence.\"\"\"\n\n if not (eh_tabuleiro(tab) and eh_posicao(pos)):\n raise ValueError('obter_linhas_colunas_diagonais: algum dos argumentos e invalido')\n\n lcds = ()\n\n linha = ((pos-1)//3)+1\n lcds += (obter_linha(tab, linha), )\n\n coluna = ((pos-1)%3)+1\n lcds += (obter_coluna(tab, coluna), )\n\n if pos in (1,5,9):\n lcds += (obter_diagonal(tab, 1), )\n\n if pos in (3,5,7):\n lcds += (obter_diagonal(tab, 2), )\n\n return lcds\n\n\ndef jogador_ganhador(tab):\n # jogador_ganhador: tabuleiro -> inteiro\n \"\"\"Devolve o inteiro a correspondente ao jogador que ganhou a partida.\"\"\"\n\n if not eh_tabuleiro(tab):\n raise ValueError('jogador_ganhador: o argumento e invalido')\n\n def eh_vitoria(vect):\n return vect[0] != 0 and vect[0] == vect[1] == vect[2]\n\n for obter_fila in (obter_linha, obter_coluna, obter_diagonal):\n for n in range(3):\n if not (n == 2 and obter_fila == obter_diagonal):\n fila = obter_fila(tab, n+1)\n if eh_vitoria(fila):\n return fila[0]\n\n return 0\n\n\ndef marcar_posicao(tab, j, pos):\n # marcar_posicao: tabuleiro x inteiro x posicao -> tabuleiro\n \"\"\"Devolve um tabuleiro com a marca do jogador na posicao dada.\"\"\"\n\n if not (\n eh_tabuleiro(tab) and\n type(j) == int and j in (-1, 1) and\n eh_posicao(pos) and eh_posicao_livre(tab,pos)\n ):\n raise ValueError('marcar_posicao: algum dos argumentos e invalido')\n\n new_tab = ()\n for n in range(3):\n linha = obter_linha(tab, n + 1)\n if n == (pos-1)//3:\n new_tab += (marcar_linha(linha , j, pos), )\n else:\n new_tab += 
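The index arithmetic in obter_valor_posicao above — integer-divide by 3 for the row, modulo 3 for the column — is easiest to see as a table. A small illustration, not part of the original file:

```python
# How positions 1..9 land on the 3x3 board: (pos-1)//3 picks the row,
# (pos-1) % 3 picks the column (both 0-based here, printed 1-based).
for pos in range(1, 10):
    linha, coluna = (pos - 1) // 3, (pos - 1) % 3
    print(f"pos {pos} -> linha {linha + 1}, coluna {coluna + 1}")
```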
(linha, )\n\n return new_tab\n\n\ndef marcar_linha(vect, j, pos):\n # marcar_linha: tuplo x inteiro x posicao -> tuplo\n \"\"\"Devolve um tuplo representante de uma linha com a marca do jogador na\n posicao dada.\"\"\"\n\n new_linha = ()\n for n in range(3):\n if n == (pos-1)%3:\n new_linha += (j,)\n else:\n new_linha += (vect[n], )\n\n return new_linha\n\n\ndef escolher_posicao_manual(tab):\n # escolher_posicao_manual: tabuleiro -> posicao\n \"\"\"Esta funcao realiza a leitura de uma posicao introduzida manualmente por\n um jogador e devolve esta posicao escolhida.\"\"\"\n\n if not eh_tabuleiro(tab):\n raise ValueError('escolher_posicao_manual: o argumento e invalido')\n\n pos = int(input('Turno do jogador. Escolha uma posicao livre: '))\n\n if not (eh_posicao(pos) and eh_posicao_livre(tab, pos)):\n raise ValueError('escolher_posicao_manual: a posicao introduzida e invalida')\n\n return pos\n\n\ndef escolher_posicao_auto(tab, j, strat):\n # escolher_posicao_auto: tabuleiro x inteiro x cad. caracteres -> posicao\n \"\"\"Devolve a posicao escolhida automaticamente de acordo com a estrategia\n selecionada.\"\"\"\n\n if not (\n eh_tabuleiro(tab) and\n type(j) == int and j in (-1, 1) and\n strat in ('basico', 'normal', 'perfeito')\n ):\n raise ValueError('escolher_posicao_auto: algum dos argumentos e invalido')\n\n acoes = (\n (vitoria, 'normal'),\n (bloqueio, 'normal'),\n (bifurcacao, 'perfeito'),\n (bloqueio_bifurcacao, 'perfeito'),\n (centro, 'basico'),\n (canto_oposto, 'normal'),\n (canto_vazio, 'basico'),\n (lateral_vazio, 'basico')\n )\n\n pos = None\n for acao in acoes:\n if eh_mais_avancada(strat, acao[1]):\n pos = acao[0](tab, j)\n\n if pos:\n return pos\n\n\ndef eh_mais_avancada(strat, outra_strat):\n # eh_mais_avancada: cad. caracteres x cad. caracteres -> booleano\n \"\"\"Devolve se a primeira estrategia e mais ou igualmente avancada em\n comparacao a segunda.\"\"\"\n\n strats = { 'basico': 0, 'normal': 1, 'perfeito': 2 }\n return strats[strat] >= strats[outra_strat]\n\n\ndef vitoria(tab, j):\n # vitoria: tabuleiro x inteiro -> posicao\n \"\"\"Se o jogador tiver um dois em linha devolve a posicao livre restante.\"\"\"\n\n for pos in obter_posicoes_livres(tab):\n for lcd in obter_linhas_colunas_diagonais(tab, pos):\n if lcd.count(j) == 2:\n return pos\n\n return None\n\n\ndef bloqueio(tab, j):\n # bloqueio: tabuleiro x inteiro -> posicao\n \"\"\"Se o adversario tiver um dois em linha devolve a posicao livre\n restante.\"\"\"\n\n # bloqueio utiliza a mesma logica que vitoria sendo assim possivel\n # reutilizar o codigo -j e o oponente de j\n return vitoria(tab, -j)\n\n\ndef bifurcacao(tab, j):\n # bifurcacao: tabuleiro x inteiro -> posicao\n \"\"\"Se o jogador tiver uma posicao de bifurcacao devolve essa posicao.\"\"\"\n\n for pos in obter_posicoes_livres(tab):\n if eh_intersecao(tab, pos, j):\n return pos\n\n return None\n\n\ndef eh_intersecao(tab,pos,j):\n # eh_intersecao: tabuleiro x posicao x inteiro -> booleano\n \"\"\"Verifica se a posicao se encontra em duas ou mais linhas, colunas ou\n diagonais com pecas do jogador.\"\"\"\n\n total = 0\n for lcd in obter_linhas_colunas_diagonais(tab, pos):\n if lcd.count(0) == 2:\n total += lcd.count(j)\n\n return total >= 2\n\n\ndef bloqueio_bifurcacao(tab, j):\n # bloqueio_bifurcacao: tabuleiro x inteiro -> posicao\n \"\"\"Se o oponente tiver apenas uma posicao de bifurcacao devolve essa\n posicao. 
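A worked example for the vitoria and bloqueio rules above, assuming the board helpers from this file are in scope: with X (=1) holding two cells of the top row, vitoria finds the winning square and bloqueio makes O defend that same square:

```python
tab = ((1, 1, 0),
       (0, -1, 0),
       (0, 0, -1))
print(vitoria(tab, 1))    # -> 3, completes X's top row
print(bloqueio(tab, -1))  # -> 3, O must block the same position
```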
Caso exista mais do que uma bifurcacao devolve a primeira\n posicao que impede o oponente de tirar partido da situacao.\"\"\"\n\n intersecoes = ()\n for pos in obter_posicoes_livres(tab):\n if eh_intersecao(tab, pos, -j):\n intersecoes += (pos, )\n\n if len(intersecoes) == 0:\n return None\n elif len(intersecoes) == 1:\n return intersecoes[0]\n else:\n return bloqueio_bifurcacoes(intersecoes, tab, j)\n\ndef bloqueio_bifurcacoes(intersecoes, tab, j):\n # bloqueio_bifurcacoes: tuplo x tabuleiro x inteiro -> posicao\n \"\"\"Devolve a posicao que impede a criacao de uma bifurcacao quando existem\n mais do que uma posicao que a crie.\"\"\"\n\n posicoes_livres = obter_posicoes_livres(tab)\n\n possibilidades = ()\n for pos in posicoes_livres:\n if pos not in intersecoes:\n\n # Forca o oponente a jogar numa posicao que nao seja intersecao\n acao = forcar_jogada_lc(tab, j, pos)\n if acao:\n possibilidades += (acao, )\n\n # Escolhe a primeira posicao que satisfaz o pretendido\n for pos in posicoes_livres:\n if pos in possibilidades:\n return pos\n\n return None\n\n\ndef forcar_jogada_lc(tab, j, pos):\n # forcar_jogada_lc: tabuleiro x inteiro x posicao -> posicao\n \"\"\"Devolve a posicao que forca o oponente a jogar na dada posicao no turno\n seguinte. Apenas se aplica a linhas e colunas.\"\"\"\n\n linha = (pos-1)//3\n coluna = (pos-1)%3\n\n # Caso a linha tenha uma peca do jogador e dois vazios\n # jogasse na posicao vazia que nao e a que se deseja forcar\n lin_vect = obter_linha(tab, linha+1)\n if j in lin_vect:\n for c in range(3):\n if c != coluna and lin_vect[c] == 0:\n return (linha*3+c)+1\n\n # Mesma logica para colunas\n col_vect = obter_coluna(tab, coluna+1)\n if j in col_vect:\n for l in range(3):\n if l != linha and col_vect[l] == 0:\n return (l*3+coluna)+1\n\n return None\n\n\ndef centro(tab, _):\n # centro: tabuleiro -> posicao\n \"\"\"Devolve a posicao central se esta estiver livre.\"\"\"\n\n if eh_posicao_livre(tab, 5):\n return 5\n\n return None\n\n\ndef canto_oposto(tab, j):\n # canto_oposto: tabuleiro x inteiro -> posicao\n \"\"\"Se o oponente tiver uma marca num canto do tabuleiro diagonalmente\n oposto a uma posicao livre devolve essa posicao.\"\"\"\n\n for pos in (1,3,7,9):\n if (\n eh_posicao_livre(tab, pos) and\n obter_valor_posicao(tab, 10-pos) == -j\n ):\n return pos\n\n return None\n\n\ndef canto_vazio(tab, _):\n # canto_vazio: tabuleiro -> posicao\n \"\"\"Devolve o primeiro canto que e uma posicao livre caso exista.\"\"\"\n\n for pos in (1,3,7,9):\n if eh_posicao_livre(tab, pos):\n return pos\n\n return None\n\n\ndef lateral_vazio(tab, _):\n # lateral_vazio: tabuleiro -> posicao\n \"\"\"Devolve a primeira lateral que e uma posicao livre caso exista.\"\"\"\n\n for pos in (2,4,6,8):\n if eh_posicao_livre(tab, pos):\n return pos\n\n return None\n\n\ndef jogo_do_galo(j, strat):\n # jogo_do_galo: cad. caracteres x cad. caracteres -> cad. 
caracteres\n \"\"\"Esta funcao corresponde a funcao principal que permite jogar um jogo\n completo de Jogo do Galo de uma jogador contra o computador.\"\"\"\n\n if not (j in ('X','O') and strat in ('basico','normal','perfeito')):\n raise ValueError('jogo_do_galo: algum dos argumentos e invalido')\n\n print('Bem-vindo ao JOGO DO GALO.')\n print('O jogador joga com \\'{}\\'.'.format(j))\n\n turnos = {'X': 1, 'O': -1}\n j = turnos[j]\n\n tab = ((0,)*3,)*3\n turno = 1\n\n while jogador_ganhador(tab) == 0 and len(obter_posicoes_livres(tab)) > 0:\n tab = jogar_turno(turno, strat, tab, j)\n print(tabuleiro_str(tab))\n turno *= -1\n\n resultados = {1: 'X', 0: 'EMPATE', -1: 'O'}\n return resultados[jogador_ganhador(tab)]\n\n\ndef jogar_turno(turno, strat, tab, j):\n # jogar_turno: inteiro x cad. caracteres x tabuleiro x inteiro -> tabuleiro\n \"\"\"Devolve o tabuleiro modificado apos o turno.\"\"\"\n\n if j == turno:\n pos = escolher_posicao_manual(tab)\n else:\n print('Turno do computador ({}):'.format(strat))\n pos = escolher_posicao_auto(tab, turno, strat)\n\n return marcar_posicao(tab, turno, pos)\n","repo_name":"luishfonseca/ist-fp-p1","sub_path":"jogo_do_galo.py","file_name":"jogo_do_galo.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70989732374","text":"\"\"\" Escribir un programa que lea los valores de c/campo de un registro de stock de un almacén. Los \n campos son: \n - Cod_art: integer; \n - Descripción: string [30]; \n - Cantidad: word; (0 ..65535) \n - Precio_unitario: real; \n \nSe pide además: \n a- Cargar datos hasta que el cod_art = 0. \n b- Mostrar del artículo más caro, cantidad en existencia. \n c- Dado un cod_articulo ver si existe. \n d- Mostrar si este almacén vende queso “Don Bautista”. \n e- Mostrar el artículo con menor existencia. \n f- Mostrar cual es el artículo más barato. 
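The helpers above are enough to pit two automatic strategies against each other with no manual input — a small sketch (not part of the original file) that reuses escolher_posicao_auto for both sides:

```python
tab = ((0,) * 3,) * 3
turno = 1
while jogador_ganhador(tab) == 0 and obter_posicoes_livres(tab):
    strat = 'perfeito' if turno == 1 else 'basico'
    tab = marcar_posicao(tab, turno, escolher_posicao_auto(tab, turno, strat))
    turno *= -1
print(tabuleiro_str(tab))
print(jogador_ganhador(tab))  # 1 (X), -1 (O) or 0 for a draw
```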
\"\"\"\n \ndef insertar():\n stock = { 'cod_art' : int, 'descripcion' : str, 'cantidad' : int, 'precio_unitario' : float }\n sentinela = int(input(\"Ingrese el codigo del articulo: \"))\n if sentinela != 0:\n stock['cod_art'] = sentinela\n stock['descripcion'] = input(\"Ingrese la descipcion del articulo: \")\n stock['cantidad'] = int(input(\"Ingrese la cantidad de articulos: \"))\n stock['precio_unitario'] = float(input(\"Ingrese el precio unitario: \"))\n return stock, sentinela\n\ndef cargar(lyst : list, tamanio : int):\n indice = 0\n print(f\" # Stock {indice + 1}\")\n stock, sentinela = insertar()\n print()\n while (sentinela != 0) and (indice < tamanio):\n lyst[indice] = stock\n indice += 1\n print(f\" # Stock {indice + 1}\")\n stock, sentinela = insertar() \n print()\n return indice\n \ndef mostrar_todo(lista : list, ultimo : int):\n for stock in range(0, ultimo):\n print(f\"Articulo {stock + 1}.\")\n print(f\"Codigo Articulo = {lista[stock]['cod_art']}\") \n print(f\"Descripcion = {lista[stock]['descripcion']}\") \n print(f\"Cantidad = {lista[stock]['cantidad']}\") \n print(f\"Precio Unitario = {lista[stock]['precio_unitario']}\") \n print()\n\ndef mostrar(lista : list, posicion : int, campos : list):\n for indice in range(0, len(campos)):\n print(f\"{campos[indice]}={lista[posicion][campos[indice]]}\")\n \n\ndef mayor(lista : list, tamanio : int, campo : str):\n mayor = lista[0][campo]\n posicion = 0\n for stock in range(0, tamanio):\n if lista[stock][campo] > mayor:\n mayor = lista[stock][campo]\n posicion = stock\n return posicion\n\ndef menor(lista : list, tamanio : int, campo : str):\n menor = lista[0][campo]\n posicion = 0\n for stock in range(0, tamanio):\n if lista[stock][campo] < menor:\n menor = lista[stock][campo]\n posicion = stock\n return posicion\n\ndef burbuja(lista : list, clave : str):\n for i in range(0,(ultimo-1)):\n for j in range(0,(ultimo-1)-i):\n if lista[j][clave] > lista[j+1][clave]:\n lista[j], lista[j+1] = lista[j+1], lista[j] # intercambia los registros completos, no solo el campo clave\n\ndef busqueda_binaria(lista : list, tamanio : int, buscado, clave, posicion=None) -> int:\n primero = 0\n ultimo = tamanio - 1\n while (posicion == None) and (primero <= ultimo):\n medio = (primero + ultimo) // 2\n if (lista[medio][clave] == buscado):\n posicion = medio\n elif (lista[medio][clave] >= buscado):\n ultimo = medio - 1\n else:\n primero = medio + 1 \n return posicion\n\n\n\nlyst = [0,0,0,0]\n\nlyst = [\n { 'cod_art' : 2, 'descripcion' : \"Papa\", 'cantidad' : 12, 'precio_unitario' : 100 },\n { 'cod_art' : 3, 'descripcion' : \"Tomate\", 'cantidad' : 30, 'precio_unitario' : 120 },\n { 'cod_art' : 4, 'descripcion' : \"Queso Don Bautista\", 'cantidad' : 8, 'precio_unitario' : 200 },\n { 'cod_art' : 5, 'descripcion' : \"Cebolla\", 'cantidad' : 10, 'precio_unitario' : 80 },\n 0,\n 0\n ]\n\nTAMANIO = len(lyst)\n\n# ultimo = cargar(lyst, TAMANIO)\n# print(\"ultimo=\",ultimo)\n# print(lyst)\nultimo = 4\nfor item in lyst:\n print(item)\n\n# TODO: b - Mostrar del artículo más caro, cantidad en existencia.\n\nposicion = mayor(lyst, ultimo, 'precio_unitario')\nprint(f\"El articulo mas caro es '{lyst[posicion]['descripcion']}', cantidad: {lyst[posicion]['cantidad']} \")\n\n# TODO: c - Dado un cod_articulo ver si existe. \n\nbuscado = int(input(\"Ingrese un codigo de articulo: \"))\nposicion = busqueda_binaria(lyst, ultimo, buscado, 'cod_art')\nif posicion != None:\n print(f\"El articulo existe. 
({posicion})\")\nelse:\n print(\"El articulo no existe.\")\n\n# TODO: d - Mostrar si este almacén vende queso “Don Bautista”. \n\nburbuja(lyst, 'descripcion')\nmostrar_todo(lyst, ultimo)\n\nposicion = busqueda_binaria(lyst, ultimo, 'Queso Don Bautista', 'descripcion')\nif posicion != None:\n print(f\" 'Queso Don Bautista' EXISTE en el almacen.\")\nelse:\n print(f\" 'Queso Don Bautista' NO EXISTE en el almacen.\")\n\n# TODO: e - Mostrar el artículo con menor existencia. \n\nposicion = menor(lyst, ultimo, 'cantidad')\nprint(f\"El articulo con menor existencia es '{lyst[posicion]['descripcion']}'.\")\n\n# TODO: f - Mostrar cual es el artículo más barato.\n\nposicion = menor(lyst, ultimo, 'precio_unitario')\nprint(f\"El articulo mas barato es '{lyst[posicion]['descripcion']}'\")\n\n\n","repo_name":"curtistyle/Fundamentos-de-Programacion2023","sub_path":"Facultad/TP10 - Registros - Python/ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6701442599","text":"#!/usr/bin/env python\nimport os, sys\nimport argparse\nfrom os.path import dirname\nfrom pprint import pprint as pp\nimport envoy\n\nsys.path.insert(0, dirname(dirname(os.path.abspath(__file__))))\nsys.path.insert(0, dirname(os.path.abspath(__file__)))\nimport hapinsp_formatter\n\ndef main():\n args = get_args()\n cmd = get_cmd(args.inst, args.db, args.table)\n\n r = envoy.run(cmd)\n results = {}\n internal_status_code = -1\n if r.status_code != 0:\n print(cmd)\n print(r.std_err)\n print(r.std_out)\n else:\n violations = r.std_out.strip()\n if not isnumeric(violations):\n internal_status_code = 1\n fixed_violations = 1\n else:\n if int(violations) == -1:\n fixed_violations = 1\n else:\n fixed_violations = 0\n\n results['violation_cnt'] = fixed_violations\n\n results['rc'] = max(r.status_code, internal_status_code)\n print(hapinsp_formatter.transform_args(results))\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"tests table statistics\")\n parser.add_argument(\"--inst\")\n parser.add_argument(\"--db\")\n parser.add_argument(\"--table\")\n args = parser.parse_args()\n\n args.inst = args.inst or os.environ.get('hapinsp_instance', None)\n args.db = args.db or os.environ.get('hapinsp_database', None)\n args.table = args.table or os.environ.get('hapinsp_table', None)\n if not args.inst:\n abort(\"Error: instance not provided as arg or env var\")\n if not args.db:\n abort(\"Error: database not provided as arg or env var\")\n if not args.table:\n abort(\"Error: table not provided as arg or env var\")\n return args\n\n\ndef get_cmd(inst, db, table):\n\n sql = \"\"\" SHOW TABLE STATS {tab} \\\n \"\"\".format(tab=table)\n sql = ' '.join(sql.split())\n cmd = \"\"\" impala-shell -i %s -d %s --quiet -B --ssl -q \"%s\" | columns | cut -f 1\n \"\"\" % (inst, db, sql)\n return cmd\n\n\ndef abort(msg):\n print(msg)\n sys.exit(1)\n\ndef isnumeric(val):\n try:\n int(val)\n except TypeError:\n return False\n except ValueError:\n return False\n else:\n return True\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"TheDataLeek/hadoopinspector_plugins","sub_path":"checks/rule_table_stats_exist.py","file_name":"rule_table_stats_exist.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36337159219","text":"from queue import PriorityQueue\nimport sys\nimport timeit\nimport csv\n\nfrom 
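busqueda_binaria from ejercicio4.py above only works when the list is already ordered on the search key — a quick standalone check with records pre-sorted by 'cod_art' (sample data, not from the original file; assumes the function is in scope):

```python
datos = [
    {'cod_art': 2, 'descripcion': 'Papa'},
    {'cod_art': 3, 'descripcion': 'Tomate'},
    {'cod_art': 5, 'descripcion': 'Cebolla'},
]
print(busqueda_binaria(datos, len(datos), 3, 'cod_art'))  # -> 1 (found at index 1)
print(busqueda_binaria(datos, len(datos), 4, 'cod_art'))  # -> None (absent)
```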
numpy import average\nfrom NodeClass import Node\n\nnumberOfArgumentsPassedFromCommandLine = len(sys.argv)\ninitial=sys.argv[1]\ngoal=sys.argv[2]\nif numberOfArgumentsPassedFromCommandLine != 3:\n print(\"ERROR: Not enough or too many input arguments.\")\n quit()\n\ndriving_dict={}\nstraight_dict={}\n\nwith open('driving.csv', 'rt') as f:\n driving = csv.DictReader(f)\n for row in driving:\n state = row['STATE']\n del row['STATE']\n driving_dict[state] = dict(row)\n\nwith open('straightline.csv', 'rt') as s:\n straight = csv.DictReader(s)\n for row in straight:\n state = row['STATE']\n del row['STATE']\n straight_dict[state] = dict(row)\n\n\nprint(\"Initial state:\", initial)\nprint(\"Goal state:\", goal)\nprint()\nif (goal not in straight_dict or initial not in straight_dict) or (goal not in driving_dict or initial not in driving_dict):\n print(\"Greedy Best First Search:\")\n print(\"Solution path: FAILURE: NO PATH FOUND\")\n print(\"Number of state on a path: 0\")\n print(\"Path cost: 0\")\n print(\"Execution time: 0\")\n print()\n print(\"A* Search:\")\n print(\"Solution path: FAILURE: NO PATH FOUND\")\n print(\"Number of state on a path: 0\")\n print(\"Path cost: 0\")\n print(\"Execution time: 0\")\n quit()\n \ndef GBFS(initial,goal):\n timeStart = timeit.default_timer()\n reached=dict()\n start=Node(state=initial,parent=None,pathCost=0,heuristics=int(straight_dict[initial][goal]), algorithm='GBFS')\n frontier=PriorityQueue()\n frontier.put((start.getEval(),start))\n reached.update({initial:start})\n while not frontier.qsize()==0:\n g,currNode=frontier.get()\n if currNode.getState()==goal:\n timeEnd = timeit.default_timer()\n return [currNode,timeEnd-timeStart]\n for child in GBFS_Expand(initial,currNode):\n s=child.getState()\n if s not in reached or child.getEval() int:\n rat_dict = {}\n for rectangle in rectangles:\n ratio = rectangle[0]/rectangle[1]\n if ratio not in rat_dict.keys():\n rat_dict[ratio] = 1\n else:\n rat_dict[ratio] += 1\n return int(np.sum([(v * (v - 1))/2 for v in rat_dict.values()]))","repo_name":"pathankhansalman/LeetCode","sub_path":"number-of-pairs-of-interchangeable-rectangles.py","file_name":"number-of-pairs-of-interchangeable-rectangles.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7981936539","text":"class Transformation():\n \"\"\"Class handle simple data transformation\"\"\"\n\n def remap(self, item):\n \"\"\"Remapping to a required dict form. 
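The interchangeable-rectangles count above relies on the identity that v rectangles sharing one width/height ratio contribute v·(v−1)/2 pairs. A self-contained re-check of that formula; note that float ratios can misgroup near-equal fractions, which Fraction keys avoid:

```python
from collections import Counter
from fractions import Fraction

def count_pairs(rectangles):
    # Fraction(w, h) keys avoid floating-point grouping errors.
    counts = Counter(Fraction(w, h) for w, h in rectangles)
    return sum(v * (v - 1) // 2 for v in counts.values())

# Ratio 3/2 appears three times, 2/1 twice -> C(3,2) + C(2,2) = 3 + 1 = 4.
print(count_pairs([[3, 2], [6, 4], [9, 6], [4, 2], [2, 1]]))  # -> 4
```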
\n :param item: {dict}\n :return: {dict}\n \"\"\"\n return {\n item['key']: item['value']\n }\n\n def to_single_object(self, data):\n \"\"\"Merging list of the dicts to single dict\n :param data: {list}\n :return: {dict}\n \"\"\"\n output = {}\n for item in data:\n output.update(item)\n return output\n\n def to_list(self, data):\n \"\"\"Remap dict to list of tuples\n :param data: {dict}\n :return: {list} of {tuples}\n \"\"\"\n return sorted(\n [(k, v) for k, v in data.items()], \n key=lambda k: k[1]\n )\n\n def execute(self, data):\n \"\"\"Executing defined transformations\n :param data: {list}\n :return: {list}\n \"\"\"\n data = map(self.remap, data)\n data = self.to_single_object(data)\n data = self.to_list(data)\n return data","repo_name":"maybelinot/tweets_correlation_analysis","sub_path":"transformation/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31919132737","text":"from flask_jwt_extended import jwt_required\nfrom flask_restful import Resource, reqparse\n\nfrom src.logic import student_teacher_logic\nfrom src.utils.response import ok\n\n\nparser = reqparse.RequestParser()\nparser.add_argument('teacher_id', type=int, help='This field cannot be blank', required=True)\nparser.add_argument('student_id', type=int, help='This field cannot be blank', required=True)\n\n\nclass StudentTeacherResource(Resource):\n @jwt_required()\n def get(self):\n \"\"\"Get teachers and students of the current account\n ---\n tags:\n - Account\n responses:\n 200:\n description: OK.\n \"\"\"\n result = student_teacher_logic.get_details()\n return ok(result)\n\n @jwt_required()\n def post(self):\n \"\"\"Request or confirm a new teacher student link\n ---\n tags:\n - Account\n parameters:\n - name: body\n in: body\n required: true\n schema:\n properties:\n teacher_id:\n type: int\n example: 1\n student_id:\n type: int\n example: 2\n responses:\n 200:\n description: OK.\n \"\"\"\n data = parser.parse_args()\n student_teacher_logic.make_request(data['teacher_id'], data['student_id'])\n return ok()\n\n @jwt_required()\n def delete(self):\n \"\"\"Deletes a student teacher link or request\n ---\n tags:\n - Account\n parameters:\n - name: body\n in: body\n required: true\n schema:\n properties:\n teacher_id:\n type: int\n example: 1\n student_id:\n type: int\n example: 2\n responses:\n 200:\n description: OK.\n \"\"\"\n data = parser.parse_args()\n student_teacher_logic.remove_request_and_link(data['teacher_id'], data['student_id'])\n return ok()\n","repo_name":"Chmiel123/self-learning-backend","sub_path":"src/resources/account/student_teacher_resource.py","file_name":"student_teacher_resource.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41819658909","text":"import copy\r\nimport numpy as np\r\n\r\ndef boot(client_data, sub_rate):\r\n n = len(client_data)\r\n rand_ind = np.random.choice(n, np.int(np.floor(n * sub_rate)))\r\n sub_data = copy.deepcopy(client_data)\r\n X = client_data.X\r\n y = client_data.y\r\n sub_data.X = X[rand_ind]\r\n sub_data.y = y[rand_ind]\r\n return sub_data\r\n\r\ndef boot_agg(params, sub_params, sub_rate):\r\n final_params = [np.zeros(len(p)) for p in params]\r\n for i, (param, sub_param) in enumerate(zip(params, sub_params)):\r\n final_params[i] = (param - sub_rate * sub_param) / (1 - sub_rate)\r\n return 
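End to end, the Transformation class above turns a list of {key, value} records into value-sorted tuples. A short usage check with made-up data, assuming the class is in scope:

```python
t = Transformation()
data = [
    {'key': 'b', 'value': 2},
    {'key': 'a', 'value': 3},
    {'key': 'c', 'value': 1},
]
print(t.execute(data))  # -> [('c', 1), ('b', 2), ('a', 3)]
```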
final_params","repo_name":"zhaolotelli/FedLearn","sub_path":"flearn/utils/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"72137695253","text":"\"\"\"la \\ al lado de una comilla doble sirve para que aparezca la comilla doble en el compilador \"\"\"\nejemplo1 = \"\\\"Ultimate python\\\"\" \n\n\n\"\"\"si queremos ejecutar el backslash utilizamos otro backslash\"\"\"\nejemplo2 = \"\\\"\\\\Ultimate python\\\"\" \n\n\n\"\"\"el backslash n o \\n dara un espaciado al caracter \"\"\"\nejemplo3 = \"\\\"Ultimate \\npython\\\"\" \n\n\"\"\"y backslash comillas simples\"\"\"\nejemplo4 = '\\'Ultimate python\\''\n\nprint(ejemplo4)\n\n\n ","repo_name":"javitmx/pruebagit","sub_path":"Curso Basico.py","file_name":"Curso Basico.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2390123186","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\ndb = SQLAlchemy(app)\n\n# https://flask-sqlalchemy.palletsprojects.com/en/2.x/\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:4556029@localhost:3307/test'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///relationships.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\nclass Person(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n\t# Specify a forward reference by relationship, backref specifies a backreference, and form a two-way reference relationship\n pets = db.relationship('Pet', backref='owner')\n\n\n\n\nclass Pet(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n owner_id = db.Column(db.Integer, db.ForeignKey('person.id'))\n# owner_id = db.Column(db.Integer, db.ForeignKey(\"person.id\"), default=2)\n\n\ndef display_pet_with_owner():\n result = db.engine.execute(\n 'select person.name , pet.name from person join pet on person.id = pet.owner_id')\n # print(result)\n # print(type(result))\n for r in result:\n # print(type(r))\n print(r)\n print(r[0], r[1])\n\n# from one_to_many import display_pet_with_owner\n# display_pet_with_owner()\n\ndef display_person():\n result = db.engine.execute(\"select owner_id from pet where name='dog'\")\n # print(result)\n # print(type(result))\n a = 0\n for r in result:\n a = r[0]\n \n print(a)\n print(f\"select name from person where id={a}\")\n result = db.engine.execute(f\"select name from person where id={a}\")\n for r in result:\n # print(type(r))\n print(r)\n print(r[0])\n\n# from one_to_many import display_person\n# display_person()\n\n","repo_name":"NoumanShah042/Python-Flask","sub_path":"Flask SQLAlchemy/2_one_to_many relationship/one_to_many.py","file_name":"one_to_many.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24761077784","text":"import pandas as pd\n\n# Função para filtrar os dados por mês específico\ndef filtrar_dados_por_mes(data, data_especifica, nome_coluna_data):\n # Converte a coluna de data para o formato de data do pandas\n data[nome_coluna_data] = pd.to_datetime(data[nome_coluna_data], format='%d/%m/%Y')\n data_especifica = pd.to_datetime(data_especifica, format='%d/%m/%Y')\n primeiro_dia_mes = data_especifica.replace(day=1)\n ultimo_dia_mes = primeiro_dia_mes + pd.offsets.MonthEnd(0)\n return 
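one_to_many.py above answers both queries with raw SQL, but the declared relationship/backref already gives the same joins for free. A sketch of the ORM route, assuming the Person/Pet models above and a reachable database:

```python
with app.app_context():
    db.create_all()
    alice = Person(name='Alice')
    rex = Pet(name='Rex', owner=alice)   # 'owner' comes from the backref
    db.session.add_all([alice, rex])
    db.session.commit()

    for pet in alice.pets:               # forward side of the relationship
        print(pet.name, 'belongs to', pet.owner.name)
```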
data[(data[nome_coluna_data] >= primeiro_dia_mes) & (data[nome_coluna_data] <= ultimo_dia_mes)].copy()\n\n# Função para limpar os dados, convertendo as colunas de temperatura e umidade para valores numéricos\ndef limpar_dados(dataframe):\n dataframe = dataframe.copy() # Cria uma cópia do dataframe\n\n colunas_temperatura = [\n 'TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)',\n 'TEMPERATURA DO PONTO DE ORVALHO (°C)',\n 'TEMPERATURA MÁXIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA MÍNIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MAX. NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MIN. NA HORA ANT. (AUT) (°C)'\n ]\n\n colunas_umidade = [\n 'UMIDADE REL. MAX. NA HORA ANT. (AUT) (%)',\n 'UMIDADE REL. MIN. NA HORA ANT. (AUT) (%)'\n ]\n\n # Verifica se as colunas estão presentes no DataFrame antes de aplicar a conversão\n colunas_presentes = list(dataframe.columns)\n colunas_temperatura_presentes = [coluna for coluna in colunas_temperatura if coluna in colunas_presentes]\n colunas_umidade_presentes = [coluna for coluna in colunas_umidade if coluna in colunas_presentes]\n\n # Converte as colunas de temperatura e umidade para valores numéricos, com erros sendo substituídos por NaN\n dataframe.loc[:, colunas_temperatura_presentes + colunas_umidade_presentes] = dataframe.loc[:, colunas_temperatura_presentes + colunas_umidade_presentes].apply(pd.to_numeric, errors='coerce')\n\n return dataframe\n\n# Função para adicionar colunas indicando se a temperatura está acima de 28°C ou não\ndef adicionar_coluna_acima_de_28(dataframe):\n dataframe = dataframe.copy() # Cria uma cópia do dataframe\n\n colunas_temperatura = [\n 'TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)',\n 'TEMPERATURA DO PONTO DE ORVALHO (°C)',\n 'TEMPERATURA MÁXIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA MÍNIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MAX. NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MIN. NA HORA ANT. (AUT) (°C)'\n ]\n\n colunas_umidade = [\n 'UMIDADE REL. MAX. NA HORA ANT. (AUT) (%)',\n 'UMIDADE REL. MIN. NA HORA ANT. (AUT) (%)'\n ]\n\n # Itera sobre as colunas de temperatura e umidade\n for coluna in colunas_temperatura:\n # Cria o nome da nova coluna indicando se a temperatura está acima de 28°C\n nome_coluna_acima_de_28 = f\"{coluna} - Acima de 28\"\n # Adiciona uma nova coluna ao dataframe com valores booleanos indicando se a temperatura está acima de 28°C\n dataframe[nome_coluna_acima_de_28] = dataframe[coluna] > 28\n\n return dataframe\n\n# Função principal\ndef main():\n arquivo_csv = 'C:\\\\Users\\\\anjos\\\\Desktop\\\\Meteorologia\\\\INMET01-01-2023_A_31-05-2023.CSV'\n\n # Lê o arquivo CSV e armazena as informações em um DataFrame\n informacoes_csv = pd.read_csv(arquivo_csv, delimiter=';', encoding='latin-1', skiprows=9, decimal=',')\n\n nome_coluna_data = 'DATA'\n\n while True:\n print(\"Escolha uma opção:\")\n print(\"1. Filtrar dados por mês específico\")\n print(\"2. 
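A tiny check of filtrar_dados_por_mes above on synthetic rows — any date inside January 2023 selects exactly the January records (assumes the function is in scope):

```python
import pandas as pd

df = pd.DataFrame({'DATA': ['05/01/2023', '20/01/2023', '03/02/2023'],
                   'valor': [1, 2, 3]})
janeiro = filtrar_dados_por_mes(df, '10/01/2023', 'DATA')
print(janeiro['valor'].tolist())  # -> [1, 2]
```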
Sair\")\n\n opcao = input(\"Opção: \")\n\n if opcao == '1':\n # Solicita a data específica ao usuário\n data_especifica = input(\"Digite a data no formato dd/mm/yyyy ou dd-mm-yyyy: \")\n\n # Filtra os dados do DataFrame para o mês específico\n dados_filtrados = filtrar_dados_por_mes(informacoes_csv, data_especifica, nome_coluna_data)\n\n if dados_filtrados.empty:\n print(\"Não há datas encontradas.\")\n continue\n\n # Limpa os dados do DataFrame\n dados_limpos = limpar_dados(dados_filtrados)\n # Adiciona colunas indicando se a temperatura está acima de 28°C ou não\n dados_com_coluna_acima_de_28 = adicionar_coluna_acima_de_28(dados_limpos)\n\n colunas_temperatura = [\n 'TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)',\n 'TEMPERATURA DO PONTO DE ORVALHO (°C)',\n 'TEMPERATURA MÁXIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA MÍNIMA NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MAX. NA HORA ANT. (AUT) (°C)',\n 'TEMPERATURA ORVALHO MIN. NA HORA ANT. (AUT) (°C)'\n ]\n\n colunas_umidade = [\n 'UMIDADE REL. MAX. NA HORA ANT. (AUT) (%)',\n 'UMIDADE REL. MIN. NA HORA ANT. (AUT) (%)'\n ]\n\n # Calcula e exibe as estatísticas das colunas de temperatura e umidade\n for coluna in colunas_temperatura + colunas_umidade:\n temperatura_media = dados_limpos[coluna].mean()\n temperatura_minima = dados_limpos[coluna].min()\n temperatura_maxima = dados_limpos[coluna].max()\n\n print(f\"\\n{coluna}\")\n print(f\"Média: {temperatura_media:.2f}\")\n print(f\"Temperatura mínima: {temperatura_minima:.2f}\")\n print(f\"Temperatura máxima: {temperatura_maxima:.2f}\")\n\n # Obtém a temperatura máxima no dia 10/01/2023\n data_especifica = pd.to_datetime(data_especifica, format='%d/%m/%Y')\n temperatura_maxima_dia_10_01_2023 = dados_limpos.loc[dados_limpos[nome_coluna_data] == data_especifica, 'TEMPERATURA MÁXIMA NA HORA ANT. (AUT) (°C)'].values[0]\n print(f\"\\nTemperatura máxima no dia 10/01/2023: {temperatura_maxima_dia_10_01_2023:.2f}\")\n\n print(\"\\nDias com temperatura acima de 28°C:\")\n for coluna in colunas_temperatura:\n nome_coluna_acima_de_28 = f\"{coluna} - Acima de 28\"\n datas_acima_de_28 = dados_com_coluna_acima_de_28.loc[\n dados_com_coluna_acima_de_28[nome_coluna_acima_de_28], nome_coluna_data]\n datas_unicas = datas_acima_de_28.drop_duplicates()\n\n print(f\"\\nDias com temperatura acima de 28°C - {coluna}:\")\n if datas_unicas.empty:\n print(\"Não há datas encontradas.\")\n else:\n datas_formatadas = datas_unicas.dt.strftime('%d/%m/%Y')\n print(datas_formatadas.to_string(index=False))\n\n # Obtém as datas da primeira e última leitura dos dados\n primeira_data_leitura = dados_limpos[nome_coluna_data].min()\n ultima_data_leitura = dados_limpos[nome_coluna_data].max()\n print(f\"\\nData da primeira leitura dos dados: {primeira_data_leitura.strftime('%d/%m/%Y')}\")\n print(f\"Data da última leitura dos dados: {ultima_data_leitura.strftime('%d/%m/%Y')}\")\n\n elif opcao == '2':\n break\n\n else:\n print(\"Opção inválida. 
Digite novamente.\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"do2anjos/PGE-METEOROLOGIA-TESTE","sub_path":"DATA FRAME.py","file_name":"DATA FRAME.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71334181333","text":"\r\nfor n in range(2, 301):\r\n    is_prime = True\r\n    for d in range(2, n//2 + 1):\r\n        if n % d == 0:\r\n            is_prime = False\r\n            break\r\n    if is_prime:\r\n        print(n, \"is prime number\")\r\n    else:\r\n        print(n, \"is not prime\")\r\n","repo_name":"arunekuriakose/MyPython","sub_path":"prime odd even.py","file_name":"prime odd even.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73216367253","text":"import pickle\r\nimport os.path\r\nimport pandas as pd\r\nimport MessageHeandler\r\nfrom email.mime.text import MIMEText\r\nimport base64\r\n\r\n\r\nclass Loader:\r\n \r\n def __init__(self,service,directory,label_ids):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n service : Authorized Gmail API service instance.\r\n directory : User directory.\r\n label_ids : User LabelIds.\r\n \"\"\"\r\n self.service = service\r\n self.directory = directory\r\n self.user_label_ids = label_ids\r\n if(os.path.exists(self.directory+'/spam/list')):\r\n with open(self.directory+'/spam/list','rb') as file:\r\n self.spam_ids = pickle.load(file)\r\n else:\r\n self.spam_ids = set()\r\n \r\n \r\n def list_messages(self):\r\n \"\"\"\r\n List all Messages of the user's mailbox.\r\n \r\n Returns\r\n -------\r\n List of Messages. Note that the returned list contains Message IDs, \r\n you must use get with the appropriate ID to get the details of a Message.\r\n \r\n \"\"\"\r\n \r\n response = self.service.users().messages().list(userId='me').execute()\r\n messages = []\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n while 'nextPageToken' in response:\r\n page_token = response['nextPageToken']\r\n response = self.service.users().messages().list(userId='me',pageToken=page_token).execute()\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n \r\n return messages \r\n\r\n def get_message(self,msg_id):\r\n \"\"\"\r\n Get a Message with given ID.\r\n \r\n Parameters\r\n ----------\r\n msg_id: The ID of the Message.\r\n \r\n Returns\r\n -------\r\n message: Message.\r\n \r\n \"\"\"\r\n message = self.service.users().messages().get(userId='me', id=msg_id,format = 'full').execute()\r\n return message\r\n \r\n def save_message(self,msg_id):\r\n \"\"\"\r\n Save a Message if it is not spam and it has not been saved yet.\r\n\r\n Parameters\r\n ----------\r\n msg_id: The ID of the Message.\r\n \r\n \"\"\"\r\n if(not self.spam(msg_id)):\r\n if(not os.path.exists(self.directory+'/mails/mail_{}'.format(msg_id))):\r\n msg = self.get_message(msg_id)\r\n if(MessageHeandler.is_user_msg(msg)):\r\n msg_data = MessageHeandler.pack_message(msg,self.user_label_ids) \r\n with open(self.directory+'/mails/mail_{}'.format(msg_id),'wb') as file:\r\n pickle.dump(msg_data,file)\r\n else:\r\n self.spam_ids.add(msg_id)\r\n \r\n def save_all(self):\r\n \"\"\"\r\n Load all Messages from user's mailbox and save them on disk. 
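Trial division only needs to test divisors up to √n, so the loop above can be tightened considerably — a standalone sketch, not from the original repo:

```python
import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    for d in range(2, math.isqrt(n) + 1):  # divisors beyond sqrt(n) are redundant
        if n % d == 0:
            return False
    return True

print([n for n in range(2, 30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```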
\r\n \r\n \"\"\"\r\n message_list = self.list_messages()\r\n for msg_info in message_list:\r\n self.save_message(msg_info['id'])\r\n self.save_spam() \r\n \r\n def load_all(self):\r\n \"\"\"\r\n Load all Messages from disk to DataFrame.\r\n \r\n Returns\r\n -------\r\n data : DataFrame with user Messages.\r\n\r\n \"\"\"\r\n data = pd.DataFrame()\r\n for mail in os.listdir(self.directory+'/mails'):\r\n with open(self.directory+'/mails/'+mail,'rb') as file:\r\n cur_mail = pd.DataFrame.from_dict(pickle.load(file))\r\n data = data.append(cur_mail,ignore_index = True)\r\n return data \r\n \r\n def spam(self,msg_id):\r\n \"\"\"\r\n Check whether Message ID contains in Spam IDs.\r\n\r\n Parameters\r\n ----------\r\n msg_id: The ID of the Message.\r\n \r\n Returns\r\n -------\r\n True if msg_id contains in spam, in the other case returns False.\r\n\r\n \"\"\"\r\n if(msg_id in self.spam_ids):\r\n return True\r\n return False\r\n \r\n def save_spam(self):\r\n \"\"\"\r\n Save Spam IDs on disk for reusage purpose.\r\n \r\n \"\"\"\r\n with open(self.directory+'/spam/list','wb') as file:\r\n pickle.dump(self.spam_ids,file)\r\n \r\n def send_message(self, message):\r\n \"\"\"\r\n Send an email Message.\r\n\r\n Parameters\r\n ----------\r\n message: Message to be sent.\r\n \r\n Returns\r\n -------\r\n Sent Message.\r\n \"\"\"\r\n message = self.service.users().messages().send(userId='me', body=message).execute()\r\n return message\r\n \r\n def create_message(message_text,subject,address = 'apchikov@miem.hse.ru'):\r\n \"\"\"\r\n Create a Message for an email.\r\n\r\n Parameters\r\n ----------\r\n message_text: Text of the email Message.\r\n subject: Subject of the email Message.\r\n address: Email address.\r\n \r\n Returns\r\n -------\r\n An object containing a base64url encoded email object.\r\n \r\n \"\"\"\r\n message = MIMEText(message_text)\r\n message['To'] = address\r\n message['Subject'] = subject\r\n raw_message = base64.urlsafe_b64encode(message.as_string().encode(\"utf-8\"))\r\n return {'raw': raw_message.decode(\"utf-8\")}\r\n \r\n def create_label(self, label_object):\r\n \"\"\"\r\n Creates a new label within user's mailbox.\r\n\r\n Parameters\r\n ----------\r\n label_object: label to be added.\r\n \r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n \r\n return self.service.users().labels().create(userId='me',body=label_object).execute()\r\n \r\n def make_label(label_name, mlv='show', llv='labelShow'):\r\n \"\"\"\r\n Create new Label object.\r\n\r\n Parameters\r\n ----------\r\n label_name: The name of the Label.\r\n mlv: Message list visibility, show/hide.\r\n llv: Label list visibility, labelShow/labelHide.\r\n \r\n Returns\r\n -------\r\n Created Label object.\r\n \"\"\"\r\n label = {'messageListVisibility': mlv,\r\n 'name': label_name,\r\n 'labelListVisibility': llv}\r\n return label \r\n \r\n \r\n \r\n\r\n\r\n\r\n ","repo_name":"alex-hse-repository/Summer-practice","sub_path":"Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30096178990","text":"#!/bin/py\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nimport pylab\nimport matplotlib.mlab as mlab\nimport matplotlib.ticker as ticker\nfrom scipy import stats\nimport sys\nimport IPython\n# local files that will be imported\nimport prior\nimport likelihood\nimport numpy as np\n\ndef normalizer(y,x):\n term = np.trapz(y,x)\n new = [val/term for val in 
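Loader.create_message above boils down to MIME building plus URL-safe base64; the same round trip can be tried standalone with no Gmail service (the address and texts here are placeholders):

```python
import base64
from email.mime.text import MIMEText

msg = MIMEText("Hello from the API")
msg['To'] = 'someone@example.com'
msg['Subject'] = 'Test subject'
raw = base64.urlsafe_b64encode(msg.as_string().encode('utf-8')).decode('utf-8')

# The Gmail API expects {'raw': raw} as the body of users().messages().send();
# decoding it recovers the original RFC 2822 message.
print(base64.urlsafe_b64decode(raw).decode('utf-8').splitlines()[-1])  # -> Hello from the API
```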
y]\n print(np.trapz(new,x))\n return new\n\n\n\n# construct map of prior functions, to plot below\nfdict = {'prior_p': prior.prior_p,'prior_U': prior.prior_U,'prior_C': prior.prior_C}\n\n# -------------------------------------------------------------\n# subroutine that generates a .pdf file plotting a quantity\n# -------------------------------------------------------------\ndef plotter(chain,quant,xmin=None,xmax=None):\n from math import log, pi\n bins = np.linspace(np.min(chain), np.max(chain), 200)\n qkde = stats.gaussian_kde(chain)\n qpdf = qkde.evaluate(bins)\n\n # plot posterior\n qpdf = normalizer(qpdf, bins)\n plt.figure()\n plt.plot(bins, qpdf, linewidth=3, label=\"Post\")\n\n # plot prior (requires some cleverness to do in general)\n qpr = [fdict['prior_'+quant](x) for x in bins]\n qpri = [np.exp(x) for x in qpr]\n qpri=qpri/np.linalg.norm(qpri)\n qpri = normalizer(qpri,bins)\n plt.plot(bins, qpri, linewidth=3, label=\"Prior\")\n\n # user specified bounds to x-range:\n if(xmin != None and xmax != None):\n bounds = np.array([xmin, xmax])\n plt.xlim(bounds)\n\n plt.xlabel(quant, fontsize=30)\n plt.ylabel('$\\pi('+quant+')$', fontsize=30)\n plt.legend(loc='upper left')\n plt.savefig(quant+'_post.pdf', bbox_inches='tight')\n\n\n# -------------------------------------------------------------\n# MCMC sampling Function\n# -------------------------------------------------------------\n\nclass BayesianRichardsonExtrapolation(object):\n \"Computes the Bayesian Richardson extrapolation posterior log density.\"\n\n def __call__(self, params, dtype=np.double):\n q, C, p = params\n\n from math import log\n\n return (\n prior.prior(q,C,p) +\n likelihood.likelihood(q,C,p)\n )\n\n# -------------------------------------------------------------\n# Main Function\n# -------------------------------------------------------------\n#\n# Stop module loading when imported. 
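plotter() above combines a Gaussian KDE with trapezoid-rule normalization; the core of that estimate in isolation, on a synthetic sample rather than the MCMC chain:

```python
import numpy as np
from scipy import stats

sample = np.random.normal(0.0, 1.0, 5000)
grid = np.linspace(sample.min(), sample.max(), 200)
pdf = stats.gaussian_kde(sample).evaluate(grid)
pdf /= np.trapz(pdf, grid)   # same normalization as normalizer() above
print(np.trapz(pdf, grid))   # ~1.0
```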
Otherwise continue running.\nif __name__ != '__main__':\n raise SystemExit(0)\n\n# Example of sampling Bayesian Richardson extrapolation density using emcee\nfrom emcee import EnsembleSampler\nfrom math import ceil, floor, sqrt\n\n#\n# initalize the Bayesian Calibration Procedure\n#\nbre = BayesianRichardsonExtrapolation()\n\nprint(\"\\nInitializing walkers\")\nnwalk = 100\n\n# initial guesses for the walkers starting locations\nguess_q = 1.16389876649\nguess_c = 0 #No reason to think c is positive or negative\nguess_p = 6 #p is between 1 and 10\n\nparams0 = np.tile([guess_q, guess_c, guess_p], nwalk).reshape(nwalk, 3)\nparams0.T[0] += np.random.rand(nwalk) * 0.025 # Perturb q\nparams0.T[1] += np.random.rand(nwalk) * 0.1 # Perturb C\nparams0.T[2] += np.random.rand(nwalk) * 1.5 # Perturb p...\nparams0.T[2] = np.absolute(params0.T[2]) # ...and force >= 0\n\nprint(\"\\nInitializing the sampler and burning in walkers\")\ns = EnsembleSampler(nwalk, params0.shape[-1], bre, threads=4)\npos, prob, state = s.run_mcmc(params0, 5000)\ns.reset()\n\nprint(\"\\nSampling the posterior density for the problem\")\ns.run_mcmc(pos, 10000)\nprint(\"Mean acceptance fraction was %.3f\" % s.acceptance_fraction.mean())\n\n#\n# 1d Marginals\n#\nprint(\"\\nDetails for posterior one-dimensional marginals:\")\ndef textual_boxplot(label, unordered, header):\n n, d = np.size(unordered), np.sort(unordered)\n if (header): print((10*\" %15s\") % (\"\", \"min\", \"P5\", \"P25\", \"P50\", \"P75\", \"P95\", \"max\", \"mean\", \"stddev\"))\n print((\" %15s\" + 9*\" %+.8e\") % (label,\n d[0],\n d[[floor(1.*n/20), ceil(1.*n/20)]].mean(),\n d[[floor(1.*n/4), ceil(1.*n/4)]].mean(),\n d[[floor(2.*n/4), ceil(2.*n/4)]].mean(),\n d[[floor(3.*n/4), ceil(3.*n/4)]].mean(),\n d[[floor(19.*n/20), ceil(19.*n/20)]].mean(),\n d[-1],\n d.mean(),\n d.std()))\n #return d[[floor(1.*n/20), ceil(1.*n/20)]].mean(), d[[floor(17.*n/20), ceil(17.*n/20)]].mean()\n return d.mean(), 2*d.std()\n\nqm, qs = textual_boxplot(\"q\", s.flatchain[:,0], header=True)\ncm, cs = textual_boxplot(\"C\", s.flatchain[:,1], header=False)\npm, ps = textual_boxplot(\"p\", s.flatchain[:,2], header=False)\n\n#----------------------------------\n# FIGURES: Marginal posterior(s)\n#----------------------------------\nprint(\"\\nPrinting PDF output\")\n\nplotter(s.flatchain[:,0],'U')\nplotter(s.flatchain[:,1],'C')\nplotter(s.flatchain[:,2],'p')\n\n#----------------------------------\n# FIGURE: Joint posterior(s)\n#----------------------------------\n\nqbins = np.linspace(np.min(s.flatchain[:,0]), np.max(s.flatchain[:,0]), 200)\nCbins = np.linspace(np.min(s.flatchain[:,1]), np.max(s.flatchain[:,1]), 200)\npbins = np.linspace(np.min(s.flatchain[:,2]), np.max(s.flatchain[:,2]), 200)\n\nqkde = stats.gaussian_kde(s.flatchain[:,0])\nCkde = stats.gaussian_kde(s.flatchain[:,1])\npkde = stats.gaussian_kde(s.flatchain[:,2])\n\nqpdf = qkde.evaluate(qbins)\nCpdf = Ckde.evaluate(Cbins)\nppdf = pkde.evaluate(pbins)\n\n# TODO FIX ME\nqbounds = np.array([qm-qs,qm+qs])\nCbounds = np.array([cm-cs,cm+cs])\npbounds = np.array([pm-ps,pm+ps])\n\nqticks = np.linspace(qbounds[0], qbounds[1], 3)\nCticks = np.linspace(Cbounds[0], Cbounds[1], 3)\npticks = np.linspace(pbounds[0], pbounds[1], 5)\n\nplt.figure()\n\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nformatter = FormatStrFormatter('%5.4f')\nformatter2 = FormatStrFormatter('%5.f')\n\npylab.subplot(3,3,1)\nplt.plot(qbins, qpdf, linewidth=2, color=\"k\", 
label=\"Post\")\n\nplt.xlim(qbounds)\npylab.gca().set_xticks(qticks)\npylab.gca().xaxis.set_major_formatter(formatter)\npylab.gca().xaxis.set_minor_formatter(formatter)\npylab.gca().set_yticks([])\nplt.xlabel('$q$', fontsize=24)\n\npylab.subplot(3,3,2)\nH, qe, Ce = np.histogram2d(s.flatchain[:,0], s.flatchain[:,1], bins=(200,200))\n\nqv = 0.5*(qe[0:-1] + qe[1:len(qe)]);\nCv = 0.5*(Ce[0:-1] + Ce[1:len(Ce)]);\n\nplt.contour(Cv,qv,H,5,colors='k')\n\nplt.xlim(Cbounds)\npylab.gca().set_xticks(Cticks)\npylab.gca().set_xticklabels([])\n\n#plt.ylim(qbounds)\npylab.gca().set_yticks(qticks)\npylab.gca().set_yticklabels([])\n\npylab.subplot(3,3,3)\nH, qe, pe = np.histogram2d(s.flatchain[:,0], s.flatchain[:,2], bins=(200,200))\n\nqv = 0.5*(qe[0:-1] + qe[1:len(qe)]);\npv = 0.5*(pe[0:-1] + pe[1:len(pe)]);\n\nplt.contour(pv,qv,H,5,colors='k')\n\nplt.xlim(pbounds)\npylab.gca().set_xticks(pticks)\npylab.gca().set_xticklabels([])\n\nplt.ylim(qbounds)\npylab.gca().set_yticks(qticks)\npylab.gca().set_yticklabels([])\n\npylab.subplot(3,3,5)\nplt.plot(Cbins, Cpdf, linewidth=2, color=\"k\",label=\"Post\")\npylab.gca().xaxis.set_major_formatter(formatter)\npylab.gca().xaxis.set_minor_formatter(formatter)\npylab.gca().set_yticks([])\nplt.xlabel('$C$', fontsize=24)\n\nplt.xlim(Cbounds)\npylab.gca().set_xticks(Cticks)\n\npylab.subplot(3,3,6)\nH, Ce, pe = np.histogram2d(s.flatchain[:,1], s.flatchain[:,2], bins=(200,200))\n\nCv = 0.5*(Ce[0:-1] + Ce[1:len(Ce)]);\npv = 0.5*(pe[0:-1] + pe[1:len(pe)]);\n\nplt.contour(pv,Cv,H,5,colors='k')\n\nplt.xlim(pbounds)\npylab.gca().set_xticks(pticks)\npylab.gca().set_xticklabels([])\n\nplt.ylim(Cbounds)\npylab.gca().set_yticks(Cticks)\npylab.gca().set_yticklabels([])\n\npylab.subplot(3,3,9)\nplt.plot(pbins, ppdf, linewidth=2, color=\"k\", label=\"Post\")\npylab.gca().xaxis.set_major_formatter(formatter2)\npylab.gca().xaxis.set_minor_formatter(formatter2)\npylab.gca().set_yticks([])\nplt.xlabel('$p$', fontsize=24)\n\nplt.xlim(pbounds)\npylab.gca().set_xticks(pticks)\nplt.savefig('joint_post.pdf', bbox_inches='tight')\n","repo_name":"jjd9/CSE_397_Team","sub_path":"Problem1/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":8020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36001748424","text":"import csv\n\nimport pandas as pd\n\n\ndef learning_csv():\n data = open(\"resources/files/example.csv\", encoding=\"utf-8\")\n csv_data = csv.reader(data)\n data_lines = list(csv_data)\n for i in data_lines[:5]:\n print(i)\n # using list\n emails = list(a[3] for a in data_lines[1:5])\n print(emails)\n # using map\n names = map(lambda x: x[2], data_lines[1:5])\n print(list(names))\n\n # WRITING A CSV FILE\n file_to_output = open(\"resources/files/saving-example.csv\", mode=\"w\", newline=\"\")\n csv_writer = csv.writer(file_to_output, delimiter=\",\")\n csv_writer.writerow([\"a\", \"b\", \"c\"])\n csv_writer.writerows([[\"1\", \"2\", \"3\"], [\"4\", \"5\", \"6\"]])\n file_to_output.close()\n\n # APPENDING A CSV FILE\n file_to_output = open(\"resources/files/saving-example.csv\", mode=\"a\", newline=\"\")\n csv_writer = csv.writer(file_to_output, delimiter=\",\")\n csv_writer.writerow([\"x\", \"y\", \"z\"])\n file_to_output.close()\n\n data = open(\"resources/files/saving-example.csv\", encoding=\"utf-8\")\n csv_data = csv.reader(data)\n data_lines = list(csv_data)\n print(data_lines)\n\n\ndef learning_panda():\n # Replace \"filename.csv\" with the name of your CSV file\n df = 
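The joint-marginal panels above all follow one recipe: 2-D histogram the two chains, take bin centres, contour. A minimal standalone version with synthetic chains:

```python
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt

x = np.random.normal(size=10000)
y = 0.5 * x + np.random.normal(scale=0.5, size=10000)
H, xe, ye = np.histogram2d(x, y, bins=(200, 200))
xv = 0.5 * (xe[:-1] + xe[1:])   # bin centres, as in the qv/Cv/pv lines above
yv = 0.5 * (ye[:-1] + ye[1:])
plt.contour(yv, xv, H, 5, colors='k')
plt.savefig('joint_demo.pdf', bbox_inches='tight')
```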
pd.read_csv(\"resources/files/example.csv\")\n # Set the options to display all rows and columns\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n # Print the first 5 rows of the DataFrame\n\n print(df.head())\n print(df.tail())\n\n print(\"Dataframe to list\")\n dataframe_list = df.values.tolist()\n\n for index, row in df.head(5).iterrows():\n for key, value in row.items():\n print(f\"{key}: {value}\")\n emails = list(df.head(5)[\"email\"])\n print(emails)\n\n # Create a DataFrame\n df = pd.DataFrame({'Name': ['Alice', 'Bob', 'Charlie'], 'Age': [25, 30, 35]})\n\n # Write the DataFrame to a CSV file\n df.to_csv(\"resources/files/panda-saving-example.csv\", index=False)\n # Append the new data to an existing CSV file\n\n df = pd.DataFrame({'Name': ['Jessica'], 'Age': [22]})\n df.to_csv(\"resources/files/panda-saving-example.csv\", mode='a', header=False, index=False)\n\n\ndef divider(title: str):\n print(f\"==========={title.upper()}===========\")\n\n\nif __name__ == '__main__':\n divider(\"learning csv\")\n learning_csv()\n\n divider(\"learning panda\")\n learning_panda()\n","repo_name":"emc255/python-pdf-and-csv-files","sub_path":"csv_main.py","file_name":"csv_main.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"70700413014","text":"from PIL import Image, ImageDraw, ImageFont\n\nimport math\n\nchars = \"#$@B*og?-_+:,\\\^`'. \"\n# chars = \"#Wo- \"[::-1]\ncharArray = list(chars)\ncharLength = len(charArray)\ninterval = charLength/256\n\nscaleFactor = 1\n\n\noneCharWidth = 5\noneCharHeight = 12\n\ndef getChar(inputInt):\n return charArray[math.floor(inputInt*interval)]\n\ntext_file = open(\"textOutput.txt\", \"w\")\n\nim = Image.open(\"input.jpg\")\n\nwidth, height = im.size\nim = im.resize((int(scaleFactor*width), int(scaleFactor*height*(oneCharWidth/oneCharHeight))), Image.NEAREST)\nim = im.convert('RGB')\nwidth, height = im.size\npix = im.load()\n\nfor i in range(height):\n for j in range(width):\n r, g, b = pix[j, i]\n h = int(r/3 + g/3 + b/3)\n pix[j, i] = (h, h, h)\n text_file.write(getChar(h))\n\n text_file.write('\\n')\n","repo_name":"JakubZojdzik/ASCII_Generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"27535341956","text":"\nf = open('randomnumbers.txt','r')\n\nt = [0]*6\n\nfor x in f: # x is a line in the file f\n value = int(x)\n\n t[value-1] += 1\n\nf.close()\n\nfor i in range(6):\n print('The value {} appears {} times'.format(i+1, t[i]))","repo_name":"T316GAVI/T316GAVI_2015","sub_path":"06_fileread.py","file_name":"06_fileread.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"42919045911","text":"#!/usr/bin/env python\nimport os, re, requests, sys, urllib\n\nif len(sys.argv) > 1:\n write_location = \" \".join(sys.argv[1:])\nelse:\n write_location = \"\"\n\nsite_url = \"https://downloads.khinsider.com\"\nalbumarr = []\n\nwhile(len(albumarr) == 0):\n gamename = input(\"Enter search query: \").replace(\" \", \"+\")\n searchpage = requests.get(site_url + \"/search?search=\" + gamename)\n albumarr = re.findall(r\"<a href=\\\"/game-soundtracks/album/[^\\\"]*\\\">[^<]*</a>\", searchpage.text)\n if(len(albumarr) == 0):\n print(\"No search results found!\")\n\nif searchpage.history and searchpage.url != searchpage.history[0].url:\n #redirect detected, no need to choose an album\n pagetitle = searchpage.text[searchpage.text.find('<title>') + 7 : searchpage.text.find('</title>')]\n foldername = re.findall(r\".*MP3 - Download \", pagetitle)[0][:-16]\n albumpage = searchpage\nelse:\n albumlinklib = {}\n for x, album in enumerate(albumarr):\n albumlinklib[x + 1] = [album[album.find(\"\\\">\")+2:-4], site_url + album[9:album.find(\"\\\">\")]]\n\n for key in albumlinklib:\n print(\"[{}] {}\".format(key, albumlinklib[key][0]))\n\n gameentry = int(input(\"Enter selection number: \"))\n foldername = albumlinklib[gameentry][0]\n albumpage = requests.get(albumlinklib[int(gameentry)][1])\n\nsongarr = re.findall(r\"<a href=\\\"/game-soundtracks/album/[^\\\"]*\\\">[^<]*</a>\", albumpage.text)\n\nsonglinklib = {}\nfor x, song in enumerate(songarr):\n songlinklib[x + 1] = [song[song.find(\"\\\">\")+2:-4], site_url + song[9:song.find(\"\\\">\")]]\n\nos.mkdir(write_location + \"/\" +foldername)\n\nfor key in songlinklib:\n title = \"({}) - {}.mp3\".format(key, songlinklib[key][0])\n link = songlinklib[key][1]\n print(\"Now downloading \" + title)\n dllink = re.findall(r\"https://.*com/.*mp3\", requests.get(link).text)\n with open(write_location + \"/\" + foldername + \"/\" + title, \"wb\") as f:\n with urllib.request.urlopen(dllink[0]) as r:\n f.write(r.read())\n","repo_name":"JRCdev/vgost-cli","sub_path":"vgost-cli.py","file_name":"vgost-cli.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"6088827449","text":"from .. import hooks\nfrom .. import util\nfrom ..extensions import db\nfrom ..models import Post, Setting, get_settings, Context\nfrom ..tasks import get_queue, async_app_context\n\nfrom flask.ext.login import login_required\nfrom flask import (\n request, redirect, url_for, Blueprint, current_app,\n)\n\nimport requests\nimport urllib\nimport datetime\n\nPERMALINK_RE = util.INSTAGRAM_RE\n\n\ninstagram = Blueprint('instagram', __name__)\n\n\ndef register(app):\n app.register_blueprint(instagram)\n hooks.register('create-context', create_context)\n hooks.register('post-saved', send_to_instagram)\n\n\n@instagram.route('/authorize_instagram')\n@login_required\ndef authorize_instagram():\n redirect_uri = url_for('.authorize_instagram', _external=True)\n\n code = request.args.get('code')\n if not code:\n # redirect to instagram authorization page\n params = {\n 'client_id': get_settings().instagram_client_id,\n 'redirect_uri': redirect_uri,\n 'response_type': 'code',\n 'scope': 'likes comments',\n }\n return redirect('https://api.instagram.com/oauth/authorize/?'\n + urllib.parse.urlencode(params))\n\n params = {\n 'client_id': get_settings().instagram_client_id,\n 'client_secret': get_settings().instagram_client_secret,\n 'grant_type': 'authorization_code',\n 'redirect_uri': redirect_uri,\n 'code': code,\n }\n\n result = requests.post(\n 'https://api.instagram.com/oauth/access_token', data=params)\n current_app.logger.debug('received result %s', result)\n payload = result.json()\n access_token = payload.get('access_token')\n\n Setting.query.get('instagram_access_token').value = access_token\n db.session.commit()\n return redirect(url_for('admin.edit_settings'))\n\n\ndef create_context(url):\n m = PERMALINK_RE.match(url)\n if not m:\n current_app.logger.debug('url is not an instagram media url %s', url)\n return\n\n r = ig_get('https://api.instagram.com/v1/media/shortcode/' + m.group(1))\n\n if r.status_code // 2 != 100:\n current_app.logger.warn(\n \"failed to fetch instagram media with shortcode 
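The `searchpage.history` test above is how requests exposes redirects: the history list holds every intermediate response, so a non-empty list whose first URL differs from the final one means the search jumped straight to an album page. The same check in isolation (example.com is a placeholder target):

```python
import requests

r = requests.get("http://example.com", allow_redirects=True)
if r.history and r.url != r.history[0].url:
    print("redirected:", [h.status_code for h in r.history], "->", r.url)
else:
    print("no redirect:", r.url)
```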
%s %s %s\",\n m.group(1), r, r.content)\n return\n\n blob = r.json()\n author = blob.get('data', {}).get('user', {})\n author_name = author.get('full_name')\n author_image = author.get('profile_picture')\n author_url = author.get('website')\n created_time = blob.get('data', {}).get('created_time')\n caption_text = (blob.get('data', {}).get('caption') or {}).get('text')\n images = blob.get('data', {}).get('images', {})\n image = images.get('standard_resolution').get('url')\n\n if created_time:\n published = datetime.datetime.fromtimestamp(int(created_time))\n\n content = ''\n if caption_text:\n content += '
<p>' + caption_text + '</p>
'\n if image:\n content += ''\n\n context = Context()\n context.url = context.permalink = url\n context.author_name = author_name\n context.author_image = author_image\n context.author_url = author_url\n context.published = published\n context.title = None\n context.content = content\n context.content_plain = caption_text\n\n current_app.logger.debug('created instagram context %s', context)\n\n return context\n\n\ndef send_to_instagram(post, args):\n \"\"\"Share a like or comment to Instagram without user-input.\n \"\"\"\n if 'instagram' in args.getlist('syndicate-to'):\n if not is_instagram_authorized():\n return False, 'Current user is not authorized for instagram'\n\n current_app.logger.debug(\n \"queueing post to instagram {}\".format(post.id))\n get_queue().enqueue(do_send_to_instagram, post.id, current_app.config['CONFIG_FILE'])\n return True, 'Success'\n\n\ndef do_send_to_instagram(post_id, app_config):\n with async_app_context(app_config):\n current_app.logger.debug('posting to instagram %d', post_id)\n post = Post.load_by_id(post_id)\n\n in_reply_to, repost_of, like_of \\\n = util.posse_post_discovery(post, PERMALINK_RE)\n\n # likes are the only thing we can POSSE to instagram unfortunately\n if like_of:\n m = PERMALINK_RE.match(like_of)\n shortcode = m.group(1)\n\n r = ig_get('https://api.instagram.com/v1/media/shortcode/'\n + m.group(1))\n\n if r.status_code // 2 != 100:\n current_app.logger.warn(\n \"failed to fetch instagram media %s %s\", r, r.content)\n return None\n\n media_id = r.json().get('data', {}).get('id')\n if not media_id:\n current_app.logger.warn(\n 'could not find media id for shortcode %s', shortcode)\n return None\n\n r = ig_get('https://api.instagram.com/v1/users/self')\n my_username = r.json().get('data', {}).get('username')\n\n r = ig_post('https://api.instagram.com/v1/media/'\n + media_id + '/likes')\n\n if r.status_code // 2 != 100:\n current_app.logger.warn(\n \"failed to POST like for instagram id %s\", media_id)\n return None\n\n like_url = like_of + '#liked-by-' + my_username\n post.add_syndication_url(like_url)\n db.session.commit()\n return like_url\n\n if in_reply_to:\n comment_text = format_markdown_for_instagram(post.content)\n comment_url = post_comment(in_reply_to, comment_text)\n if comment_url:\n post.add_syndication_url(comment_url)\n db.session.commit()\n return comment_url\n\n\ndef format_markdown_for_instagram(data):\n return util.format_as_text(util.markdown_filter(data))\n\n\ndef post_comment(permalink, comment_text):\n if ('INSTAGRAM_USERNAME' not in current_app.config\n or 'INSTAGRAM_PASSWORD' not in current_app.config):\n return\n\n from selenium import webdriver\n from selenium.webdriver.common.keys import Keys\n import selenium.webdriver.support.ui as ui\n from selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n dc = dict(DesiredCapabilities.PHANTOMJS)\n dc['ssl-protocol'] = 'any'\n\n browser = webdriver.PhantomJS(desired_capabilities=dc)\n wait = ui.WebDriverWait(browser, 10) # timeout after 10 seconds\n\n browser.get('https://instagram.com/accounts/login/')\n\n un = browser.find_element_by_id('lfFieldInputUsername')\n un.send_keys(current_app.config['INSTAGRAM_USERNAME']\n + Keys.TAB\n + current_app.config['INSTAGRAM_PASSWORD'])\n un.submit()\n\n wait.until(lambda b: b.current_url == 'https://instagram.com/')\n\n browser.get(permalink)\n\n inp = browser.find_element_by_tag_name('input')\n inp.send_keys(comment_text)\n inp.submit()\n\n # workaround for https://github.com/SeleniumHQ/selenium/issues/767\n 
browser.service.process.terminate()\n browser.quit()\n\n return (permalink + '#comment-by-'\n + current_app.config['INSTAGRAM_USERNAME']\n + '-' + datetime.datetime.now().isoformat())\n\n\ndef ig_get(url):\n return requests.get(url, params={\n 'access_token': get_settings().instagram_access_token,\n })\n\n\ndef ig_post(url):\n return requests.post(url, data={\n 'access_token': get_settings().instagram_access_token,\n })\n\n\ndef is_instagram_authorized():\n return (hasattr(get_settings(), 'instagram_access_token')\n and get_settings().instagram_access_token)\n","repo_name":"kylewm/redwind","sub_path":"redwind/plugins/instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":7622,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"67"} +{"seq_id":"42665074799","text":"#Fonksiyonlar 2\r\ndef ortalama(ders_sayisi):\r\n toplam = 0\r\n for i in range(ders_sayisi):\r\n print(\"Lutfen ders\",i+1,\"için not giriniz:\")\r\n a=int(input())\r\n toplam = toplam + a\r\n sonuc = toplam / ders_sayisi\r\n return belge(sonuc)\r\n \r\ndef belge(ort):\r\n print(\"Ortalamaniz:\",ort)\r\n if ort >= 70 and ort < 85:\r\n print(\"Tebrikler Tesekkür belgesi almaya hak kazandiniz.\")\r\n elif ort >= 85:\r\n print(\"Tebrikler Takdir belgesi almaya hak kazandiniz.\")\r\n else:\r\n print(\"Maalesef hicbir belge alma hakkiniz yoktur.\")\r\n\r\nders_sayisi = int(input(\"Aldiginiz ders sayisini giriniz: \"))\r\nortalama(ders_sayisi)\r\n","repo_name":"dogukanyildiz99/python_exercises","sub_path":"functions_2.py","file_name":"functions_2.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2882059626","text":"i,j=0,0\r\nwhile True:\r\n s=input(\"enter no\")\r\n if not s.isnumeric():\r\n print(\"enter any valid numeric\")\r\n else:\r\n i=int(s)\r\n if i < 2 or i >100:\r\n print(\"enter no btw 2 and 100\")\r\n else:\r\n break\r\nwhile True:\r\n k=input(\"enter no\")\r\n if not k.isnumeric():\r\n print(\"enter any valid integer\")\r\n else:\r\n j=int(k)\r\n if j < 2 or j >100:\r\n print(\"enter no btw 2 and 100\")\r\n else:\r\n break\r\nprint(f\"{i} * {j} = {i*j}\") \r\n","repo_name":"laharikaneelam/Python_exercises","sub_path":"multi_2nos.py","file_name":"multi_2nos.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9479486377","text":"import simplejson as json\n\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.decorators.cache import never_cache\n\nfrom components.makahiki_base.models import Like\n\nfrom components.resources import DEFAULT_NUM_RESOURCES\nfrom components.resources.models import Resource\nfrom components.resources.forms import TopicSelectForm\n# Create your views here.\n\n@never_cache\ndef index(request):\n \"\"\"Index page for the resources tab.\"\"\"\n resources = None\n resource_count = 0\n view_all = request.GET.has_key(\"view_all\") and request.GET[\"view_all\"]\n view_all_url = None\n \n if request.GET.has_key(\"topics\"):\n 
topic_form = TopicSelectForm(request.GET)\n if topic_form.is_valid():\n topics = topic_form.cleaned_data[\"topics\"]\n if view_all:\n resources = Resource.objects.filter(topics__pk__in=topics).distinct().order_by(\"-created_at\")\n else:\n resources = Resource.objects.filter(topics__pk__in=topics).distinct().order_by(\"-created_at\")[0:DEFAULT_NUM_RESOURCES]\n resource_count = Resource.objects.filter(topics__pk__in=topics).distinct().count()\n \n else:\n # We get here on first load\n topic_form = TopicSelectForm() # Note that all topics are selected by default.\n if view_all:\n resources = Resource.objects.order_by(\"-created_at\")\n else:\n resources = Resource.objects.order_by(\"-created_at\")[0:DEFAULT_NUM_RESOURCES]\n resource_count = Resource.objects.count()\n\n # Create the list header and view all link.\n list_title = \"%d resources\"\n if not view_all and resource_count > DEFAULT_NUM_RESOURCES:\n view_all_url = _construct_all_url(request)\n list_title = list_title % DEFAULT_NUM_RESOURCES\n else:\n list_title = list_title % resource_count\n \n return render_to_response('resources/index.html', {\n \"topic_form\": topic_form,\n \"resources\": resources,\n \"list_title\": list_title,\n \"resource_count\": resource_count,\n \"view_all_url\": view_all_url,\n }, context_instance = RequestContext(request))\n \ndef _construct_all_url(request):\n \"\"\"Constructs a view all url using the parameters in the request.\"\"\"\n url = \"/resources/view_all/\"\n if request.GET.has_key(\"topics\"):\n # If this url has topics, we need to append that list.\n url += \"?\" + request.GET.urlencode()\n \n return url\n \ndef filter(request):\n \"\"\"Uses AJAX to update resources list.\"\"\"\n view_all_url = None\n \n if request.is_ajax():\n topic_form = TopicSelectForm(request.GET)\n if topic_form.is_valid():\n topics = topic_form.cleaned_data[\"topics\"]\n if len(topics) > 0:\n resources = Resource.objects.filter(topics__pk__in=topics).distinct().order_by(\"-created_at\")[0:DEFAULT_NUM_RESOURCES]\n resource_count = Resource.objects.filter(topics__pk__in=topics).distinct().count()\n\n title = \"%d resources\" % resource_count\n if resource_count > DEFAULT_NUM_RESOURCES:\n view_all_url = _construct_all_url(request)\n title = \"%d resources\" % DEFAULT_NUM_RESOURCES\n\n response = render_to_string(\"resources/list.html\", {\n \"resources\": resources,\n \"resource_count\": resource_count,\n \"view_all_url\": view_all_url,\n })\n \n else:\n title = \"0 resources\"\n response = \"
<p>No topics selected.</p>
\"\n \n return HttpResponse(json.dumps({\n \"resources\": response,\n \"title\": title,\n }), mimetype='application/json')\n \n # If something goes wrong, all we can do is raise a 404 or 500.\n raise Http404\n \ndef view_all(request):\n \"\"\"Uses AJAX to view all resources.\"\"\"\n if request.is_ajax():\n if request.GET.has_key(\"topics\"):\n topic_form = TopicSelectForm(request.GET)\n if topic_form.is_valid():\n topics = topic_form.cleaned_data[\"topics\"]\n # Note that DEFAULT_NUM_RESOURCES is already loaded, so we just load the rest.\n resources = Resource.objects.filter(topics__pk__in=topics).distinct().order_by(\"-created_at\")[DEFAULT_NUM_RESOURCES:]\n resource_count = Resource.objects.filter(topics__pk__in=topics).distinct().count()\n \n else:\n # View all on default page.\n resources = Resource.objects.order_by(\"-created_at\")[DEFAULT_NUM_RESOURCES:]\n resource_count = Resource.objects.count()\n \n response = render_to_string(\"resources/resource_list.html\", {\n \"resources\": resources,\n \"resource_count\": resource_count,\n })\n title = \"%d resources\" % resource_count\n return HttpResponse(json.dumps({\n \"resources\": response,\n \"title\": title,\n }), mimetype='application/json')\n \n # If something goes wrong, all we can do is raise a 404 or 500.\n raise Http404\n \n@login_required\ndef like(request, item_id):\n \"\"\"Like a resource.\"\"\"\n \n error = None\n user = request.user\n content_type = get_object_or_404(ContentType, app_label=\"resources\", model=\"Resource\")\n try:\n like = Like.objects.get(user=user, content_type=content_type, object_id=item_id)\n error = \"You already like this item.\"\n except ObjectDoesNotExist:\n like = Like(user=user, floor=user.get_profile().floor, content_type=content_type, object_id=item_id)\n like.save()\n\n if request.is_ajax():\n return HttpResponse(json.dumps({\n \"error\":error\n }), mimetype='application/json')\n \n # elif error:\n # request.user.message_set.create(message=error)\n \n return HttpResponseRedirect(reverse(\"components.resources.views.resource\", args=(item_id,))) \n\n@login_required\ndef unlike(request, item_id):\n \"\"\"Unlike a resource.\"\"\"\n\n error = None\n user = request.user\n content_type = get_object_or_404(ContentType, app_label=\"resources\", model=\"Resource\")\n try:\n like = Like.objects.get(user=user, content_type=content_type, object_id=item_id)\n like.delete()\n except ObjectDoesNotExist:\n error = \"You do not like this item.\"\n\n if request.is_ajax():\n return HttpResponse(json.dumps({\n \"error\":error\n }), mimetype='application/json')\n #At this point, this is a non-ajax request.\n # elif error:\n # request.user.message_set.create(message=error)\n \n return HttpResponseRedirect(reverse(\"components.resources.views.resource\", args=(item_id,)))\n \n@never_cache\ndef resource(request, resource_id):\n \"\"\"View details for a resource.\"\"\"\n resource = get_object_or_404(Resource, pk=resource_id)\n resource.views += 1\n resource.save()\n \n return render_to_response('resources/resource_detail.html', {\n \"resource\": resource,\n }, context_instance = RequestContext(request))","repo_name":"keokilee/makahiki","sub_path":"makahiki/apps/components/resources/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"39439829360","text":"import pygame\r\nfrom pygame.locals import *\r\nimport random\r\n\r\na = 250\r\nclass Player(pygame.sprite.Sprite):\r\n def 
__init__(self):\r\n super(Player, self).__init__()\r\n self.image = pygame.image.load('200_s.gif').convert_alpha()\r\n self.rect = self.image.get_rect()\r\n\r\n def update(self, pressed_keys):\r\n if pressed_keys[K_UP]:\r\n self.rect.move_ip(0, -2)\r\n if pressed_keys[K_DOWN]:\r\n self.rect.move_ip(0, 2)\r\n if pressed_keys[K_LEFT]:\r\n self.rect.move_ip(-2, 0)\r\n if pressed_keys[K_RIGHT]:\r\n self.rect.move_ip(2, 0)\r\n \r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n elif self.rect.right > 1024:\r\n self.rect.right = 1024\r\n if self.rect.top <= 0:\r\n self.rect.top = 0\r\n elif self.rect.bottom >= 768:\r\n self.rect.bottom = 768 \r\n \r\nclass Opponent(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super(Opponent, self).__init__()\r\n self.image = pygame.image.load('bird.gif').convert_alpha()\r\n self.rect = self.image.get_rect(center=(1050, random.randint(0, 768)))\r\n self.speed = random.randint(0,2)\r\n \r\n def update(self):\r\n self.rect.move_ip(-self.speed, 0)\r\n if self.rect.right < 0:\r\n self.kill()\r\n\r\npygame.init() \r\n\r\ntime = pygame.time.get_ticks()\r\n\r\nscreen = pygame.display.set_mode((1024, 768))\r\n \r\nplayer = Player()\r\n\r\nbackground = pygame.Surface(screen.get_size())\r\nbackground.fill((255,255,255))\r\n\r\nplayers = pygame.sprite.Group()\r\nopponents = pygame.sprite.Group()\r\nall_sprites = pygame.sprite.Group()\r\nall_sprites.add(player)\r\n \r\nADDOPPONENT = pygame.USEREVENT + 1\r\npygame.time.set_timer(ADDOPPONENT, a)\r\n \r\nrunning = True\r\nwhile running:\r\n \r\n #time = pygame.time.get_ticks()\r\n\r\n for event in pygame.event.get():\r\n \r\n #if time >= 5000 and event.type == ADDOPPONENT:\r\n #a=a-1\r\n #pygame.time.set_timer(ADDOPPONENT, a)\r\n #time = 0\r\n \r\n if event.type == KEYDOWN and event.key == K_ESCAPE:\r\n running = False\r\n print(\"Escape\")\r\n \r\n elif event.type == QUIT:\r\n running = False\r\n print(\"QUIT\")\r\n\r\n elif event.type == ADDOPPONENT:\r\n new_opponent = Opponent()\r\n opponents.add(new_opponent)\r\n all_sprites.add(new_opponent)\r\n\r\n #Draw background\r\n screen.blit(background, (0, 0))\r\n \r\n pressed_keys = pygame.key.get_pressed()\r\n \r\n player.update(pressed_keys)\r\n opponents.update()\r\n \r\n for entity in all_sprites:\r\n\r\n screen.blit(entity.image, entity.rect)\r\n\r\n pygame.display.flip()\r\n \r\nif a <= 150:\r\n for i in range(100):\r\n Level2 = True\r\n a=1\r\n pygame.time.set_timer(ADDOPPONENT, 1)\r\n a=a+1\r\n \r\n#while Level2\r\n\r\npygame.quit()\r\n","repo_name":"CharliePepin2017/Python","sub_path":"pygame/pygame2.py","file_name":"pygame2.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9649467475","text":"import numpy as np\n\n# TODO: Transform into base classes\ndef get_selector(selector_type, num_images_to_prime, staleness=2):\n if selector_type == \"threshold\":\n final_selector = ThresholdSelector()\n elif selector_type == \"alwayson\":\n final_selector = AlwaysOnSelector()\n elif selector_type == \"stale\":\n final_selector = StaleSelector(staleness)\n else:\n print(\"FP Selector must be in {alwayson, threshold}\")\n exit()\n selector = PrimedSelector(AlwaysOnSelector(),\n final_selector,\n num_images_to_prime)\n return selector\n\nclass PrimedSelector(object):\n def __init__(self, initial, final, initial_num_images, epoch=0):\n self.initial = initial\n self.final = final\n self.initial_num_images = initial_num_images\n self.num_trained = 0\n\n def 
next_partition(self, partition_size):\n self.num_trained += partition_size\n\n def get_selector(self):\n return self.initial if self.num_trained < self.initial_num_images else self.final\n\n def select(self, *args, **kwargs):\n return self.get_selector().select(*args, **kwargs)\n\n def mark(self, *args, **kwargs):\n return self.get_selector().mark(*args, **kwargs)\n\n\nclass AlwaysOnSelector():\n def mark(self, examples_and_metadata):\n for em in examples_and_metadata:\n em.example.forward_select_probability = 1.\n em.example.forward_select = True\n return examples_and_metadata\n\nclass StaleSelector():\n def __init__(self, threshold):\n self.threshold = threshold\n self.logger = {\"counter\": 0, \"forward\": 0, \"no_forward\": 0}\n\n def select(self, em):\n #if self.logger['counter'] % 50000 == 0:\n # print(self.logger)\n self.logger['counter'] += 1\n\n em.metadata[\"epochs_since_update\"] += 1\n if 'loss' not in em.metadata or em.metadata[\"epochs_since_update\"] >= self.threshold:\n self.logger['forward'] += 1\n return True\n else:\n self.logger['no_forward'] += 1\n em.example.loss = em.metadata[\"loss\"]\n return False\n\n def mark(self, examples_and_metadata):\n for em in examples_and_metadata: \n em.example.forward_select = self.select(em)\n return examples_and_metadata\n\n'''\nclass ThresholdSelector():\n def __init__(self):\n self.logger = {\"counter\": 0, \"path_3\": 0, \"path_2\": 0, \"path_1\": 0}\n self.historical_sps = {}\n self.times_passed = {}\n self.threshold = 0.0001\n self.times_passed_threshold = 5\n print(\"ThesholdSelector {}-{}\".format(self.threshold, self.times_passed_threshold))\n\n def select(self, example):\n\n if self.logger['counter'] % 10000 == 0:\n print(self.logger)\n self.logger['counter'] += 1\n\n image_id = example.image_id\n\n # First time seeing image. No SP calculated yet. FP image.\n if image_id not in self.times_passed.keys():\n self.historical_sps[image_id] = None\n self.times_passed[image_id] = 0\n return True\n\n times_passed = self.times_passed[image_id]\n # Image was forward propped last time. 
Update history with SP.\n        if times_passed == 0:\n            self.historical_sps[image_id] = example.select_probability\n            self.logger['path_1'] += 1\n\n        last_sp = self.historical_sps[image_id]\n        if last_sp < self.threshold and times_passed <= self.times_passed_threshold:\n            self.times_passed[image_id] += 1\n            self.logger['path_2'] += 1\n            return False\n        else:\n            self.times_passed[image_id] = 0\n            self.logger['path_3'] += 1\n            return True\n\n    def mark(self, examples):\n        for example in examples:\n            example.forward_select = self.select(example)\n        return examples\n'''\n","repo_name":"angelajiang/SelectiveBackprop","sub_path":"lib/fp_selectors.py","file_name":"fp_selectors.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"67"} +{"seq_id":"72480386454","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\n\n\nfrom qad_polygon_maptool import *\nfrom qad_generic_cmd import QadCommandClass\nfrom qad_msg import QadMsg\nfrom qad_textwindow import *\nimport qad_utils\nimport qad_layer\n\n\n# Class that handles the POLYGON command\nclass QadPOLYGONCommandClass(QadCommandClass):\n\n def instantiateNewCmd(self):\n \"\"\" instantiates a new command of the same type \"\"\"\n return QadPOLYGONCommandClass(self.plugIn)\n \n def getName(self):\n return QadMsg.translate(\"Command_list\", \"POLYGON\")\n\n def getEnglishName(self):\n return \"POLYGON\"\n\n def connectQAction(self, action):\n QObject.connect(action, SIGNAL(\"triggered()\"), self.plugIn.runPOLYGONCommand)\n\n def getIcon(self):\n return QIcon(\":/plugins/qad/icons/polygon.png\")\n\n def getNote(self):\n # set the explanatory notes of the command\n return QadMsg.translate(\"Command_POLYGON\", \"Draws a regular polygon.\")\n \n def __init__(self, plugIn):\n QadCommandClass.__init__(self, plugIn)\n # if this flag = True the command is used inside another command to draw a polygon\n # that will not be saved to a layer\n self.virtualCmd = False\n self.centerPt = None\n self.firstEdgePt = None\n self.vertices = []\n self.sideNumber = self.plugIn.lastPolygonSideNumber\n self.constructionModeByCenter = self.plugIn.lastPolygonConstructionModeByCenter\n self.area = 100\n\n def __del__(self):\n QadCommandClass.__del__(self)\n\n def getPointMapTool(self, drawMode = QadGetPointDrawModeEnum.NONE):\n if (self.plugIn is not None):\n if self.PointMapTool is None:\n self.PointMapTool = Qad_polygon_maptool(self.plugIn)\n return self.PointMapTool\n else:\n return None \n\n \n def addPolygonToLayer(self, layer):\n if layer.geometryType() == QGis.Line:\n qad_layer.addLineToLayer(self.plugIn, layer, self.vertices)\n elif layer.geometryType() == QGis.Polygon: \n qad_layer.addPolygonToLayer(self.plugIn, layer, self.vertices)\n \n\n #============================================================================\n # WaitForSideNumber\n #============================================================================\n def WaitForSideNumber(self):\n self.step = 1\n prompt = QadMsg.translate(\"Command_POLYGON\", \"Enter number of sides <{0}>: \")\n self.waitForInt(prompt.format(str(self.sideNumber)), self.sideNumber, \\\n QadInputModeEnum.NOT_ZERO | QadInputModeEnum.NOT_NEGATIVE)\n \n #============================================================================\n # WaitForCenter\n #============================================================================\n def WaitForCenter(self):\n self.step = 2\n 
self.getPointMapTool().setMode(Qad_polygon_maptool_ModeEnum.ASK_FOR_CENTER_PT)\n \n keyWords = QadMsg.translate(\"Command_POLYGON\", \"Edge\")\n prompt = QadMsg.translate(\"Command_POLYGON\", \"Specify center of polygon or [{0}]: \").format(keyWords)\n \n englishKeyWords = \"Edge\"\n keyWords += \"_\" + englishKeyWords\n # get ready to wait for a point or enter\n # msg, inputType, default, keyWords, no check \n self.waitFor(prompt, \\\n QadInputTypeEnum.POINT2D | QadInputTypeEnum.KEYWORDS, \\\n None, keyWords, QadInputModeEnum.NONE)\n \n #============================================================================\n # WaitForInscribedCircumscribedOption\n #============================================================================\n def WaitForInscribedCircumscribedOption(self):\n self.step = 3 \n keyWords = QadMsg.translate(\"Command_POLYGON\", \"Inscribed in circle\") + \"/\" + \\\n QadMsg.translate(\"Command_POLYGON\", \"Circumscribed about circle\") + \"/\" + \\\n QadMsg.translate(\"Command_POLYGON\", \"Area\")\n prompt = QadMsg.translate(\"Command_POLYGON\", \"Enter an option [{0}] <{1}>: \").format(keyWords, \\\n self.constructionModeByCenter)\n\n englishKeyWords = \"Inscribed in circle\" + \"/\" + \"Circumscribed about circle\" + \"/\" + \"Area\"\n keyWords += \"_\" + englishKeyWords\n # get ready to wait for a keyword \n # msg, inputType, default, keyWords, positive values\n self.waitFor(prompt, QadInputTypeEnum.KEYWORDS, \\\n self.constructionModeByCenter, \\\n keyWords, QadInputModeEnum.NONE) \n\n #============================================================================\n # WaitForRadius\n #============================================================================\n def WaitForRadius(self, layer):\n self.step = 4\n if layer is not None:\n self.getPointMapTool().geomType = layer.geometryType()\n self.getPointMapTool().setMode(Qad_polygon_maptool_ModeEnum.CENTER_PT_KNOWN_ASK_FOR_RADIUS)\n \n # get ready to wait for a point or a real number \n # msg, inputType, default, keyWords, positive values\n prompt = QadMsg.translate(\"Command_CIRCLE\", \"Specify the circle radius <{0}>: \")\n self.waitFor(prompt.format(str(self.plugIn.lastRadius)), \\\n QadInputTypeEnum.POINT2D | QadInputTypeEnum.FLOAT, \\\n self.plugIn.lastRadius, \"\", \\\n QadInputModeEnum.NOT_ZERO | QadInputModeEnum.NOT_NEGATIVE)\n\n #============================================================================\n # WaitForFirstEdgePt\n #============================================================================\n def WaitForFirstEdgePt(self):\n self.step = 5\n # set the map tool\n self.getPointMapTool().setMode(Qad_polygon_maptool_ModeEnum.ASK_FOR_FIRST_EDGE_PT)\n # get ready to wait for a point\n self.waitForPoint(QadMsg.translate(\"Command_POLYGON\", \"Specify the first point of the edge: \"))\n\n #============================================================================\n # WaitForSecondEdgePt\n #============================================================================\n def WaitForSecondEdgePt(self, layer):\n self.step = 6\n self.getPointMapTool().firstEdgePt = self.firstEdgePt\n\n if layer is not None:\n self.getPointMapTool().geomType = layer.geometryType()\n\n # set the map tool\n self.getPointMapTool().setMode(Qad_polygon_maptool_ModeEnum.FIRST_EDGE_PT_KNOWN_ASK_FOR_SECOND_EDGE_PT)\n # get ready to wait for a point\n self.waitForPoint(QadMsg.translate(\"Command_POLYGON\", \"Specify the second point of the edge: \"))\n\n 
#============================================================================\n # WaitForArea\n #============================================================================\n def WaitForArea(self):\n self.step = 7\n \n msg = QadMsg.translate(\"Command_POLYGON\", \"Enter the polygon area in current units <{0}>: \")\n # get ready to wait for a real number \n # msg, inputType, default, keyWords, positive values\n self.waitFor(msg.format(str(self.area)), QadInputTypeEnum.FLOAT, \\\n self.area, \"\", \\\n QadInputModeEnum.NOT_ZERO | QadInputModeEnum.NOT_NEGATIVE)\n\n \n #============================================================================\n # run\n #============================================================================\n def run(self, msgMapTool = False, msg = None):\n if self.plugIn.canvas.mapSettings().destinationCrs().geographicFlag():\n self.showMsg(QadMsg.translate(\"QAD\", \"\\nThe coordinate reference system of the project must be a projected coordinate system.\\n\"))\n return True # end of command\n\n currLayer = None\n if self.virtualCmd == False: # if we really want to save the polyline to a layer \n # the current layer must be editable and of line or polygon type\n currLayer, errMsg = qad_layer.getCurrLayerEditable(self.plugIn.canvas, [QGis.Line, QGis.Polygon])\n if currLayer is None:\n self.showErr(errMsg)\n return True # end of command\n \n #=========================================================================\n # REQUEST FOR THE NUMBER OF SIDES OF THE POLYGON \n if self.step == 0: # start of the command\n self.WaitForSideNumber()\n return False\n\n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE NUMBER OF SIDES OF THE POLYGON (from step = 0) \n elif self.step == 1: # after waiting for a point the command restarts\n if msgMapTool == True: # the point comes from a graphical selection\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n value = self.sideNumber\n else:\n return False\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == int:\n if value < 3:\n self.showErr(QadMsg.translate(\"Command_POLYGON\", \"\\nEnter an integer greater than 2.\"))\n else:\n self.sideNumber = value\n self.getPointMapTool().sideNumber = self.sideNumber\n self.plugIn.setLastPolygonSideNumber(self.sideNumber)\n self.WaitForCenter()\n else:\n self.WaitForSideNumber() \n\n return False # continue\n\n\n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE CENTER OF THE POLYGON (from step = 1)\n elif self.step == 2: # after waiting for a point the command restarts\n if msgMapTool == True: # the point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n self.WaitForCenter()\n return False\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n\n value = self.getPointMapTool().point\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == unicode:\n if value == QadMsg.translate(\"Command_POLYGON\", \"Edge\") or value == \"Edge\":\n self.WaitForFirstEdgePt()\n 
elif type(value) == QgsPoint:\n self.centerPt = value\n self.getPointMapTool().centerPt = self.centerPt\n self.WaitForInscribedCircumscribedOption() \n \n return False # continue\n\n #=========================================================================\n # ANSWER TO THE REQUEST FOR AN INSCRIBED OR CIRCUMSCRIBED POLYGON (from step = 2)\n elif self.step == 3:\n if msgMapTool == True: # the point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n value = self.constructionModeByCenter\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n else:\n value = self.getPointMapTool().point\n else: # the keyword comes as a function parameter\n value = msg \n \n if type(value) == unicode:\n self.constructionModeByCenter = value\n self.plugIn.setLastPolygonConstructionModeByCenter(self.constructionModeByCenter)\n self.getPointMapTool().constructionModeByCenter = self.constructionModeByCenter\n if self.constructionModeByCenter == QadMsg.translate(\"Command_POLYGON\", \"Area\") or self.constructionModeByCenter == \"Area\":\n self.WaitForArea()\n else:\n self.WaitForRadius(currLayer)\n \n return False # end of command\n\n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE RADIUS (from step = 3)\n elif self.step == 4:\n if msgMapTool == True: # the point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n return True # end of command\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n\n value = self.getPointMapTool().point\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == QgsPoint or type(value) == float: # if the circle radius was entered \n if type(value) == QgsPoint: # if the circle radius was entered with a point \n self.radius = qad_utils.getDistance(self.centerPt, value)\n ptStart = value\n else:\n self.radius = value\n ptStart = None\n \n self.plugIn.setLastRadius(self.radius) \n\n if self.constructionModeByCenter == QadMsg.translate(\"Command_POLYGON\", \"Inscribed in circle\") or \\\n self.constructionModeByCenter == \"Inscribed in circle\":\n mode = True\n else:\n mode = False\n \n self.vertices.extend(qad_utils.getPolygonByNsidesCenterRadius(self.sideNumber, self.centerPt, self.radius, \\\n mode, ptStart))\n\n if self.virtualCmd == False: # if we really want to save the polygon to a layer\n self.addPolygonToLayer(currLayer)\n return True \n \n return False # end of command\n \n \n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE FIRST POINT OF THE EDGE (from step = 2)\n elif self.step == 5: # after waiting for a point the command restarts\n if msgMapTool == True: # the 
point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n return True # end of command\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n\n value = self.getPointMapTool().point\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == QgsPoint:\n self.firstEdgePt = value\n self.WaitForSecondEdgePt(currLayer)\n\n return False\n \n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE SECOND POINT OF THE EDGE (from step = 5)\n elif self.step == 6: # after waiting for a point or a real number the command restarts\n if msgMapTool == True: # the point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n return True # end of command\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n\n value = self.getPointMapTool().point\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == QgsPoint:\n self.vertices.extend(qad_utils.getPolygonByNsidesEdgePts(self.sideNumber, self.firstEdgePt, value))\n\n if self.virtualCmd == False: # if we really want to save the polygon to a layer\n self.addPolygonToLayer(currLayer)\n return True \n\n return False\n\n\n #=========================================================================\n # ANSWER TO THE REQUEST FOR THE POLYGON AREA (from step = 3)\n elif self.step == 7: # after waiting for a real number the command restarts\n if msgMapTool == True: # the point comes from a graphical selection\n # the following condition occurs if, while selecting a point,\n # another plugin was activated and deactivated Qad;\n # the command was then reactivated and returns here without the maptool\n # having selected a point \n if self.getPointMapTool().point is None: # the maptool was activated without a point\n if self.getPointMapTool().rightButton == True: # if the right mouse button was used\n value = self.area\n else:\n self.setMapTool(self.getPointMapTool()) # reactivate the maptool\n return False\n else:\n return False\n else: # the point comes as a function parameter\n value = msg\n\n if type(value) == float: # the area was entered\n self.vertices.extend(qad_utils.getPolygonByNsidesArea(self.sideNumber, self.centerPt, value))\n\n if self.virtualCmd == False: # if we really want to save the polygon to a layer\n self.addPolygonToLayer(currLayer)\n return True \n \n return False\n","repo_name":"geosim/QAD","sub_path":"qad_polygon_cmd.py","file_name":"qad_polygon_cmd.py","file_ext":"py","file_size_in_byte":19341,"program_lang":"python","lang":"it","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"19869452734","text":"from Animal import Animal\r\nfrom Dao 
import DAO\r\n\r\ndef registrar():\r\n codigo = input(\"Ingresa codigo del animal: \")\r\n raza = input(\"Ingresa la raza del animal: \")\r\n patas = input(\"Ingresa numero de patas del animal: \")\r\n peso = input(\"Ingresa el peso del animal: \")\r\n a = Animal(codigo, raza, patas, peso)\r\n d = DAO()\r\n d.registrar_animal(a)\r\n\r\ndef buscar()->Animal:\r\n codigo = input(\"Ingrese codigo del animal por buscar: \")\r\n d = DAO()\r\n a = d.buscar_animal(codigo)\r\n if a != None:\r\n print(a)\r\n else:\r\n print(\"El animal buscado no se encuentra\")\r\n return a\r\n\r\ndef eliminar():\r\n a = buscar()\r\n if a != None:\r\n opcion = input(\"Desea eliminar a este animal (s/n): \")\r\n if opcion.lower() == \"s\":\r\n d = DAO()\r\n d.eliminar_animal(a.get_id()) \r\n else:\r\n print(\"No se ha eliminado el animal\")\r\n\r\ndef modificar():\r\n a = buscar()\r\n if a != None:\r\n codigo = recibir_valor(\"codigo\", a.get_codigo())\r\n a.set_codigo(codigo)\r\n raza = recibir_valor(\"raza\", a.get_raza())\r\n a.set_raza(raza)\r\n peso = recibir_valor(\"peso\", a.get_peso())\r\n a.set_peso(float(peso))\r\n patas = int(recibir_valor(\"patas\", a.get_patas()))\r\n a.set_patas(patas)\r\n d = DAO()\r\n d.modificar_animal(a)\r\n\r\ndef recibir_valor(nombre_atributo: str, atributo):\r\n opcion = input(f\"Desea modificar {nombre_atributo} (s/n): \")\r\n if opcion.lower() == \"s\":\r\n valor = input(f\"Ingrese nuevo {nombre_atributo}: \")\r\n return valor\r\n return atributo\r\n\r\ndef mostrar_todo():\r\n d = DAO()\r\n animales = d.mostrar_animales()\r\n for a in animales:\r\n print(\"----------\")\r\n print(a)\r\n print(\"----------\")\r\n\r\ndef menu():\r\n print(\"1- Registrar\")\r\n print(\"2- Buscar\")\r\n print(\"3- Eliminar\")\r\n print(\"4- Modificar\")\r\n print(\"5- Mostrar todo\")\r\n print(\"6- Salir\")\r\n opcion = input(\"Ingrese una opcion: \")\r\n if opcion == \"1\":\r\n registrar()\r\n elif opcion == \"2\":\r\n buscar()\r\n elif opcion == \"3\":\r\n eliminar()\r\n elif opcion == \"4\":\r\n modificar()\r\n elif opcion == \"5\":\r\n mostrar_todo()\r\n elif opcion == \"6\":\r\n return True\r\n else:\r\n print(\"La opcion ingresada no es valida\")\r\n\r\nwhile menu() != True:\r\n pass","repo_name":"Taker-Miller/Animal","sub_path":"Nueva carpeta/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25292753596","text":"\"\"\"\nobject_detection.py\n\nContains functions that splices videos into images and detects objects contain within them\n\"\"\"\n\nfrom darkflow.net.build import TFNet\nfrom search.models import *\nimport sys\nimport cv2\nfrom pathlib import Path\nimport json\n\n#https://stackoverflow.com/a/47632941/7412757\ndef extract_images(vid_id):\n count = 0\n abs_path = Path(__file__).resolve().parent.parent\n video_path = abs_path.joinpath('videos', vid_id + '.mp4')\n vidcap = cv2.VideoCapture(str(video_path))\n\n success,image = vidcap.read()\n success = True\n print('Extracting images from [' + vid_id + ']...')\n screencap_path = abs_path.joinpath('screencaps', vid_id)\n screencap_path.mkdir(parents=True, exist_ok=True)\n while success:\n cv2.imwrite(str(screencap_path.joinpath(vid_id + '_' + str(count) + '.jpg')), image) # save frame as JPEG file\n vidcap.set(cv2.CAP_PROP_POS_MSEC,(count*2000)) # get frame every 2 seconds\n success,image = vidcap.read()\n count = count + 1\n print('Extracted ' + str(count) + ' images')\n video_path.unlink() #delete video\n 
return screencap_path\n\ndef detect_objects(vid_id, screencap_path):\n\tdarkflow_path = 'darkflow'\n\toptions = {\"model\": darkflow_path + \"/cfg/yolo.cfg\", \"load\": darkflow_path + \"/bin/yolo.weights\", \"threshold\": 0.25}\n\n\ttfnet = TFNet(options) #neural network library for object detection\n\timages = [x for x in screencap_path.glob('*.jpg')]\n\n\tcount = 1\n\tfor img in images:\n\t\tprint('Extracting objects from frame ' + str(count) + '/' + str(len(images)) + ' for [' + vid_id + ']')\n\t\timg_path = screencap_path.joinpath(img)\n\t\timgcv = cv2.imread(str(img_path))\n\t\tresult = tfnet.return_predict(imgcv)\n\t\tfor dict_ in result:\n\t\t\tobj = dict_['label']\n\t\t\ttry:\n\t\t\t\t#check if object already exists in db\n\t\t\t\tobj_db = Object.objects.get(name=obj)\n\t\t\texcept:\n\t\t\t\t#object does not exist yet\n\t\t\t\tobj_db = Object(name=obj)\n\t\t\t\tobj_db.save()\n\t\t\tfinally:\n\t\t\t\t#objet is in database, whether it was already there or just created, so associate object with video\n\t\t\t\tvideo = Video.objects.get(video_id=vid_id)\n\t\t\t\tobj_db.videos.add(video)\n\t\timg_path.unlink()\n\t\tcount += 1\n\tscreencap_path.rmdir()\n","repo_name":"JohnSorhannus/AdvancedYouTubeSearch","sub_path":"search/management/commands/object_detection.py","file_name":"object_detection.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14083060981","text":"import logging\nimport pickle\nimport random\nimport sys\nimport threading\nimport time\n\nimport pytest\n\nimport locked_dict.locked_dict as locked_dict\n\n\ndef test_locked_dict():\n \"\"\"Simple test driver. Switch to debug logging, if any argument given.\"\"\"\n\n _start = time.time()\n\n a_level = logging.INFO if len(sys.argv) < 2 else logging.DEBUG\n a_format = '%(asctime)s: %(message)s'\n\n logging.basicConfig(format=a_format)\n logger = logging.getLogger(__name__)\n logger.setLevel(a_level)\n\n def worker(key_seq, shared_map):\n for key in key_seq:\n shared_map[key] = {threading.current_thread().name: key}\n logger.debug('{} {}'.format(key, shared_map[key]))\n pause = float(random.randint(1, 5)) / 1000.0\n logger.debug('sleeping %02.3f' % (pause,))\n time.sleep(pause)\n\n return len(key_seq)\n\n expected = 0\n d = locked_dict.LockedDict()\n with d as m:\n m[0] = ['foo']\n expected += 1\n m.clear()\n expected -= 1\n assert len(m) == expected\n try:\n # noinspection PyUnusedLocal\n __ = m.popitem()\n assert False\n except KeyError:\n pass\n try:\n del m['not_there']\n assert False\n except KeyError:\n pass\n m.update({0: 'foo'})\n expected += 1\n assert len(m) == expected\n\n logger.debug('Entries({:5d}/{:5d})'.format(len(d), expected))\n\n for k, v in d.items():\n logger.debug('{}: {}'.format(k, v))\n logger.debug('{} {} {}'.format(id(d), isinstance(d, dict), isinstance(d, locked_dict.LockedDict)))\n logger.debug(dir(d))\n logger.debug(dir(getattr(d, '_lock')))\n\n with d as m:\n d_ser = pickle.dumps(m)\n logger.debug(d_ser)\n\n with d as m:\n m[1] = ('bar',)\n expected += 1\n m[42] = {'baz': 'ooka'}\n expected += 1\n x = m.pop(42)\n expected -= 1\n m[42] = x\n expected += 1\n __ = m.get(-42)\n assert __ is None\n if -42 not in m:\n __ = locked_dict.LockedDict.fromkeys(m.keys(), 'yes')\n bf = dict([(z, 'yes') for z in m.keys()])\n assert __ == bf\n assert __ is not bf\n logger.debug(__)\n\n with d as m:\n d_ser = pickle.dumps(m)\n logger.debug(d_ser)\n\n rd = pickle.loads(d_ser)\n for k, v in rd.items():\n 
logger.debug('{}: {}'.format(k, v))\n\n logger.debug('Entries({:5d}/{:5d})'.format(len(rd), expected))\n\n logger.debug('{} {} {}'.format(id(rd), type(rd), isinstance(rd, (dict, locked_dict.LockedDict))))\n\n worker_tasks = random.randint(1, 234)\n worker_count = random.randint(5, 67)\n logger.debug('Starting {} workers on {} tasks each ...'.format(worker_count, worker_tasks))\n expected += worker_count * worker_tasks\n for i in range(43, 43 + worker_count * worker_tasks, worker_tasks):\n t = threading.Thread(target=worker, args=(range(i, i + worker_tasks), rd))\n t.daemon = True\n t.start()\n\n main_thread = threading.current_thread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n logger.debug('joining %s' % (t.name,))\n t.join()\n\n for k, v in rd.items():\n logger.debug('{}: {}'.format(k, v))\n assert len(rd) == expected\n logger.info(\n 'WorkersTasks({:2d}:{:3d}).Entries({:5d}/{:5d}); SizeBytes({:6d})'\n ' - with Python({}); LatencySecs({:0.3f})'\n ''.format(\n worker_count,\n worker_tasks,\n len(rd),\n expected,\n sys.getsizeof(rd),\n ','.join(['{:2d}'.format(z) for z in sys.version_info[:3]]),\n round(time.time() - _start, 3),\n )\n )\n\n\ndef test_main():\n assert locked_dict\n\n\ndef test_lock_empty():\n expected = 0\n d = locked_dict.LockedDict()\n assert len(d) == expected\n assert bool(d) is False\n assert d is not True\n assert hasattr(d, '_lock')\n empty_d = {}\n assert d == empty_d\n d.setdefault('not_there_yet', []).append(42)\n assert d['not_there_yet'] == [42]\n\n\ndef test_lock_with():\n expected = 0\n d = locked_dict.LockedDict()\n empty_d = {}\n plain_old_d = {999: 'plain old dict', 12345: 54321}\n assert d != plain_old_d\n\n with d as m:\n assert len(m) == expected\n assert bool(m) is False\n assert m is not True\n assert hasattr(m, '_lock')\n assert m != plain_old_d\n assert m == empty_d\n\n m[0] = ['foo']\n expected += 1\n assert len(m) == expected\n assert bool(m) is True\n assert m is not False\n assert m != plain_old_d\n assert m != empty_d\n\n m.clear()\n expected -= 1\n assert len(m) == expected\n assert bool(m) is False\n assert m is not True\n assert m != plain_old_d\n assert m == empty_d\n\n with pytest.raises(KeyError):\n # noinspection PyUnusedLocal\n _ = m.popitem()\n\n with pytest.raises(KeyError):\n del m['not_there']\n\n m.update({0: 'foo'})\n expected += 1\n assert len(m) == expected\n assert bool(m) is True\n assert m is not False\n assert m != plain_old_d\n assert m != empty_d\n\n d_foo_ser = pickle.dumps(m[0])\n del m[0]\n assert m.get(0) is None\n m[0] = pickle.loads(d_foo_ser)\n assert m[0] == 'foo'\n","repo_name":"sthagen/locked-dict","sub_path":"test/test_locked_dict.py","file_name":"test_locked_dict.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"39852359689","text":"import uuid\n\nfrom fastapi import APIRouter, Depends, Path, Body\nfrom sqlmodel import Session\n\nfrom core.deps import get_db, get_current_user\nfrom core.database.models.users import User\nfrom core.database.models.teams import Team, TeamUpdate, TeamCreate\nfrom core.database.models.invites import TeamInvite\nfrom core.database.models.memberships import Membership, MembershipUpdate\nfrom core.database.crud import teams\nfrom core.database.crud import memberships\nfrom core.database.crud import invites\n\nfrom core.routes import not_authorized, not_found, forbidden\n\nrouter = APIRouter()\n\n\ndef check_membership(db: Session, current_user: User,\n 
team_uuid: uuid.UUID) -> Membership:\n mship = memberships.crud.get_by_team_user_uuid(db, current_user.uuid, team_uuid)\n\n if not current_user.is_superadmin and mship is None:\n raise forbidden()\n\n return mship\n\n\ndef check_adminship(db: Session, current_user: User,\n team_uuid: uuid.UUID) -> Membership:\n mship = memberships.crud.get_by_team_user_uuid(db, current_user.uuid, team_uuid)\n\n if not current_user.is_superadmin and (mship is None or not mship.is_admin):\n raise forbidden()\n\n return mship\n\n\n@router.get(\"/\", response_model=list[str | Team], tags=[\"teams\"])\ndef get_teams(*, db: Session = Depends(get_db),\n current_user: User | None = Depends(get_current_user)):\n if not current_user:\n return [team.name for team in teams.crud.get_multi(db)]\n elif not current_user.is_superadmin:\n return current_user.teams\n else:\n return teams.crud.get_multi(db)\n\n\n@router.post(\"/\", response_model=Team, tags=[\"teams\"])\ndef create_teams(*,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n team: TeamCreate = Body(..., description=\"Team\")\n ):\n if not current_user:\n raise not_authorized()\n\n if not current_user.is_superadmin:\n raise forbidden()\n\n db_team = teams.crud.create(db, obj_in=team)\n return db_team\n\n\n@router.get(\"/{team_uuid}\", response_model=Team, tags=[\"teams\"])\ndef get_team(*,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\")\n ):\n if not current_user:\n raise not_authorized()\n\n team = teams.crud.get(db, team_uuid)\n\n if not team:\n raise not_found(\"Team\")\n\n return team\n\n\n@router.get(\"/{team_uuid}/invites\", response_model=list[TeamInvite],\n tags=[\"teams\", \"invites\"])\ndef get_invites_by_team(*,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\")\n ):\n if not current_user:\n raise not_authorized()\n\n team = teams.crud.get(db, team_uuid)\n\n if not team:\n raise not_found(\"Team\")\n\n check_adminship(db, current_user, team_uuid)\n\n return invites.crud.get_by_team_uuid(db, team_uuid)\n\n\n@router.put(\"/{team_uuid}\", response_model=Team, tags=[\"teams\"])\ndef update_team(*,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\"),\n team_update: TeamUpdate = Body(...,\n description=\"Contents to be updated\")\n ):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n check_adminship(db, current_user, team_uuid)\n\n db_team = teams.crud.update(db, db_obj=db_team, obj_in=team_update)\n return db_team\n\n\n@router.delete(\"/{team_uuid}\", response_model=Team, tags=[\"team\"])\ndef delete_team(*,\n current_user: User = Depends(get_current_user),\n db: Session = Depends(get_db),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\")):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n check_adminship(db, current_user, team_uuid)\n\n teams.crud.remove(db, uuid=team_uuid)\n\n return db_team\n\n\n@router.get(\"/{team_uuid}/members\", response_model=list[Membership])\ndef get_members(*,\n current_user: User = Depends(get_current_user),\n db: Session = Depends(get_db),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of 
team\")):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n check_membership(db, current_user, team_uuid)\n\n db_memberships = memberships.crud.get_by_team_uuid(db, team_uuid)\n\n return db_memberships\n\n\n# @router.get(\"/{team_uuid}/members/current\", response_model=Membership)\n# def get_current_member(*,\n# current_user: User = Depends(get_current_user),\n# db: Session = Depends(get_db),\n# team_uuid: uuid.UUID = Path(..., description=\"UUID of team\")):\n# if not current_user:\n# raise not_authorized()\n#\n# db_team = teams.crud.get(db, team_uuid)\n#\n# if not db_team:\n# raise not_found(\"Team\")\n#\n# membership = check_membership(db, current_user, team_uuid)\n#\n# if not membership:\n# raise not_found(\"Membership\")\n#\n# return membership\n\n\n@router.get(\"/{team_uuid}/members/{user_uuid}\", response_model=Membership)\ndef get_member(*,\n current_user: User = Depends(get_current_user),\n db: Session = Depends(get_db),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\"),\n user_uuid: uuid.UUID = Path(..., description=\"UUID of user\")):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n check_membership(db, current_user, team_uuid)\n\n membership = memberships.crud.get_by_team_user_uuid(db, user_uuid, team_uuid)\n\n if not membership:\n raise not_found(\"Membership\")\n\n return membership\n\n\n@router.put(\"/{team_uuid}/members/{user_uuid}\", response_model=Membership)\ndef update_member(*,\n current_user: User = Depends(get_current_user),\n db: Session = Depends(get_db),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\"),\n user_uuid: uuid.UUID = Path(..., description=\"UUID of user\"),\n member_update: MembershipUpdate = Body(..., description=\"Content to be updated\")):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n check_adminship(db, current_user, team_uuid)\n\n db_membership = memberships.crud.get_by_team_user_uuid(db, user_uuid, team_uuid)\n\n if not db_membership:\n raise not_found(\"Membership\")\n\n db_membership = memberships.crud.update(db, db_obj=db_membership, obj_in=member_update)\n\n return db_membership\n\n\n@router.delete(\"/{team_uuid}/members/{user_uuid}\", response_model=Membership)\ndef remove_member(*,\n current_user: User = Depends(get_current_user),\n db: Session = Depends(get_db),\n team_uuid: uuid.UUID = Path(..., description=\"UUID of team\"),\n user_uuid: uuid.UUID = Path(..., description=\"UUID of user\")):\n if not current_user:\n raise not_authorized()\n\n db_team = teams.crud.get(db, team_uuid)\n\n if not db_team:\n raise not_found(\"Team\")\n\n if user_uuid != current_user.uuid:\n check_adminship(db, current_user, team_uuid)\n\n db_membership = memberships.crud.get_by_team_user_uuid(db, user_uuid, team_uuid)\n\n if not db_membership:\n raise not_found(\"Membership\")\n\n memberships.crud.remove_by_team_user_uuid(db, user_uuid, team_uuid)\n\n return db_membership\n","repo_name":"Natsku123/ttv-tools","sub_path":"backend/core/routes/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36621809639","text":"from bridges.bridges import *\nfrom bridges.data_src_dependent import *\nimport sys\nimport random\n\ndef 
main():\n # create the Bridges object, set credentials\n bridges = Bridges(YOUR_ASSSIGNMENT_NUMBER, \"YOUR_USER_ID\", \"YOUR_API_KEY\")\n\n bridges.set_title(\"OpenStreet Map Data Access Example\")\n\n # get the OsmData\n osmdata = data_source.get_osm_data(\"Charlotte, North Carolina\", \"default\")\n # Alternatively, one can use a bounding box in latitude and longitude:\n # osmdata = data_source.get_osm_data(35.28, -80.75, 35.32, -80.71, \"default\")\n\n vertices = osmdata.vertices\n edges = osmdata.edges\n\n print (\"Number of Vertices [Charlotte]: \" + str(len(vertices)))\n print (\"Number of Edges [Charlotte]: \" + str(len(edges)))\n\n print (\"Position of first vertex: lat=\"+str(vertices[0].latitude)+\n \" long=\"+ str(vertices[0].longitude));\n\n print (\"Cartesian Coordinate of first vertex: \"+str(vertices[0].cartesian_coord[0])+\n \", \"+ str(vertices[0].cartesian_coord[1]));\n\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"BridgesUNCC/BridgesUNCC.github.io","sub_path":"tutorials/testing/python/osm_snippet.py","file_name":"osm_snippet.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"29133820801","text":"import argparse\nimport numpy.random\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import CIFAR10, CIFAR100\nfrom timm.loss import LabelSmoothingCrossEntropy\nfrom models import get_model\nfrom sam_on import SAM_ON, ASAM_ON\nimport os\nimport time\nfrom autoaugment import CIFAR10Policy\nfrom datetime import datetime\n\ndef load_cifar(data_loader, batch_size=256, num_workers=2, autoaugment=False, data_path = '/scratch/datasets/CIFAR100/'):\n if data_loader == CIFAR10:\n mean = (0.4914, 0.4822, 0.4465)\n std = (0.2023, 0.1994, 0.2010)\n else:\n mean = (0.5071, 0.4867, 0.4408)\n std = (0.2675, 0.2565, 0.2761)\n\n # Transforms\n if autoaugment:\n train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=0),\n transforms.RandomHorizontalFlip(),\n CIFAR10Policy(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n else:\n train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n\n # DataLoader\n train_set = data_loader(root=data_path, train=True, download=False, transform=train_transform)\n test_set = data_loader(root=data_path, train=False, download=False, transform=test_transform)\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n return train_loader, test_loader\n\ndef train(args):\n dtime = datetime.now().strftime(\"%y-%m-%d_%H:%M:%S/\")\n os.makedirs(os.path.join(args.save,dtime),exist_ok=True)\n\n state = {k: v for k, v in args._get_kwargs()}\n print(state)\n # Data Loader\n train_loader, test_loader = load_cifar(eval(args.dataset), args.batch_size, autoaugment=args.autoaugment, data_path=args.data_path)\n num_classes = 10 if args.dataset == 'CIFAR10' else 100\n\n print('Creating Model...')\n # Model\n model = get_model(model_name=args.model,num_classes=num_classes)\n print('Model created.')\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if 
torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n\n print('Putting model on device...')\n model.to(device)\n print('On device.')\n # Minimizer\n if args.base_minimizer=='SGD':\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,\n momentum=args.momentum, weight_decay=args.weight_decay)\n elif args.base_minimizer=='AdamW':\n optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n else:\n raise NotImplementedError('Invalid base optimizer. Choose one of either SGD or AdamW.')\n \n if args.minimizer == 'SGD' or args.minimizer=='AdamW':\n minimizer = optimizer\n else:\n minimizer = eval(args.minimizer)(optimizer, model, rho=args.rho, eta=args.eta, layerwise=args.layerwise,\n elementwise=args.elementwise, p=args.p, normalize_bias=args.normalize_bias,\n no_norm=args.no_norm, only_norm = args.only_norm)\n # Learning Rate Scheduler\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(minimizer if args.minimizer=='SGD' else minimizer.optimizer, args.epochs)\n\n # Loss Functions\n if args.smoothing:\n criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)\n else:\n criterion = torch.nn.CrossEntropyLoss(reduction='none')\n print('Starting to train...')\n start_time = time.time()\n best_accuracy = 0.\n loss_best = 0.\n for epoch in range(args.epochs):\n epoch_start = time.time()\n # perform update step with base optimizer instead of SAM\n if args.start_sam<=epoch 0:\n q.append(1)\n else:\n q.append(0)\n return q\n\n\n# In[12]:\n\nclass SVR():\n \n def fit(self, BET, target,tuning_parameter):\n l =(len(BET))\n BET1 = BET \n BET1.reset_index(drop = True, inplace = True)\n x = BET1.to_dict(orient='list')\n keys =list(x.keys())\n k = keys.index(target) \n EE = []\n last_row =[]\n Ede = []\n count = BET[target][k][0]\n for i in range(len(BET)):\n if i != keys.index(target):\n for j in range(len(BET)):\n if j != keys.index(target):\n m = keys[i]\n n = keys[j]\n EE.append(x[m][j][10])\n if j == keys.index(target):\n Ede.append(x[m][j][10]) \n EE.append(-x[m][i][6]) \n last_row.append(-x[m][i][6])\n final = EE+last_row \n final.pop()\n final.append(count)\n final = np.array(final)\n n = (len(BET))\n final = reshape(final,(n,n))\n\n Ede.append(-(BET[target][k][1]))\n print(Ede)\n I = np.identity(n)\n const = (((I/tuning_parameter)+ final))\n\n inverse = np.linalg.inv(const)\n self.Beta = np.dot(inverse, np.array(Ede))\n\n return(self.Beta)\n \n def predict(self, X):\n numpy_matrix = X.as_matrix()\n intercept_ = self.Beta.pop()\n return (np.dot(numpy_matrix, self.Beta) - self.intercept_)\n\n","repo_name":"ahhaa/artml","sub_path":"artml/models/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"39880303550","text":"#!/usr/bin/env python\n\nimport mock\nimport json\nimport unittest\n\nfrom hyperwallet.utils import ApiClient\nfrom hyperwallet.config import SERVER\nfrom hyperwallet.exceptions import HyperwalletAPIException\n\n\nclass ApiClientTest(unittest.TestCase):\n\n def setUp(self):\n\n self.client = ApiClient(\n 'test-user',\n 'test-pass',\n SERVER\n )\n\n def test_failed_connection(self):\n\n with self.assertRaises(HyperwalletAPIException) as exc:\n self.client._makeRequest()\n\n self.assertEqual(\n exc.exception.message.get('errors')[0].get('code'),\n 'COMMUNICATION_ERROR'\n )\n\n @mock.patch('requests.Session.request')\n def test_receive_valid_json_empty_response(self, session_mock):\n\n 
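# a 204 No Content reply carries no body, so _makeRequest is expected to return an empty dict\n        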
session_mock.return_value = mock.MagicMock(\n status_code=204\n )\n\n self.assertEqual(self.client._makeRequest(), {})\n\n @mock.patch('requests.Session.request')\n def test_receive_non_json_response(self, session_mock):\n\n data = '404'\n\n session_mock.return_value = mock.MagicMock(\n status_code=404,\n content=data\n )\n\n with self.assertRaises(HyperwalletAPIException) as exc:\n self.client._makeRequest()\n\n self.assertEqual(\n exc.exception.message.get('errors')[0].get('code'),\n 'GARBAGE_RESPONSE'\n )\n\n @mock.patch('requests.Session.request')\n def test_receive_valid_json_error_response(self, session_mock):\n\n data = {\n \"errors\": [{\n \"message\": \"Houston, we have a problem\",\n \"code\": \"FORBIDDEN\"\n }]\n }\n\n session_mock.return_value = mock.MagicMock(\n status_code=400,\n content=json.dumps(data)\n )\n\n with self.assertRaises(HyperwalletAPIException) as exc:\n self.client._makeRequest()\n\n self.assertEqual(\n exc.exception.message.get('errors')[0].get('code'),\n 'FORBIDDEN'\n )\n\n @mock.patch('requests.Session.request')\n def test_receive_valid_json_response(self, session_mock):\n\n data = {\n 'key': 'value'\n }\n\n session_mock.return_value = mock.MagicMock(\n status_code=200,\n content=json.dumps(data)\n )\n\n encoded = json.dumps(data)\n if hasattr(encoded, 'decode'): # Python 2\n encoded = encoded.decode('utf-8')\n\n self.assertEqual(\n self.client._makeRequest(),\n json.loads(encoded)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"aseveryn-epam/python-sdk","sub_path":"hyperwallet/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"17849561322","text":"from django.contrib import admin\n\nfrom coupon.models import Coupon\n\n\nclass CouponAdmin(admin.ModelAdmin):\n list_display = [\n \"course\",\n \"code\",\n \"discount\",\n \"active\",\n ]\n ordering = [\n \"course\",\n \"code\",\n \"discount\",\n ]\n readonly_fields = (\n \"id\",\n \"code\",\n )\n list_editable = (\"active\",)\n\n\nadmin.site.register(Coupon, CouponAdmin)\n","repo_name":"amitbhalla/feelfreetocode","sub_path":"app/coupon/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24521952922","text":"#!/usr/local/bin/python\n\nimport os, warnings, argparse, gzip, tqdm, xphyle\nfrom Bio import SeqIO\nfrom sys import argv\nfrom Bio.Seq import Seq\nfrom mimetypes import guess_type\nfrom functools import partial\nfrom xphyle import xopen\n\nxphyle.configure(progress=True)\nxphyle.configure(threads=4)\n\ndef rreplace(s, old, new, occurrence):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n\ndef translate(fasta):\n dbf = os.path.basename( rreplace(fasta, 'fasta', 'fasta.db', 1 ) )\n print('Translating...')\n with xopen(dbf, 'wt') as db:\n parser = SeqIO.parse(xopen(fasta, 'rt'), 'fasta')\n for sequence in parser:\n id, seq = sequence.description, Seq(str(sequence.seq).upper().replace('X', 'N'))\n f = (-3,-2,-1,1,2,3)\n for i in f:\n if i < 0:\n db.write('>%s\\n%s\\n'%('%s_%d'%(id,i), seq.reverse_complement()[-(i+1):].translate(stop_symbol='X')))\n elif i > 0:\n db.write('>%s\\n%s\\n'%('%s_+%d'%(id,i), seq[i-1:].translate(stop_symbol='X')))\n\n\nif __name__ == '__main__':\n warnings.filterwarnings('ignore')\n parser = argparse.ArgumentParser(description='Translate .fasta file to fasta-db.')\n 
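# '--fasta' is the only input; the translated database is written as '<name>.fasta.db' in the current working directory\n    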
parser.add_argument('--fasta', help='input fasta file', required=True)\n    args = parser.parse_args()\n    translate( args.fasta )","repo_name":"agonopol/tara-ocean-analysis","sub_path":"fasta-translate/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16255292470","text":"print(\"Welcome to Wordle!\")\nN = 1\nwhile True:\n N += 1\n \n # PLAYER ONE INPUT\n validity = False\n while validity == False:\n playerOneInput = input(\"Input a five-letter word: \")\n playerWordLetterCount = 0\n \n for char3 in playerOneInput:\n playerWordLetterCount +=1\n \n if playerWordLetterCount != 5:\n print(f'{playerOneInput} is not a five-letter word.')\n continue\n \n validity = True\n \n ########################################################\n \n guessLeft = 6\n gameIndicator = 0\n \n boxesTotalOutput = []\n\n while guessLeft != 0:\n boxesOutput = []\n list3 = []\n playerTwoInput = input(\"Guess the five-letter word: \")\n playerTwoWordLetterCount = 0\n stopRepeat = False\n repeatChar = \"\"\n \n for char5 in playerTwoInput:\n playerTwoWordLetterCount += 1\n if char5 not in list3:\n list3.append(char5)\n else:\n repeatChar = char5\n\n if playerTwoWordLetterCount != 5:\n print(f'{playerTwoInput} is not a five-letter word.')\n continue\n \n else:\n if playerTwoInput == playerOneInput:\n gameIndicator += 1\n boxesOutput += '🟩' * 5\n print(''.join(boxesOutput))\n break\n \n else:\n guessLeft -= 1\n index = 0\n for char in playerTwoInput:\n index += 1\n # a letter in the right spot is green; move on so it is not also counted as yellow/white\n if playerOneInput[index - 1] == char:\n boxesOutput += '🟩'\n continue\n \n if char in playerOneInput:\n if char != repeatChar:\n boxesOutput += \"🟨\"\n \n else:\n # only the first occurrence of a repeated letter is coloured yellow\n if stopRepeat == False:\n boxesOutput += '🟨'\n stopRepeat = True\n else: \n boxesOutput += \"⬜\" \n else:\n boxesOutput += \"⬜\" \n \n print(''.join(boxesOutput))\n gameIndicator +=1\n boxesTotalOutput.append(boxesOutput) \n \n # OUTPUT\n print('') \n print(f'Wordle {N} {gameIndicator}/6')\n for items in boxesTotalOutput:\n print(''.join(items))\n","repo_name":"neillouis3/Wordle-Python","sub_path":"Wordle.py","file_name":"Wordle.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27123352396","text":"\n# coding: utf-8\n\n# ## Titanic Data Science Solutions | Kaggle\n# \n# https://www.kaggle.com/startupsci/titanic-data-science-solutions/notebook\n\n# In[1]:\n\n\n# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\nimport random as rnd\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# In[20]:\n\n\ntrain_df = pd.read_csv('titanic/train.csv')\ntest_df = pd.read_csv('titanic/test.csv')\ncombine = [train_df, test_df]\n\n\n# In[21]:\n\n\n# Which features are available in the dataset?\nprint(train_df.columns.values)\n\n\n# In[22]:\n\n\n# preview the data\ntrain_df.head()\n\n\n# 
In[23]:\n\n\ntrain_df.tail()\n\n\n# In[24]:\n\n\n# What are the data types for various features?\ntrain_df.info()\nprint('_'*40)\ntest_df.info()\n\n\n# In[25]:\n\n\n# see if there are any missing values in the data\ntrain_df.isnull().sum()\n\n\n# In[26]:\n\n\n# What is the distribution of numerical feature values across the samples?\ntrain_df.describe()\n\n\n# In[27]:\n\n\n# What is the distribution of categorical features?\ntrain_df.describe(include=['O'])\n\n\n# To confirm some of our observations and assumptions, we can quickly analyze our feature correlations by **pivoting features** against each other. We can only do so at this stage for features which **do not have any empty values**. It also makes sense doing so only for features which are _categorical_ (Sex), _ordinal_ (Pclass) or _discrete_ (SibSp, Parch) type.\n\n# In[28]:\n\n\n# We observe significant correlation (>0.5) among Pclass=1 and Survived (classifying #3)\n\ntrain_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# In[29]:\n\n\n# We confirm the observation during problem definition that Sex=female\n# had very high survival rate at 74% (classifying #1)\n\ntrain_df[[\"Sex\", \"Survived\"]].groupby(['Sex'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# In[30]:\n\n\n# These features have zero correlation for certain values.\n# It may be best to derive a feature or a set of features from these individual features (creating #1)\n\ntrain_df[[\"SibSp\", \"Survived\"]].groupby(['SibSp'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# In[31]:\n\n\ntrain_df[[\"Parch\", \"Survived\"]].groupby(['Parch'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# ### Correlating numerical features\n# Let us start by understanding **correlations between numerical features and our solution goal** (Survived).\n# \n# A **histogram chart** is useful for analyzing continous **numerical variables** like Age where banding or ranges will help identify useful patterns. The histogram can indicate distribution of samples using automatically defined bins or equally ranged bands. This helps us answer questions relating to specific bands.\n\n# In[17]:\n\n\n# Did infants have better survival rate?\ng = sns.FacetGrid(train_df, col='Survived')\ng.map(plt.hist, 'Age', bins=20)\n\n\n# ### Correlating numerical and ordinal features\n# We can combine multiple features for identifying **correlations using a single plot**. This can be done with numerical and categorical features which have numeric values.\n\n# In[17]:\n\n\ngrid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')\n#grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\ngrid.add_legend()\n\n\n# ### Correlating categorical features\n# Now we can **correlate categorical features** with our solution goal.\n\n# In[18]:\n\n\ngrid = sns.FacetGrid(train_df, col='Embarked')\n#grid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)\ngrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')\ngrid.add_legend()\n\n\n# ### Correlating categorical and numerical features\n# We may also want to **correlate categorical features (with non-numeric values) and numeric features**. 
We can consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), Fare (Numeric continuous), with Survived (Categorical numeric).\n\n# In[20]:\n\n\n#grid = sns.FacetGrid(train_df, col='Embarked', hue='Survived', palette={0: 'k', 1: 'w'})\ngrid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size=2.2, aspect=1.6)\ngrid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)\ngrid.add_legend()\n\n\n# In[33]:\n\n\n# Correcting by dropping features\n\nprint(\"Before\", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)\n\ntrain_df = train_df.drop(['Ticket', 'Cabin'], axis=1)\ntest_df = test_df.drop(['Ticket', 'Cabin'], axis=1)\ncombine = [train_df, test_df]\n\nprint(\"After\", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)\n\n\n# In[35]:\n\n\nfor dataset in combine:\n dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)\n\npd.crosstab(train_df['Title'], train_df['Sex'])\n\n\n# In[36]:\n\n\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n \ntrain_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()\n\n\n# In[37]:\n\n\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n\ntrain_df.head()\n\n\n# In[39]:\n\n\nfor dataset in combine:\n dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\n\ntrain_df.head()\n\n\n# In[41]:\n\n\ntrain_df = train_df.drop(['Name', 'PassengerId'], axis=1)\ntest_df = test_df.drop(['Name'], axis=1)\ncombine = [train_df, test_df]\ntrain_df.shape, test_df.shape\n\n\n# In[29]:\n\n\n#grid = sns.FacetGrid(train_df, col='Pclass', hue='Sex')\ngrid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\ngrid.add_legend()\n\n\n# In[42]:\n\n\nguess_ages = np.zeros((2,3))\nguess_ages\n\n\n# In[43]:\n\n\nfor dataset in combine:\n for i in range(0, 2):\n for j in range(0, 3):\n guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()\n\n # age_mean = guess_df.mean()\n # age_std = guess_df.std()\n # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)\n\n age_guess = guess_df.median()\n\n # Convert random age float to nearest .5 age\n guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5\n \n for i in range(0, 2):\n for j in range(0, 3):\n dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]\n\n dataset['Age'] = dataset['Age'].astype(int)\n\ntrain_df.head()\n\n\n# In[44]:\n\n\ntrain_df['AgeBand'] = pd.cut(train_df['Age'], 5)\n\ntrain_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean(). 
sort_values(by='AgeBand', ascending=True)\n\n\n# In[45]:\n\n\nfor dataset in combine: \n dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[ dataset['Age'] > 64, 'Age'] = 4\ntrain_df.head()\n\n\n# In[47]:\n\n\ntrain_df = train_df.drop(['AgeBand'], axis=1)\ncombine = [train_df, test_df]\ntrain_df.head()\n\n\n# In[48]:\n\n\nfor dataset in combine:\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\n\ntrain_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# In[49]:\n\n\nfor dataset in combine:\n dataset['IsAlone'] = 0\n dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\n\ntrain_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()\n\n\n# In[50]:\n\n\ntrain_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)\ntest_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)\ncombine = [train_df, test_df]\n\ntrain_df.head()\n\n\n# In[52]:\n\n\nfor dataset in combine:\n dataset['Age*Class'] = dataset.Age * dataset.Pclass\n\ntrain_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)\n\n\n# In[54]:\n\n\nfreq_port = train_df.Embarked.dropna().mode()[0]\nfreq_port\n\n\n# In[55]:\n\n\nfor dataset in combine:\n dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)\n \ntrain_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean(). sort_values(by='Survived', ascending=False)\n\n\n# In[56]:\n\n\nfor dataset in combine:\n dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)\n\ntrain_df.head()\n\n\n# In[58]:\n\n\ntest_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)\ntest_df.head()\n\n\n# In[59]:\n\n\ntrain_df['FareBand'] = pd.qcut(train_df['Fare'], 4)\ntrain_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean(). 
sort_values(by='FareBand', ascending=True)\n\n\n# In[60]:\n\n\nfor dataset in combine:\n dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\ntrain_df = train_df.drop(['FareBand'], axis=1)\ncombine = [train_df, test_df]\n \ntrain_df.head(10)\n\n\n# In[61]:\n\n\n# create a correlation matrix that measures the linear relationships between the variables\n\ncorrelation_matrix = train_df.corr().round(2)\nsns.heatmap(data=correlation_matrix, annot=True)\n\n\n# In[62]:\n\n\ntest_df.head(10)\n\n\n# In[63]:\n\n\nX_train = train_df.drop(\"Survived\", axis=1)\nY_train = train_df[\"Survived\"]\nX_test = test_df.drop(\"PassengerId\", axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape\n\n\n# In[64]:\n\n\n# Logistic Regression\n\nlogreg = LogisticRegression()\nlogreg.fit(X_train, Y_train)\n\nY_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, Y_train) * 100, 2)\nacc_log\n\n\n# In[65]:\n\n\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df.columns = ['Feature']\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\n\ncoeff_df.sort_values(by='Correlation', ascending=False)\n\n\n# In[66]:\n\n\n# Linear Regression\nfrom sklearn.linear_model import LinearRegression\n#from sklearn.metrics import mean_squared_error, r2_score\n\nlinreg = LinearRegression()\nlinreg.fit(X_train, Y_train)\n\nY_pred = linreg.predict(X_test)\nacc_lin = round(linreg.score(X_train, Y_train) * 100, 2)\nacc_lin\n\n\n# In[52]:\n\n\n# Support Vector Machines\n\nsvc = SVC()\nsvc.fit(X_train, Y_train)\n\nY_pred = svc.predict(X_test)\nacc_svc = round(svc.score(X_train, Y_train) * 100, 2)\nacc_svc\n\n\n# In[53]:\n\n\n# k-Nearest Neighbors\n\nknn = KNeighborsClassifier(n_neighbors = 3)\nknn.fit(X_train, Y_train)\n\nY_pred = knn.predict(X_test)\nacc_knn = round(knn.score(X_train, Y_train) * 100, 2)\nacc_knn\n\n\n# In[54]:\n\n\n# Gaussian Naive Bayes\n\ngaussian = GaussianNB()\ngaussian.fit(X_train, Y_train)\n\nY_pred = gaussian.predict(X_test)\nacc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)\nacc_gaussian\n\n\n# In[55]:\n\n\n# Perceptron\n\nperceptron = Perceptron()\nperceptron.fit(X_train, Y_train)\n\nY_pred = perceptron.predict(X_test)\nacc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)\nacc_perceptron\n\n\n# In[56]:\n\n\n# Linear SVC\n\nlinear_svc = LinearSVC()\nlinear_svc.fit(X_train, Y_train)\n\nY_pred = linear_svc.predict(X_test)\nacc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)\nacc_linear_svc\n\n\n# In[57]:\n\n\n# Stochastic Gradient Descent\n\nsgd = SGDClassifier()\nsgd.fit(X_train, Y_train)\n\nY_pred = sgd.predict(X_test)\nacc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)\nacc_sgd\n\n\n# In[58]:\n\n\n# Decision Tree\n\ndecision_tree = DecisionTreeClassifier()\ndecision_tree.fit(X_train, Y_train)\n\nY_pred = decision_tree.predict(X_test)\nacc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)\nacc_decision_tree\n\n\n# In[59]:\n\n\n# Random Forest\n\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\n\nY_pred = random_forest.predict(X_test)\nrandom_forest.score(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest\n\n\n# In[60]:\n\n\nmodels = pd.DataFrame({\n 'Model': 
['Support Vector Machines', 'KNN', 'Logistic Regression', \n 'Random Forest', 'Naive Bayes', 'Perceptron', \n 'Stochastic Gradient Decent', 'Linear SVC', \n 'Decision Tree'],\n 'Score': [acc_svc, acc_knn, acc_log, \n acc_random_forest, acc_gaussian, acc_perceptron, \n acc_sgd, acc_linear_svc, acc_decision_tree]})\nmodels.sort_values(by='Score', ascending=False)\n\n\n# In[61]:\n\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test_df[\"PassengerId\"],\n \"Survived\": Y_pred\n })\nsubmission.to_csv('titanic/submission.csv', index=False)\n\n","repo_name":"hjort/ai-labs","sub_path":"jupyter/titanic/Titanic Solution - Kaggle.py","file_name":"Titanic Solution - Kaggle.py","file_ext":"py","file_size_in_byte":13464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6039005964","text":"import os\nimport re\nimport hmac\nimport jinja2\nimport hashlib\nimport random\nimport webapp2\n\nfrom string import letters\nfrom google.appengine.ext import db\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),\n autoescape=True)\n\n# Global Functions ========================================\n\n\ndef render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n\ndef users_key(group='default'):\n \"\"\"get the key from user table\"\"\"\n return db.Key.from_path('users', group)\n\n\ndef blog_key(name='default'):\n \"\"\"get the key from blog table\"\"\"\n return db.Key.from_path('blogs', name)\n\n\n# Authentication ==========================================\n\nsecret = 'unhinged'\n\n\ndef make_pw_hash(name, password, salt=None):\n \"\"\"for password encryption\"\"\"\n if not salt:\n salt = make_salt()\n h = hashlib.sha256(name + password + salt).hexdigest()\n return '%s,%s' % (salt, h)\n\n\ndef make_salt(length=5):\n \"\"\"salt to secure the password\"\"\"\n return ''.join(random.choice(letters) for x in xrange(length))\n\n\ndef valid_pw(name, password, h):\n \"\"\"password validation by hashing\"\"\"\n salt = h.split(',')[0]\n return h == make_pw_hash(name, password, salt)\n\n\ndef make_secure_val(val):\n \"\"\"create secure cookie values\"\"\"\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())\n\n\ndef check_secure_val(secure_val):\n \"\"\"check secure cookie values\"\"\"\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val\n\n# Blog Handler =================================================\n\n\nclass BlogHandler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n params['user'] = self.user\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))\n\n def logout(self):\n self.response.headers.add_header(\n 'Set-Cookie',\n 'user_id=; Path=/')\n\n def set_secure_cookie(self, name, val):\n \"\"\"securely set a cookie\"\"\"\n cookie_val = make_secure_val(val)\n self.response.headers.add_header(\n 'Set-Cookie',\n '%s=%s; Path=/' % (name, cookie_val))\n\n def initialize(self, *a, **kw):\n \"\"\"get the user from secure cookie when we initializing\"\"\"\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))\n\n def read_secure_cookie(self, name):\n \"\"\"read the 
cookie\"\"\"\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)\n\n# User Stuff ==========================================================\n\n\nclass User(db.Model):\n \"\"\"create a database to store user info\"\"\"\n name = db.StringProperty(required=True)\n pw_hash = db.StringProperty(required=True)\n email = db.StringProperty()\n\n @classmethod\n def register(cls, name, pw, email=None):\n pw_hash = make_pw_hash(name, pw)\n return User(parent=users_key(),\n name=name,\n pw_hash=pw_hash,\n email=email)\n\n @classmethod\n def login(cls, username, password):\n u = User.by_name(username)\n if u and valid_pw(username, password, u.pw_hash):\n return u\n\n @classmethod\n def by_id(cls, uid):\n return User.get_by_id(uid, parent=users_key())\n\n @classmethod\n def by_name(cls, name):\n u = User.all().filter('name =', name).get()\n return u\n\n# Blog Stuff =======================================================\n\n\nclass Post(db.Model):\n \"\"\"create a database to store blog posts\"\"\"\n subject = db.StringProperty(required=True)\n content = db.TextProperty(required=True)\n user_id = db.IntegerProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n last_modified = db.DateTimeProperty(auto_now=True)\n likes = db.IntegerProperty(default=0)\n comment_count = db.IntegerProperty(default=0)\n\n def render(self, current_user_id):\n key = db.Key.from_path('User', int(self.user_id), parent=users_key())\n user = db.get(key)\n\n self._render_text = self.content.replace('\\n', '
<br>
')\n return render_str(\"post.html\", p=self, current_user_id=current_user_id,\n author=user.name)\n\n @classmethod\n def by_id(cls, uid):\n return Post.get_by_id(uid, parent=blog_key())\n\n\nclass Comment(db.Model):\n \"\"\"create a database to store all comments\"\"\"\n content = db.TextProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n last_modified = db.DateTimeProperty(auto_now=True)\n user_id = db.IntegerProperty(required=True)\n user_name = db.TextProperty(required=True)\n\n\nclass Like(db.Model):\n \"\"\"create a database to store all likes\"\"\"\n created = db.DateTimeProperty(auto_now_add=True)\n last_modified = db.DateTimeProperty(auto_now=True)\n user_id = db.IntegerProperty(required=True)\n post_id = db.IntegerProperty(required=True)\n\n\nclass BlogFrontHandler(BlogHandler):\n \"\"\"show all the posts in the front page\"\"\"\n\n def get(self):\n posts = db.GqlQuery(\n \"select * from Post order by created desc\")\n\n self.render('main-page.html', posts=posts)\n\n# Posts ===============================================================\n\n\nclass PostHandler(BlogHandler):\n\n def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n\n comments = db.GqlQuery(\n '''select * from Comment where ancestor is :1\n order by created asc''', key)\n\n if not post:\n self.error(404)\n return\n\n self.render(\"permalink.html\", post=post, comments=comments)\n\n# Like Post ==========================================================\n\n\nclass LikePostHandler(BlogHandler):\n\n def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n error = \"ERROR: Post not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.key().id() == post.user_id:\n error = \"ERROR: You can not like your own post.\"\n self.render('main-page.html', access_error=error)\n elif not self.user:\n self.redirect('/login')\n else:\n user_id = self.user.key().id()\n post_id = post.key().id()\n\n like = Like.all().filter('user_id =', user_id).filter(\n 'post_id =', post_id).get()\n\n if like:\n self.redirect('/' + str(post.key().id()))\n\n else:\n like = Like(parent=key,\n user_id=self.user.key().id(),\n post_id=post.key().id())\n\n post.likes += 1\n\n like.put()\n post.put()\n\n self.redirect('/' + str(post.key().id()))\n\n# Unlike Post ========================================================\n\n\nclass UnlikePostHandler(BlogHandler):\n\n def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n error = \"ERROR: Post not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.key().id() == post.user_id:\n self.write(\"You cannot dislike your own post\")\n elif not self.user:\n self.redirect('/login')\n else:\n user_id = self.user.key().id()\n post_id = post.key().id()\n\n l = Like.all().filter('user_id =',\n user_id).filter('post_id =', post_id).get()\n\n if l:\n l.delete()\n post.likes -= 1\n post.put()\n\n self.redirect('/' + str(post.key().id()))\n else:\n self.redirect('/' + str(post.key().id()))\n\n# New Post ===========================================================\n\n\nclass NewPostHandler(BlogHandler):\n\n def get(self):\n if self.user:\n self.render(\"new-post.html\")\n else:\n error = \"You must be signed in to create a post.\"\n self.render(\"index.html\", access_error=error)\n\n def post(self):\n if not self.user:\n 
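# guests are sent to the login page instead of being allowed to create a post\n            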
return self.redirect('/login')\n\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n if subject and content:\n p = Post(parent=blog_key(), subject=subject,\n content=content, user_id=self.user.key().id())\n p.put()\n self.redirect('/%s' % str(p.key().id()))\n else:\n error = \"Please fill up the fields.\"\n self.render(\"new-post.html\", subject=subject,\n content=content, error=error)\n\n# Edit Post ===========================================================\n\n\nclass EditPostHandler(BlogHandler):\n\n def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n error = \"ERROR: Post not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.key().id() == post.user_id:\n self.render('edit-post.html', subject=post.subject,\n content=post.content, post_id=post_id)\n\n elif not self.user:\n self.redirect('/login')\n\n else:\n self.write(\"You cannot edit your own posts.\")\n\n def post(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n error = \"ERROR: Post not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.key().id() == post.user_id:\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n if subject and content:\n post.subject = subject\n post.content = content\n\n post.put()\n\n self.redirect('/%s' % str(post.key().id()))\n else:\n error = \"Error: Please include subject and content\"\n self.render(\"new-post.html\", subject=subject,\n content=content, error=error)\n\n elif not self.user:\n return self.redirect('/login')\n\n else:\n self.write(\"You cannot edit this post.\")\n\n# Delete Post =========================================================\n\n\nclass DeletePostHandler(BlogHandler):\n\n def get(self, post_id, user_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n error = \"ERROR: Post not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.key().id() == post.user_id:\n post.delete()\n\n self.redirect('/')\n\n elif not self.user:\n self.redirect('/login')\n\n else:\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n\n comments = db.GqlQuery(\n '''select * from Comment where ancestor is :1\n order by created desc limit 10''', key)\n\n error = \"You don't have permission to delete this post\"\n self.render(\"permalink.html\", post=post,\n comments=comments, error=error)\n\n# Add Comment ========================================================\n\n\nclass AddCommentHandler(BlogHandler):\n\n def get(self, post_id, user_id):\n if not self.user:\n self.render('/login')\n else:\n self.render(\"new-comment.html\")\n\n def post(self, post_id, user_id):\n if not self.user:\n return\n\n content = self.request.get('content')\n if content:\n user_name = self.user.name\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n if not post:\n self.error(404)\n\n c = Comment(parent=key, user_id=int(user_id), content=content,\n user_name=user_name)\n c.put()\n post.comment_count += 1\n post.put()\n\n self.redirect('/' + post_id)\n else:\n error = \"Error : Please fill up the fields.\"\n self.render(\"new-comment.html\",\n content=content, error=error)\n\n\n# Edit Comment ========================================================\n\nclass 
EditCommentHandler(BlogHandler):\n\n def get(self, post_id, author, comment_id):\n\n postKey = db.Key.from_path('Post', int(post_id), parent=blog_key())\n key = db.Key.from_path('Comment', int(comment_id), parent=postKey)\n comment = db.get(key)\n if not comment:\n error = \"ERROR: Comment not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.name == comment.user_name:\n self.render('edit-comment.html', content=comment.content)\n\n elif not self.user:\n self.redirect('/login')\n\n else:\n error = \"You cannot edit other users' comments\"\n self.render(\"edit-comment.html\", edit_error=error)\n\n def post(self, post_id, author, comment_id):\n if not self.user:\n return\n\n if self.request.get:\n content = self.request.get('content')\n if content:\n\n postKey = db.Key.from_path('Post', int(post_id),\n parent=blog_key())\n key = db.Key.from_path('Comment', int(comment_id),\n parent=postKey)\n comment = db.get(key)\n\n if not comment:\n error = \"ERROR: Comment not found\"\n return self.render('main-page.html', like_error=error)\n if comment.user_name == self.user.name:\n comment.content = content\n comment.put()\n\n if not content:\n error = \"Error: Please fill up all the fields.\"\n return self.render('edit-comment.html',\n content=content, error=error)\n\n self.redirect('/' + post_id)\n\n else:\n self.write(\"You don't have permission to edit this comment.\")\n\n# Delete Comment ======================================================\n\n\nclass DeleteCommentHandler(BlogHandler):\n\n def get(self, post_id, author, comment_id):\n postKey = db.Key.from_path('Post', int(post_id), parent=blog_key())\n key = db.Key.from_path('Comment', int(comment_id), parent=postKey)\n comment = db.get(key)\n if not comment:\n error = \"ERROR: Comment not found\"\n return self.render('main-page.html', like_error=error)\n\n if self.user and self.user.name == comment.user_name:\n # only the comment's author may delete it\n comment.delete()\n self.redirect('/' + post_id)\n\n elif not self.user:\n self.redirect('/login')\n\n else:\n error = \"You cannot delete other users' comments\"\n self.render(\"edit-comment.html\", edit_error=error)\n\n# Validation for login / signup =======================================\n\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n\n\ndef valid_username(username):\n return username and USER_RE.match(username)\n\nPASS_RE = re.compile(r\"^.{3,20}$\")\n\n\ndef valid_password(password):\n return password and PASS_RE.match(password)\n\nEMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\n\n\ndef valid_email(email):\n return not email or EMAIL_RE.match(email)\n\n# Login ===============================================================\n\n\nclass LoginHandler(BlogHandler):\n\n def get(self):\n self.render('login.html')\n\n def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n\n u = User.login(username, password)\n\n if u:\n self.login(u)\n self.redirect('/')\n else:\n msg = 'Invalid Username or Password'\n self.render('login.html', error=msg)\n\n# Logout ==============================================================\n\n\nclass LogoutHandler(BlogHandler):\n\n def get(self):\n self.logout()\n self.redirect('/')\n\n# Sign up =============================================================\n\n\nclass SignupHandler(BlogHandler):\n\n def done(self):\n u = User.by_name(self.username)\n\n if u:\n error = 'That user already exists.'\n self.render('signup.html', error=error)\n\n else:\n u = 
User.register(self.username, self.password, self.email)\n u.put()\n\n self.login(u)\n self.redirect('/')\n\n def get(self):\n self.render('signup.html')\n\n def post(self):\n have_error = False\n self.username = self.request.get('username')\n self.password = self.request.get('password')\n self.verify = self.request.get('verify')\n self.email = self.request.get('email')\n\n params = dict(username=self.username,\n email=self.email)\n\n if not valid_username(self.username):\n params['error'] = \"Error : Invalid username\"\n have_error = True\n\n if not valid_password(self.password):\n params['error'] = \"Error : Invalid password\"\n have_error = True\n\n elif self.password != self.verify:\n params['error'] = \"Error : Your passwords didn't match\"\n have_error = True\n\n if not valid_email(self.email):\n params['error'] = \"Error : That's not a valid email\"\n have_error = True\n\n if have_error:\n self.render('signup.html', **params)\n else:\n self.done()\n\n\napp = webapp2.WSGIApplication([\n ('/', BlogFrontHandler),\n ('/signup', SignupHandler),\n ('/login', LoginHandler),\n ('/logout', LogoutHandler),\n ('/newpost', NewPostHandler),\n ('/([0-9]+)', PostHandler),\n ('/([0-9]+)/like', LikePostHandler),\n ('/([0-9]+)/unlike', UnlikePostHandler),\n ('/([0-9]+)/edit', EditPostHandler),\n ('/([0-9]+)/delete/([0-9]+)', DeletePostHandler),\n ('/([0-9]+)/addcomment/([0-9]+)', AddCommentHandler),\n ('/([0-9]+)/([0-9]+)/editcomment/([0-9]+)', EditCommentHandler),\n ('/([0-9]+)/([0-9]+)/deletecomment/([0-9]+)', DeleteCommentHandler)\n], debug=True)\n","repo_name":"shubh305/Udacity-Multi-User-Blog-Project-FSND-","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":19071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25751247882","text":"from django import template\n\nregister = template.Library()\n\n@register.filter(name='phone_number')\ndef phone_number(number):\n\t\"\"\"Convert 10 character string into (xxx) xxx-xxxx.\"\"\"\n\tfirst = str(number[0:3])\n\tsecond = str(number[3:6])\n\tthird = str(number[6:10])\n\treturn '(' + first + ') ' + second + '-' + third\n","repo_name":"pineapplejuice/earc2-members","sub_path":"manage_members/templatetags/member_filters.py","file_name":"member_filters.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29398591975","text":"# !/usr/bin/env python\r\n# -*- coding:utf-8 -*- \r\n# author:辉nono 2019/3/22 0022 22:28\r\n\r\nimport re\r\nimport time\r\nimport requests\r\nimport pytesser3\r\nfrom lxml import etree\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver import ActionChains\r\n\r\nclass Geetest(object):\r\n def __init__(self):\r\n options = Options()\r\n options.add_argument('--window-size=1366,768')\r\n self.driver = webdriver.Chrome(chrome_options=options)\r\n self.wait = WebDriverWait(self.driver,10)\r\n def go_to_register(self):\r\n try:\r\n self.driver.get(\"https://www.huxiu.com/\")\r\n self.wait.until(EC.presence_of_element_located((\r\n By.XPATH,'//a[@class=\"js-login\"]'))).click()\r\n self.wait.until(EC.presence_of_element_located((\r\n 
By.XPATH,'//input[@id=\"sms_username\"]'\r\n ))).send_keys(\"smartliu it\")\r\n except Exception as e:\r\n print('Registration step failed: %s' % e)\r\n self.driver.quit()\r\n def get_image(self):\r\n # full background image (no gap)\r\n get_image_list = self.driver.find_elements_by_xpath('//div[@class=\"gt_cut_fullbg gt_show\"]/div')\r\n print(len(get_image_list))\r\n style_list = [i.get_attribute('style') for i in get_image_list]\r\n image_url = re.search(r'url\\(\"(.*?)\"\\);',style_list[0]).group(1)\r\n image_content = requests.get(image_url).content\r\n get_image = self.get_complete_image(style_list,image_content)\r\n # background image with the gap cut out (note the different CSS class)\r\n cut_get_image_list = self.driver.find_elements_by_xpath('//div[@class=\"gt_cut_bg gt_show\"]/div')\r\n cut_style_list = [i.get_attribute('style') for i in cut_get_image_list]\r\n cut_image_url = re.search(r'url\\(\"(.*?)\"\\);',cut_style_list[0]).group(1)\r\n cut_image_content = requests.get(cut_image_url).content\r\n cut_get_image = self.get_complete_image(cut_style_list,cut_image_content)\r\n # compare the two images to locate the gap\r\n return self.compare_image(get_image,cut_get_image)\r\n def get_complete_image(self,style_list,image):\r\n image_position_list = [re.findall(r'background-position: -(.*?)px -?(.*?)px;',i) for i in style_list]\r\n new_im = Image.new(\"RGB\",(260,116))\r\n im = Image.open(BytesIO(image))\r\n up_count = dn_count = 0\r\n for i in image_position_list[:26]:\r\n croped = im.crop((int(i[0][0]),58,int(i[0][0]) + 10,116))\r\n new_im.paste(croped,(up_count,0))\r\n up_count += 10\r\n for i in image_position_list[26:]:\r\n croped = im.crop((int(i[0][0]),0,int(i[0][0]) + 10,58))\r\n new_im.paste(croped,(dn_count,58))\r\n dn_count += 10\r\n return new_im\r\n def compare_image(self,cut,no_cut):\r\n def compare_pixel(pixel1,pixel2):\r\n for i in range(3):\r\n if abs(pixel1[i]-pixel2[i])>50:\r\n return False\r\n return True\r\n for i in range(260):\r\n for j in range(116):\r\n pixel1 = cut.getpixel((i,j))\r\n pixel2 = no_cut.getpixel((i,j))\r\n if compare_pixel(pixel1,pixel2) is False:\r\n return i\r\n def slide(self,distance):\r\n button = self.wait.until(EC.visibility_of_element_located((\r\n By.XPATH,'//div[@class=\"gt_slider_knob gt_show\"]'\r\n )))\r\n ActionChains(self.driver).click_and_hold(button).perform()\r\n for i in self.track(distance-5):\r\n ActionChains(self.driver).move_by_offset(i,0).perform()\r\n ActionChains(self.driver).release().perform()\r\n def track(self,distance):\r\n t = 0.2\r\n current = 0\r\n mid = distance * 0.6\r\n speed = 0\r\n move_distance_list = []\r\n while current < distance:\r\n if current < mid:\r\n a = 5\r\n else:\r\n a = -10\r\n move_distance = speed*t + 0.5*a*t*t\r\n move_distance_list.append(round(move_distance))\r\n speed += (a*t)\r\n current += move_distance\r\n # correct any overshoot or undershoot before the final shake\r\n offset = sum(move_distance_list) - distance\r\n if offset > 0:\r\n move_distance_list.extend([-1]*offset)\r\n elif offset<0:\r\n move_distance_list.extend([1]*abs(offset))\r\n move_distance_list.extend([-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,0,0,-1,-1,-1,-1])\r\n return move_distance_list\r\n\r\nif __name__=='__main__':\r\n gee = Geetest()\r\n gee.go_to_register()\r\n distance = gee.get_image()\r\n gee.slide(distance)\r\n","repo_name":"college20/spiders","sub_path":"hd_YZM.py","file_name":"hd_YZM.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9398678842","text":"#reqd library\nimport random\nimport time\nimport sys\nfrom datetime import datetime\nfrom datetime import 
date\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nprint(\"YOU CAN DO THE FOLLOWING WORK HERE: \")\ntime.sleep(1)\nprint(\"1)Booking\")\ntime.sleep(1)\nprint(\"2)Cancellation\")\ntime.sleep(1)\nprint(\"3)Exit\")\nBooking = \"Booking\"\nCancellation = \"Cancellation\"\nExit = \"Exit\"\ntime.sleep(1)\nwork = input(\"Enter work: \")\nif work == Booking:\n#info about the destination\n today = date.today()\n date1 = input(\"Enter date in dd-mm-yyyy format: \")\n date2 = datetime.strptime(date1, \"%d-%m-%Y\").date()\n while date2 <= today:\n if date2 <= today:\n print(\"Date cannot be earlier than today.\")\n date1 = input(\"Enter date in dd-mm-yyyy format: \")\n date2 = datetime.strptime(date1, \"%d-%m-%Y\").date()\n if date2 > today:\n fdate = date2\n\n if date2 > today:\n fdate = date2\n print(\"Date has been noted.\")\n\n print(\"CHOOSE DESTINATION: \")\n time.sleep(1)\n print(\"Bangalore\")\n time.sleep(1)\n print(\"Kolkata\")\n time.sleep(1)\n print(\"Mumbai\")\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n Bangalore = 'Bangalore'\n Kolkata = 'Kolkata'\n Mumbai = 'Mumbai'\n\n time.sleep(1)\n des = input(\"ENTER DESTINATION: \")\n #input data bout states in this indent block\n if des == Bangalore:\n time.sleep(1)\n print(\"Available flights: \")\n time.sleep(1)\n print(\"OPTION#1 @ Rs.1000\")\n time.sleep(1)\n print(\"OPTION#2 @ Rs.2000\")\n OPTION1 = \"OPTION1\"\n OPTION2 = 'OPTION2'\n option = input(\"Flight chosen: \")\n #input flight data in this indent block\n if option == OPTION1:\n print(\"OPTION1 chosen.\")\n print(\"This airline supports snacks.\")\n print(\"Do you want snack for additional Rs. 500?(Yes/No)\")\n Yes = \"Yes\"\n No = \"No\"\n food = input(\"Food: \")\n if food == Yes:\n print(\"Ok\")\n print(\"Total stands at Rs 1500\")\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n elif food == No:\n print(\"OK\")\n print(\"Total stands at Rs. 1000\")\n if option == OPTION2:\n print(\"OPTION2 chosen\")\n print(\"This airline does not support snacks\")\n pay2 = 2000\n if des == Kolkata:\n time.sleep(1)\n print(\"OPTION#3 @Rs. 3000\")\n time.sleep(1)\n print(\"OPTION#4 @Rs. 
2500\")\n print(\"Both flights offer no snacks\")\n OPTION3 = \"OPTION3\"\n OPTION4 = 'OPTION4'\n option = input(\"Flight chosen: \")\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n n = int(input(\"ENTER NUMBER OF PERSON: \"))\n if des == Bangalore:\n if option == OPTION1:\n if food == Yes:\n gt = n * 1500\n if food == No:\n gt = n * 1000\n elif option == OPTION2:\n gt = n * 2000\n if des == Kolkata:\n if option == OPTION3:\n gt = n * 3000\n if option == OPTION4:\n gt = n * 2500\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n if des == Mumbai:\n time.sleep(1)\n print(\"Available flights: \")\n time.sleep(1)\n print(\"OPTION#5 @ Rs.5000\")\n time.sleep(1)\n print(\"OPTION#6 @ Rs.6000\")\n print(\"OPTION6 offers snacks @Rs 1000.\")\n OPTION5 = \"OPTION5\"\n OPTION6 = 'OPTION6'\n option = input(\"Flight chosen: \")\n if des == Mumbai:\n if option == OPTION5:\n gt = 5000 * n\n elif option == OPTION6:\n food = input(\"Snacks required(Yes/No): \")\n Yes = 'Yes'\n No = 'No'\n if food == Yes:\n gt = 7000 * n\n if food == No:\n gt = 6000 * n\n\n\n\n dict1 = {}\n\n for i in range(n):\n x = input(\"ENTER NAME: \")\n y = random.randrange(0, 99999)\n z = int(input(\"ENTER CONTACT NUMBER: \"))\n\n dict1[x] = y, z, des, fdate, option\n\n print(\"CHECK VALUES: \")\n print(\"Your Grand Total stands at:\", gt)\n print('FORMAT: Name, ID number, Destination, Date, Flight')\n print(\"USER'S INFO\", dict1)\n sys.stdout = open(\"test.txt\", \"a+\")\n print('FORMAT: Name, ID number, Phone Number, Destination, Date, Flight')\n print(dict1)\n print(\"Amount Due:\", gt)\n sys.stdout.close()\n\n sys.stdout = open(\"Backup.txt\", \"a+\")\n print('FORMAT: Name, ID number, Destination, Date, Flight')\n print(dict1)\n print(\"Amount Due:\", gt)\n sys.stdout.close()\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nif work == Cancellation:\n dummy = input(\"Enter ID number: \")\n with open(\"test.txt\", \"r+\") as f:\n new_f = f.readlines()\n f.seek(0)\n for line in new_f:\n if dummy not in line:\n f.write(line)\n f.truncate()\n print(\"Ok, your booking has been cancelled.\")\n\n with open(\"Backup.txt\", \"r+\") as g:\n new_g = g.readlines()\n g.seek(0)\n for line in new_g:\n if dummy not in line:\n g.write(line)\n g.truncate()\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\nelif work == \"Exit\":\n print(\"Thank you.\")\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------\n","repo_name":"DarkPharoah1/Ticket-resv","sub_path":"Ticket_resv_final.py","file_name":"Ticket_resv_final.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72969601813","text":"f = open('input.txt')\n\nfile = []\nfor line in f:\n line = line.replace('\\n', '')\n line = line.split(' ')\n line = [int(i) for i in line]\n file.append(line)\n\nsols = []\nfor line in file[1:]:\n found = False\n for item in line:\n if item < 0 and abs(item) <= file[0][1] and abs(item) in line:\n found = 
True\n if line.index(item) < line.index(abs(item)):\n sols.append([line.index(item)+1, line.index(abs(item))+1])\n elif line.index(abs(item)) < line.index(item):\n sols.append([line.index(abs(item))+1, line.index(item)+1])\n\n if found == True:\n break\n if found == False:\n sols.append([-1])\n\nprint(sols)\n\nfor item in sols:\n print(' '.join([str(x) for x in item]))\n\n","repo_name":"keithgmitchell/Bioinformatics-Competitive-Programming","sub_path":"rosalind/2sum/2sum.py","file_name":"2sum.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38887534696","text":"\"\"\"add a category table and an association table with the blog\n\nRevision ID: 3f6f8868282b\nRevises: 60be1820bd41\nCreate Date: 2021-09-26 08:01:10.397962\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3f6f8868282b'\ndown_revision = '60be1820bd41'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('category',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('category_name', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('category_choice',\n sa.Column('blog_id', sa.Integer(), nullable=True),\n sa.Column('category_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ),\n sa.ForeignKeyConstraint(['category_id'], ['category.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('category_choice')\n op.drop_table('category')\n # ### end Alembic commands ###\n","repo_name":"Ken-mbira/BLOG_SPOT","sub_path":"migrations/versions/3f6f8868282b_add_a_category_table_and_an_association_.py","file_name":"3f6f8868282b_add_a_category_table_and_an_association_.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15187410824","text":"import time\n\nfrom front_base.host_manager import HostManagerBase\nfrom sni_manager import SniManager\n\n\nclass HostManager(HostManagerBase):\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.appid_manager = None\n\n self.sni_manager = SniManager(logger)\n\n def get_sni_host(self, ip):\n if not self.appid_manager:\n raise Exception()\n\n sni = self.sni_manager.get()\n appid = self.appid_manager.get()\n if not appid:\n self.logger.warn(\"no appid\")\n time.sleep(10)\n raise Exception()\n\n top_domain = appid + \".appspot.com\"\n return sni, top_domain\n\n","repo_name":"pipili0131/https-github.com-XX-net-XX-Net","sub_path":"code/default/gae_proxy/local/host_manager.py","file_name":"host_manager.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32981994850","text":"from __future__ import division, print_function, absolute_import\n\n# Import Python modules\nimport os\nimport sys\nimport numpy as np\n\n# Import seismtools needed classes\nfrom ts_library import TimeseriesComponent\n\ndef reverse_up_down(station):\n \"\"\"\n reverse up down component\n \"\"\"\n # station has 3 components [ns, ew, ud]\n # only need to flip the 3rd one\n station[2].acc *= -1\n station[2].vel *= -1\n station[2].dis *= -1\n\n return station\n# end of 
reverse_up_down\n\ndef scale_from_m_to_cm(station):\n # scales timeseries from meters to centimeters\n for i in range(0, len(station)):\n station[i].acc *= 100\n station[i].vel *= 100\n station[i].dis *= 100\n\n return station\n# end of scale_from_m_to_cm\n\ndef get_dt(input_file):\n \"\"\"\n Read timeseries file and return dt\n \"\"\"\n val1 = None\n val2 = None\n file_dt = None\n\n # Figure out dt first, we need it later\n ifile = open(input_file)\n for line in ifile:\n # Skip comments\n if line.startswith(\"#\") or line.startswith(\"%\"):\n continue\n pieces = line.split()\n pieces = [float(piece) for piece in pieces]\n if val1 is None:\n val1 = pieces[0]\n continue\n if val2 is None:\n val2 = pieces[0]\n break\n ifile.close()\n\n # Quit if cannot figure out dt\n if val1 is None or val2 is None:\n print(\"[ERROR]: Cannot determine dt from file! Exiting...\")\n sys.exit(1)\n\n # Return dt\n return val2 - val1\n# end get_dt\n\ndef read_files(obs_file, input_files):\n \"\"\"\n Reads all input files\n \"\"\"\n # read obs data\n obs_data = None\n if obs_file is not None:\n obs_data = read_file(obs_file)\n # Make sure we got it\n if not obs_data:\n print(\"[ERROR]: Reading obs file: %s!\" % (obs_file))\n sys.exit(-1)\n # Fix units if needed\n if obs_file.lower().endswith(\".bbp\"):\n units = read_unit_bbp(obs_file)\n # If in meters, scale to cm\n if units == \"m\":\n obs_data = scale_from_m_to_cm(obs_data)\n else:\n print(\"[ERROR]: Unknown file format: %s!\" % (obs_file))\n sys.exit(-1)\n\n # reads signals\n stations = []\n for input_file in input_files:\n station = read_file(input_file)\n # Make sure we got it\n if not station:\n print(\"[ERROR]: Reading input file: %s!\" % (input_file))\n sys.exit(-1)\n # Fix units if needed\n if input_file.lower().endswith(\".bbp\"):\n units = read_unit_bbp(input_file)\n # If in meters, scale to cm\n if units == \"m\":\n station = scale_from_m_to_cm(station)\n else:\n print(\"[ERROR]: Unknown file format: %s!\" % (obs_file))\n sys.exit(-1)\n\n # Done with this station\n stations.append(station)\n\n # all done\n return obs_data, stations\n\ndef read_filelist(filelist):\n \"\"\"\n This function reads the filelist provided by the user\n \"\"\"\n station_list = []\n coor_x = []\n coor_y = []\n\n try:\n input_file = open(filelist, 'r')\n except IOError:\n print(\"[ERROR]: error loading filelist.\")\n sys.exit(-1)\n\n for line in input_file:\n if not '#' in line:\n line = line.split()\n # Get station name and make substitution\n station_name = line[0]\n station_name = station_name.replace(\".\", \"_\")\n\n if len(line) == 1:\n # not containing coordinates\n station_list.append(station_name)\n coor_x.append(0.0)\n coor_y.append(0.0)\n elif len(line) == 3:\n # containing coordinates\n station_list.append(station_name)\n try:\n coor_x.append(float(line[1]))\n coor_y.append(float(line[2]))\n except ValueError:\n coor_x.append(0.0)\n coor_y.append(0.0)\n\n # Close the input file\n input_file.close()\n\n return station_list, coor_x, coor_y\n# end of read_filelist\n\n# ================================ READING ================================\ndef read_file(filename):\n \"\"\"\n This function reads a timeseries file in bbp format\n \"\"\"\n if filename.lower().endswith(\".bbp\"):\n # Filename in bbp format\n print(\"[READING]: %s\" % (filename))\n return read_file_bbp(filename)\n # Unknown file format\n print(\"[ERROR]: Unknown file format: %s!\" % (filename))\n sys.exit(-1)\n# end of read_file\n\ndef read_file_bbp2(filename):\n \"\"\"\n This function reads a bbp file 
and returns the timeseries in the\n format time, h1, h2, up tuple\n \"\"\"\n time = []\n h1_comp = []\n h2_comp = []\n ud_comp = []\n\n try:\n input_file = open(filename, 'r')\n for line in input_file:\n line = line.strip()\n if line.startswith('#') or line.startswith('%'):\n # Skip comments\n continue\n # Trim in-line comments\n if line.find('#') > 0:\n line = line[:line.find('#')]\n if line.find('%') > 0:\n line = line[:line.find('%')]\n # Make them float\n pieces = line.split()\n pieces = [float(piece) for piece in pieces]\n time.append(pieces[0])\n h1_comp.append(pieces[1])\n h2_comp.append(pieces[2])\n ud_comp.append(pieces[3])\n except IOError:\n print(\"[ERROR]: error reading bbp file: %s\" % (filename))\n sys.exit(1)\n\n # Convert to NumPy Arrays\n time = np.array(time)\n h1_comp = np.array(h1_comp)\n h2_comp = np.array(h2_comp)\n ud_comp = np.array(ud_comp)\n\n # All done!\n return time, h1_comp, h2_comp, ud_comp\n# end of read_file_bbp2\n\ndef read_file_bbp(filename):\n \"\"\"\n This function reads timeseries data from a set of BBP files\n \"\"\"\n # Get filenames for displacement, velocity and acceleration bbp files\n work_dir = os.path.dirname(filename)\n base_file = os.path.basename(filename)\n\n base_tokens = base_file.split('.')[0:-2]\n if not base_tokens:\n print(\"[ERROR]: Invalid BBP filename: %s\" % (filename))\n sys.exit(1)\n dis_tokens = list(base_tokens)\n vel_tokens = list(base_tokens)\n acc_tokens = list(base_tokens)\n\n dis_tokens.append('dis')\n vel_tokens.append('vel')\n acc_tokens.append('acc')\n\n dis_tokens.append('bbp')\n vel_tokens.append('bbp')\n acc_tokens.append('bbp')\n\n dis_file = os.path.join(work_dir, '.'.join(dis_tokens))\n vel_file = os.path.join(work_dir, '.'.join(vel_tokens))\n acc_file = os.path.join(work_dir, '.'.join(acc_tokens))\n\n # Read 3 bbp files\n [time, dis_h1, dis_h2, dis_ver] = read_file_bbp2(dis_file)\n [_, vel_h1, vel_h2, vel_ver] = read_file_bbp2(vel_file)\n [_, acc_h1, acc_h2, acc_ver] = read_file_bbp2(acc_file)\n\n # Read orientation from one of the files\n orientation = read_orientation_bbp(vel_file)\n\n # Read padding information from one of the files\n padding = read_padding_bbp(vel_file)\n\n samples = dis_h1.size\n delta_t = time[1]\n\n # samples, dt, data, acceleration, velocity, displacement\n signal_h1 = TimeseriesComponent(samples, delta_t, orientation[0],\n acc_h1, vel_h1, dis_h1, padding=padding)\n signal_h2 = TimeseriesComponent(samples, delta_t, orientation[1],\n acc_h2, vel_h2, dis_h2, padding=padding)\n signal_ver = TimeseriesComponent(samples, delta_t, orientation[2],\n acc_ver, vel_ver, dis_ver, padding=padding)\n\n station = [signal_h1, signal_h2, signal_ver]\n return station\n# end of read_file_bbp\n\ndef read_file_her(filename):\n \"\"\"\n The function is to read 10-column .her files.\n Return a list of psignals for each orientation.\n \"\"\"\n time, dis_ns, dis_ew, dis_up = [np.array([], float) for _ in range(4)]\n vel_ns, vel_ew, vel_up = [np.array([], float) for _ in range(3)]\n acc_ns, acc_ew, acc_up = [np.array([], float) for _ in range(3)]\n\n try:\n (time, dis_ns, dis_ew, dis_up, vel_ns, vel_ew,\n vel_up, acc_ns, acc_ew, acc_up) = np.loadtxt(filename,\n comments='#',\n unpack=True)\n except IOError:\n print(\"[ERROR]: error loading her file.\")\n return False\n\n samples = dis_ns.size\n delta_t = time[1]\n\n # samples, dt, orientation, acceleration, velocity, displacement\n # right now the values for orientation for the her file are hardcoded here\n signal_ns = TimeseriesComponent(samples, delta_t, 
0.0,\n acc_ns, vel_ns, dis_ns)\n signal_ew = TimeseriesComponent(samples, delta_t, 90.0,\n acc_ew, vel_ew, dis_ew)\n signal_up = TimeseriesComponent(samples, delta_t, \"UP\",\n acc_up, vel_up, dis_up)\n\n station = [signal_ns, signal_ew, signal_up]\n return station\n# end of read_file_her\n\ndef read_unit_bbp(filename):\n \"\"\"\n Get the units from the file's header\n Returns either \"m\" or \"cm\"\n \"\"\"\n units = None\n\n try:\n input_file = open(filename, 'r')\n for line in input_file:\n if line.find(\"units=\") > 0:\n units = line.split()[2]\n break\n input_file.close()\n except IOError:\n print(\"[ERROR]: No such file.\")\n sys.exit(-1)\n\n # Make sure we got something\n if units is None:\n print(\"[ERROR]: Cannot find units in bbp file!\")\n sys.exit(-1)\n\n # Figure out if we have meters or centimeters\n if units == \"cm\" or units == \"cm/s\" or units == \"cm/s^2\":\n return \"cm\"\n elif units == \"m\" or units == \"m/s\" or units == \"m/s^2\":\n return \"m\"\n\n # Invalid units in this file\n print(\"[ERROR]: Cannot parse units in bbp file!\")\n sys.exit(-1)\n# end of read_unit_bbp\n\ndef read_padding_bbp(filename):\n \"\"\"\n Get the padding information from a BBP file's header\n \"\"\"\n padding = 0\n\n try:\n input_file = open(filename, 'r')\n for line in input_file:\n if line.find(\"padding=\") > 0:\n line = line.strip()\n padding = line[(line.find(\"=\") + 1):]\n padding = int(float(padding))\n break\n input_file.close()\n except IOError:\n print(\"[ERROR]: No such file.\")\n sys.exit(-1)\n\n # All done!\n return padding\n# end of read_padding_bbp\n\ndef read_orientation_bbp(filename):\n \"\"\"\n Get the orientation from the file's header\n \"\"\"\n orientation = None\n\n try:\n input_file = open(filename, 'r')\n for line in input_file:\n if line.find(\"orientation=\") > 0:\n line = line.strip()\n orientation = line[(line.find(\"=\") + 1):]\n orientation = orientation.strip().split(\",\")\n orientation = [val.strip() for val in orientation]\n orientation[0] = float(orientation[0])\n orientation[1] = float(orientation[1])\n orientation[2] = orientation[2].lower()\n if orientation[2] != \"up\" and orientation[2] != \"down\":\n print(\"[ERROR]: Vertical orientation must be up or down!\")\n sys.exit(-1)\n break\n input_file.close()\n except IOError:\n print(\"[ERROR]: No such file.\")\n sys.exit(-1)\n\n # Make sure we got something\n if orientation is None:\n print(\"[ERROR]: Cannot find orientation in bbp file: %s!\" % (filename))\n sys.exit(-1)\n\n # All done!\n return orientation\n# end of read_orientation_bbp\n\ndef read_stamp(filename):\n \"\"\"\n Get the time stamp from file's header\n \"\"\"\n if filename.endswith(\".bbp\"):\n # File in bbp format\n return read_stamp_bbp(filename)\n # Otherwise use hercules format\n return read_stamp_her(filename)\n# end of read_stamp\n\ndef read_stamp_bbp(filename):\n \"\"\"\n Get the time stamp from the bbp file's header\n \"\"\"\n try:\n input_file = open(filename, 'r')\n for line in input_file:\n if line.find(\"time=\") > 0:\n stamp = line.split()[2].split(',')[-1].split(':')\n break\n input_file.close()\n except IOError:\n print(\"[ERROR]: No such file.\")\n return []\n\n # Converting time stamps to floats\n stamp = [float(i) for i in stamp]\n return stamp\n# end of read_stamp_bbp\n\ndef read_stamp_her(filename):\n \"\"\"\n Get the time stamp from the her file's header\n \"\"\"\n try:\n with open(filename) as input_file:\n try:\n header = input_file.readline().split()\n stamp = header[4].split(',')[-1].split(':')\n 
input_file.close()\n except IndexError:\n print(\"[ERROR]: missing time stamp.\")\n return []\n except IOError:\n print(\"[ERROR]: No such file.\")\n return []\n\n # converting time stamps to floats\n for i in range(0, len(stamp)):\n stamp[i] = float(stamp[i])\n return stamp\n# end of read_stamp_her\n\n# ================================ WRITING ==================================\ndef write_hercules(filename, station):\n # filename = 'processed-' + filename.split('/')[-1]\n try:\n out_f = open(filename, 'w')\n except IOError as e:\n print(e)\n dis_ns = station[0].dis.tolist()\n vel_ns = station[0].vel.tolist()\n acc_ns = station[0].acc.tolist()\n dis_ew = station[1].dis.tolist()\n vel_ew = station[1].vel.tolist()\n acc_ew = station[1].acc.tolist()\n dis_up = station[2].dis.tolist()\n vel_up = station[2].vel.tolist()\n acc_up = station[2].acc.tolist()\n\n # get a list of time incremented by dt\n time = [0.000]\n samples = station[0].samples\n dt = station[0].dt\n tmp = samples\n\n while tmp > 1:\n time.append(time[len(time)-1] + dt)\n tmp -= 1\n\n out_f.write('# missing header \\n')\n\n descriptor = '{:>12}' + ' {:>12}'*9 + '\\n'\n out_f.write(descriptor.format(\"# time\",\n \"dis_ns\", \"dis_ew\", \"dis_up\",\n \"vel_ns\", \"vel_ew\", \"vel_up\",\n \"acc_ns\", \"acc_ew\", \"acc_up\")) # header\n\n descriptor = '{:>12.3f}' + ' {:>12.7f}'*9 + '\\n'\n for c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 in zip(time,\n dis_ns, dis_ew, dis_up,\n vel_ns, vel_ew, vel_up,\n acc_ns, acc_ew, acc_up):\n out_f.write(descriptor.format(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9))\n out_f.close()\n# end of write_hercules\n\ndef write_bbp(input_file, output_file, station, params={}):\n \"\"\"\n This function generates processed .bbp files for\n each of velocity/acceleration/displacement\n and copies the header of the input bbp file\n \"\"\"\n output_dir = os.path.dirname(output_file)\n output_basename = os.path.basename(output_file)\n\n # Prepare data for output\n acc_h1 = station[0].acc.tolist()\n vel_h1 = station[0].vel.tolist()\n dis_h1 = station[0].dis.tolist()\n acc_h2 = station[1].acc.tolist()\n vel_h2 = station[1].vel.tolist()\n dis_h2 = station[1].dis.tolist()\n acc_ver = station[2].acc.tolist()\n vel_ver = station[2].vel.tolist()\n dis_ver = station[2].dis.tolist()\n\n # Start with time = 0.0\n time = [0.000]\n samples = station[0].samples\n while samples > 1:\n time.append(time[len(time)-1] + station[0].dt)\n samples -= 1\n\n # Prepare to output\n out_data = [['dis', dis_h1, dis_h2, dis_ver, 'displacement', 'cm'],\n ['vel', vel_h1, vel_h2, vel_ver, 'velocity', 'cm/s'],\n ['acc', acc_h1, acc_h2, acc_ver, 'acceleration', 'cm/s^2']]\n\n for data in out_data:\n if not output_basename.endswith('.bbp'):\n # Remove extension\n bbp_output_basename = os.path.splitext(output_basename)[0]\n bbp_output_filename = os.path.join(output_dir,\n \"%s.%s.bbp\" %\n (bbp_output_basename,\n data[0]))\n output_header = [\"# Station= NoName\",\n \"# time= 00/00/00,00:00:00.00 UTC\",\n \"# lon= 0.00\",\n \"# lat= 0.00\",\n \"# units= %s\" % (data[5]),\n \"# padding= %d\" % (station[0].padding),\n \"# orientation= %s\" % (\",\".join([str(int(station[0].orientation)),\n str(int(station[1].orientation)),\n station[2].orientation])),\n \"#\",\n \"# Data fields are TAB-separated\",\n \"# Column 1: Time (s)\",\n \"# Column 2: H1 component ground \"\n \"%s (+ is %s)\" % (data[4],\n str(int(station[0].orientation))),\n \"# Column 3: H2 component ground \"\n \"%s (+ is %s)\" % (data[4],\n str(int(station[1].orientation))),\n \"# Column 4: V 
component ground \"\n \"%s (+ is %s)\" % (data[4], station[2].orientation),\n \"#\"]\n else:\n # Read header of input file\n input_dirname = os.path.dirname(input_file)\n input_basename = os.path.basename(input_file)\n pieces = input_basename.split('.')\n pieces = pieces[0:-2]\n bbp_input_file = os.path.join(input_dirname,\n \"%s.%s.bbp\" %\n ('.'.join(pieces),\n data[0]))\n input_header = []\n in_fp = open(bbp_input_file, 'r')\n for line in in_fp:\n line = line.strip()\n if line.startswith(\"#\"):\n input_header.append(line)\n in_fp.close()\n\n # Compose new header\n output_header = []\n for item in input_header:\n if item.find(\"units=\") > 0:\n output_header.append(\"# units= %s\" % (data[5]))\n elif item.find(\"orientation=\") > 0:\n output_header.append(\"# orientation= %s\" % (\",\".join([str(int(station[0].orientation)),\n str(int(station[1].orientation)),\n station[2].orientation])))\n elif item.find(\"lp=\") > 0:\n if 'lp' in params and params['lp'] is not None:\n output_header.append(\"# lp= %.2f\" % (params['lp']))\n else:\n output_header.append(item)\n elif item.find(\"hp=\") > 0:\n if 'hp' in params and params['hp'] is not None:\n output_header.append(\"# hp= %.2f\" % (params['hp']))\n else:\n output_header.append(item)\n elif item.find(\"padding=\") > 0:\n output_header.append(\"# padding= %d\" % (station[0].padding))\n elif item.find(\"Column 2\") > 0:\n output_header.append(\"# Column 2: H1 component ground \"\n \"%s (+ is %s)\" % (data[4],\n str(int(station[0].orientation))))\n elif item.find(\"Column 3\") > 0:\n output_header.append(\"# Column 3: H2 component ground \"\n \"%s (+ is %s)\" % (data[4],\n str(int(station[1].orientation))))\n elif item.find(\"Column 4\") > 0:\n output_header.append(\"# Column 4: V component ground \"\n \"%s (+ is %s)\" % (data[4], station[2].orientation))\n else:\n output_header.append(item)\n\n pieces = output_basename.split('.')\n pieces = pieces[0:-2]\n bbp_output_filename = os.path.join(output_dir,\n \"%s.%s.bbp\" %\n ('.'.join(pieces),\n data[0]))\n # Write output file\n try:\n out_fp = open(bbp_output_filename, 'w')\n except IOError as e:\n print(e)\n continue\n\n # Write header\n for item in output_header:\n out_fp.write(\"%s\\n\" % (item))\n\n # Write timeseries\n for val_time, val_ns, val_ew, val_ud in zip(time, data[1],\n data[2], data[3]):\n out_fp.write(\"%5.7f %5.9e %5.9e %5.9e\\n\" %\n (val_time, val_ns, val_ew, val_ud))\n\n # All done, close file\n out_fp.close()\n print(\"[WRITING]: %s\" % (bbp_output_filename))\n# end of write_bbp\n","repo_name":"SCECcode/ts-process","sub_path":"ts_process/file_utilities.py","file_name":"file_utilities.py","file_ext":"py","file_size_in_byte":21697,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"35018276356","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import render\nfrom .models import Project, Tag, Skill, Education, Experience, Award, Profile\nfrom .serializer.serializers import (\n ProjectSerializer,\n TagSerializer,\n SkillSerializer,\n ExperienceSerializer,\n EducationSerializer,\n AwardSerializer,\n ProfileSerializer,\n)\n\n# Create your views here.\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\n\n# * APIs\n# ? 
Profile APIs (Since there is only one profile, we don't need to get profile by id)\n@api_view([\"GET\"])\ndef get_admin(request):\n profile = Profile.objects.first()\n serializer = ProfileSerializer(profile, many=False)\n return Response(serializer.data)\n\n\n# ? Project APIs\n@api_view([\"GET\"])\ndef get_projects(request):\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n return Response(serializer.data)\n\n\n# ? Tag APIs\n@api_view([\"GET\"])\ndef get_tags(request):\n tags = Tag.objects.all()\n serializer = TagSerializer(tags, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef get_tag_by_id(request, id):\n tag = Tag.objects.get(id=id)\n serializer = TagSerializer(tag, many=False)\n return Response(serializer.data)\n\n\n# ? Skill APIs\n@api_view([\"GET\"])\ndef get_skills(request):\n skills = Skill.objects.all()\n serializer = SkillSerializer(skills, many=True)\n return Response(serializer.data)\n\n\n# ? Experience APIs\n@api_view([\"GET\"])\ndef get_experiences(request):\n experiences = Experience.objects.all()\n serializer = ExperienceSerializer(experiences, many=True)\n return Response(serializer.data)\n\n\n# ? Education APIs\n@api_view([\"GET\"])\ndef get_educations(request):\n educations = Education.objects.all()\n serializer = EducationSerializer(educations, many=True)\n return Response(serializer.data)\n\n\n# ? Award APIs\n@api_view([\"GET\"])\ndef get_awards(request):\n awards = Award.objects.all()\n serializer = AwardSerializer(awards, many=True)\n return Response(serializer.data)\n","repo_name":"leanhduy/personal-website","sub_path":"backend/mysite/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35840851459","text":"from person import Person\nfrom deck import Deck\nfrom card import Card\n\nclass Strategy:\n\n def __init__(self, threshold=0.6):\n self.threshold = threshold\n\n def next_card(self, dealer, player, deck):\n # adding 1 because we're not checking dealer's secret card\n num_of_cards_left = len(deck.get_cards()) + 1\n leftovers = deck.get_leftovers()\n player_score = player.get_score()\n dealers_cards = dealer.get_cards()\n players_cards = player.get_cards()\n\n dealer_secret_card = dealers_cards[1]\n val_of_secret_card = dealer_secret_card.get_value()\n leftovers[val_of_secret_card] += 1\n\n max_safe_score_for_player = 21 - player_score\n if max_safe_score_for_player >= 10:\n max_safe_score_for_player = 10\n num_of_safe_cards = 0\n\n for i in range(1, max_safe_score_for_player + 1):\n num_of_cards = leftovers[i]\n num_of_safe_cards = num_of_safe_cards + num_of_cards\n\n prob_of_not_busting = float(num_of_safe_cards)/float(num_of_cards_left)\n prob_of_busting = 1 - prob_of_not_busting\n\n if prob_of_busting <= self.threshold:\n return True\n return False","repo_name":"daniel-petrov/twenty_one","sub_path":"strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3310517510","text":"#!/usr/bin/python3\n# -*-coding:utf-8-*-\n\"\"\"\nFind all fork repo that are ahead of original repo\nBefore running, install requirement first: python3 -m pip install requests\n\nReference: https://docs.github.com/en/free-pro-team@latest/rest/reference\n\"\"\"\nimport sys\nimport json\nimport time\nimport requests\nfrom math import 
ceil\n\nGITHUB_API_TOKEN = \"\"\nREPO = \"\"\nDEBUG = True\n\ndef get_forks():\n session = requests.Session()\n forks = []\n\n # auth\n auth_url = \"https://api.github.com\"\n session.headers.update({'Authorization': 'token ' + GITHUB_API_TOKEN}) # should carry this header for all request\n resp = session.get(auth_url)\n if resp.status_code != 200:\n resp_json = resp.json()\n msg = resp_json.get('message')\n print('[-] Auth error: %s' % msg)\n return []\n if DEBUG:\n print(\"[*] Github API Rate Limit. Limit: %s, Remaining: %s, Reset: %s\" % (\n resp.headers.get(\"X-RateLimit-Limit\"),\n resp.headers.get(\"X-RateLimit-Remaining\"),\n time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(resp.headers.get(\"X-RateLimit-Reset\"))))\n ))\n\n # set header\n session.headers.update({'Accept': 'application/vnd.github.v3+json'})\n\n # get repo info\n repo_url = \"https://api.github.com/repos/%s\" % REPO\n resp = session.get(repo_url)\n if resp.status_code != 200:\n print(\"[-] Get repo info failed: %s\" % resp.text)\n return []\n forks_count = json.loads(resp.text).get(\"forks\")\n\n # get forks\n page_count = ceil(forks_count / 30) # 30 items each page\n for page in range(1, page_count + 1):\n params = {\"page\": page}\n forks_url = \"https://api.github.com/repos/%s/forks\" % REPO\n resp = session.get(forks_url, params=params, headers={'Accept': 'application/vnd.github.v3+json'})\n if resp.status_code != 200:\n print('[-] Get forks failed: %s' % resp.text)\n return []\n\n repos = json.loads(resp.text)\n for repo in repos:\n if DEBUG:\n print(\"[*] get fork: \" + repo.get(\"full_name\"))\n forks.append({\n \"full_name\": repo.get(\"full_name\"),\n \"pushed_at\": repo.get(\"pushed_at\"),\n \"stargazers_count\": repo.get(\"stargazers_count\"),\n \"forks_count\": repo.get(\"forks_count\")\n })\n\n # compare (ONLY compare master branch)\n compare_url = \"https://api.github.com/repos/%s/compare/%s:master...master\"\n for fork in forks:\n if DEBUG:\n print(\"[*] compare %s\" % fork.get(\"full_name\"))\n resp = session.get(compare_url % (fork.get(\"full_name\"), REPO.split(\"/\")[0]), headers={'Accept': 'application/vnd.github.v3+json'})\n if resp.status_code != 200:\n print('[-] compare %s failed: %s' % (fork.get(\"full_name\"), resp.text))\n continue\n res = json.loads(resp.text)\n fork.update({\n \"ahead_by\": res.get(\"ahead_by\"),\n \"behind_by\": res.get(\"behind_by\")\n })\n\n return forks\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(\"Usage: python3 %s GITHUB_API_TOKEN REPO\" % sys.argv[0])\n print(\"\\teg: python3 %s ab124942710137429ffdac322314701471234411 Ovi3/BurpBeautifier\")\n exit(0)\n\n GITHUB_API_TOKEN = sys.argv[1].strip()\n REPO = sys.argv[2].strip()\n\n try:\n forks = get_forks()\n if forks:\n # only show fork that is ahead of original repo. 
sorted by ahead_by and pushed_at\r\n            forks = list(filter(lambda i: i.get(\"ahead_by\"), forks))\r\n            forks = sorted(forks, key=lambda i: (i.get(\"ahead_by\"), i.get(\"pushed_at\")), reverse=True)\r\n\r\n        print(\"Done\")\r\n        print(\"%-48s%-30s%-8s%-8s%-8s%-8s\" % (\"url\", \"last push\", \"star\", \"fork\", \"ahead\", \"behind\"))\r\n        for fork in forks:\r\n            print(\"%-48s%-30s%-8d%-8d%-8d%-8d\" % (\r\n                \"https://www.github.com/\" + fork.get(\"full_name\"),\r\n                fork.get(\"pushed_at\"),\r\n                fork.get(\"stargazers_count\"),\r\n                fork.get(\"forks_count\"),\r\n                fork.get(\"ahead_by\"),\r\n                fork.get(\"behind_by\"),\r\n            ))\r\n    except Exception as e:\r\n        print(\"[-] Error: %s\" % e.args)\r\n\r\n","repo_name":"neilspink/aheadfork","sub_path":"aheadfork.py","file_name":"aheadfork.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5785839302","text":"import os\nimport sys\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom eralchemy import render_er\n\nBase = declarative_base()\n\nclass User(Base):\n    __tablename__ = 'users'\n    # Here we define columns for the table user\n    # Notice that each column is also a normal Python instance attribute.\n    id = Column(Integer, primary_key=True)\n    username = Column(String(250), nullable=False)\n    Firstname = Column(String(250), nullable=False)\n    Lastname = Column(String(250), nullable=False)\n    email = Column(String(250), nullable=False)\n\n\nclass Address(Base):\n    __tablename__ = 'addresses'\n    # Here we define columns for the table address.\n    # Notice that each column is also a normal Python instance attribute.\n    id = Column(Integer, primary_key=True)\n    street_name = Column(String(250))\n    street_number = Column(String(250))\n    post_code = Column(String(250), nullable=False)\n    user_id = Column(Integer, ForeignKey('users.id'))\n    user = relationship(User)\n\nclass Follower(Base):\n    __tablename__ = 'followers'\n    # Here we define columns for the table follower.\n    # Notice that each column is also a normal Python instance attribute.\n    user_from_id = Column(Integer, primary_key=True)\n    user_to_id = Column(Integer, primary_key=True)\n    \nclass Post(Base):\n    __tablename__ = 'posts'\n    # Here we define columns for the table post.\n    # Notice that each column is also a normal Python instance attribute.\n    id = Column(Integer, primary_key=True)\n    user_to_id = Column(Integer, primary_key=True)\n\nclass Media(Base):\n    __tablename__ = 'medias'\n    # Here we define columns for the table media.\n    # Notice that each column is also a normal Python instance attribute.\n    id = Column(Integer, primary_key=True)\n    post_code = Column(String(250), nullable=False)\n    user_id = Column(Integer, ForeignKey('posts.id'))\n\n    def to_dict(self):\n        return {}\n\n## Draw from SQLAlchemy base\nrender_er(Base, 'diagram.png')","repo_name":"Guaaan/instagram-database","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"23759313716","text":"#Break Your Head Here ;)\r\nfor _ in range(int(input())):\r\n\tn=int(input())\r\n\ta=list(map(int,input().split()))\r\n\todd=0\r\n\teven=0\r\n\tc=0\r\n\tfor i in a:\r\n\t\teven=even+1 if i%2==0 else even+0\r\n\tfor i in range(n-1,-1,-1):\r\n\t\tif 
a[i]%2!=0:\r\n\t\t\todd+=1\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tc+=odd*even\r\n\t\t\todd=0\r\n\t\t\teven-=1\t\t\r\n\tprint(c)\t","repo_name":"ritesh49/Python-Practice-Programs","sub_path":"codechef_a_puzzle_game.py","file_name":"codechef_a_puzzle_game.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17917265859","text":"import json\nimport xml.etree.ElementTree as ET\nimport requests\n\n\nclass Oracle:\n\n def getLocationAliases(self,subjectID):\n url = \"http://vocabsservices.getty.edu/TGNService.asmx/\"\n serviceName = \"TGNGetSubject\"\n params = {'subjectID': subjectID}\n\n r = requests.get(url=url + serviceName, params=params)\n data = r.content\n return data\n\n def searchLocation(self,name, role, nationality):\n url = \"http://vocabsservices.getty.edu/TGNService.asmx/\"\n serviceName = \"TGNGetTermMatch\"\n params = {'name': name,\n 'placetypeid': role,\n 'nationid': nationality}\n\n r = requests.get(url=url + serviceName, params=params)\n data = r.content\n return data\n\n def parseXMLForSubjects(self,xmlStr):\n root = ET.fromstring(xmlStr)\n subjects = []\n for item in root.findall('Subject'):\n ids = {}\n termCount = 0\n # iterate child elements of item\n for child in item:\n # print(child.tag)\n # print(child.attrib)\n # print(child.text)\n if child.tag == 'Subject_ID':\n ids['id'] = child.text\n elif child.tag == 'Preferred_Term':\n ids['name'] = child.text\n elif child.tag == 'Term':\n termCount = termCount + 1\n ids['termCount'] = termCount\n subjects.append(ids)\n return subjects\n\n def parseXMLForAliasNames(self,xmlStr):\n root = ET.fromstring(xmlStr)\n info = {}\n coordinateInformation = {}\n names = []\n for item in root.findall('Subject'):\n for child in item:\n if child.tag == 'Terms':\n for c in child:\n if c.tag == 'Preferred_Term':\n for cc in c:\n if cc.tag == 'Term_Text':\n names.append(cc.text)\n elif c.tag == 'Non-Preferred_Term':\n for cc in c:\n if cc.tag == 'Term_Text':\n names.append(cc.text)\n elif child.tag == 'Coordinates':\n for coordinates in child:\n for coordinateDetails in coordinates:\n if coordinateDetails.tag == 'Latitude':\n latitudeInfo = {}\n for latitudeValues in coordinateDetails:\n latitudeInfo[latitudeValues.tag.lower()] = latitudeValues.text\n coordinateInformation['latitude'] = latitudeInfo\n elif coordinateDetails.tag == 'Longitude':\n longitudeInfo = {}\n for longitudeValues in coordinateDetails:\n longitudeInfo[longitudeValues.tag.lower()] = longitudeValues.text\n coordinateInformation['longitude'] = longitudeInfo\n info['coordinates'] = coordinateInformation;\n info['aliases'] = names\n return info\n\n def getLocationInfo(self,searchName,type,nation):\n xml = self.searchLocation(searchName,type,nation)\n pars = self.parseXMLForSubjects(xml)\n for a in pars:\n aliasXML = self.getLocationAliases(a['id'])\n results = self.parseXMLForAliasNames(aliasXML)\n a['aliases'] = results['aliases']\n a['coordinates'] = results['coordinates']\n return pars","repo_name":"MEC402/openpipe","sub_path":"backend/openpipeAPI/oracles/Oracle.py","file_name":"Oracle.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"31611079388","text":"import pytest\nimport os\nimport time\nimport unittest\nfrom tml.config import CONFIG\nfrom tml.cache import CacheVersion, CachedClient\nfrom tml.cache_adapters import file as FileAdapter\nfrom 
tml.cache_adapters.test_utils import check_alive\nfrom tml.cache_adapters.memcached import PyLibMCCacheAdapter, DefaultMemcachedAdapter, BaseMemcachedAdapter\nfrom tml.cache_adapters.rediscache import BaseRedisAdapter, DefaultRedisAdapter\nfrom tml import configure\nfrom tests.common import override_config, FIXTURES_PATH\nfrom .settings import TML\n\nMEMCACHE_CONNECTION = ['127.0.0.1:11211']\n\n\nclass MockCachedClient(CachedClient):\n\n cache = {}\n\n def store(self, key, data, **opts):\n self.cache[key] = data\n\n def fetch(self, key, miss_callback=None, **opts):\n val = self.cache.get(key, None)\n if not val:\n val = self.cache[key] = miss_callback(key)\n return val\n\nclass DumbCachedClient(object):\n cache = {}\n def store(self, key, data, **opts):\n self.cache[key] = data\n\n def fetch(self, key):\n return self.cache[key]\n\nclass TestCacheVersion(unittest.TestCase):\n\n def setUp(self):\n configure()\n self.cache = MockCachedClient()\n self.test_version = \"123-dummy\"\n\n def test_init(self):\n version = CacheVersion(self.cache)\n self.assertIsInstance(version.cache, CachedClient, 'cache attr')\n version.set(self.test_version)\n self.assertEquals(version.version, self.test_version, 'version set')\n version.reset()\n self.assertEquals(version.version, None, 'version reset')\n\n def test_manipul(self):\n version = CacheVersion(self.cache)\n version.store(self.test_version)\n cur_version = version.fetch()\n self.assertEquals(cur_version, self.test_version, \"version fetch\")\n with override_config(version_check_interval=-1):\n cur_version = version.fetch()\n self.assertEquals(cur_version, 'undefined')\n self.assertTrue(version.is_undefined())\n self.assertTrue(version.is_invalid())\n version.store('new-ver')\n self.assertEquals(version.version, 'new-ver', 'stored')\n self.assertTrue(version.is_defined(), 'defined')\n self.assertTrue(version.is_valid())\n version.set(0)\n self.assertTrue(version.is_defined(), 'defined if 0')\n self.assertTrue(version.is_invalid(), '0 is not valid')\n\n\n@pytest.mark.usefixtures(\"memcached\")\nclass TestCache(unittest.TestCase):\n\n def test_init_adapter(self):\n path = 'tests.integration.cache.DumbCachedClient'\n with override_config(cache={'enabled': True}):\n cache = CachedClient.instance(adapter=path)\n self.assertTrue(isinstance(cache, CachedClient))\n cache.store('foo', 'bar')\n self.assertEquals(cache.fetch('foo'), 'bar', 'work as adapter')\n self.assertTrue(hasattr(cache, 'versioned_key'), 'has inhereted methods')\n\n def test_file_adapter(self):\n with override_config(cache={'enabled': True}):\n cache = CachedClient.instance(adapter=FileAdapter)\n self.assertIsInstance(cache, CachedClient)\n self.assertEquals(cache.cache_name, 'file')\n\n with override_config(cache={'enabled': True, 'path': TML['cache']['path'], 'version': TML['cache']['version']}):\n cache = CachedClient.instance(adapter=FileAdapter)\n self.assertEquals(cache.get_cache_path(), os.path.join(TML['cache']['path'], TML['cache']['version']))\n self.assertEquals(cache.file_path('application'), os.path.join(TML['cache']['path'], TML['cache']['version'], 'application.json'))\n app_data = cache.fetch('application')\n self.assertEquals(app_data['key'], TML['application']['key'])\n app_data = cache.fetch('application')\n\n def test_memcache_init(self):\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'host': '127.0.0.1', 'ttl': 3600, 'namespace': ''}):\n cache = CachedClient.instance()\n self.assertEquals(cache.namespace, CONFIG.application_key()[:5], 'namespace 
set first 5 symbols of key')\n\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'host': '127.0.0.1', 'ttl': 3600, 'namespace': ''}, application={'access_token': 'foobar'}):\n cache = CachedClient.instance()\n self.assertEquals(cache.namespace, CONFIG.access_token()[:5], 'namespace set first 5 symbols of token')\n\n\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'host': '127.0.0.1', 'namespace': 'tml-2', 'ttl': 3600}):\n cache = CachedClient.instance()\n self.assertIsInstance(cache, BaseMemcachedAdapter, 'proper factory build')\n self.assertEquals(cache.default_timeout, 3600)\n self.assertEquals(cache.namespace, 'tml-2', 'namespace set')\n cache._drop_it()\n\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'backend': 'pylibmc', 'host': '127.0.0.1', 'namespace': 'tml-3', 'ttl': 1200}):\n cache = CachedClient.instance()\n self.assertIsInstance(cache, BaseMemcachedAdapter, 'proper factory build')\n self.assertEquals(cache.default_timeout, 1200)\n cache._drop_it()\n\n\n def test_memcache_funct(self):\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'host': '127.0.0.1', 'namespace': 'tml-test'}):\n cache = CachedClient.instance()\n check_alive(cache)\n self._test_memcache_func(cache)\n self._test_versioning(cache)\n cache._drop_it()\n\n def test_pylibmc_funct(self):\n with override_config(cache={'enabled': True, 'adapter': 'memcached', 'backend': 'pylibmc', 'host': '127.0.0.1', 'namespace': 'tml-test'}):\n cache = CachedClient.instance()\n check_alive(cache)\n self._test_memcache_func(cache)\n self._test_versioning(cache)\n cache._drop_it()\n\n def _test_memcache_func(self, cache):\n self.assertEquals(cache.store('foo', 'bar'), 'bar', 'dummy store')\n self.assertEquals(cache.fetch('foo'), 'bar', 'dummy fetch')\n self.assertEquals(cache.delete('foo'), 'foo', 'dummy delete')\n self.assertEquals(cache.fetch('foo'), None, 'dummy check delete')\n self.assertEquals(cache.store('foo', {'a': 'b'}), {'a': 'b'}, 'json store')\n self.assertEquals(cache.fetch('foo'), {'a': 'b'}, 'json fetch')\n cache.delete('foo')\n cache.store('foo', 'new_bar', opts=dict(timeout=1))\n self.assertEquals(cache.fetch('foo'), 'new_bar', 'timeout')\n time.sleep(1)\n self.assertEquals(cache.fetch('foo'), None, 'timeout works')\n\n def _test_versioning(self, cache):\n cache.delete('a')\n cache.store_version('1312321')\n cache.store('a', 'b')\n self.assertEquals(cache.fetch('a'), 'b', 'upgrade version')\n cache.upgrade_version()\n self.assertEquals(cache.fetch('a'), None, 'upgrade version works')\n\n\nclass TestRedisCache(unittest.TestCase):\n def test_redis_init(self):\n with override_config(cache={'enabled': True, 'adapter': 'rediscache', 'host': '127.0.0.1:6379', 'namespace': 'tml-2', 'ttl': 3600}):\n cache = CachedClient.instance()\n self.assertIsInstance(cache, BaseRedisAdapter, 'proper factory build')\n self.assertEquals(cache.default_timeout, 3600)\n self.assertEquals(cache.namespace, 'tml-2', 'namespace set')\n cache._drop_it()\n\n with override_config(cache={'enabled': True, 'adapter': 'rediscache', 'backend': 'default', 'host': '127.0.0.1:6379', 'namespace': 'tml-3', 'ttl': 1200}):\n cache = CachedClient.instance()\n self.assertIsInstance(cache, BaseRedisAdapter, 'proper factory build')\n self.assertEquals(cache.default_timeout, 1200)\n cache._drop_it()\n\n with override_config(cache={'enabled': True, 'adapter': 'rediscache', 'backend': 'default', 'host': '127.0.0.1:6379', 'options': {'pool': True, 'max_connections': 2}}):\n import 
redis\n cache = CachedClient.instance()\n self.assertIsInstance(cache._cache, redis.ConnectionPool)\n self.assertEquals(cache._cache.max_connections, 2)\n cache._drop_it()\n\n def test_redis_func(self):\n with override_config(cache={'enabled': True, 'adapter': 'rediscache', 'backend': 'default', 'host': '127.0.0.1:6379', 'namespace': 'tml-test'}):\n cache = CachedClient.instance()\n check_alive(cache)\n self.assertEquals(cache.store('foo', ['bar']), ['bar'], 'dummy store')\n self.assertEquals(cache.fetch('foo'), ['bar'], 'dummy fetch')\n self.assertEquals(cache.delete('foo'), 'foo', 'dummy delete')\n self.assertEquals(cache.fetch('foo'), None, 'dummy check delete')\n self.assertEquals(cache.store('foo', {'a': 'b'}), {'a': 'b'}, 'json store')\n self.assertEquals(cache.fetch('foo'), {'a': 'b'}, 'json fetch')\n cache.delete('foo')\n cache.store('foo', 'new_bar', opts=dict(timeout=1))\n self.assertEquals(cache.fetch('foo'), 'new_bar', 'timeout')\n time.sleep(1)\n self.assertEquals(cache.fetch('foo'), None, 'timeout works')\n","repo_name":"translationexchange/tml-python","sub_path":"tests/integration/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"28295506507","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom .. models.hero import *\n\n\nclass FetchHero:\n def __init__(self, url):\n self.url = url\n\n def fetch_url(self):\n r = requests.get(self.url)\n soup = BeautifulSoup(r.text)\n trs = soup.find('table', id='CardSelectTr').find_all('tr')\n for tr in trs:\n if not tr.td:\n continue\n name = tr.td.a['title']\n career_id = Career.name_get(tr['data-param1'] + '干员').id\n star = int(tr['data-param2'][0])\n sex = tr['data-param3'] + '性干员'\n position, *tags = sorted(tr['data-param5'].split(','), key=lambda s: '近战位'in s or '远程位' in s, reverse=True)\n tags = [tag.strip() for tag in tags]\n if '新手' in tags:\n tags.remove('新手')\n tag_ids = list(map(Tag.name_get, tags))\n is_public = '公开招募' in tr['data-param6']\n if star == 6:\n experience = '高级资深干员'\n elif star == 5:\n experience = '资深干员'\n elif star < 3:\n experience = '新手'\n else:\n experience = None\n hero = Hero.name_get(name)\n if hero:\n hero.career_id = career_id\n hero.star = star\n hero.sex = sex\n hero.position = position\n hero.tags = tag_ids\n hero.is_public = is_public\n hero.experience = experience\n else:\n hero = Hero(\n name=name,\n career_id=career_id,\n star=star,\n sex=sex,\n position=position,\n tags=tag_ids,\n is_public=is_public,\n experience=experience,\n )\n db.session.add(hero)\n db.session.commit()\n","repo_name":"JoJoJoJoJoJoJo/ArknightTool","sub_path":"app/controllers/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"13554607651","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\n\n\n__all__ = (\n 'LOGGER_NAME',\n 'get_default_logger',\n)\n\n\nLOGGER_NAME = 'wf-ds-tools'\n\n\nstreamHandler = logging.StreamHandler()\nstreamHandler.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(levelname)s] %(asctime)s %(name)s.%(funcName)s: %(message)s')\nstreamHandler.setFormatter(formatter)\n\nfileHandler = logging.FileHandler(os.path.expanduser('~/tmp/{}.log'.format(LOGGER_NAME)))\nfileHandler.setLevel(logging.DEBUG)\nfileHandler.setFormatter(formatter)\n\nlogger = 
logging.getLogger(LOGGER_NAME)\nlogger.setLevel(logging.INFO)\nlogger.addHandler(streamHandler)\nlogger.addHandler(fileHandler)\nlogger.propagate = False\n\n\ndef get_default_logger():\n return logger\n","repo_name":"dosER-/wf-ds-tools","sub_path":"src/dstools/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19830302470","text":"#!/usr/bin/env python\r\nfrom __future__ import print_function\r\n# encoding: utf-8\r\n\r\n# Before running this program, first Start HFO server:\r\n# $> ./bin/HFO --offense-agents 1\r\n\r\nimport argparse\r\n# import itertools\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport time\r\nimport math\r\nimport tensorflow.compat.v1 as tf\r\n\r\n# tf.compat.v1.disable_resource_variables()\r\ntf.disable_eager_execution()\r\ntry:\r\n import hfo\r\nexcept ImportError:\r\n print('Failed to import hfo. To install hfo, in the HFO directory, run: \\\"pip install .\\\"')\r\n exit()\r\n\r\ntf.reset_default_graph()\r\n\r\n# ___________________________Actor Net____________________________\r\n\r\n\r\nclass Actor(object):\r\n def __init__(self, sess, a_dim, p_dim, learning_rate, tau):\r\n self.sess = sess\r\n self.a_dim = a_dim\r\n # self.action_bound = action_bound\r\n self.lr = learning_rate\r\n self.t_replace_counter = 0\r\n self.tau = tau\r\n self.params_dim = p_dim\r\n\r\n with tf.variable_scope('Actor'):\r\n # input s, output a\r\n self.a, self.params = self._build_net(S, scope='eval_net', trainable=True)\r\n\r\n # input s_, output a, get a_ for critic\r\n self.a_, self.params_ = self._build_net(S_, scope='target_net', trainable=False)\r\n\r\n self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')\r\n # print(\"actor evaluate parameters: \", len(self.e_params)) # len = 12\r\n self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')\r\n\r\n self.soft_replace = [tf.assign(t, (1 - self.tau) * t + self.tau * e)\r\n for t, e in zip(self.t_params, self.e_params)]\r\n\r\n def _build_net(self, s, scope, trainable):\r\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\r\n init_weights = tf.random_normal_initializer(0, 0.01)\r\n init_bias = tf.constant_initializer(0.1)\r\n\r\n net_1 = tf.layers.dense(s, 1024, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l1', trainable=trainable)\r\n net_1 = tf.nn.leaky_relu(net_1, alpha=0.01)\r\n net_2 = tf.layers.dense(net_1, 512, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l2', trainable=trainable)\r\n net_2 = tf.nn.leaky_relu(net_2, alpha=0.01)\r\n net_3 = tf.layers.dense(net_2, 256, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l3', trainable=trainable)\r\n net_3 = tf.nn.leaky_relu(net_3, alpha=0.01)\r\n net_4 = tf.layers.dense(net_3, 128, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l4', trainable=trainable)\r\n net_4 = tf.nn.leaky_relu(net_4, alpha=0.01)\r\n\r\n act_value = tf.layers.dense(net_4, self.a_dim, kernel_initializer=init_weights,\r\n bias_initializer=init_bias,\r\n name='action', trainable=trainable)\r\n # action_value = tf.keras.layers.Dense(self.a_dim, kernel_initializer=init_weights,\r\n # bias_initializer=init_bias,\r\n # name='action', trainable=trainable)(\r\n # net_4)\r\n params = tf.layers.dense(net_4, self.params_dim, kernel_initializer=init_weights,\r\n bias_initializer=init_bias,\r\n 
name='params', trainable=trainable)\r\n            # params = tf.keras.layers.Dense(self.params_dim, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n            #                                name='params', trainable=trainable)(net_4)\r\n\r\n            return act_value, params\r\n\r\n    def learn(self, s):   # batch update\r\n        self.sess.run(self.train_op, feed_dict={S: s})\r\n        self.sess.run(self.soft_replace)\r\n\r\n    def choose_action(self, s, epsi):\r\n        if epsi == 0 or np.random.uniform() > epsi:  # use epsilon-greedy\r\n            # print(\"~~~~~~~~~~~~~~~~~~~~~~~choose max action~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n            s = s[np.newaxis, :]\r\n            a_value, a_params = self.sess.run((self.a, self.params), feed_dict={S: s})  # this is questionable\r\n            act = np.argmax(a_value, 1)[0]\r\n        else:\r\n            act = np.random.choice([0, 1, 2])  # should this use np or tf?\r\n            s = s[np.newaxis, :]  # important!!!!!\r\n            a_value = sess.run(self.a, feed_dict={S: s})\r\n            a_params = [np.random.uniform(0, 100), np.random.uniform(-180, 180),\r\n                        np.random.uniform(-180, 180),\r\n                        np.random.uniform(-180, 180),\r\n                        np.random.uniform(0, 100), np.random.uniform(-180, 180)]\r\n        a_value = np.squeeze(a_value)\r\n        a_params = np.squeeze(a_params)\r\n        # print(\"6 parameters: \", a_params)\r\n        return act, a_value, a_params  # the action index\r\n\r\n    def add_grad_to_graph(self, a_grads):\r\n        with tf.variable_scope('policy_grads'):\r\n            # ys = policy\r\n            # xs = policy's parameters\r\n            # a_grads = the gradients of the policy to get more Q\r\n            # tf.gradients will calculate dys/dxs with an initial gradient for ys, so this is dq/da * da/dparams\r\n            # print(\"ys: \", tf.concat([self.a, self.params], axis=1)) shape -------- [None, 10]\r\n            # print('e_params: ', self.e_params)\r\n            self.policy_grads = tf.gradients(ys=tf.concat([self.a, self.params], axis=1), xs=self.e_params, grad_ys=a_grads)\r\n            # print(\"policy_grads: \", self.policy_grads)\r\n            # print(len(self.policy_grads)) ------ 12\r\n\r\n        with tf.variable_scope('A_train'):\r\n            opt = tf.train.AdamOptimizer(-self.lr)  # (- learning rate) for ascent policy\r\n            self.train_op = opt.apply_gradients(zip(self.policy_grads, self.e_params))\r\n\r\n\r\n# '___________________Critic Net_____________________'\r\n\r\n\r\nclass Critic(object):\r\n    def __init__(self, sess, s_dim, a_dim, learning_rate, gamma, a, a_, tau):\r\n        self.sess = sess\r\n        self.s_dim = s_dim\r\n        self.a_dim = a_dim  # a contains action_value and parameters\r\n        self.lr = learning_rate\r\n        self.gamma = gamma\r\n        self.tau = tau\r\n\r\n        with tf.variable_scope('Critic', reuse=tf.AUTO_REUSE):\r\n            # Input (s, a), output q\r\n            self.a = tf.stop_gradient(a)    # stop critic updates from flowing to the actor\r\n            self.q = self._build_net(S, self.a, 'eval_net', trainable=True)\r\n\r\n            # Input (s_, a_), output q_ for q_target\r\n            self.q_ = self._build_net(S_, a_, 'target_net',\r\n                                      trainable=False)   # target_q is based on a_ from Actor's target_net\r\n\r\n        self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')\r\n        self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')\r\n\r\n        with tf.variable_scope('target_q'):\r\n            self.target_q = R + self.gamma * self.q_\r\n\r\n        with tf.variable_scope('TD_error'):\r\n            self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))\r\n\r\n        with tf.variable_scope('C_train'):\r\n            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)\r\n\r\n        with tf.variable_scope('a_grad'):\r\n            self.a_grad = tf.gradients(self.q, self.a)\r\n            # print('a_grad: ', self.a_grad) # shape = [?, a_dim]\r\n\r\n        self.soft_replacement = 
[tf.assign(t, (1 - self.tau) * t + self.tau * e)\r\n for t, e in zip(self.t_params, self.e_params)]\r\n\r\n def _build_net(self, s, a, scope, trainable):\r\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\r\n init_weights = tf.random_normal_initializer(0., 0.01)\r\n init_bias = tf.constant_initializer(0.1)\r\n\r\n n_l1 = 1024\r\n w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_weights, trainable=trainable)\r\n w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_weights, trainable=trainable)\r\n b1 = tf.get_variable('b1', [1, n_l1], initializer=init_bias, trainable=trainable)\r\n net_1 = tf.nn.leaky_relu((tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1), alpha=0.01)\r\n\r\n net_2 = tf.layers.dense(net_1, 512, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l2', trainable=trainable)\r\n net_2 = tf.nn.leaky_relu(net_2, alpha=0.01)\r\n net_3 = tf.layers.dense(net_2, 256, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l3', trainable=trainable)\r\n net_3 = tf.nn.leaky_relu(net_3, alpha=0.01)\r\n net_4 = tf.layers.dense(net_3, 128, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n name='l4', trainable=trainable)\r\n net_4 = tf.nn.leaky_relu(net_4, alpha=0.01)\r\n\r\n with tf.variable_scope('q', reuse=tf.AUTO_REUSE):\r\n q = tf.layers.dense(net_4, 1, kernel_initializer=init_weights, bias_initializer=init_bias,\r\n trainable=trainable)\r\n\r\n return q\r\n\r\n def learn(self, s, a, r, s_):\r\n self.sess.run(self.train_op, feed_dict={S: s, self.a: a, R: r, S_: s_})\r\n self.sess.run(self.soft_replacement)\r\n\r\n\r\n# '_______________Memory______________'\r\n\r\n\r\nclass Memory(object):\r\n def __init__(self, capacity, dims):\r\n self.capacity = capacity\r\n self.data = np.zeros((capacity, dims))\r\n self.pointer = 0\r\n\r\n def store_transition(self, s, a, p, r, s_):\r\n transition = np.hstack((s, a, p, [r], s_))\r\n index = self.pointer % self.capacity # replace the old memory with new memory\r\n self.data[index, :] = transition\r\n self.pointer += 1\r\n\r\n def sample(self, n):\r\n assert self.pointer >= self.capacity, 'Memory has not been fulfilled'\r\n indices = np.random.choice(self.capacity, size=n)\r\n return self.data[indices, :]\r\n\r\n\r\n# \"____________________Reward_____________________\"\r\n\r\n\r\n# get the distance between the ball and the goal\r\ndef get_ball_dist_goal(sta):\r\n ball_proximity = sta[53]\r\n goal_proximity = sta[15]\r\n ball_dist = 1.0 - ball_proximity\r\n goal_dist = 1.0 - goal_proximity\r\n ball_ang_sin_rad = sta[51]\r\n ball_ang_cos_rad = sta[52]\r\n ball_ang_rad = math.acos(ball_ang_cos_rad)\r\n if ball_ang_sin_rad < 0:\r\n ball_ang_rad *= -1.\r\n goal_ang_sin_rad = sta[13]\r\n goal_ang_cos_rad = sta[14]\r\n goal_ang_rad = math.acos(goal_ang_cos_rad)\r\n if goal_ang_sin_rad < 0:\r\n goal_ang_rad *= -1.\r\n alpha = max(ball_ang_rad, goal_ang_rad) - min(ball_ang_rad, goal_ang_rad)\r\n ball_dist_goal = math.sqrt(\r\n ball_dist * ball_dist + goal_dist * goal_dist - 2. 
* ball_dist * goal_dist * math.cos(alpha))\r\n return ball_dist_goal\r\n\r\n\r\n# there's explicit codes about reward in src/hfo_game.cpp\r\ndef getReward(old_state, current_state, get_kickable_reward, status):\r\n r = 0\r\n kickable = current_state[12]\r\n old_kickable = old_state[12]\r\n\r\n # NOTE: the closer agent gets towards the ball, the bigger this state[53] is\r\n ball_prox_delta = current_state[53] - old_state[53] # ball_proximity - old_ball_prox\r\n kickable_delta = kickable - old_kickable\r\n ball_dist_goal_delta = get_ball_dist_goal(current_state) - get_ball_dist_goal(old_state)\r\n player_on_ball = hfo_env.playerOnBall()\r\n our_unum = hfo_env.getUnum()\r\n\r\n # move to ball reward\r\n if player_on_ball.unum < 0 or player_on_ball.unum == our_unum:\r\n r += ball_prox_delta\r\n # if kickable_delta >= 1 and (not get_kickable_reward):\r\n if kickable_delta >= 1:\r\n r += 1.0\r\n get_kickable_reward = True\r\n\r\n # kick to goal reward\r\n if player_on_ball.unum == our_unum:\r\n r -= 3 * ball_dist_goal_delta\r\n # elif get_kickable_reward: # we have passed to teammate\r\n # r -= 3 * 0.2 * ball_dist_goal_delta\r\n\r\n # EOT reward\r\n if status == hfo.GOAL:\r\n if player_on_ball.unum == our_unum:\r\n r += 5\r\n else:\r\n r += 1\r\n elif status == hfo.CAPTURED_BY_DEFENSE:\r\n r += 0\r\n\r\n return r, get_kickable_reward\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--port', type=int, default=6000,\r\n help=\"Server port\")\r\nparser.add_argument('--seed', type=int, default=None,\r\n help=\"Python randomization seed; uses python default if 0 or not given\")\r\nparser.add_argument('--no-reorient', action='store_true',\r\n help=\"Do not use the new Reorient action\")\r\nparser.add_argument('--record', action='store_true',\r\n help=\"Doing HFO --record\")\r\nparser.add_argument('--rdir', type=str, default='log/',\r\n help=\"Set directory to use if doing HFO --record\")\r\nparser.add_argument('--MEM_CAPACITY', type=int, default=2000)\r\nparser.add_argument('--MAX_EPISODES', type=int, default=2000)\r\nparser.add_argument('--LR_A', type=int, default=0.001)\r\nparser.add_argument('--LR_C', type=int, default=0.001)\r\nparser.add_argument('--GAMMA', type=int, default=0.9) # reward discount\r\nparser.add_argument('--BATCH_SIZE', type=int, default=32)\r\nparser.add_argument('--tau', type=int, default=0.001) # I changed from 0.0001 to 0.001\r\nargs = parser.parse_args()\r\nif args.seed:\r\n random.seed(args.seed)\r\n# Create the HFO Environment\r\nhfo_env = hfo.HFOEnvironment()\r\n# Connect to the server with the specified\r\n# feature set. 
See feature sets in hfo.py/hfo.hpp.\r\nif args.record:\r\n    hfo_env.connectToServer(hfo.LOW_LEVEL_FEATURE_SET,\r\n                            'bin/teams/base/config/formations-dt', args.port,\r\n                            'localhost', 'base_left', False,\r\n                            record_dir=args.rdir)\r\nelse:\r\n    hfo_env.connectToServer(hfo.LOW_LEVEL_FEATURE_SET,\r\n                            'bin/teams/base/config/formations-dt', args.port,\r\n                            'localhost', 'base_left', False)\r\nif args.seed:\r\n    print(\"Python randomization seed: {0:d}\".format(args.seed))\r\n\r\nstate_dim = hfo_env.getStateSize()  # 59\r\naction_dim = 3\r\nparam_dim = 6  # six continuous parameters are sampled and indexed (0-5) below\r\n\r\n# all placeholders for tf\r\nwith tf.name_scope('S'):\r\n    S = tf.placeholder(tf.float32, shape=[None, state_dim], name='s')\r\nwith tf.name_scope('R'):\r\n    R = tf.placeholder(tf.float32, [None, 1], name='r')\r\nwith tf.name_scope('S_'):\r\n    S_ = tf.placeholder(tf.float32, shape=[None, state_dim], name='s_')\r\n\r\nsess = tf.Session()\r\n\r\n# Create actor and critic.\r\n# They are actually connected to each other\r\nactor = Actor(sess, action_dim, param_dim, args.LR_A, args.tau)\r\n# print(\"actor_a\", actor.a)\r\n# print(\"actor_params\", actor.params)\r\ncritic = Critic(sess, state_dim, (action_dim + param_dim), args.LR_C, args.GAMMA,\r\n                tf.concat([actor.a, actor.params], axis=1), tf.concat([actor.a_, actor.params_], axis=1), args.tau)\r\n# print(\"~~~~~~~~a_grad: \", critic.a_grad)\r\nactor.add_grad_to_graph(critic.a_grad)\r\n\r\nsess.run(tf.global_variables_initializer())\r\n\r\nM = Memory(args.MEM_CAPACITY, dims=2 * state_dim + (action_dim + param_dim) + 1)\r\n\r\nepsilon = 1\r\n\r\nx_list = []\r\nreward_list = []\r\n\r\nt1 = time.time()\r\nfor i in range(args.MAX_EPISODES):\r\n    ep_reward = 0\r\n    count = 0\r\n    status = hfo.IN_GAME\r\n    if epsilon > 0.1:\r\n        epsilon *= 0.995\r\n\r\n    while status == hfo.IN_GAME:\r\n        count += 1\r\n        state = hfo_env.getState()\r\n        # print(\"state shape\", state.shape) ------ (59,)\r\n        action_index, action_value, action_params = actor.choose_action(state, epsilon)\r\n        # print(\"the action chosen: \", action_index)\r\n        if action_index == 0:\r\n            hfo_env.act(hfo.DASH, action_params[0], action_params[1])\r\n        elif action_index == 1:\r\n            hfo_env.act(hfo.TURN, action_params[2])\r\n        elif action_index == 2:\r\n            hfo_env.act(hfo.KICK, action_params[4], action_params[5])\r\n\r\n        status = hfo_env.step()\r\n        state_ = hfo_env.getState()\r\n        got_kickable_r = False\r\n        reward, got_kickable_r = getReward(state, state_, got_kickable_r, status)\r\n\r\n        # print('reward: ', reward)\r\n        # print('state: ', state)\r\n        # print(\"action_value: \", action_value)\r\n        # print(\"action_params: \", action_params)\r\n        M.store_transition(state, action_value, action_params, reward, state_)\r\n\r\n        if M.pointer > args.MEM_CAPACITY:\r\n            if M.pointer == args.MEM_CAPACITY + 1:\r\n                print(\"####################################reach learn###################################\")\r\n            b_M = M.sample(args.BATCH_SIZE)\r\n            b_s = b_M[:, :state_dim]\r\n            b_a = b_M[:, state_dim: state_dim + action_dim + param_dim]  # action_value along with 6 parameters\r\n            b_r = b_M[:, -state_dim - 1: -state_dim]\r\n            b_s_ = b_M[:, -state_dim:]\r\n\r\n            critic.learn(b_s, b_a, b_r, b_s_)\r\n            actor.learn(b_s)\r\n\r\n        state = state_\r\n        ep_reward += reward\r\n\r\n    x_list.append(i)\r\n    reward_list.append(ep_reward)\r\n    plt.figure(\"reward figure\")\r\n    plt.clf()\r\n    plt.title('reward figure')\r\n    plt.xlabel('episode')\r\n    plt.ylabel('reward')\r\n    plt.scatter(np.array(x_list), np.array(reward_list))\r\n    
plt.pause(0.001)\r\n\r\n    print(\"Episode: \", i, \"episode reward: \", ep_reward, 'explore: ', epsilon)\r\n\r\n    if status == hfo.SERVER_DOWN:\r\n        hfo_env.act(hfo.QUIT)\r\n        exit()\r\n\r\n\r\n","repo_name":"MoonieC/Moonie","sub_path":"hfo_DDPG.py","file_name":"hfo_DDPG.py","file_ext":"py","file_size_in_byte":17906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"36208808566","text":"'''\n121. Best Time to Buy and Sell Stock\n\nGiven an array whose i-th element is the price of a given stock on day i.\n\nIf you are allowed to complete at most one transaction (i.e. buy one share of the stock and sell it once), design an algorithm to compute the maximum profit you can obtain.\n\nNote: you cannot sell a stock before you buy one.\n\n\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (stock price = 1) and sell on day 5 (stock price = 6), maximum profit = 6-1 = 5.\n             Note that the profit cannot be 7-1 = 6, because the selling price must be greater than the buying price; likewise, you cannot sell before you buy.\n\nExample 2:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case no transaction is completed, so the maximum profit is 0.\n\n\n'''\n\nclass Solution:\n    def maxProfit(self, prices):\n        '''\n        @describe: dynamic programming, dp[i]=max(dp[i-1],prices[i]-minprice);\n                   take the difference between today's price and the lowest earlier price as the profit, and keep track of the maximum profit\n        @param prices: List[int]\n        @return: int\n        '''\n        if len(prices) <= 1:\n            return 0\n        minVal = prices[0]\n        maxdelta = 0\n        for i in range(1, len(prices)):\n            maxdelta = max(prices[i] - minVal, maxdelta)\n            minVal = min(minVal, prices[i])\n        return maxdelta\n\n\nif __name__ == '__main__':\n    prices = [7,1,5,3,6,4]\n    s=Solution()\n    print(s.maxProfit(prices))","repo_name":"lygeneral/LeetCode","sub_path":"Python/Array/easy/121_best-time-to-buy-and-sell-stock.py","file_name":"121_best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"70678311254","text":"import os\nfrom moviepy.editor import *\nfrom path import *\nfrom ffprobe import FFProbe\n\ndef gyujtsd_ossze_metaadatokat(mappa_ut):\n    metaadatok_lista = []\n\n    # Check that the folder exists\n    if not os.path.exists(mappa_ut):\n        print(f\"Folder not found: {mappa_ut}\")\n        return\n\n    # List every file in the folder\n    for fajlnev in os.listdir(mappa_ut):\n        fajl_ut = mappa_ut+\"/\"+fajlnev\n\n        # Check whether the file is a video file with a .mov or .mp4 extension\n        if os.path.isfile(fajl_ut) and fajlnev.lower().endswith(('.mov', '.mp4')):\n            # Create a video clip from the file\n            clip = VideoFileClip(fajl_ut)\n            metada2 = FFProbe(fajl_ut)\n            for key, value in metada2.all().items():\n                print(f\"{key}: {value}\")\n            # Extract the metadata\n            metaadatok = {\n                \"Filename\": fajlnev,\n                # \"Title\": clip.title,\n                # \"Creator\": clip.creator,\n                # \"File format\": clip.fmt,\n                \"Frame rate\": clip.fps,\n                \"Frame count\": clip.reader.nframes,\n                \"Aspect ratio (width x height)\": clip.size,\n                \"Duration (s)\": clip.duration\n            }\n\n            # Append the metadata to the list\n            metaadatok_lista.append(metaadatok)\n\n            # Close the video clip\n            clip.close()\n\n    return metaadatok_lista\n\n# Test: give the folder path here\nmappa_eleresi_ut = 'p:/tmp/iTTHONVAGY/2016.09.12 Bükk'\nmetaadatok = gyujtsd_ossze_metaadatokat(mappa_eleresi_ut)\n\n# Print the metadata\nfor metaadat in metaadatok:\n    print(\"\\nVideo metadata:\")\n    for kulcs, ertek in metaadat.items():\n        print(f\"{kulcs}: {ertek}\")\n","repo_name":"xerioo/pythonProject","sub_path":"Metadata reader.py","file_name":"Metadata reader.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"19563059044","text":"number1=int(input(\"Enter first number :\"))\nnumber2=int(input(\"Enter second number :\"))\n\ntemp=number1\n\nif 
+{"seq_id":"19563059044","text":"number1=int(input(\"Enter first number :\"))\nnumber2=int(input(\"Enter second number :\"))\n\ntemp=number1\n\nif number1>number2:\n temp=number2\n# count down from the smaller input until a common divisor is found\nwhile temp>0:\n if(number1%temp == number2%temp == 0):\n break\n temp=temp-1\n \nif temp==0:\n print(\"There is no greatest common divisor\") \n exit() \n\nprint(temp)\n ","repo_name":"aligerami/assignment1","sub_path":"chapter5-16.py","file_name":"chapter5-16.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43238954015","text":"from importlib.metadata import version\n\nfrom benchopt import BaseSolver, safe_import_context\nfrom benchopt.stopping_criterion import SingleRunCriterion\n\nwith safe_import_context() as import_ctx:\n import cuml\n import cupy\n import numpy as np\n\n\nclass Solver(BaseSolver):\n \"\"\"Note: it is not clear that this solver actually belongs here.\n\n It is not documented whether it runs the Lloyd algorithm or an equivalent brute-force\n algorithm, nor whether the iterations are meant to result in the same inertia as the\n other solvers on the bench.\n \"\"\"\n\n name = \"cuml\"\n requirements = [\"cuml\"]\n\n parameters = dict(device=[\"gpu\"])\n\n stopping_criterion = SingleRunCriterion(1)\n\n def skip(self, **objective_dict):\n\n init = objective_dict[\"init\"]\n if not hasattr(init, \"copy\") and (init == \"k-means++\"):\n return True, (\n \"Support for k-means++ is not implemented in cuml. cuml only \"\n \"implements k-means|| whose walltime can't be compared with \"\n \"k-means++. \"\n )\n\n algorithm = objective_dict[\"algorithm\"]\n if algorithm != \"lloyd\":\n return True, \"cuml only supports the lloyd algorithm.\"\n\n X = objective_dict[\"X\"]\n if X.dtype == np.float64:\n # We haven't come across CUDA devices that don't support float64 yet;\n # can it happen? If it does, the following instruction will fail,\n # so enclose it in the appropriate try/except to return the\n # appropriate skip decision.\n cupy.zeros(1, dtype=cupy.float64)\n # return True, (\n # f\"This {self.device} device has no support for float64 compute\"\n # )\n\n return False, None\n\n def set_objective(\n self,\n X,\n sample_weight,\n init,\n n_clusters,\n n_init,\n max_iter,\n tol,\n verbose,\n algorithm,\n random_state,\n ):\n if self.device == \"cpu\":\n # Copy the data before running the benchmark to ensure that no unfortunate\n # side effects can happen\n self.X = X.copy()\n if hasattr(sample_weight, \"copy\"):\n sample_weight = sample_weight.copy()\n if hasattr(init, \"copy\"):\n init = init.copy()\n\n else:\n self.X = cupy.asarray(X)\n if hasattr(sample_weight, \"copy\"):\n sample_weight = cupy.asarray(sample_weight)\n if hasattr(init, \"copy\"):\n init = cupy.asarray(init)\n\n self.sample_weight = sample_weight\n self.init = init\n self.n_clusters = n_clusters\n self.n_init = n_init\n self.max_iter = max_iter\n\n if tol == 0:\n tol = 1e-16\n self.tol = tol\n\n self.verbose = verbose\n self.algorithm = algorithm\n self.random_state = random_state\n\n def warm_up(self):\n cuml.KMeans(\n n_clusters=self.n_clusters,\n init=self.init,\n n_init=self.n_init,\n max_iter=1,\n tol=self.tol,\n verbose=self.verbose,\n random_state=self.random_state,\n ).fit(self.X, sample_weight=self.sample_weight)\n\n def run(self, _):\n estimator = cuml.KMeans(\n n_clusters=self.n_clusters,\n init=self.init,\n n_init=self.n_init,\n max_iter=self.max_iter,\n tol=self.tol,\n verbose=self.verbose,\n random_state=self.random_state,\n ).fit(self.X, sample_weight=self.sample_weight)\n self.inertia_ = estimator.inertia_\n self.n_iter_ = estimator.n_iter_\n\n def 
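# --- Added aside (not from the record): the countdown loop in chapter5-16.py above is the
# --- brute-force way to find a GCD; Euclid's algorithm (what math.gcd implements) reaches the
# --- same answer in O(log min(a, b)) steps instead of O(min(a, b)).
import math

def gcd(a, b):
    while b:
        a, b = b, a % b
    return a

assert gcd(48, 18) == math.gcd(48, 18) == 6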
get_result(self):\n return dict(\n inertia=self.inertia_,\n n_iter=self.n_iter_,\n version_info=f\"cuml {version('cuml')}\",\n __name=self.name,\n comment=\"TODO: check it runs Lloyd ?\",\n **self._parameters,\n )\n","repo_name":"soda-inria/sklearn-engine-benchmarks","sub_path":"benchmarks/kmeans/solvers/cuml.py","file_name":"cuml.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"21372960981","text":"__author__ = 'max'\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..nn import ChainCRF, VarMaskedGRU, VarMaskedRNN, VarMaskedLSTM\nfrom ..nn import utils,Embedding\nimport numpy as np\nfrom scipy.stats import mode\n\nclass BiRecurrentConv(nn.Module):\n def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.5, p_rnn=(0.5, 0.5), initializer=None,use_char=True):\n super(BiRecurrentConv, self).__init__()\n\n self.word_embedd = Embedding(num_words, word_dim, init_embedding=embedd_word,freeze=True)\n self.char_embedd = Embedding(num_chars, char_dim, init_embedding=embedd_char) if use_char else None\n self.conv1d = nn.Conv1d(char_dim, num_filters, kernel_size, padding=kernel_size - 1) if use_char else None\n self.use_char = use_char\n # dropout word\n self.dropout_in = nn.Dropout2d(p=p_in)\n\n self.dropout_out = nn.Dropout(p_out)\n\n if rnn_mode == 'RNN':\n RNN = nn.RNN\n elif rnn_mode == 'LSTM':\n RNN = nn.LSTM\n elif rnn_mode == 'GRU':\n RNN = nn.GRU\n else:\n raise ValueError('Unknown RNN mode: %s' % rnn_mode)\n\n rnn_dim = word_dim+num_filters if use_char else word_dim\n self.rnn = RNN(rnn_dim, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True, dropout=p_rnn[1])\n\n self.dense = None\n out_dim = hidden_size * 2\n if tag_space:\n self.dense = nn.Linear(out_dim, tag_space)\n out_dim = tag_space\n self.dense_softmax = nn.Linear(out_dim, num_labels)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n self.nll_loss = nn.NLLLoss(size_average=False, reduce=False)\n\n self.initializer = initializer\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.initializer is None:\n return\n\n for name, parameter in self.named_parameters():\n if name.find('embedd') == -1:\n if parameter.dim() == 1:\n nn.init.constant_(parameter, 0.)\n else:\n self.initializer(parameter)\n\n def _get_rnn_output(self, input_word, input_char, mask=None, length=None, hx=None):\n # hack length from mask\n # we do not hack mask from length for special reasons.\n # Thus, always provide mask if it is necessary.\n if length is None and mask is not None:\n length = mask.sum(dim=1).long()\n\n # [batch, length, word_dim]\n word = self.word_embedd(input_word)\n # apply dropout word on input\n word = self.dropout_in(word)\n\n # [batch, length, char_length, char_dim]\n if (self.use_char):\n char = self.char_embedd(input_char)\n char_size = char.size()\n # first transform to [batch *length, char_length, char_dim]\n # then transpose to [batch * length, char_dim, char_length]\n char = char.view(char_size[0] * char_size[1], char_size[2], char_size[3]).transpose(1, 2)\n # put into cnn [batch*length, char_filters, char_length]\n # then put into maxpooling [batch * length, char_filters]\n char, _ = self.conv1d(char).max(dim=2)\n # reshape to [batch, length, char_filters]\n char = torch.tanh(char).view(char_size[0], char_size[1], -1)\n char = 
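# --- Added sketch (assumes a CUDA device with cupy installed, as the cuml record above does):
# --- its skip() probes float64 support by attempting a tiny device allocation; wrapped in
# --- try/except, the same probe yields the skip decision the record's comment suggests.
import cupy

def supports_float64():
    try:
        cupy.zeros(1, dtype=cupy.float64)  # fails on devices without float64 compute
        return True
    except Exception:
        return False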
self.dropout_in(char)\n # concatenate word and char [batch, length, word_dim+char_filter]\n input = torch.cat([word, char], dim=2)\n\n\n else:\n input = word\n\n # prepare packed_sequence\n if length is not None:\n seq_input, hx, rev_order, mask = utils.prepare_rnn_seq(input, length, hx=hx, masks=mask, batch_first=True)\n seq_output, hn = self.rnn(seq_input, hx=hx)\n output, hn = utils.recover_rnn_seq(seq_output, rev_order, hx=hn, batch_first=True)\n else:\n # output from rnn [batch, length, hidden_size]\n output, hn = self.rnn(input, hx=hx)\n\n # apply dropout for the output of rnn\n output = self.dropout_out(output)\n\n if self.dense is not None:\n # [batch, length, tag_space]\n output = self.dropout_out(F.elu(self.dense(output)))\n\n return output, hn, mask, length\n\n def forward(self, input_word, input_char, mask=None, length=None, hx=None):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n return output, mask, length\n\n def loss(self, input_word, input_char, target, mask=None, length=None, hx=None, leading_symbolic=0,find_baseline_list = False):\n # [batch, length, tag_space]\n output, mask, length = self.forward(input_word, input_char, mask=mask, length=length, hx=hx)\n # [batch, length, num_labels]\n output = self.dense_softmax(output)\n # preds = [batch, length]\n _, preds = torch.max(output[:, :, leading_symbolic:], dim=2)\n preds += leading_symbolic\n\n if length is not None and target.size(1) != mask.size(1):\n max_len = length.max()\n target = target[:, :max_len].contiguous()\n\n if find_baseline_list:\n corr,preds = self.find_baseline_list_preds(output, length, target, mask, leading_symbolic, num_of_preds=100)\n return corr,preds\n\n output_size = output.size()\n # [batch * length, num_labels]\n output_size = (output_size[0] * output_size[1], output_size[2])\n output = output.view(output_size)\n\n if mask is not None:\n return (self.nll_loss(self.logsoftmax(output), target.view(-1)) * mask.contiguous().view(-1)).sum() / mask.sum(), \\\n (torch.eq(preds, target).type_as(mask) * mask).sum(dim=1), preds\n else:\n num = output_size[0] * output_size[1]\n return self.nll_loss(self.logsoftmax(output), target.view(-1)).sum() / num, \\\n (torch.eq(preds, target).type_as(output)).sum(dim=1), preds\n\n def find_baseline_list_preds(self,output,length,target,mask,leading_symbolic,num_of_preds=100,epsilon=0.0001):\n\n # output shape: [batch, length, num_labels]\n sorted_output, sorted_preds = torch.sort(output[:, :, leading_symbolic:], descending=True, dim=2)\n correct_all_batch = []\n preds_all_batch = []\n for sample_index in range(sorted_preds.size(0)):\n correct_per_sample = []\n preds_per_sample = []\n true_length = length[sample_index]\n orig_max_pred = sorted_preds[sample_index, :, 0]\n orig_max_values = sorted_output[sample_index, :, 0]\n remaining_num_preds = num_of_preds - 1\n\n correct_per_sample.append((torch.eq(orig_max_pred, target[sample_index,:]).type_as(mask[sample_index,:]) * mask[sample_index,:]).sum())\n preds_per_sample.append(orig_max_pred)\n\n current_max_values = torch.tensor(orig_max_values)\n current_max_pred = torch.tensor(orig_max_pred)\n\n output[sample_index, [range(output.size(1))], orig_max_pred] += epsilon\n\n while remaining_num_preds > 0:\n\n current_output = torch.Tensor(output[sample_index, :, :].cpu() - current_max_values[:, None].cpu())\n max_value = -np.inf\n\n for i in range(true_length):\n for k in range(output.size(2)):\n if current_output[i, 
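# --- Added sketch (hypothetical sizes, not the repo's classes): the character pipeline above is
# --- embed, then Conv1d, then max-pool over characters, then tanh, then concat with word
# --- embeddings; a minimal standalone version of that encoder, with the shape at each step:
import torch
import torch.nn as nn

batch, seq_len, char_len = 2, 5, 7
char_dim, num_filters, kernel = 30, 16, 3
chars = torch.randint(0, 50, (batch, seq_len, char_len))         # [batch, length, char_length]

embed = nn.Embedding(50, char_dim)
conv = nn.Conv1d(char_dim, num_filters, kernel, padding=kernel - 1)

x = embed(chars)                                                  # [B, L, C, char_dim]
x = x.view(batch * seq_len, char_len, char_dim).transpose(1, 2)   # [B*L, char_dim, C]
x, _ = conv(x).max(dim=2)                                         # max-pool over characters
char_feats = torch.tanh(x).view(batch, seq_len, -1)               # [B, L, num_filters]
assert char_feats.shape == (2, 5, 16)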
k] > max_value and current_output[i, k] <= 0:\n max_value = torch.tensor(current_output[i, k])\n index_len = i\n index_pos = k\n\n current_max_pred[index_len] = index_pos\n current_max_values[index_len] = torch.tensor(output[sample_index, index_len, index_pos])\n\n output[sample_index, index_len, index_pos] += epsilon\n\n correct_per_sample.append((torch.eq(current_max_pred, target[sample_index,:]).type_as(mask[sample_index,:]) * mask[sample_index,:]).sum())\n preds_per_sample.append(current_max_pred)\n remaining_num_preds -= 1\n\n correct_all_batch.append(correct_per_sample)\n preds_all_batch.append(torch.stack(preds_per_sample)) ##[100,seq_length]\n\n return torch.tensor(correct_all_batch),torch.stack(preds_all_batch)\n\n\n @staticmethod\n def calc_majority_vote_per_batch(preds_batch):\n # preds batch :[batch_size,k_repetition, seq_length]\n preds_batch_numpy = preds_batch.cpu().numpy()\n most_common, count = mode(preds_batch_numpy, axis=1)\n most_common = np.squeeze(most_common, axis=1) #[batch_size, seq_length]\n return torch.from_numpy(most_common)\n\n\nclass BiVarRecurrentConv(BiRecurrentConv):\n def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.33, p_rnn=(0.33, 0.33), initializer=None,use_char=False):\n super(BiVarRecurrentConv, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,\n p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer,use_char=use_char)\n\n self.dropout_rnn_in = None\n self.dropout_out = nn.Dropout2d(p_out)\n\n if rnn_mode == 'RNN':\n RNN = VarMaskedRNN\n elif rnn_mode == 'LSTM':\n RNN = VarMaskedLSTM\n elif rnn_mode == 'GRU':\n RNN = VarMaskedGRU\n else:\n raise ValueError('Unknown RNN mode: %s' % rnn_mode)\n\n self.rnn = RNN(word_dim + num_filters, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True, dropout=p_rnn, initializer=self.initializer)\n\n def _get_rnn_output(self, input_word, input_char, mask=None, length=None, hx=None):\n # [batch, length, word_dim]\n word = self.word_embedd(input_word)\n\n # [batch, length, char_length, char_dim]\n char = self.char_embedd(input_char)\n char_size = char.size()\n # first transform to [batch *length, char_length, char_dim]\n # then transpose to [batch * length, char_dim, char_length]\n char = char.view(char_size[0] * char_size[1], char_size[2], char_size[3]).transpose(1, 2)\n # put into cnn [batch*length, char_filters, char_length]\n # then put into maxpooling [batch * length, char_filters]\n char, _ = self.conv1d(char).max(dim=2)\n # reshape to [batch, length, char_filters]\n char = torch.tanh(char).view(char_size[0], char_size[1], -1)\n\n # apply dropout word on input\n word = self.dropout_in(word)\n char = self.dropout_in(char)\n\n # concatenate word and char [batch, length, word_dim+char_filter]\n input = torch.cat([word, char], dim=2)\n # output from rnn [batch, length, hidden_size]\n output, hn = self.rnn(input, mask, hx=hx)\n\n # apply dropout for the output of rnn\n # [batch, length, hidden_size] --> [batch, hidden_size, length] --> [batch, length, hidden_size]\n output = self.dropout_out(output.transpose(1, 2)).transpose(1, 2)\n\n if self.dense is not None:\n # [batch, length, tag_space] --> [batch, tag_space, length] --> [batch, length, tag_space]\n output = 
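# --- Added sketch (standalone; assumes scipy >= 1.9 for the explicit keepdims flag): majority
# --- voting across k prediction sets per token, the idea behind calc_majority_vote_per_batch
# --- above.
import numpy as np
from scipy.stats import mode

preds = np.array([[[1, 2, 2],    # sample 0: three repetitions of a length-3 tag sequence
                   [1, 3, 2],
                   [1, 2, 4]]])  # shape [batch=1, k=3, seq_len=3]
most_common, _ = mode(preds, axis=1, keepdims=True)
voted = np.squeeze(most_common, axis=1)  # [batch, seq_len]
assert voted.tolist() == [[1, 2, 2]]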
self.dropout_out(F.elu(self.dense(output)).transpose(1, 2)).transpose(1, 2)\n\n return output, hn, mask, length\n\n\nclass BiRecurrentConvCRF(BiRecurrentConv):\n def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.5, p_rnn=(0.5, 0.5), bigram=False, initializer=None):\n super(BiRecurrentConvCRF, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,\n p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer)\n\n out_dim = tag_space if tag_space else hidden_size * 2\n self.crf = ChainCRF(out_dim, num_labels, bigram=bigram)\n self.dense_softmax = None\n self.logsoftmax = None\n self.nll_loss = None\n\n def forward(self, input_word, input_char, mask=None, length=None, hx=None):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n # [batch, length, num_label, num_label]\n return self.crf(output, mask=mask), mask\n\n def loss(self, input_word, input_char, target, mask=None, length=None, hx=None, leading_symbolic=0):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n\n if length is not None:\n max_len = length.max()\n target = target[:, :max_len]\n\n # [batch, length, num_label, num_label]\n return self.crf.loss(output, target, mask=mask).mean()\n\n def decode(self, input_word, input_char, target=None, mask=None, length=None, hx=None, leading_symbolic=0):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n\n if target is None:\n return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None\n\n if length is not None:\n max_len = length.max()\n target = target[:, :max_len]\n\n preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)\n if mask is None:\n return preds, torch.eq(preds, target).float().sum()\n else:\n return preds, (torch.eq(preds, target).float() * mask).sum()\n\n\nclass BiVarRecurrentConvCRF(BiVarRecurrentConv):\n def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.33, p_rnn=(0.33, 0.33), bigram=False, initializer=None):\n super(BiVarRecurrentConvCRF, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,\n tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,\n p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer)\n\n out_dim = tag_space if tag_space else hidden_size * 2\n self.crf = ChainCRF(out_dim, num_labels, bigram=bigram)\n self.dense_softmax = None\n self.logsoftmax = None\n self.nll_loss = None\n\n def forward(self, input_word, input_char, mask=None, length=None, hx=None):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n # [batch, length, num_label, num_label]\n return self.crf(output, mask=mask), mask\n\n def loss(self, input_word, input_char, target, mask=None, length=None, 
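# --- Added sketch (standalone): the masked-accuracy idiom these loss/decode methods rely on.
# --- Compare predictions to targets elementwise, zero out padding with the mask, then sum per
# --- sequence so padded positions never count as hits or misses.
import torch

preds = torch.tensor([[1, 2, 3, 0]])
target = torch.tensor([[1, 2, 9, 9]])
mask = torch.tensor([[1., 1., 1., 0.]])  # last position is padding

correct = (torch.eq(preds, target).float() * mask).sum(dim=1)
assert correct.item() == 2.0             # the padded position is ignored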
hx=None, leading_symbolic=0):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n\n if length is not None:\n max_len = length.max()\n target = target[:, :max_len]\n\n # [batch, length, num_label, num_label]\n return self.crf.loss(output, target, mask=mask).mean()\n\n def decode(self, input_word, input_char, target=None, mask=None, length=None, hx=None, leading_symbolic=0):\n # output from rnn [batch, length, tag_space]\n output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)\n\n if target is None:\n return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None\n\n if length is not None:\n max_len = length.max()\n target = target[:, :max_len]\n\n preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)\n if mask is None:\n return preds, torch.eq(preds, target).float().sum()\n else:\n return preds, (torch.eq(preds, target).float() * mask).sum()","repo_name":"ramyazdi/perturbations","sub_path":"neuro_nlp/models/sequence_labeling.py","file_name":"sequence_labeling.py","file_ext":"py","file_size_in_byte":16962,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"41108495614","text":"from titandash.bot.external.imagesearch import *\n\n\nclass Grabber:\n \"\"\"\n Grabber class provides functionality to capture a portion of the screen, based on the height\n and width that the emulator should be set to.\n \"\"\"\n def __init__(self, window, logger):\n # Base height and width, resolution of game.\n self.window = window\n self.logger = logger\n\n # Screen is updated and set to the result of an image\n # grab as needed through the snapshot method.\n self.current = None\n\n def snapshot(self, region=None, downsize=None):\n \"\"\"\n Take a snapshot of the current game session, based on the width and height of the grabber unless\n an explicit region is specified to use to take a screen-shot with.\n \"\"\"\n if not region:\n self.current = self.window.screenshot()\n else:\n self.current = self.window.screenshot(region=region)\n\n # Optionally, we can downsize the image grabbed, may improve performance\n # if we are grabbing or parsing many images and want them to be smaller sizes.\n if downsize:\n self.current.thumbnail((\n self.current.width / downsize,\n self.current.height / downsize\n ))\n\n return self.current\n\n def search(self, image, region=None, precision=0.8, bool_only=False, testing=False, im=None, return_image=False):\n \"\"\"\n Search the specified image for another image with a specified amount of precision.\n\n Specifying bool_only as True will only return whether or not the image is found.\n\n The testing boolean is used to aid the unit tests to use mock images as a snapshot instead\n of the actual screen.\n \"\"\"\n if not testing:\n self.snapshot()\n\n found = False\n position = -1, -1\n\n search_kwargs = {\n \"x1\": region[0] if region else self.window.x,\n \"y1\": region[1] if region else self.window.y,\n \"x2\": region[2] if region else self.window.width,\n \"y2\": region[3] if region else self.window.height,\n \"precision\": precision,\n \"im\": im if region else self.current if not im else im,\n \"logger\": self.logger\n }\n\n # If a list of images to be searched for is being used, loop through and search.\n # The first image specified that is found breaks the loop.\n if isinstance(image, list):\n for _image in image:\n position = 
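# --- Added sketch (standalone, PIL only): the optional downsize step in Grabber.snapshot above
# --- relies on Image.thumbnail, which resizes in place while preserving the aspect ratio and
# --- never enlarging.
from PIL import Image

img = Image.new("RGB", (800, 600))
downsize = 2
img.thumbnail((img.width / downsize, img.height / downsize))
assert img.size == (400, 300)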
imagesearcharea(window=self.window, image=_image, **search_kwargs)\r\n if position[0] != -1:\r\n image = _image # Set inline var to main for logging purposes.\r\n break\r\n else:\r\n position = imagesearcharea(window=self.window, image=image, **search_kwargs)\r\n\r\n if position[0] != -1:\r\n self.logger.debug(\"{image_name} was successfully found on the screen...\".format(image_name=image.split(\"/\")[-1]))\r\n found = True\r\n if bool_only:\r\n return found\r\n\r\n # Modify the position to reflect the current window location.\r\n if position[0] != -1:\r\n position = (position[0], position[1])\r\n\r\n # Include the image that was found if specified to do so.\r\n # May prove useful when searching for a list of images.\r\n if return_image:\r\n return found, position, image\r\n\r\n return found, position\r\n\r\n def point_is_color(self, point, color=None, color_range=None):\r\n \"\"\"\r\n Given a specified point, determine if that point is currently a specific color.\r\n \"\"\"\r\n if color and color_range:\r\n raise ValueError(\"Only one of color or color_range may be present, but not both.\")\r\n\r\n self.snapshot()\r\n\r\n pt = self.current.getpixel(point)\r\n\r\n # No padding or modification is required for our color check, since we are\r\n # using the snapshot functionality, which takes into account our emulator\r\n # position and title bar height. The point being used\r\n # is in relation to the \"current\" image which is already padded properly.\r\n if color:\r\n return pt == color\r\n\r\n # Checking for a color range allows for a bit of irregularity in the colors present\r\n # at a certain location; this is mostly done to check for very different colors,\r\n # for example, when perks are active, they are greyed out, and blue when available.\r\n if color_range:\r\n return color_range[0][0] <= pt[0] <= color_range[0][1] and color_range[1][0] <= pt[1] <= color_range[1][1] and color_range[2][0] <= pt[2] <= color_range[2][1]\r\n","repo_name":"hohenheim52/titandash","sub_path":"titanbot/titandash/bot/core/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19784069443","text":"class Customer (object):\n\n def __init__(self, name):\n self.orders = 0\n self.cart = {}\n self.name = name\n\n def add_toCart(self, item_name, quantity, price):\n if isinstance(item_name, str) and isinstance(quantity, int) and isinstance(price, int):\n self.orders += price*quantity\n self.cart.update({item_name: quantity})\n return self.cart\n else:\n return 'Enter an Integer'\n\n def remove_fromCart(self, item_name, quantity, price):\n if isinstance(item_name, str) and isinstance(quantity, int) and isinstance(price, int):\n if item_name in self.cart:\n if quantity < self.cart[item_name] and quantity > 0:\n self.cart[item_name] -= quantity\n self.orders -= price*quantity\n elif quantity == self.cart[item_name]:\n self.orders = self.orders - price*quantity\n del self.cart[item_name]\n return self.orders\n else:\n return str(item_name)+\" is not in your cart\"\n else:\n return 'Enter an Integer'\n\n def cart_info(self):\n if self.cart == {}:\n return {}\n else:\n return self.cart\n\n def customer_info(self):\n return self.name \n\n def checkout(self, cash_paid):\n if cash_paid >= self.orders:\n if cash_paid - self.orders == 0:\n self.cart = {}\n return 0\n else:\n self.cart = {}\n return cash_paid - self.orders\n return \"Sorry, the cash paid is not enough\"\n","repo_name":"AbdelwahabAdam/Npyscreen","sub_path":"Ice Cream Order 
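# --- Added sketch (standalone): the per-channel range test from point_is_color above, which
# --- lets a pixel match loosely (e.g. a greyed-out versus an active UI element) instead of
# --- requiring an exact color.
def in_color_range(pixel, color_range):
    return all(lo <= channel <= hi for channel, (lo, hi) in zip(pixel, color_range))

blue_ish = ((0, 60), (80, 140), (180, 255))  # (R, G, B) bounds
assert in_color_range((30, 100, 200), blue_ish)
assert not in_color_range((200, 100, 200), blue_ish)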
(TDD)/IceCream/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13589790162","text":"# coding: utf-8\n\n\"\"\"\nFunctions that are invoked by interactive task methods.\n\"\"\"\n\n__all__ = [\n \"print_task_deps\", \"print_task_status\", \"print_task_output\", \"remove_task_output\",\n \"fetch_task_output\",\n]\n\n\nimport os\n\nimport six\n\nfrom law.config import Config\nfrom law.target.base import Target\nfrom law.target.file import FileSystemTarget\nfrom law.target.collection import TargetCollection, FileCollection\nfrom law.util import (\n colored, uncolored, uncolor_cre, flatten, flag_to_bool, query_choice, human_bytes,\n is_lazy_iterable, make_list, merge_dicts, makedirs, get_terminal_width,\n)\nfrom law.logger import get_logger\n\n\nlogger = get_logger(__name__)\n\n\n# formatting characters\nfmt_chars = {\n \"plain\": {\n \"ind\": 2,\n \"free\": 1,\n \"-\": \"-\",\n \"t\": \"+\",\n \"l\": \"+\",\n \"|\": \"|\",\n \">\": \">\",\n },\n \"fancy\": {\n \"ind\": 2,\n \"free\": 1,\n \"-\": \"─\",\n \"t\": \"├\",\n \"l\": \"└\",\n \"|\": \"│\",\n \">\": \">\",\n },\n}\nfmt_chars[\"compact\"] = merge_dicts(fmt_chars[\"plain\"], {\"free\": 0})\nfmt_chars[\"fancy_compact\"] = merge_dicts(fmt_chars[\"fancy\"], {\"free\": 0})\n\n\n# helper to create a list of 3-tuples (target, depth, prefix) of an arbitrarily structured output\ndef _flatten_output(output, depth):\n if isinstance(output, (list, tuple, set)) or is_lazy_iterable(output):\n return [(outp, depth, \"{}: \".format(i)) for i, outp in enumerate(output)]\n elif isinstance(output, dict):\n return [(outp, depth, \"{}: \".format(k)) for k, outp in six.iteritems(output)]\n else:\n return [(outp, depth, \"\") for outp in flatten(output)]\n\n\ndef _iter_output(output, offset, ind=\" \"):\n lookup = _flatten_output(output, 0)\n while lookup:\n output, odepth, oprefix = lookup.pop(0)\n ooffset = offset + odepth * ind\n\n if isinstance(output, Target):\n yield output, odepth, oprefix, ooffset, lookup\n\n else:\n # before updating the lookup list, but check if the output changes by this\n _lookup = _flatten_output(output, odepth + 1)\n if len(_lookup) > 0 and _lookup[0][0] == output:\n print(ooffset + oprefix + colored(\"not a target\", color=\"red\"))\n else:\n # print the key of the current structure\n print(ooffset + oprefix)\n\n # update the lookup list\n lookup[:0] = _lookup\n\n\ndef _print_wrapped(line, width, offset=\"\"):\n # when the width is not set or the line is empty, just print the line\n if not line or width is None or width <= 0:\n print(line)\n return\n\n # split into actual strings to print (even parts) and color/style modifiers (odd parts) for\n # proper width computation\n parts = [(part, i % 2 == 1) for i, part in enumerate(uncolor_cre.split(line))]\n\n # build lines with odd parts until the line is filled\n line, length, last_style = \"\", 0, \"\"\n while parts:\n part, is_style = parts.pop(0)\n if is_style:\n # style modifier\n line += part\n last_style = part\n elif length + len(part) <= width:\n # actual string that still fits\n line += part\n length += len(part)\n else:\n # actual string that would overflow the line, so add the characters that would still fit\n # and then print the line\n n = width - length\n line += part[:n]\n print(line)\n # add the remaining characters with an uncolored offset and reset the state\n parts[:0] = [\n (\"\\033[0m\", True),\n (uncolored(offset), 
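# --- Added sketch (standalone; a generic ANSI pattern stands in for law's uncolor_cre): the
# --- even/odd trick used by _print_wrapped above. re.split on a *capturing* ANSI regex
# --- alternates plain text (even indices) and style codes (odd indices), so only even parts
# --- count toward the visible line width.
import re

ansi = re.compile("(\x1b\\[[0-9;]*m)")
line = "\x1b[31mred\x1b[0m and plain"
parts = ansi.split(line)
visible = sum(len(p) for i, p in enumerate(parts) if i % 2 == 0)
assert visible == len("red and plain")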
False),\n (last_style, True),\n (part[n:], False),\n ]\n line, length, last_style = \"\", 0, \"\"\n # print any leftover line\n if line:\n print(line)\n\n\ndef print_task_deps(task, max_depth=1):\n max_depth = int(max_depth)\n\n print(\"print task dependencies with max_depth {}\".format(max_depth))\n print(\"\")\n\n # get the format chars\n cfg = Config.instance()\n fmt_name = cfg.get_expanded(\"task\", \"interactive_format\")\n fmt = fmt_chars.get(fmt_name, fmt_chars[\"fancy\"])\n\n # get the line break setting\n break_lines = cfg.get_expanded_bool(\"task\", \"interactive_line_breaks\")\n out_width = cfg.get_expanded_int(\"task\", \"interactive_line_width\")\n print_width = (out_width if out_width > 0 else get_terminal_width()) if break_lines else None\n _print = lambda line, offset: _print_wrapped(line, print_width, offset)\n\n parents_last_flags = []\n for dep, next_deps, depth, is_last in task.walk_deps(\n max_depth=max_depth,\n order=\"pre\",\n yield_last_flag=True,\n ):\n del parents_last_flags[depth:]\n next_deps_shown = bool(next_deps) and (max_depth < 0 or depth < max_depth)\n\n # determine the print common offset\n offset = [(\" \" if f else fmt[\"|\"]) + fmt[\"ind\"] * \" \" for f in parents_last_flags[1:]]\n offset = \"\".join(offset)\n parents_last_flags.append(is_last)\n\n # print free space\n free_offset = offset + fmt[\"|\"]\n free_lines = \"\\n\".join(fmt[\"free\"] * [free_offset])\n if depth > 0 and free_lines:\n print(free_lines)\n\n # determine task offset and prefix\n task_offset = offset\n if depth > 0:\n task_offset += fmt[\"l\" if is_last else \"t\"] + fmt[\"ind\"] * fmt[\"-\"]\n task_prefix = \"{} {} \".format(depth, fmt[\">\"])\n\n # determine text offset and prefix\n text_offset = offset\n if depth > 0:\n text_offset += (\" \" if is_last else fmt[\"|\"]) + fmt[\"ind\"] * \" \"\n text_prefix = (len(task_prefix) - 1) * \" \"\n text_offset += (fmt[\"|\"] if next_deps_shown else \" \") + text_prefix\n\n # print the task line\n _print(task_offset + task_prefix + dep.repr(color=True), text_offset)\n\n\ndef print_task_status(task, max_depth=0, target_depth=0, flags=None):\n from law.workflow.base import BaseWorkflow\n\n max_depth = int(max_depth)\n target_depth = int(target_depth)\n if flags:\n flags = tuple(flags.lower().split(\"-\"))\n\n print(\"print task status with max_depth {} and target_depth {}\".format(\n max_depth, target_depth))\n print(\"\")\n\n # get the format chars\n cfg = Config.instance()\n fmt_name = cfg.get_expanded(\"task\", \"interactive_format\")\n fmt = fmt_chars.get(fmt_name, fmt_chars[\"fancy\"])\n\n # get the line break setting\n break_lines = cfg.get_expanded_bool(\"task\", \"interactive_line_breaks\")\n out_width = cfg.get_expanded_int(\"task\", \"interactive_line_width\")\n print_width = (out_width if out_width > 0 else get_terminal_width()) if break_lines else None\n _print = lambda line, offset: _print_wrapped(line, print_width, offset)\n\n # get other settings\n skip_seen = cfg.get_expanded_bool(\"task\", \"interactive_status_skip_seen\")\n\n # walk through deps\n done = []\n parents_last_flags = []\n for dep, next_deps, depth, is_last in task.walk_deps(\n max_depth=max_depth,\n order=\"pre\",\n yield_last_flag=True,\n ):\n del parents_last_flags[depth:]\n next_deps_shown = bool(next_deps) and (max_depth < 0 or depth < max_depth)\n\n # determine the print common offset\n offset = [(\" \" if f else fmt[\"|\"]) + fmt[\"ind\"] * \" \" for f in parents_last_flags[1:]]\n offset = \"\".join(offset)\n parents_last_flags.append(is_last)\n\n # 
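# --- Added sketch (standalone): the parents_last_flags bookkeeping in print_task_deps above,
# --- reduced to its core: each ancestor contributes "│  " when more siblings follow or blank
# --- space when it was the last child, and the node itself gets a ├─ or └─ connector.
def render(name, children=(), parents_last=()):
    prefix = "".join("   " if last else "│  " for last in parents_last[:-1])
    if parents_last:
        prefix += "└─ " if parents_last[-1] else "├─ "
    lines = [prefix + name]
    for i, (child, sub) in enumerate(children):
        lines += render(child, sub, parents_last + (i == len(children) - 1,))
    return lines

tree = render("TaskA", [("TaskB", [("TaskD", ())]), ("TaskC", ())])
assert tree == ["TaskA", "├─ TaskB", "│  └─ TaskD", "└─ TaskC"]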
print free space\n free_offset = offset + fmt[\"|\"]\n free_lines = \"\\n\".join(fmt[\"free\"] * [free_offset])\n if depth > 0 and free_lines:\n print(free_lines)\n\n # when the dep is a workflow, independent of its create_branch_map_before_repr setting,\n # preload its branch map which updates branch parameters\n if isinstance(dep, BaseWorkflow):\n dep.get_branch_map()\n\n # determine task offset and prefix\n task_offset = offset\n if depth > 0:\n task_offset += fmt[\"l\" if is_last else \"t\"] + fmt[\"ind\"] * fmt[\"-\"]\n task_prefix = \"{} {} \".format(depth, fmt[\">\"])\n\n # determine text offset and prefix\n text_offset = offset\n if depth > 0:\n text_offset += (\" \" if is_last else fmt[\"|\"]) + fmt[\"ind\"] * \" \"\n text_prefix = (len(task_prefix) - 1) * \" \"\n text_offset += (fmt[\"|\"] if next_deps_shown else \" \") + text_prefix\n text_offset_ind = text_offset + fmt[\"ind\"] * \" \"\n\n # print the task line\n _print(task_offset + task_prefix + dep.repr(color=True), text_offset)\n\n # skip if already seen\n if skip_seen and dep in done:\n _print(text_offset_ind + colored(\"outputs already checked\", \"yellow\"), text_offset_ind)\n continue\n\n done.append(dep)\n\n # start the traversing\n for output, _, oprefix, ooffset, _ in _iter_output(\n dep.output(),\n text_offset_ind,\n fmt[\"ind\"] * \" \",\n ):\n _print(ooffset + oprefix + output.repr(color=True), ooffset + len(oprefix) * \" \")\n ooffset += fmt[\"ind\"] * \" \"\n status_text = output.status_text(max_depth=target_depth, flags=flags, color=True)\n status_lines = status_text.split(\"\\n\")\n _print(ooffset + status_lines[0], ooffset)\n for line in status_lines[1:]:\n _print(ooffset + line, ooffset)\n\n\ndef print_task_output(task, max_depth=0, scheme=True):\n max_depth = int(max_depth)\n scheme = flag_to_bool(scheme)\n\n print(\"print task output with max_depth {}, {} schemes\\n\".format(\n max_depth, \"showing\" if scheme else \"hiding\"))\n\n done = []\n for dep, _, depth in task.walk_deps(max_depth=max_depth, order=\"pre\"):\n done.append(dep)\n\n for outp in flatten(dep.output()):\n kwargs = {}\n if isinstance(outp, (FileSystemTarget, FileCollection)):\n kwargs = {\"scheme\": scheme}\n for uri in make_list(outp.uri(**kwargs)):\n print(uri)\n\n\ndef remove_task_output(task, max_depth=0, mode=None, run_task=False):\n from law.task.base import ExternalTask\n from law.workflow.base import BaseWorkflow\n\n max_depth = int(max_depth)\n\n print(\"remove task output with max_depth {}\".format(max_depth))\n\n run_task = flag_to_bool(run_task)\n if run_task:\n print(\"task will run after output removal\")\n\n # get the format chars\n cfg = Config.instance()\n fmt_name = cfg.get_expanded(\"task\", \"interactive_format\")\n fmt = fmt_chars.get(fmt_name, fmt_chars[\"fancy\"])\n\n # get the line break setting\n break_lines = cfg.get_expanded_bool(\"task\", \"interactive_line_breaks\")\n out_width = cfg.get_expanded_int(\"task\", \"interactive_line_width\")\n print_width = [(out_width if out_width > 0 else get_terminal_width()) if break_lines else None]\n _print = lambda line, offset: _print_wrapped(line, print_width[0], offset)\n\n # custom query_choice function that updates the terminal_width\n def _query_choice(*args, **kwargs):\n if print_width[0]:\n print_width[0] = out_width if out_width > 0 else get_terminal_width()\n return query_choice(*args, **kwargs)\n\n # determine the mode, i.e., interactive, dry, all\n modes = [\"i\", \"d\", \"a\"]\n mode_names = [\"interactive\", \"dry\", \"all\"]\n if mode and mode not in modes:\n 
raise Exception(\"unknown removal mode '{}'\".format(mode))\n if not mode:\n mode = _query_choice(\"removal mode?\", modes, default=\"i\", descriptions=mode_names)\n mode_name = mode_names[modes.index(mode)]\n print(\"selected {} mode\".format(colored(mode_name, \"blue\", style=\"bright\")))\n print(\"\")\n\n done = []\n parents_last_flags = []\n for dep, next_deps, depth, is_last in task.walk_deps(\n max_depth=max_depth,\n order=\"pre\",\n yield_last_flag=True,\n ):\n del parents_last_flags[depth:]\n next_deps_shown = bool(next_deps) and (max_depth < 0 or depth < max_depth)\n\n # determine the print common offset\n offset = [(\" \" if f else fmt[\"|\"]) + fmt[\"ind\"] * \" \" for f in parents_last_flags[1:]]\n offset = \"\".join(offset)\n parents_last_flags.append(is_last)\n\n # print free space\n free_offset = offset + fmt[\"|\"]\n free_lines = \"\\n\".join(fmt[\"free\"] * [free_offset])\n if depth > 0 and free_lines:\n print(free_lines)\n\n # when the dep is a workflow, independent of its create_branch_map_before_repr setting,\n # preload its branch map which updates branch parameters\n if isinstance(dep, BaseWorkflow):\n dep.get_branch_map()\n\n # determine task offset and prefix\n task_offset = offset\n if depth > 0:\n task_offset += fmt[\"l\" if is_last else \"t\"] + fmt[\"ind\"] * fmt[\"-\"]\n task_prefix = \"{} {} \".format(depth, fmt[\">\"])\n\n # determine text offset and prefix\n text_offset = offset\n if depth > 0:\n text_offset += (\" \" if is_last else fmt[\"|\"]) + fmt[\"ind\"] * \" \"\n text_prefix = (len(task_prefix) - 1) * \" \"\n text_offset += (fmt[\"|\"] if next_deps_shown else \" \") + text_prefix\n text_offset_ind = text_offset + fmt[\"ind\"] * \" \"\n\n # print the task line\n _print(task_offset + task_prefix + dep.repr(color=True), text_offset)\n\n # always skip external tasks\n if isinstance(dep, ExternalTask):\n _print(text_offset_ind + colored(\"task is external\", \"yellow\"), text_offset_ind)\n continue\n\n # skip when this task was already handled\n if dep in done:\n _print(text_offset_ind + colored(\"already handled\", \"yellow\"), text_offset_ind)\n continue\n done.append(dep)\n\n # skip when mode is \"all\" and task is configured to skip\n if mode == \"a\" and getattr(dep, \"skip_output_removal\", False):\n _print(text_offset_ind + colored(\"configured to skip\", \"yellow\"), text_offset_ind)\n continue\n\n # query for a decision per task when mode is \"interactive\"\n task_mode = None\n if mode == \"i\":\n task_mode = _query_choice(text_offset_ind + \"remove outputs?\", [\"y\", \"n\", \"a\"],\n default=\"y\", descriptions=[\"yes\", \"no\", \"all\"])\n if task_mode == \"n\":\n continue\n\n # start the traversing through output structure\n for output, odepth, oprefix, ooffset, lookup in _iter_output(\n dep.output(),\n text_offset_ind,\n fmt[\"ind\"] * \" \",\n ):\n _print(ooffset + oprefix + output.repr(color=True), ooffset + len(oprefix) * \" \")\n ooffset += fmt[\"ind\"] * \" \"\n\n # skip external targets\n if getattr(output, \"external\", False):\n _print(ooffset + colored(\"external output\", \"yellow\"), ooffset)\n continue\n\n # stop here when in dry mode\n if mode == \"d\":\n _print(ooffset + colored(\"dry removed\", \"yellow\"), ooffset)\n continue\n\n # when the mode is \"interactive\" and the task decision is not \"all\", query per output\n if mode == \"i\" and task_mode != \"a\":\n if isinstance(output, TargetCollection):\n coll_choice = _query_choice(ooffset + \"remove?\", (\"y\", \"n\", \"i\"),\n default=\"n\", descriptions=[\"yes\", \"no\", 
\"interactive\"])\n if coll_choice == \"i\":\n lookup[:0] = _flatten_output(output.targets, odepth + 1)\n continue\n else:\n target_choice = coll_choice\n else:\n target_choice = _query_choice(ooffset + \"remove?\", (\"y\", \"n\"),\n default=\"n\", descriptions=[\"yes\", \"no\"])\n if target_choice == \"n\":\n _print(ooffset + colored(\"skipped\", \"yellow\"), ooffset)\n continue\n\n # finally remove\n output.remove()\n _print(ooffset + colored(\"removed\", \"red\", style=\"bright\"), ooffset)\n\n return run_task\n\n\ndef fetch_task_output(task, max_depth=0, mode=None, target_dir=\".\", include_external=False):\n from law.task.base import ExternalTask\n from law.workflow.base import BaseWorkflow\n\n max_depth = int(max_depth)\n print(\"fetch task output with max_depth {}\".format(max_depth))\n\n target_dir = os.path.normpath(os.path.abspath(target_dir))\n print(\"target directory is {}\".format(target_dir))\n makedirs(target_dir)\n\n include_external = flag_to_bool(include_external)\n if include_external:\n print(\"include external tasks\")\n\n # get the format chars\n cfg = Config.instance()\n fmt_name = cfg.get_expanded(\"task\", \"interactive_format\")\n fmt = fmt_chars.get(fmt_name, fmt_chars[\"fancy\"])\n\n # get the line break setting\n break_lines = cfg.get_expanded_bool(\"task\", \"interactive_line_breaks\")\n out_width = cfg.get_expanded_int(\"task\", \"interactive_line_width\")\n print_width = [(out_width if out_width > 0 else get_terminal_width()) if break_lines else None]\n _print = lambda line, offset: _print_wrapped(line, print_width[0], offset)\n\n # custom query_choice function that updates the terminal_width\n def _query_choice(*args, **kwargs):\n if print_width[0]:\n print_width[0] = out_width if out_width > 0 else get_terminal_width()\n return query_choice(*args, **kwargs)\n\n # determine the mode, i.e., all, dry, interactive\n modes = [\"i\", \"a\", \"d\"]\n mode_names = [\"interactive\", \"all\", \"dry\"]\n if mode is None:\n mode = _query_choice(\"fetch mode?\", modes, default=\"i\", descriptions=mode_names)\n elif isinstance(mode, int):\n mode = modes[mode]\n else:\n mode = mode[0].lower()\n if mode not in modes:\n raise Exception(\"unknown fetch mode '{}'\".format(mode))\n mode_name = mode_names[modes.index(mode)]\n print(\"selected {} mode\".format(colored(mode_name, \"blue\", style=\"bright\")))\n print(\"\")\n\n done = []\n parents_last_flags = []\n for dep, next_deps, depth, is_last in task.walk_deps(\n max_depth=max_depth,\n order=\"pre\",\n yield_last_flag=True,\n ):\n del parents_last_flags[depth:]\n next_deps_shown = bool(next_deps) and (max_depth < 0 or depth < max_depth)\n\n # determine the print common offset\n offset = [(\" \" if f else fmt[\"|\"]) + fmt[\"ind\"] * \" \" for f in parents_last_flags[1:]]\n offset = \"\".join(offset)\n parents_last_flags.append(is_last)\n\n # print free space\n free_offset = offset + fmt[\"|\"]\n free_lines = \"\\n\".join(fmt[\"free\"] * [free_offset])\n if depth > 0 and free_lines:\n print(free_lines)\n\n # when the dep is a workflow, independent of its create_branch_map_before_repr setting,\n # preload its branch map which updates branch parameters\n if isinstance(dep, BaseWorkflow):\n dep.get_branch_map()\n\n # determine task offset and prefix\n task_offset = offset\n if depth > 0:\n task_offset += fmt[\"l\" if is_last else \"t\"] + fmt[\"ind\"] * fmt[\"-\"]\n task_prefix = \"{} {} \".format(depth, fmt[\">\"])\n\n # determine text offset and prefix\n text_offset = offset\n if depth > 0:\n text_offset += (\" \" if 
is_last else fmt[\"|\"]) + fmt[\"ind\"] * \" \"\n text_prefix = (len(task_prefix) - 1) * \" \"\n text_offset += (fmt[\"|\"] if next_deps_shown else \" \") + text_prefix\n text_offset_ind = text_offset + fmt[\"ind\"] * \" \"\n\n # print the task line\n _print(task_offset + task_prefix + dep.repr(color=True), text_offset)\n\n if not include_external and isinstance(dep, ExternalTask):\n _print(text_offset_ind + colored(\"task is external\", \"yellow\"), text_offset_ind)\n continue\n\n if dep in done:\n _print(text_offset_ind + colored(\"outputs already fetched\", \"yellow\"), text_offset_ind)\n continue\n\n if mode == \"i\":\n task_mode = _query_choice(text_offset_ind + \"fetch outputs?\", (\"y\", \"n\", \"a\"),\n default=\"y\", descriptions=[\"yes\", \"no\", \"all\"])\n if task_mode == \"n\":\n _print(text_offset_ind + colored(\"skipped\", \"yellow\"), text_offset_ind)\n continue\n\n done.append(dep)\n\n # start the traversing through output structure with a lookup pattern\n for output, odepth, oprefix, ooffset, lookup in _iter_output(\n dep.output(),\n text_offset_ind,\n fmt[\"ind\"] * \" \",\n ):\n try:\n stat = output.stat()\n except:\n stat = None\n\n # print the target repr\n target_line = ooffset + oprefix + output.repr(color=True)\n if stat:\n target_line += \" ({:.2f} {})\".format(*human_bytes(stat.st_size))\n _print(target_line, ooffset + len(oprefix) * \" \")\n ooffset += fmt[\"ind\"] * \" \"\n\n # skip external targets\n if not include_external and getattr(output, \"external\", False):\n _print(ooffset + colored(\"external output, skip\", \"yellow\"), ooffset)\n continue\n\n # skip missing targets\n if stat is None and not isinstance(output, TargetCollection):\n _print(ooffset + colored(\"not existing, skip\", \"yellow\"), ooffset)\n continue\n\n # skip targets without a copy_to_local method\n is_copyable = callable(getattr(output, \"copy_to_local\", None))\n if not is_copyable and not isinstance(output, TargetCollection):\n _print(ooffset + colored(\"not a file target, skip\", \"yellow\"), ooffset)\n continue\n\n # stop here when in dry mode\n if mode == \"d\":\n _print(ooffset + colored(\"dry fetched\", \"yellow\"), ooffset)\n continue\n\n # collect actual outputs to fetch\n to_fetch = [output]\n if mode == \"i\" and task_mode != \"a\":\n if isinstance(output, TargetCollection):\n coll_choice = _query_choice(ooffset + \"fetch?\", (\"y\", \"n\", \"i\"),\n default=\"y\", descriptions=[\"yes\", \"no\", \"interactive\"])\n if coll_choice == \"i\":\n lookup[:0] = _flatten_output(output.targets, odepth + 1)\n continue\n else:\n target_choice = coll_choice\n to_fetch = list(output._flat_target_list)\n else:\n target_choice = _query_choice(ooffset + \"fetch?\", (\"y\", \"n\"),\n default=\"y\", descriptions=[\"yes\", \"no\"])\n if target_choice == \"n\":\n _print(ooffset + colored(\"skipped\", \"yellow\"), ooffset)\n continue\n\n # flatten all target collections\n to_fetch_flat = []\n while to_fetch:\n t = to_fetch.pop(0)\n if isinstance(t, TargetCollection):\n to_fetch[:0] = list(t._flat_target_list)\n else:\n to_fetch_flat.append(t)\n\n # actual copy\n for outp in to_fetch_flat:\n if not callable(getattr(outp, \"copy_to_local\", None)):\n continue\n\n basename = \"{}__{}\".format(dep.live_task_id, outp.basename)\n outp.copy_to_local(os.path.join(target_dir, basename), retries=0)\n\n _print(ooffset + \"{} ({})\".format(colored(\"fetched\", \"green\", style=\"bright\"),\n basename), 
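# --- Added sketch (standalone): law's human_bytes returns a (value, unit) pair consumed by
# --- "{:.2f} {}".format(*...) above; a minimal equivalent (the unit labels are an assumption,
# --- law's actual labels may differ):
def human_bytes(n):
    value = float(n)
    for unit in ["bytes", "kB", "MB", "GB", "TB"]:
        if value < 1024 or unit == "TB":
            return value, unit
        value /= 1024

assert "{:.2f} {}".format(*human_bytes(3 * 1024 ** 2)) == "3.00 MB"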
ooffset)\n","repo_name":"riga/law","sub_path":"law/task/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":23714,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"67"} +{"seq_id":"21360791275","text":"\nKILL_CHAIN_ID = \"stix:TTP-af3e707f-2fb9-49e5-8c37-14026ca0a5ff\"\nKILL_CHAIN_NAME = \"LM Cyber Kill Chain\"\nKILL_CHAIN_DEFINER = \"LMCO\"\nKILL_CHAIN_PHASES = [\n {\n \"phase_id\": \"stix:TTP-af1016d6-a744-4ed7-ac91-00fe2272185a\",\n \"name\": \"Reconnaissance\",\n \"ordinality\": \"1\"\n },\n {\n \"phase_id\": \"stix:TTP-445b4827-3cca-42bd-8421-f2e947133c16\",\n \"name\": \"Weaponization\",\n \"ordinality\": \"2\"\n },\n {\n \"phase_id\": \"stix:TTP-79a0e041-9d5f-49bb-ada4-8322622b162d\",\n \"name\": \"Delivery\",\n \"ordinality\": \"3\"\n },\n {\n \"phase_id\": \"stix:TTP-f706e4e7-53d8-44ef-967f-81535c9db7d0\",\n \"name\": \"Exploitation\",\n \"ordinality\": \"4\"\n },\n {\n \"phase_id\": \"stix:TTP-e1e4e3f7-be3b-4b39-b80a-a593cfd99a4f\",\n \"name\": \"Installation\",\n \"ordinality\": \"5\"\n },\n {\n \"phase_id\": \"stix:TTP-d6dc32b9-2538-4951-8733-3cb9ef1daae2\",\n \"name\": \"Command and Control\",\n \"ordinality\": \"6\"\n },\n {\n \"phase_id\": \"stix:TTP-786ca8f9-2d9a-4213-b38e-399af4a2e5d6\",\n \"name\": \"Actions on Objectives\",\n \"ordinality\": \"7\"\n },\n]\n","repo_name":"ukncsc/edge-mod","sub_path":"builder/kill_chain_definition.py","file_name":"kill_chain_definition.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"24765804110","text":"from gurobipy import *\r\nn = Model()\r\n\r\nfrom math import *\r\nfrom scipy.stats import norm\r\nimport pickle\r\nimport os\r\n\r\nimport matplotlib\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\r\nfrom matplotlib.figure import Figure\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import style\r\nstyle.use(\"ggplot\")\r\n\r\ntry:\r\n from tkinter import *\r\n from ttk import *\r\nexcept ImportError:\r\n from tkinter import *\r\n from tkinter.ttk import *\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\n#-----Initilizations for various GUI methods----\r\ninnerList = []\r\ninnerList2 = []\r\ninnerList3 = []\r\ninnerList4 = []\r\nsaveList = []\r\nsaveList2 = []\r\nsaveList3 = []\r\nsaveList4 = []\r\nloadList = []\r\nloadList2 = []\r\nloadList3 = []\r\nloadList4 = []\r\nlistOfMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', \r\n 'August', 'September', 'October', 'November', 'December']\r\n\r\n#-----Part 1, Initial values--------------------------------------------------\r\n#-----Part 1, Initial values--------------------------------------------------\r\n#-----Part 1, Initial values--------------------------------------------------\r\n#-----First GUI window-----\r\nclass SimpleTableInput1(tk.Frame):\r\n def __init__(self, parent, rows, columns):\r\n tk.Frame.__init__(self, parent)\r\n #-----Title-----\r\n root.title(\"Sabre\")\r\n #-----Icon-----\r\n root.wm_iconbitmap(\"OSabreIcon.ico\")\r\n #-----Initializations-----\r\n self._entry = {}\r\n self.rows = rows\r\n self.columns = columns \r\n #-----\"Default\" button-----\r\n self.default = ttk.Button(self, text=\"Default\", command=self.autofill)\r\n self.default.grid(row=9,column=0)\r\n #-----\"Load Values\" button-----\r\n self.loadValues = ttk.Button(self, text=\"Load Values\", \r\n command=self.load_save)\r\n 
self.loadValues.grid(row=9,column=1)\r\n\r\n #-----Default values-----\r\n self.LookUpList=[2019,0.85,9,2,300,20,3000,200]\r\n \r\n #-----register a command to use for validation-----\r\n vcmd = (self.register(self._validate), \"%P\")\r\n\r\n #-----create the table of widgets-----\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row, column+1)\r\n e = tk.Entry(self, validate=\"key\", validatecommand=vcmd)\r\n e.grid(row=row, column=column+1, stick=\"nsew\")\r\n enterYear = tk.Label(self, text=\"Enter the year\")\r\n enterYear.grid(row=0, column=0, sticky=W)\r\n enterRely = tk.Label(self, text=\r\n \"Enter the reliability (must be a value between 0 and 1)\")\r\n enterRely.grid(row=1, column=0, sticky=W)\r\n enterMaxDays = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days allotted for any given day\")\r\n enterMaxDays.grid(row=2, column=0, sticky=W)\r\n enterMinDays = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days allotted for any given day\")\r\n enterMinDays.grid(row=3, column=0, sticky=W)\r\n enterMaxMonth = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days for any given month\")\r\n enterMaxMonth.grid(row=4, column=0, sticky=W)\r\n enterMinMonth = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days for any given month\")\r\n enterMinMonth.grid(row=5, column=0, sticky=W)\r\n enterMaxYear = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days allotted for the year\")\r\n enterMaxYear.grid(row=6, column=0, sticky=W)\r\n enterMinYear = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days allotted for the year\")\r\n enterMinYear.grid(row=7, column=0, sticky=W)\r\n self._entry[index] = e\r\n \r\n #-----adjust column weights so they all expand equally-----\r\n for column in range(self.columns):\r\n self.grid_columnconfigure(column+1, weight=1)\r\n \r\n #-----designate a final, empty row to fill up any extra space-----\r\n self.grid_rowconfigure(rows, weight=1)\r\n \r\n\r\n #-----Return a list containing the data in the table-----\r\n def get(self):\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row, column+1)\r\n innerList.append(float(self._entry[index].get()))\r\n return innerList\r\n \r\n #-----Fill table with default values------\r\n def autofill(self):\r\n for row in range(self.rows):\r\n self._entry[row,1].delete(0, END)\r\n self._entry[row,1].insert(row, str(self.LookUpList[row]))\r\n \r\n #-----\"Load Values\" button function----- \r\n def load_save(self): \r\n loadList = []\r\n subLoadList = []\r\n with open('save_file.db', 'rb') as file:\r\n while True:\r\n try:\r\n loadList=pickle.load(file)\r\n y = len(loadList)\r\n x = y-9\r\n for m in range(8):\r\n subLoadList.append(loadList[0][m+x])\r\n except EOFError:\r\n break\r\n for row in range(self.rows):\r\n self._entry[row,1].delete(0, END)\r\n self._entry[row,1].insert(row, str(subLoadList[row]))\r\n \r\n #-----Perform input validation-----\r\n def _validate(self, P):\r\n #-----Allow only an empty value, or a value that can be converted to \r\n # a float-----\r\n if P.strip() == \"\":\r\n return True\r\n try:\r\n f = float(P)\r\n except ValueError:\r\n self.bell()\r\n return False\r\n return True\r\n\r\nclass Example(tk.Frame):\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n #-----Table-----\r\n self.table = SimpleTableInput1(self, 8, 1)\r\n self.table.pack(side=\"top\")\r\n #-----\"Submit\" button-----\r\n self.submit = 
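# --- Added sketch (needs a display to run): the Tk validation idiom used above. Register a
# --- Python callback, then pass it with the "%P" substitution (the would-be new contents) so
# --- every keystroke is vetted before the Entry accepts it.
import tkinter as tk

def only_float(proposed):
    if proposed.strip() == "":
        return True  # allow clearing the field
    try:
        float(proposed)
        return True
    except ValueError:
        return False

root = tk.Tk()
vcmd = (root.register(only_float), "%P")
entry = tk.Entry(root, validate="key", validatecommand=vcmd)
entry.pack()
# root.mainloop()  # left commented so the sketch does not block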
ttk.Button(self, text=\"Submit\", command=self.on_submit)\r\n self.submit.pack(side='bottom') \r\n #-----\"Save Values\" button-----\r\n self.saveValues = ttk.Button(self, text=\"Save Values\", \r\n command=self.save_values)\r\n self.saveValues.place(x=405,y=193) \r\n\r\n #-----\"Submit\" button function----- \r\n def on_submit(self):\r\n innerList.append(self.table.get())\r\n root.destroy()\r\n \r\n #-----\"Save Values\" button function----- \r\n def save_values(self):\r\n saveList = []\r\n saveList.append(self.table.get())\r\n with open('save_file.db', 'wb+') as file2:\r\n pickle.dump(saveList, file2)\r\n \r\nroot = tk.Tk()\r\nExample(root).pack(side=\"top\")\r\nroot.mainloop()\r\n\r\n#-----Fill in variables with inputs from user-----\r\nfor m in range(8):\r\n if m == 0:\r\n currentYear = int(innerList[m])\r\n elif m == 1:\r\n reliabilityLevel = float(innerList[m])\r\n elif m == 2:\r\n dailyUpperBoundVacationAllotted = int(innerList[m])\r\n elif m == 3:\r\n dailyLowerBoundVacationAllotted = int(innerList[m])\r\n elif m == 4:\r\n userMonthlyUpperBoundVacationAllotted = int(innerList[m])\r\n elif m == 5:\r\n monthlyLowerBoundVacationAllotted = int(innerList[m])\r\n elif m == 6:\r\n userAnnualUpperBoundVacationAllotted = int(innerList[m])\r\n else:\r\n annualLowerBoundVacationAllotted = int(innerList[m])\r\n\r\n\r\n#-----Second GUI window-----\r\nclass SimpleTableInput2(tk.Frame):\r\n def __init__(self, parent, rows, columns):\r\n tk.Frame.__init__(self, parent)\r\n #-----Title-----\r\n root.title(\"Sabre\")\r\n #-----Icon-----\r\n root.wm_iconbitmap(\"OSabreIcon.ico\")\r\n #-----Initializations-----\r\n self._entry = {}\r\n self.rows = rows\r\n self.columns = columns\r\n #-----\"Default\" button-----\r\n self.default = ttk.Button(self, text=\"Default\", command=self.autofill)\r\n self.default.grid(row=6,column=5)\r\n #-----\"Load Values\" button-----\r\n self.loadValues = ttk.Button(self, text=\"Load Values\", \r\n command=self.load_save)\r\n self.loadValues.grid(row=6,column=7)\r\n \r\n #-----Default values-----\r\n self.LookUpList=[\r\n [700,700,700,700,700,700,700,700,700,700,700,700],\r\n [476,432,469,447,465,481,475,482,436,460,480,478],\r\n [49.8,32.1,48.4,35.3,42.4,45.3,47.0,40.0,35.8,41.0,46.7,\r\n 55.7],\r\n [95.8,84.6,98.2,88.7,92.4,97.4,100.0,98.0,86.7,83.2,84.6,\r\n 91.0]]\r\n \r\n #-----register a command to use for validation-----\r\n vcmd = (self.register(self._validate), \"%P\")\r\n\r\n #-----create the table of widgets-----\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n e = tk.Entry(self, validate=\"key\", validatecommand=vcmd)\r\n e.grid(row=row+1, column=column+1, stick=\"nsew\")\r\n labelMonth = tk.Label(self, text=str(listOfMonths[column]))\r\n labelMonth.grid(row=0, column=column+1)\r\n enterLh = tk.Label(self, text=\"Lineholders\")\r\n enterLh.grid(row=1, column=0, sticky=W)\r\n enterCr = tk.Label(self, text=\"Crew Scheduled\")\r\n enterCr.grid(row=2, column=0, sticky=W)\r\n enterAvg = tk.Label(self, text=\"Average Absences\")\r\n enterAvg.grid(row=3, column=0, sticky=W)\r\n enterVar = tk.Label(self, text=\"Variance of Absences\")\r\n enterVar.grid(row=4, column=0, sticky=W)\r\n self._entry[index] = e\r\n \r\n #-----adjust column weights so they all expand equally-----\r\n for column in range(self.columns):\r\n self.grid_columnconfigure(column+1, weight=1)\r\n \r\n #-----designate a final, empty row to fill up any extra space-----\r\n self.grid_rowconfigure(rows+1, weight=1)\r\n \r\n #-----Fill table with 
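# --- Added sketch (standalone; uses a temp file instead of the GUI's save_file.db): the
# --- "Save Values" / "Load Values" buttons above boil down to a pickle round-trip of the
# --- table contents.
import os
import pickle
import tempfile

values = [[2019, 0.85, 9, 2, 300, 20, 3000, 200]]
path = os.path.join(tempfile.gettempdir(), "save_file_demo.db")

with open(path, "wb") as f:
    pickle.dump(values, f)
with open(path, "rb") as f:
    assert pickle.load(f) == values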
default values------ \r\n def autofill(self):\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n self._entry[index].delete(0, END)\r\n self._entry[index].insert(0, str(self.LookUpList[row][column]))\r\n \r\n #-----\"Load Values\" button function----- \r\n def load_save(self):\r\n loadList2 = []\r\n subLoadList = []\r\n with open('save_file2.p', 'rb') as file2:\r\n loadList2=pickle.load(file2)\r\n y = len(loadList2)\r\n x = y-49\r\n for m in range(48):\r\n subLoadList.append(loadList2[0][m+x])\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n self._entry[index].delete(0, END)\r\n self._entry[index].insert(0, str(subLoadList[column+row*12]))\r\n \r\n #-----Return a list of lists, containing the data in the table------\r\n def get(self):\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n innerList2.append(float(self._entry[index].get()))\r\n return innerList2\r\n \r\n #-----Perform input validation-----\r\n def _validate(self, P):\r\n #-----Allow only an empty value, or a value that can be converted to \r\n # a float-----\r\n if P.strip() == \"\":\r\n return True\r\n try:\r\n f = float(P)\r\n except ValueError:\r\n self.bell()\r\n return False\r\n return True\r\n \r\n\r\nclass Example(tk.Frame):\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n #-----Table-----\r\n self.table = SimpleTableInput2(self, 4, 12)\r\n self.table.pack(side=\"top\")\r\n #-----\"Submit\" button-----\r\n self.submit = ttk.Button(self, text=\"Submit\", command=self.on_submit)\r\n self.submit.pack(side=\"bottom\")\r\n #-----\"Save Values\" button-----\r\n self.saveValues = ttk.Button(self, text=\"Save Values\", \r\n command=self.save_values)\r\n self.saveValues.place(x=558,y=130)\r\n \r\n #-----\"Submit\" button function----- \r\n def on_submit(self):\r\n innerList2.append(self.table.get())\r\n root.destroy()\r\n \r\n #-----\"Save Values\" button function----- \r\n def save_values(self):\r\n saveList2 = []\r\n saveList2.append(self.table.get())\r\n with open('save_file2.p', 'wb+') as file2:\r\n pickle.dump(saveList2, file2)\r\n\r\nroot = tk.Tk()\r\nExample(root).pack(side=\"top\")\r\nroot.geometry('1000x158')\r\nroot.mainloop()\r\n\r\n#-----Part 2, First optimization----------------------------------------------\r\n#-----Part 2, First optimization----------------------------------------------\r\n#-----Part 2, First optimization----------------------------------------------\r\ninverseNormalReliability = norm.ppf(reliabilityLevel)\r\n\r\n#-----Indexing for filling in variables based on user data \r\n# from the second GUI window-----\r\nMonths = range(12)\r\nListAppend1 = [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]\r\nListAppend2 = [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\r\nListAppend3 = [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]\r\n\r\n#-----Range for February based on the year----- \r\nif currentYear%4==0:\r\n FebruaryLength = range(29)\r\n daysInFeb = 29\r\n numberOfDays = 366\r\nelse:\r\n FebruaryLength = range(28)\r\n daysInFeb = 28\r\n numberOfDays = 365\r\n\r\n#-----Range for months that are 30 days----- \r\nShortMonthLength = range(30)\r\n#-----Range for months that are 31 days----- \r\nLongMonthLength = range(31)\r\n\r\ndaysInMonths = [31, daysInFeb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n\r\ndaysInYear = [LongMonthLength, FebruaryLength, LongMonthLength, \r\n ShortMonthLength, LongMonthLength, 
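# --- Added aside (standalone): the currentYear % 4 == 0 test above misses the Gregorian
# --- century rule (1900 and 2100 are not leap years even though they are divisible by 4);
# --- calendar.isleap implements the full rule and would be the safer check here.
import calendar

assert calendar.isleap(2020) and not calendar.isleap(2019)
assert calendar.isleap(2000) and not calendar.isleap(2100)

days_in_feb = 29 if calendar.isleap(2019) else 28  # 28, matching the record's 2019 default
assert days_in_feb == 28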
ShortMonthLength, \r\n              LongMonthLength, LongMonthLength, ShortMonthLength, \r\n              LongMonthLength, ShortMonthLength, LongMonthLength]\r\n\r\n#-----Monthly line holders, crew scheduled, averages provided, \r\n# variances provided, and the difference between monthlyLineHolders \r\n# and monthlyCrew-----\r\nmonthlyLineHolders = [] \r\nfor m in range(12):\r\n    monthlyLineHolders.append(int(innerList2[m])) \r\n    \r\n\r\nmonthlyCrewScheduled = [] \r\nfor m in ListAppend1:\r\n    monthlyCrewScheduled.append(int(innerList2[m])) \r\n\r\n\r\naverageMonthlyAbsences = []\r\nfor m in ListAppend2:\r\n    averageMonthlyAbsences.append(float(innerList2[m])) \r\n\r\n\r\nvarOfMonthlyExpectedAbsences = []\r\nfor m in ListAppend3:\r\n    varOfMonthlyExpectedAbsences.append(float(innerList2[m])) \r\n\r\n#-----Calculating the standard deviations based on given variances----- \r\nstdDevMonthlyExpectedAbsences = []\r\nfor m in Months:\r\n    stdDevOfAbsences = sqrt(varOfMonthlyExpectedAbsences[m])\r\n    stdDevMonthlyExpectedAbsences.append(stdDevOfAbsences)\r\n\r\n#-----Calculating the expected absences for each month based on the average \r\n# absences, std. dev. of absences, and the reliability level-----\r\nexpectedMonthlyAbsences = []\r\nfor m in Months:\r\n    expectedAbsences = inverseNormalReliability * stdDevMonthlyExpectedAbsences[m] + averageMonthlyAbsences[m]\r\n    expectedMonthlyAbsences.append(expectedAbsences)\r\n\r\n#-----By subtracting crew scheduled from line holders, we're left with a \r\n# quantity that represents vacation days plus expected absences-----\r\nvacationAllottedPlusExpectedAbsences = [(monthlyLineHolders[m]-monthlyCrewScheduled[m]) for m in Months]\r\n\r\n#-----Monthly upper bound: expected absences subtracted from the \r\n# vacation-days-plus-expected-absences quantity above-----\r\nmonthlyUpperBoundVacationAllotted = []\r\nfor m in Months:\r\n    vacationAllotted = trunc(vacationAllottedPlusExpectedAbsences[m] - expectedMonthlyAbsences[m])\r\n    monthlyUpperBoundVacationAllotted.append(vacationAllotted)\r\n    \r\n#-----Calculated upper bound for the year-----\r\nannualUpperBoundVacationAllotted = quicksum(monthlyUpperBoundVacationAllotted[m] for m in Months)\r\n\r\nn.update()\r\n\r\n#-----Decision variable-----\r\nvacationVariable = []\r\nfor m in Months:\r\n    vacationVariable.append(n.addVars(daysInMonths[m], \r\n                                      lb = dailyLowerBoundVacationAllotted, \r\n                                      ub = dailyUpperBoundVacationAllotted, \r\n                                      obj = 1, name = \"Month\" + str(m)))\r\n\r\n#-----Implementing monthly constraints-----\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() <= monthlyUpperBoundVacationAllotted[m], \r\n                name = \"monthlyUpperBoundVacationAllottedForMonth\" + str(m))\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() <= userMonthlyUpperBoundVacationAllotted, \r\n                name = \"userMonthlyUpperBoundVacationAllottedForMonth\" + str(m))\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() >= monthlyLowerBoundVacationAllotted, \r\n                name = \"monthlyLowerBoundVacationAllotted\" + str(m)) \r\n\r\n#-----Calculating the objective value-----\r\ntotalObjectiveValue = 0\r\nfor m in Months:\r\n    objectiveValue = vacationVariable[m].sum()\r\n    totalObjectiveValue = totalObjectiveValue + objectiveValue\r\n\r\n#-----Implementing annual constraints-----\r\nn.addConstr(totalObjectiveValue <= annualUpperBoundVacationAllotted, \r\n            name = \"TotannualUpperBoundVacationAllotted\") \r\nn.addConstr(totalObjectiveValue <= userAnnualUpperBoundVacationAllotted, \r\n            name = \"userTotannualUpperBoundVacationAllotted\") \r\nn.addConstr(totalObjectiveValue >= annualLowerBoundVacationAllotted, \r\n            name = \"TotannualLowerBoundVacationAllotted\")\r\n\r\n
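#-----(Editorial recap, not in the original source) The model being built is:\r\n# maximize sum over months m and days d of x[m][d], with each daily variable\r\n# bounded by [dailyLowerBoundVacationAllotted, dailyUpperBoundVacationAllotted],\r\n# the three families of monthly constraints above, and the three annual\r\n# constraints just added; each data-driven cap comes from the service-level\r\n# formula expected = mean + norm.ppf(reliability) * std_dev-----\r\n\r\n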
\"userTotannualUpperBoundVacationAllotted\") \r\nn.addConstr(totalObjectiveValue >= annualLowerBoundVacationAllotted, \r\n name = \"TotannualLowerBoundVacationAllotted\")\r\n\r\n#-----Objective function-----\r\nfor m in Months:\r\n n.setObjective(totalObjectiveValue, GRB.MAXIMIZE)\r\n\r\n#-----Execute optimization-----\r\nn.update()\r\nn.optimize()\r\n\r\n#-----Creating files with the deatils of the formulation and optimization for \r\n# further inspection/review-----\r\nn.write('Solution.sol')\r\nn.write(\"LP.lp\")\r\nn.write(\"MPS.mps\")\r\n\r\n#-----Scatter plot-----\r\nstandRelyLevels = [.5,.52,.54,.56,.58,.60,.62,.64,.66,.68,.70,.72,.74,.76,.78,\r\n .80,.82,.84,.86,.88,.90,.92,.94,.96,.98]\r\nstandInverseNorm =[]\r\n\r\nfor l in range(25):\r\n tempCalculation = 0\r\n tempCalculation = norm.ppf(standRelyLevels[l])\r\n standInverseNorm.append(tempCalculation)\r\n \r\nnewDevAndVacation = []\r\n\r\nfor m in Months:\r\n newDevAndVacation.append(vacationAllottedPlusExpectedAbsences[m]-averageMonthlyAbsences[m])\r\n \r\nscatterPlotValues = []\r\n \r\nfor l in range(25):\r\n tempTotalList = []\r\n for m in Months:\r\n tempCalculation = newDevAndVacation[m] - stdDevMonthlyExpectedAbsences[m]*standInverseNorm[l]\r\n tempTotalList.append(tempCalculation)\r\n if tempTotalList[m] > userMonthlyUpperBoundVacationAllotted:\r\n tempTotalList[m] = userMonthlyUpperBoundVacationAllotted\r\n else:\r\n pass\r\n if tempTotalList[m] < monthlyLowerBoundVacationAllotted:\r\n tempTotalList[m] = monthlyLowerBoundVacationAllotted\r\n else:\r\n pass\r\n if tempTotalList[m] > dailyUpperBoundVacationAllotted*daysInMonths[m]:\r\n tempTotalList[m] = dailyUpperBoundVacationAllotted*daysInMonths[m]\r\n else:\r\n pass\r\n if tempTotalList[m] < dailyLowerBoundVacationAllotted*daysInMonths[m]:\r\n tempTotalList[m] = dailyLowerBoundVacationAllotted*daysInMonths[m]\r\n else:\r\n pass\r\n if len(tempTotalList) == 12:\r\n tempCalculation = sum(tempTotalList)\r\n scatterPlotValues.append(trunc(tempCalculation))\r\n else:\r\n pass\r\n if scatterPlotValues[l] > userAnnualUpperBoundVacationAllotted:\r\n scatterPlotValues[l] = userAnnualUpperBoundVacationAllotted\r\n else:\r\n pass\r\n if scatterPlotValues[l] < annualLowerBoundVacationAllotted: \r\n scatterPlotValues[l] = annualLowerBoundVacationAllotted\r\n else:\r\n pass \r\n\r\n#-----Initializing lists for extracting the post-optimization decision \r\n# variable values-----\r\ndecisionVariables = []\r\noptimalValues = []\r\njanOptimalVs = []\r\nfebOptimalVs = []\r\nmarOptimalVs = []\r\naprOptimalVs = []\r\nmayOptimalVs = []\r\njunOptimalVs = []\r\njulOptimalVs = []\r\naugOptimalVs = []\r\nsepOptimalVs = []\r\noctOptimalVs = []\r\nnovOptimalVs = []\r\ndecOptimalVs = []\r\n\r\n#-----Extracting a clean list of the decision variables-----\r\nfor l in n.getVars():\r\n decisionVariables.append(int(l.x))\r\n\r\n#-----Seperating the decision variables into months-----\r\nif numberOfDays == 366:\r\n for l in range(366):\r\n if 0<=l<=30: \r\n janOptimalVs.append(decisionVariables[l])\r\n elif 31<=l<=59:\r\n febOptimalVs.append(decisionVariables[l])\r\n elif 60<=l<=90:\r\n marOptimalVs.append(decisionVariables[l])\r\n elif 91<=l<=120:\r\n aprOptimalVs.append(decisionVariables[l])\r\n elif 121<=l<=151:\r\n mayOptimalVs.append(decisionVariables[l])\r\n elif 152<=l<=181:\r\n junOptimalVs.append(decisionVariables[l])\r\n elif 182<=l<=212:\r\n julOptimalVs.append(decisionVariables[l])\r\n elif 213<=l<=243:\r\n augOptimalVs.append(decisionVariables[l])\r\n elif 244<=l<=273:\r\n 
#-----Separating the decision variables into months-----\r\nif numberOfDays == 366:\r\n    for l in range(366):\r\n        if 0<=l<=30: \r\n            janOptimalVs.append(decisionVariables[l])\r\n        elif 31<=l<=59:\r\n            febOptimalVs.append(decisionVariables[l])\r\n        elif 60<=l<=90:\r\n            marOptimalVs.append(decisionVariables[l])\r\n        elif 91<=l<=120:\r\n            aprOptimalVs.append(decisionVariables[l])\r\n        elif 121<=l<=151:\r\n            mayOptimalVs.append(decisionVariables[l])\r\n        elif 152<=l<=181:\r\n            junOptimalVs.append(decisionVariables[l])\r\n        elif 182<=l<=212:\r\n            julOptimalVs.append(decisionVariables[l])\r\n        elif 213<=l<=243:\r\n            augOptimalVs.append(decisionVariables[l])\r\n        elif 244<=l<=273:\r\n            sepOptimalVs.append(decisionVariables[l])\r\n        elif 274<=l<=304:\r\n            octOptimalVs.append(decisionVariables[l])\r\n        elif 305<=l<=334:\r\n            novOptimalVs.append(decisionVariables[l])\r\n        else:\r\n            decOptimalVs.append(decisionVariables[l])\r\n#-----Making each list 31 values long-----\r\n    febOptimalVs.insert(29, 0)\r\n    febOptimalVs.insert(30, 0)\r\n    aprOptimalVs.insert(30, 0)\r\n    junOptimalVs.insert(30, 0)\r\n    sepOptimalVs.insert(30, 0)\r\n    novOptimalVs.insert(30, 0)\r\n    \r\nelif numberOfDays == 365:\r\n    for l in range(365):\r\n        if 0<=l<=30: \r\n            janOptimalVs.append(decisionVariables[l])\r\n        elif 31<=l<=58:\r\n            febOptimalVs.append(decisionVariables[l])\r\n        elif 59<=l<=89:\r\n            marOptimalVs.append(decisionVariables[l])\r\n        elif 90<=l<=119:\r\n            aprOptimalVs.append(decisionVariables[l])\r\n        elif 120<=l<=150:\r\n            mayOptimalVs.append(decisionVariables[l])\r\n        elif 151<=l<=180:\r\n            junOptimalVs.append(decisionVariables[l])\r\n        elif 181<=l<=211:\r\n            julOptimalVs.append(decisionVariables[l])\r\n        elif 212<=l<=242:\r\n            augOptimalVs.append(decisionVariables[l])\r\n        elif 243<=l<=272:\r\n            sepOptimalVs.append(decisionVariables[l])\r\n        elif 273<=l<=303:\r\n            octOptimalVs.append(decisionVariables[l])\r\n        elif 304<=l<=333:\r\n            novOptimalVs.append(decisionVariables[l])\r\n        else:\r\n            decOptimalVs.append(decisionVariables[l])\r\n\r\n    febOptimalVs.insert(28, 0)\r\n    febOptimalVs.insert(29, 0)\r\n    febOptimalVs.insert(30, 0)\r\n    aprOptimalVs.insert(30, 0)\r\n    junOptimalVs.insert(30, 0)\r\n    sepOptimalVs.insert(30, 0)\r\n    novOptimalVs.insert(30, 0) \r\n\r\n#-----List of lists containing the decision variables separated by month-----\r\noptimalValues = [janOptimalVs, febOptimalVs, marOptimalVs, aprOptimalVs, \r\n                 mayOptimalVs, junOptimalVs, julOptimalVs, augOptimalVs, \r\n                 sepOptimalVs, octOptimalVs, novOptimalVs, decOptimalVs]\r\n\r\n#-----Initialization for totaling the decision variables for each month-----\r\njanTotal = 0\r\nfebTotal = 0\r\nmarTotal = 0\r\naprTotal = 0\r\nmayTotal = 0\r\njunTotal = 0\r\njulTotal = 0\r\naugTotal = 0\r\nsepTotal = 0\r\noctTotal = 0\r\nnovTotal = 0\r\ndecTotal = 0\r\n\r\n#-----Totaling the decision variables for each month-----\r\nfor m in Months:\r\n    for l in range(31):\r\n        if m == 0:\r\n            janTotal += janOptimalVs[l]\r\n        elif m == 1:\r\n            febTotal += febOptimalVs[l]\r\n        elif m == 2:\r\n            marTotal += marOptimalVs[l]\r\n        elif m == 3:\r\n            aprTotal += aprOptimalVs[l]\r\n        elif m == 4:\r\n            mayTotal += mayOptimalVs[l]\r\n        elif m == 5:\r\n            junTotal += junOptimalVs[l]\r\n        elif m == 6:\r\n            julTotal += julOptimalVs[l]\r\n        elif m == 7:\r\n            augTotal += augOptimalVs[l]\r\n        elif m == 8:\r\n            sepTotal += sepOptimalVs[l]\r\n        elif m == 9:\r\n            octTotal += octOptimalVs[l]\r\n        elif m == 10:\r\n            novTotal += novOptimalVs[l]\r\n        else:\r\n            decTotal += decOptimalVs[l]\r\n    \r\n#-----List of monthly totals-----\r\noptimalValuesMonths = [janTotal, febTotal, marTotal, aprTotal, mayTotal, \r\n                       junTotal, julTotal, augTotal, sepTotal, octTotal, \r\n                       novTotal, decTotal]\r\ntempTotal = 0\r\nfor m in Months:\r\n    tempTotal += optimalValuesMonths[m]\r\n\r\n#-----Part 3, First Menu-----------------------------------------------------\r\n#-----Part 3, First Menu-----------------------------------------------------\r\n#-----Part 3, First Menu-----------------------------------------------------\r\n#-----First Menu-----\r\nclass SaberApp(tk.Tk):\r\n    def __init__(self, *args, **kwargs):\r\n        root.__init__(self, *args, **kwargs)\r\n        container = tk.Frame(self)\r\n        container.pack(side='top', fill='both', expand='true')\r\n        \r\n        
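# Note (editorial): by the time SaberApp() is constructed below, the name\r\n        # root has been rebound to the tk.Tk class itself, so root.__init__ above\r\n        # and the root.iconbitmap/root.wm_title calls that follow resolve to\r\n        # unbound tk.Tk methods called with this instance as self.\r\n        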
#-----Icon-----\r\n        root.iconbitmap(self, \"OSabreIcon.ico\")\r\n        root.wm_title(self, \"Sabre\")\r\n        \r\n        container.grid_rowconfigure(0, weight=1)\r\n        container.grid_columnconfigure(0, weight=1)\r\n\r\n        self.frames = {}\r\n        \r\n        for F in (StartPage, Histogram, Scatterplot):\r\n\r\n            frame = F(container, self)\r\n            \r\n            self.frames[F] = frame\r\n            \r\n            frame.grid(row=0, column=0, sticky='nsew')\r\n        \r\n        self.show_frame(StartPage)\r\n        \r\n    def show_frame(self, cont):\r\n        frame = self.frames[cont]\r\n        frame.tkraise()\r\n    \r\nclass StartPage(tk.Frame):\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        label = tk.Label(self, text=\"Menu\")\r\n        label.pack(side = 'top', pady=10, padx=10)\r\n        \r\n        \r\n        self.HistButton = ttk.Button(self, text='Bar Chart', command=lambda: controller.show_frame(Histogram))\r\n        self.HistButton.pack(side='top')\r\n        \r\n        self.ScatterButton = ttk.Button(self, text='Scatter Plot', command=lambda: controller.show_frame(Scatterplot))\r\n        self.ScatterButton.pack(side='top', pady=10)\r\n        \r\n        self.ScatterButton = ttk.Button(self, text='Write Results', command=self.Writefile)\r\n        self.ScatterButton.pack(side='top')\r\n        \r\n        self.Opt2Button = ttk.Button(self, text='  Secondary\\nOptimization', command=self.SecondOpt)\r\n        self.Opt2Button.pack(side='top', pady=10)\r\n        \r\n        self.ExitButton = ttk.Button(self, text='Exit Program', command=self.Exit)\r\n        self.ExitButton.pack(side='top')\r\n\r\n    def Writefile(self):\r\n        with open('Optimization Results.txt', 'w') as file3:\r\n            file3.write(\"Results of the initial optimization.\\n\\nThe total number of vacation days for the year is \")\r\n            file3.write(str(tempTotal))\r\n            file3.write(\".\\n\\n\")\r\n            for m in Months:\r\n                file3.write(\"The optimal value for \")\r\n                file3.write(listOfMonths[m])\r\n                file3.write(\" is \")\r\n                file3.write(str(optimalValuesMonths[m]))\r\n                file3.write(\".\\n\")\r\n        \r\n    def SecondOpt(self):\r\n        App.destroy() \r\n        \r\n    def Exit(self):\r\n        os._exit(1)\r\n    \r\nclass Histogram(tk.Frame):\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        label = tk.Label(self, text=\"Bar Chart\")\r\n        label.pack(side = 'top', pady=10, padx=10)\r\n        \r\n        self.MenuButton = ttk.Button(self, text='Menu', command=lambda: controller.show_frame(StartPage))\r\n        self.MenuButton.pack(side='top')\r\n        \r\n        f = Figure(figsize = (8,5), dpi = 100)\r\n        a = f.add_subplot(111)\r\n        \r\n        xList = []\r\n        yList = []\r\n        for m in Months:\r\n            xList.append(m+1)\r\n            yList.append(int(optimalValuesMonths[m]))\r\n        a.clear()\r\n        a.bar(xList, yList, label = \"Initial Optimization\")\r\n        \r\n        a.legend(bbox_to_anchor=(0, 1.08, 2, .102), loc=3,\r\n                 ncol=2, borderaxespad=0)\r\n        \r\n        canvas = FigureCanvasTkAgg(f, self)\r\n        canvas.show()\r\n        canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n        \r\n        toolbar = NavigationToolbar2TkAgg(canvas, self)\r\n        toolbar.update()\r\n        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n        \r\n        a.set_title(\"Vacation Days per Month\")\r\n        a.set_xlabel(\"Month\")\r\n        a.set_ylabel(\"Vacation Days\")\r\n    \r\nclass Scatterplot(tk.Frame):\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        label = tk.Label(self, text=\"Scatter Plot\")\r\n        label.pack(side = 'top', pady=10, padx=10)\r\n        \r\n        self.MenuButton = ttk.Button(self, text='Menu', command=lambda: controller.show_frame(StartPage))\r\n        self.MenuButton.pack(side='top')\r\n        \r\n        f = Figure(figsize = (8,5), dpi = 100)\r\n        a = f.add_subplot(111)\r\n        \r\n        
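# (Editorial note: standRelyLevels and scatterPlotValues were computed right\r\n        # after the first optimization; the plot pairs each of the 25 reliability\r\n        # levels with its capped annual vacation total.)\r\n        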
a.clear()\r\n a.scatter(standRelyLevels, scatterPlotValues, label = \"Initial Optimization\")\r\n \r\n a.legend(bbox_to_anchor=(0, 1.08, 1, .102), loc=3,\r\n ncol=2, borderaxespad=0)\r\n \r\n canvas = FigureCanvasTkAgg(f, self)\r\n canvas.show()\r\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n toolbar = NavigationToolbar2TkAgg(canvas, self)\r\n toolbar.update()\r\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n a.set_title(\"Annual Vacation Days vs. Reliability Level\")\r\n a.set_xlabel(\"Reliability Level\")\r\n a.set_ylabel(\"Annual Vacation Days\") \r\n \r\nroot = tk.Tk\r\nApp = SaberApp()\r\nApp.mainloop()\r\n\r\n#-----Output GUI window-----\r\n\"\"\"\r\nclass SimpleTableInput(tk.Frame):\r\n def __init__(self, parent, rows, columns):\r\n tk.Frame.__init__(self, parent)\r\n #-----Title----- \r\n root.title(\"Sabre\")\r\n #-----Icon-----\r\n root.wm_iconbitmap(\"OSabreIcon.ico\")\r\n #-----Initializations----- \r\n self._entry = {}\r\n self.rows = rows\r\n self.columns = columns\r\n\r\n #-----Create the table of widgets-----\r\n for column in range(self.columns):\r\n for row in range(self.rows):\r\n #------Month Labels---------\r\n labelMonth = tk.Label(self, text=str(listOfMonths[column]))\r\n labelMonth.grid(row=0, column=column+1)\r\n \r\n #------Display Vars---------\r\n index = (row+1, column)\r\n e = tk.Label(self, text = str(optimalValues[column][row]))\r\n e.grid(row=row+1, column=column+1, stick=\"nsew\")\r\n self._entry[index] = e\r\n \r\n #-----Adjust column weights so they all expand equally-----\r\n for column in range(self.columns):\r\n self.grid_columnconfigure(column+1, weight=1)\r\n \r\n #-----Designate a final, empty row to fill up any extra space-----\r\n self.grid_rowconfigure(rows+1, weight=1)\r\n \r\n\r\nclass Example(tk.Frame):\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n #-----Table-----\r\n self.table = SimpleTableInput(self, 31, 12)\r\n self.table.pack(side=\"top\")\r\n \r\nroot = tk.Tk()\r\nExample(root).pack(side=\"top\")\r\nroot.mainloop()\r\n\"\"\"\r\n\r\n#-----Part 4, Secondary values-----------------------------------------------\r\n#-----Part 4, Secondary values-----------------------------------------------\r\n#-----Part 4, Secondary values-----------------------------------------------\r\n#-----Second Parameter GUI window-----\r\nclass SimpleTableInput3(tk.Frame):\r\n def __init__(self, parent, rows, columns):\r\n tk.Frame.__init__(self, parent)\r\n #-----Title-----\r\n root.title(\"Sabre\")\r\n #-----Icon-----\r\n root.wm_iconbitmap(\"OSabreIcon.ico\")\r\n #-----Initializations-----\r\n self._entry = {}\r\n self.rows = rows\r\n self.columns = columns \r\n #-----\"Default\" button-----\r\n self.default = ttk.Button(self, text=\"Default\", command=self.autofill)\r\n self.default.grid(row=9,column=0)\r\n #-----\"Load Values\" button-----\r\n self.loadValues = ttk.Button(self, text=\"Load Values\", \r\n command=self.load_save)\r\n self.loadValues.grid(row=9,column=1)\r\n\r\n #-----Default values-----\r\n self.LookUpList=[2019,0.85,9,2,300,20,3000,200]\r\n \r\n #-----register a command to use for validation-----\r\n vcmd = (self.register(self._validate), \"%P\")\r\n\r\n #-----create the table of widgets-----\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row, column+1)\r\n e = tk.Entry(self, validate=\"key\", validatecommand=vcmd)\r\n e.grid(row=row, column=column+1, stick=\"nsew\")\r\n enterYear = tk.Label(self, text=\"Enter the 
year\")\r\n enterYear.grid(row=0, column=0, sticky=W)\r\n enterRely = tk.Label(self, text=\r\n \"Enter the reliability (must be a value between 0 and 1)\")\r\n enterRely.grid(row=1, column=0, sticky=W)\r\n enterMaxDays = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days allotted for any given day\")\r\n enterMaxDays.grid(row=2, column=0, sticky=W)\r\n enterMinDays = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days allotted for any given day\")\r\n enterMinDays.grid(row=3, column=0, sticky=W)\r\n enterMaxMonth = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days for any given month\")\r\n enterMaxMonth.grid(row=4, column=0, sticky=W)\r\n enterMinMonth = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days for any given month\")\r\n enterMinMonth.grid(row=5, column=0, sticky=W)\r\n enterMaxYear = tk.Label(self, text=\r\n \"Enter the maximum number of vacation days allotted for the year\")\r\n enterMaxYear.grid(row=6, column=0, sticky=W)\r\n enterMinYear = tk.Label(self, text=\r\n \"Enter the minimum number of vacation days allotted for the year\")\r\n enterMinYear.grid(row=7, column=0, sticky=W)\r\n self._entry[index] = e\r\n \r\n #-----adjust column weights so they all expand equally-----\r\n for column in range(self.columns):\r\n self.grid_columnconfigure(column+1, weight=1)\r\n \r\n #-----designate a final, empty row to fill up any extra space-----\r\n self.grid_rowconfigure(rows, weight=1)\r\n \r\n\r\n #-----Return a list containing the data in the table-----\r\n def get(self):\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row, column+1)\r\n innerList3.append(float(self._entry[index].get()))\r\n return innerList3\r\n \r\n #-----Fill table with default values------\r\n def autofill(self):\r\n for row in range(self.rows):\r\n self._entry[row,1].delete(0, END)\r\n self._entry[row,1].insert(row, str(self.LookUpList[row]))\r\n \r\n #-----\"Load Values\" button function----- \r\n def load_save(self): \r\n loadList3 = []\r\n subLoadList =[]\r\n with open('save_file3.db', 'rb') as file:\r\n while True:\r\n try:\r\n loadList3=pickle.load(file)\r\n y = len(loadList3)\r\n x = y-9\r\n for m in range(8):\r\n subLoadList.append(loadList3[0][m+x])\r\n except EOFError:\r\n break\r\n for row in range(self.rows):\r\n self._entry[row,1].delete(0, END)\r\n self._entry[row,1].insert(row, str(subLoadList[row]))\r\n \r\n #-----Perform input validation-----\r\n def _validate(self, P):\r\n #-----Allow only an empty value, or a value that can be converted to \r\n # a float-----\r\n if P.strip() == \"\":\r\n return True\r\n try:\r\n f = float(P)\r\n except ValueError:\r\n self.bell()\r\n return False\r\n return True\r\n\r\nclass Example(tk.Frame):\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n #-----Table-----\r\n self.table = SimpleTableInput3(self, 8, 1)\r\n self.table.pack(side=\"top\")\r\n #-----\"Submit\" button-----\r\n self.submit = ttk.Button(self, text=\"Submit\", command=self.on_submit)\r\n self.submit.pack(side='bottom') \r\n #-----\"Save Values\" button-----\r\n self.saveValues = ttk.Button(self, text=\"Save Values\", \r\n command=self.save_values)\r\n self.saveValues.place(x=405,y=193) \r\n \r\n #-----\"Submit\" button function----- \r\n def on_submit(self):\r\n innerList3.append(self.table.get())\r\n root.destroy()\r\n \r\n #-----\"Save Values\" button function----- \r\n def save_values(self):\r\n saveList3 = []\r\n saveList3.append(self.table.get())\r\n with 
open('save_file3.db', 'wb+') as file2:\r\n pickle.dump(saveList3, file2)\r\n \r\nroot = tk.Tk()\r\nExample(root).pack(side=\"top\")\r\nroot.mainloop()\r\n\r\n#-----Fill in variables with inputs from user-----\r\nfor m in range(8):\r\n if m == 0:\r\n currentYear2 = int(innerList3[m])\r\n elif m == 1:\r\n reliabilityLevel2 = float(innerList3[m])\r\n elif m == 2:\r\n dailyUpperBoundVacationAllotted2 = int(innerList3[m])\r\n elif m == 3:\r\n dailyLowerBoundVacationAllotted2 = int(innerList3[m])\r\n elif m == 4:\r\n userMonthlyUpperBoundVacationAllotted2 = int(innerList3[m])\r\n elif m == 5:\r\n monthlyLowerBoundVacationAllotted2 = int(innerList3[m])\r\n elif m == 6:\r\n userAnnualUpperBoundVacationAllotted2 = int(innerList3[m])\r\n else:\r\n annualLowerBoundVacationAllotted2 = int(innerList3[m])\r\n\r\n\r\n#-----Second Crew GUI window-----\r\nclass SimpleTableInput4(tk.Frame):\r\n def __init__(self, parent, rows, columns):\r\n tk.Frame.__init__(self, parent)\r\n #-----Title-----\r\n root.title(\"Sabre\")\r\n #-----Icon-----\r\n root.wm_iconbitmap(\"OSabreIcon.ico\")\r\n #-----Initializations-----\r\n self._entry = {}\r\n self.rows = rows\r\n self.columns = columns\r\n #-----\"Default\" button-----\r\n self.default = ttk.Button(self, text=\"Default\", command=self.autofill)\r\n self.default.grid(row=6,column=5)\r\n #-----\"Load Values\" button-----\r\n self.loadValues = ttk.Button(self, text=\"Load Values\", \r\n command=self.load_save)\r\n self.loadValues.grid(row=6,column=7)\r\n \r\n #-----Default values-----\r\n self.LookUpList=[\r\n [700,700,700,700,700,700,700,700,700,700,700,700],\r\n [476,432,469,447,465,481,475,482,436,460,480,478],\r\n [49.8,32.1,48.4,35.3,42.4,45.3,47.0,40.0,35.8,41.0,46.7,\r\n 55.7],\r\n [95.8,84.6,98.2,88.7,92.4,97.4,100.0,98.0,86.7,83.2,84.6,\r\n 91.0]]\r\n \r\n #-----register a command to use for validation-----\r\n vcmd = (self.register(self._validate), \"%P\")\r\n\r\n #-----create the table of widgets-----\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n e = tk.Entry(self, validate=\"key\", validatecommand=vcmd)\r\n e.grid(row=row+1, column=column+1, stick=\"nsew\")\r\n labelMonth = tk.Label(self, text=str(listOfMonths[column]))\r\n labelMonth.grid(row=0, column=column+1)\r\n enterLh = tk.Label(self, text=\"Lineholders\")\r\n enterLh.grid(row=1, column=0, sticky=W)\r\n enterCr = tk.Label(self, text=\"Crew Scheduled\")\r\n enterCr.grid(row=2, column=0, sticky=W)\r\n enterAvg = tk.Label(self, text=\"Average Absences\")\r\n enterAvg.grid(row=3, column=0, sticky=W)\r\n enterVar = tk.Label(self, text=\"Variance of Absences\")\r\n enterVar.grid(row=4, column=0, sticky=W)\r\n self._entry[index] = e\r\n \r\n #-----adjust column weights so they all expand equally-----\r\n for column in range(self.columns):\r\n self.grid_columnconfigure(column+1, weight=1)\r\n \r\n #-----designate a final, empty row to fill up any extra space-----\r\n self.grid_rowconfigure(rows+1, weight=1)\r\n \r\n #-----Fill table with default values------ \r\n def autofill(self):\r\n for row in range(self.rows):\r\n for column in range(self.columns):\r\n index = (row+1, column+1)\r\n self._entry[index].delete(0, END)\r\n self._entry[index].insert(0, str(self.LookUpList[row][column]))\r\n \r\n #-----\"Load Values\" button function----- \r\n def load_save(self):\r\n loadList4 = []\r\n subLoadList = []\r\n with open('save_file4.p', 'rb') as file2:\r\n loadList4=pickle.load(file2)\r\n y = len(loadList4)\r\n x = y-49\r\n for m in range(48):\r\n 
subLoadList.append(loadList4[0][m+x])\r\n        for row in range(self.rows):\r\n            for column in range(self.columns):\r\n                index = (row+1, column+1)\r\n                self._entry[index].delete(0, END)\r\n                self._entry[index].insert(0, str(subLoadList[column+row*12]))\r\n    \r\n    #-----Return a flat list containing the data in the table------\r\n    def get(self):\r\n        for row in range(self.rows):\r\n            for column in range(self.columns):\r\n                index = (row+1, column+1)\r\n                innerList4.append(float(self._entry[index].get()))\r\n        return innerList4\r\n    \r\n    #-----Perform input validation-----\r\n    def _validate(self, P):\r\n        #-----Allow only an empty value, or a value that can be converted to \r\n        # a float-----\r\n        if P.strip() == \"\":\r\n            return True\r\n        try:\r\n            f = float(P)\r\n        except ValueError:\r\n            self.bell()\r\n            return False\r\n        return True\r\n    \r\n\r\nclass Example(tk.Frame):\r\n    def __init__(self, parent):\r\n        tk.Frame.__init__(self, parent)\r\n        #-----Table-----\r\n        self.table = SimpleTableInput4(self, 4, 12)\r\n        self.table.pack(side=\"top\")\r\n        #-----\"Submit\" button-----\r\n        self.submit = ttk.Button(self, text=\"Submit\", command=self.on_submit)\r\n        self.submit.pack(side=\"bottom\")\r\n        #-----\"Save Values\" button-----\r\n        self.saveValues = ttk.Button(self, text=\"Save Values\", \r\n                                     command=self.save_values)\r\n        self.saveValues.place(x=558,y=130)\r\n    \r\n    #-----\"Submit\" button function----- \r\n    def on_submit(self):\r\n        innerList4.append(self.table.get())\r\n        root.destroy()\r\n    \r\n    #-----\"Save Values\" button function----- \r\n    def save_values(self):\r\n        saveList4 = []\r\n        saveList4.append(self.table.get())\r\n        with open('save_file4.p', 'wb+') as file2:\r\n            pickle.dump(saveList4, file2)\r\n\r\nroot = tk.Tk()\r\nExample(root).pack(side=\"top\")\r\nroot.geometry('1000x158')\r\nroot.mainloop()\r\n\r\n#-----Part 5, Second optimization--------------------------------------------\r\n#-----Part 5, Second optimization--------------------------------------------\r\n#-----Part 5, Second optimization--------------------------------------------\r\ninverseNormalReliability = norm.ppf(reliabilityLevel2)\r\n\r\n#-----Range for February based on the year----- \r\nif currentYear2%4==0:\r\n    FebruaryLength2 = range(29)\r\n    daysInFeb2 = 29\r\n    numberOfDays2 = 366\r\nelse:\r\n    FebruaryLength2 = range(28)\r\n    daysInFeb2 = 28\r\n    numberOfDays2 = 365\r\n\r\ndaysInMonths2 = [31, daysInFeb2, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n    \r\n#-----Monthly line holders, crew scheduled, averages provided, \r\n# variances provided, and the difference between monthlyLineHolders \r\n# and monthlyCrew-----\r\nmonthlyLineHolders2 = [] \r\nfor m in range(12):\r\n    monthlyLineHolders2.append(int(innerList4[m])) \r\n    \r\n\r\nmonthlyCrewScheduled2 = [] \r\nfor m in ListAppend1:\r\n    monthlyCrewScheduled2.append(int(innerList4[m])) \r\n\r\n\r\naverageMonthlyAbsences2 = []\r\nfor m in ListAppend2:\r\n    averageMonthlyAbsences2.append(float(innerList4[m])) \r\n\r\n\r\nvarOfMonthlyExpectedAbsences2 = []\r\nfor m in ListAppend3:\r\n    varOfMonthlyExpectedAbsences2.append(float(innerList4[m])) \r\n\r\n#-----Calculating the standard deviations based on given variances----- \r\nstdDevMonthlyExpectedAbsences2 = []\r\nfor m in Months:\r\n    stdDevOfAbsences = sqrt(varOfMonthlyExpectedAbsences2[m])\r\n    stdDevMonthlyExpectedAbsences2.append(stdDevOfAbsences)\r\n\r\n
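# (Editorial note: the block below applies the same service-level formula as\r\n# Part 2, expected = mean + z * std_dev, with z = norm.ppf(reliabilityLevel2)\r\n# already stored in inverseNormalReliability above.)\r\n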
#-----Calculating the expected absences for each month based on the average \r\n# absences, std. dev. of absences, and the reliability level-----\r\nexpectedMonthlyAbsences2 = []\r\nfor m in Months:\r\n    expectedAbsences = inverseNormalReliability * stdDevMonthlyExpectedAbsences2[m] + averageMonthlyAbsences2[m]\r\n    expectedMonthlyAbsences2.append(expectedAbsences)\r\n\r\n#-----By subtracting crew scheduled from line holders, we're left with a \r\n# quantity that represents vacation days plus expected absences-----\r\nvacationAllottedPlusExpectedAbsences2 = [(monthlyLineHolders2[m]-monthlyCrewScheduled2[m]) for m in Months]\r\n\r\n#-----Monthly upper bound: expected absences subtracted from the \r\n# vacation-days-plus-expected-absences quantity above-----\r\nmonthlyUpperBoundVacationAllotted2 = []\r\nfor m in Months:\r\n    vacationAllotted = trunc(vacationAllottedPlusExpectedAbsences2[m] - expectedMonthlyAbsences2[m])\r\n    monthlyUpperBoundVacationAllotted2.append(vacationAllotted)\r\n    \r\n#-----Calculated upper bound for the year-----\r\nannualUpperBoundVacationAllotted2 = quicksum(monthlyUpperBoundVacationAllotted2[m] for m in Months)\r\n\r\nn.remove(n.getVars()[0:numberOfDays])\r\nn.update()\r\n\r\n#-----Decision variable-----\r\nvacationVariable = []\r\nfor m in Months:\r\n    vacationVariable.append(n.addVars(daysInMonths2[m], \r\n                                      lb = dailyLowerBoundVacationAllotted2, \r\n                                      ub = dailyUpperBoundVacationAllotted2, \r\n                                      obj = 1, name = \"2ndMonth\" + str(m)))\r\n#-----Deleting the first optimization's monthly and annual constraints-----\r\n\r\nn.remove(n.getConstrs()[0:39])\r\n    \r\n#-----Implementing monthly constraints-----\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() <= monthlyUpperBoundVacationAllotted2[m], \r\n                name = \"monthlyUpperBoundVacationAllottedForMonth2\" + str(m))\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() <= userMonthlyUpperBoundVacationAllotted2, \r\n                name = \"userMonthlyUpperBoundVacationAllottedForMonth2\" + str(m))\r\nfor m in Months:\r\n    n.addConstr(vacationVariable[m].sum() >= monthlyLowerBoundVacationAllotted2, \r\n                name = \"monthlyLowerBoundVacationAllotted2\" + str(m)) \r\n\r\n#-----Calculating the objective value-----\r\ntotalObjectiveValue2 = 0\r\nfor m in Months:\r\n    objectiveValue = vacationVariable[m].sum()\r\n    totalObjectiveValue2 = totalObjectiveValue2 + objectiveValue\r\n\r\n#-----Implementing annual constraints-----\r\nn.addConstr(totalObjectiveValue2 <= annualUpperBoundVacationAllotted2, \r\n            name = \"TotannualUpperBoundVacationAllotted2\") \r\nn.addConstr(totalObjectiveValue2 <= userAnnualUpperBoundVacationAllotted2, \r\n            name = \"userTotannualUpperBoundVacationAllotted2\") \r\nn.addConstr(totalObjectiveValue2 >= annualLowerBoundVacationAllotted2, \r\n            name = \"TotannualLowerBoundVacationAllotted2\")\r\n\r\n#-----Objective function (set once; it is identical for all months)-----\r\nn.setObjective(totalObjectiveValue2, GRB.MAXIMIZE)\r\n#-----Execute optimization-----\r\nn.update()\r\nn.optimize()\r\n\r\n#-----Creating files with the details of the formulation and optimization for \r\n# further inspection/review-----\r\nn.write('2ndSolution.sol')\r\nn.write(\"2ndLP.lp\")\r\nn.write(\"2ndMPS.mps\")\r\n\r\n#-----Initializing lists for extracting the post-optimization decision \r\n# variable values-----\r\ndecisionVariables2 = []\r\noptimalValues2 = []\r\njanOptimalVs2 = []\r\nfebOptimalVs2 = []\r\nmarOptimalVs2 = []\r\naprOptimalVs2 = []\r\nmayOptimalVs2 = []\r\njunOptimalVs2 = []\r\njulOptimalVs2 = []\r\naugOptimalVs2 = []\r\nsepOptimalVs2 = []\r\noctOptimalVs2 = []\r\nnovOptimalVs2 = []\r\ndecOptimalVs2 = []\r\n\r\n
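# (Editorial note: the if/elif ladders below bucket the flat day index l into\r\n# months via cumulative day counts; the boundaries could equivalently be\r\n# derived with list(itertools.accumulate(daysInMonths2)), but the explicit\r\n# ranges are kept as in the original.)\r\n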
#-----Extracting a clean list of the decision variables-----\r\nfor l in n.getVars():\r\n    decisionVariables2.append(int(l.x))\r\n\r\n\r\n#-----Separating the decision variables into months-----\r\nif numberOfDays2 == 366:\r\n    for l in range(366):\r\n        if 0<=l<=30: \r\n            janOptimalVs2.append(decisionVariables2[l])\r\n        elif 31<=l<=59:\r\n            febOptimalVs2.append(decisionVariables2[l])\r\n        elif 60<=l<=90:\r\n            marOptimalVs2.append(decisionVariables2[l])\r\n        elif 91<=l<=120:\r\n            aprOptimalVs2.append(decisionVariables2[l])\r\n        elif 121<=l<=151:\r\n            mayOptimalVs2.append(decisionVariables2[l])\r\n        elif 152<=l<=181:\r\n            junOptimalVs2.append(decisionVariables2[l])\r\n        elif 182<=l<=212:\r\n            julOptimalVs2.append(decisionVariables2[l])\r\n        elif 213<=l<=243:\r\n            augOptimalVs2.append(decisionVariables2[l])\r\n        elif 244<=l<=273:\r\n            sepOptimalVs2.append(decisionVariables2[l])\r\n        elif 274<=l<=304:\r\n            octOptimalVs2.append(decisionVariables2[l])\r\n        elif 305<=l<=334:\r\n            novOptimalVs2.append(decisionVariables2[l])\r\n        else:\r\n            decOptimalVs2.append(decisionVariables2[l])\r\n#-----Making each list 31 values long-----\r\n    febOptimalVs2.insert(29, 0)\r\n    febOptimalVs2.insert(30, 0)\r\n    aprOptimalVs2.insert(30, 0)\r\n    junOptimalVs2.insert(30, 0)\r\n    sepOptimalVs2.insert(30, 0)\r\n    novOptimalVs2.insert(30, 0)\r\n    \r\nelif numberOfDays2 == 365:\r\n    for l in range(365):\r\n        if 0<=l<=30: \r\n            janOptimalVs2.append(decisionVariables2[l])\r\n        elif 31<=l<=58:\r\n            febOptimalVs2.append(decisionVariables2[l])\r\n        elif 59<=l<=89:\r\n            marOptimalVs2.append(decisionVariables2[l])\r\n        elif 90<=l<=119:\r\n            aprOptimalVs2.append(decisionVariables2[l])\r\n        elif 120<=l<=150:\r\n            mayOptimalVs2.append(decisionVariables2[l])\r\n        elif 151<=l<=180:\r\n            junOptimalVs2.append(decisionVariables2[l])\r\n        elif 181<=l<=211:\r\n            julOptimalVs2.append(decisionVariables2[l])\r\n        elif 212<=l<=242:\r\n            augOptimalVs2.append(decisionVariables2[l])\r\n        elif 243<=l<=272:\r\n            sepOptimalVs2.append(decisionVariables2[l])\r\n        elif 273<=l<=303:\r\n            octOptimalVs2.append(decisionVariables2[l])\r\n        elif 304<=l<=333:\r\n            novOptimalVs2.append(decisionVariables2[l])\r\n        else:\r\n            decOptimalVs2.append(decisionVariables2[l])\r\n\r\n    febOptimalVs2.insert(28, 0)\r\n    febOptimalVs2.insert(29, 0)\r\n    febOptimalVs2.insert(30, 0)\r\n    aprOptimalVs2.insert(30, 0)\r\n    junOptimalVs2.insert(30, 0)\r\n    sepOptimalVs2.insert(30, 0)\r\n    novOptimalVs2.insert(30, 0) \r\n\r\n#-----List of lists containing the decision variables separated by month-----\r\noptimalValues2 = [janOptimalVs2, febOptimalVs2, marOptimalVs2, aprOptimalVs2, \r\n                  mayOptimalVs2, junOptimalVs2, julOptimalVs2, augOptimalVs2, \r\n                  sepOptimalVs2, octOptimalVs2, novOptimalVs2, decOptimalVs2]\r\n\r\n#-----Initialization for totaling the decision variables for each month-----\r\njanTotal2 = 0\r\nfebTotal2 = 0\r\nmarTotal2 = 0\r\naprTotal2 = 0\r\nmayTotal2 = 0\r\njunTotal2 = 0\r\njulTotal2 = 0\r\naugTotal2 = 0\r\nsepTotal2 = 0\r\noctTotal2 = 0\r\nnovTotal2 = 0\r\ndecTotal2 = 0\r\n\r\n#-----Totaling the decision variables for each month-----\r\nfor m in Months:\r\n    for l in range(31):\r\n        if m == 0:\r\n            janTotal2 += janOptimalVs2[l]\r\n        elif m == 1:\r\n            febTotal2 += febOptimalVs2[l]\r\n        elif m == 2:\r\n            marTotal2 += marOptimalVs2[l]\r\n        elif m == 3:\r\n            aprTotal2 += aprOptimalVs2[l]\r\n        elif m == 4:\r\n            mayTotal2 += mayOptimalVs2[l]\r\n        elif m == 5:\r\n            junTotal2 += junOptimalVs2[l]\r\n        elif m == 6:\r\n            julTotal2 += julOptimalVs2[l]\r\n        elif m == 7:\r\n            augTotal2 += augOptimalVs2[l]\r\n        elif m == 8:\r\n            sepTotal2 += sepOptimalVs2[l]\r\n        elif m == 9:\r\n            octTotal2 += octOptimalVs2[l]\r\n        elif m == 10:\r\n            novTotal2 += 
novOptimalVs2[l]\r\n else:\r\n decTotal2 += decOptimalVs2[l]\r\n \r\n#-----List of monthly totals-----\r\noptimalValuesMonths2 = [janTotal2, febTotal2, marTotal2, aprTotal2, mayTotal2, \r\n junTotal2, julTotal2, augTotal2, sepTotal2, octTotal2, \r\n novTotal2, decTotal2]\r\n\r\ntempTotal2 = 0\r\nfor m in Months:\r\n tempTotal2 += optimalValuesMonths2[m]\r\n \r\n#-----Scatterplot-----\r\nnewDevAndVacation = []\r\n\r\nfor m in Months:\r\n newDevAndVacation.append(vacationAllottedPlusExpectedAbsences2[m]-averageMonthlyAbsences2[m])\r\n \r\nscatterPlotValues2 = []\r\n \r\nfor l in range(25):\r\n tempTotalList = []\r\n for m in Months:\r\n tempCalculation = newDevAndVacation[m] - stdDevMonthlyExpectedAbsences2[m]*standInverseNorm[l]\r\n tempTotalList.append(tempCalculation)\r\n if tempTotalList[m] > userMonthlyUpperBoundVacationAllotted2:\r\n tempTotalList[m] = userMonthlyUpperBoundVacationAllotted2\r\n else:\r\n pass\r\n if tempTotalList[m] < monthlyLowerBoundVacationAllotted2:\r\n tempTotalList[m] = monthlyLowerBoundVacationAllotted2\r\n else:\r\n pass\r\n if tempTotalList[m] > dailyUpperBoundVacationAllotted2*daysInMonths2[m]:\r\n tempTotalList[m] = dailyUpperBoundVacationAllotted2*daysInMonths2[m]\r\n else:\r\n pass\r\n if tempTotalList[m] < dailyLowerBoundVacationAllotted2*daysInMonths2[m]:\r\n tempTotalList[m] = dailyLowerBoundVacationAllotted2*daysInMonths2[m]\r\n else:\r\n pass\r\n if len(tempTotalList) == 12:\r\n tempCalculation = sum(tempTotalList)\r\n scatterPlotValues2.append(trunc(tempCalculation))\r\n else:\r\n pass\r\n if scatterPlotValues2[l] > userAnnualUpperBoundVacationAllotted2:\r\n scatterPlotValues2[l] = userAnnualUpperBoundVacationAllotted2\r\n else:\r\n pass\r\n if scatterPlotValues2[l] < annualLowerBoundVacationAllotted2: \r\n scatterPlotValues2[l] = annualLowerBoundVacationAllotted2\r\n else:\r\n pass \r\n \r\n#-----Part 6, Second Menu----------------------------------------------------\r\n#-----Part 6, Second Menu----------------------------------------------------\r\n#-----Part 6, Second Menu----------------------------------------------------\r\n#-----Second Menu-----\r\nclass SaberApp(tk.Tk):\r\n def __init__(self, *args, **kwargs):\r\n root.__init__(self, *args, **kwargs)\r\n container = tk.Frame(self)\r\n container.pack(side='top', fill='both', expand='true')\r\n \r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n \r\n root.wm_title(self, \"Sabre\")\r\n #-----Icon-----\r\n root.iconbitmap(self, \"OSabreIcon.ico\")\r\n \r\n self.frames = {}\r\n\r\n for F in (StartPage, Histogram, Scatterplot):\r\n\r\n frame = F(container, self)\r\n \r\n self.frames[F] = frame\r\n \r\n frame.grid(row=0, column=0, sticky='nsew')\r\n \r\n self.show_frame(StartPage)\r\n \r\n def show_frame(self, cont):\r\n frame = self.frames[cont]\r\n frame.tkraise()\r\n \r\nclass StartPage(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n label = tk.Label(self, text=\"Menu\")\r\n label.pack(side = 'top', pady=10, padx=10)\r\n \r\n self.HistButton = ttk.Button(self, text='Comparison\\n Chart', command=lambda: controller.show_frame(Histogram))\r\n self.HistButton.pack(side='top', pady=10)\r\n \r\n self.ScatterButton = ttk.Button(self, text='Scatter Plot', command=lambda: controller.show_frame(Scatterplot))\r\n self.ScatterButton.pack(side='top')\r\n \r\n self.ScatterButton = ttk.Button(self, text='Write Results', command=self.Writefile)\r\n self.ScatterButton.pack(side='top', pady=10)\r\n \r\n self.ExitButton = 
ttk.Button(self, text='Exit Program', command=self.Exit)\r\n        self.ExitButton.pack(side='top')\r\n    \r\n    def Writefile(self):\r\n        with open('Optimization Results.txt', 'w') as file5:\r\n            file5.write(\"Results of the initial optimization.\\n\\nThe total number of vacation days for the year is \")\r\n            file5.write(str(tempTotal))\r\n            file5.write(\".\\n\\n\")\r\n            for m in Months:\r\n                file5.write(\"The optimal value for \")\r\n                file5.write(listOfMonths[m])\r\n                file5.write(\" is \")\r\n                file5.write(str(optimalValuesMonths[m]))\r\n                file5.write(\" days.\\n\")\r\n            file5.write(\"\\n\\n\")\r\n            file5.write(\"Results of the secondary optimization.\\n\\nThe total number of vacation days for the year is \")\r\n            file5.write(str(tempTotal2))\r\n            file5.write(\".\\n\\n\")\r\n            for m in Months:\r\n                file5.write(\"The optimal value for \")\r\n                file5.write(listOfMonths[m])\r\n                file5.write(\" is \")\r\n                file5.write(str(optimalValuesMonths2[m]))\r\n                file5.write(\" days.\\n\")\r\n            file5.write(\"\\n\\n\")\r\n            file5.write(\"Analysis\\n\\n\")\r\n            if tempTotal - tempTotal2 == 0:\r\n                file5.write(\"The optimal value did not change.\\n\")\r\n            elif tempTotal - tempTotal2 > 0:\r\n                file5.write(\"The optimal value decreased by \")\r\n                file5.write(str(tempTotal - tempTotal2))\r\n                file5.write(\" days.\\n\")\r\n            else:\r\n                file5.write(\"The optimal value increased by \")\r\n                file5.write(str(abs(tempTotal - tempTotal2)))\r\n                file5.write(\" days.\\n\")\r\n            file5.write(\"\\n\")\r\n            for m in Months:\r\n                if optimalValuesMonths[m] - optimalValuesMonths2[m] == 0:\r\n                    pass\r\n                elif optimalValuesMonths[m] - optimalValuesMonths2[m] > 0:\r\n                    file5.write(\"The optimal value for \")\r\n                    file5.write(listOfMonths[m])\r\n                    file5.write(\" decreased by \")\r\n                    file5.write(str(optimalValuesMonths[m] - optimalValuesMonths2[m]))\r\n                    file5.write(\" days.\\n\")\r\n                else:\r\n                    file5.write(\"The optimal value for \")\r\n                    file5.write(listOfMonths[m])\r\n                    file5.write(\" increased by \")\r\n                    file5.write(str(abs(optimalValuesMonths[m] - optimalValuesMonths2[m])))\r\n                    file5.write(\" days.\\n\")\r\n            \r\n            if tempTotal == userAnnualUpperBoundVacationAllotted:\r\n                file5.write(\"\\n\\nThe first optimal value is equal to the annual upper bound that you entered.\")\r\n                file5.write(\"\\nThis is because the true optimal value is greater than or equal to your upper bound.\")\r\n            if tempTotal == annualLowerBoundVacationAllotted:\r\n                file5.write(\"\\n\\nThe first optimal value is equal to the annual lower bound that you entered.\")\r\n                file5.write(\"\\nThis is because the true optimal value is less than or equal to your lower bound.\")\r\n            if tempTotal2 == userAnnualUpperBoundVacationAllotted2:\r\n                file5.write(\"\\n\\nThe second optimal value is equal to the second annual upper bound that you entered.\")\r\n                file5.write(\"\\nThis is because the true optimal value is greater than or equal to your upper bound.\")\r\n            if tempTotal2 == annualLowerBoundVacationAllotted2:\r\n                file5.write(\"\\n\\nThe second optimal value is equal to the second annual lower bound that you entered.\")\r\n                file5.write(\"\\nThis is because the true optimal value is less than or equal to your lower bound.\")\r\n            \r\n            
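# Editorial sketch (not part of the original control flow): every comparison\r\n            # below repeats the same decrease/increase/pluralize pattern; a helper like\r\n            #     def write_delta(f, label, old, new, unit):\r\n            #         if old == new:\r\n            #             return\r\n            #         verb = \"decreased\" if old > new else \"increased\"\r\n            #         amount = abs(old - new)\r\n            #         suffix = unit if amount == 1 else unit + \"s\"\r\n            #         f.write(\"\\n\\n\" + label + \" was \" + verb + \" by \" + str(amount) + \" \" + suffix + \".\")\r\n            # would replace each block with one call; the explicit branches are kept\r\n            # below as in the original.\r\n            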
if currentYear == currentYear2:\r\n                pass\r\n            elif currentYear > currentYear2:\r\n                file5.write(\"\\n\\nThe year was decreased by \")\r\n                file5.write(str(currentYear - currentYear2))\r\n                if currentYear - currentYear2 == 1:\r\n                    file5.write(\" year.\")\r\n                elif currentYear - currentYear2 == -1:\r\n                    file5.write(\" year.\")\r\n                else:\r\n                    file5.write(\" years.\")\r\n            else:\r\n                file5.write(\"\\n\\nThe year was increased by \")\r\n                file5.write(str(abs(currentYear - currentYear2)))\r\n                if currentYear - currentYear2 == 1:\r\n                    file5.write(\" year.\")\r\n                elif currentYear - currentYear2 == -1:\r\n                    file5.write(\" year.\")\r\n                else:\r\n                    file5.write(\" years.\")\r\n            \r\n            if reliabilityLevel == reliabilityLevel2:\r\n                pass\r\n            elif reliabilityLevel > reliabilityLevel2:\r\n                file5.write(\"\\n\\nThe reliability level was decreased by \")\r\n                file5.write(str(reliabilityLevel - reliabilityLevel2))\r\n                file5.write(\".\")\r\n            else:\r\n                file5.write(\"\\n\\nThe reliability level was increased by \")\r\n                file5.write(str(abs(reliabilityLevel - reliabilityLevel2)))\r\n                file5.write(\".\")\r\n            \r\n            if dailyUpperBoundVacationAllotted == dailyUpperBoundVacationAllotted2:\r\n                pass\r\n            elif dailyUpperBoundVacationAllotted > dailyUpperBoundVacationAllotted2:\r\n                file5.write(\"\\n\\nThe daily upper bound was decreased by \")\r\n                file5.write(str(dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2))\r\n                if dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2 == 1:\r\n                    file5.write(\" vacation day.\")\r\n                elif dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2 == -1:\r\n                    file5.write(\" vacation day.\")\r\n                else:\r\n                    file5.write(\" vacation days.\")\r\n            else:\r\n                file5.write(\"\\n\\nThe daily upper bound was increased by \")\r\n                file5.write(str(abs(dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2)))\r\n                if dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2 == 1:\r\n                    file5.write(\" vacation day.\")\r\n                elif dailyUpperBoundVacationAllotted - dailyUpperBoundVacationAllotted2 == -1:\r\n                    file5.write(\" vacation day.\")\r\n                else:\r\n                    file5.write(\" vacation days.\")\r\n            \r\n            if dailyLowerBoundVacationAllotted == dailyLowerBoundVacationAllotted2:\r\n                pass\r\n            elif dailyLowerBoundVacationAllotted > dailyLowerBoundVacationAllotted2:\r\n                file5.write(\"\\n\\nThe daily lower bound was decreased by \")\r\n                file5.write(str(dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2))\r\n                if dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2 == 1:\r\n                    file5.write(\" vacation day.\")\r\n                elif dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2 == -1:\r\n                    file5.write(\" vacation day.\")\r\n                else:\r\n                    file5.write(\" vacation days.\")\r\n            else:\r\n                file5.write(\"\\n\\nThe daily lower bound was increased by \")\r\n                file5.write(str(abs(dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2)))\r\n                if dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2 == 1:\r\n                    file5.write(\" vacation day.\")\r\n                elif dailyLowerBoundVacationAllotted - dailyLowerBoundVacationAllotted2 == -1:\r\n                    file5.write(\" vacation day.\")\r\n                else:\r\n                    file5.write(\" vacation days.\")\r\n            \r\n            if userMonthlyUpperBoundVacationAllotted == userMonthlyUpperBoundVacationAllotted2:\r\n                pass\r\n            elif userMonthlyUpperBoundVacationAllotted > userMonthlyUpperBoundVacationAllotted2:\r\n                file5.write(\"\\n\\nThe monthly upper bound was decreased by \")\r\n                file5.write(str(userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2))\r\n                if userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2 == 1:\r\n                    file5.write(\" vacation day.\")\r\n                elif userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2 == -1:\r\n                    file5.write(\" vacation day.\")\r\n                else:\r\n                    file5.write(\" vacation days.\")\r\n            else:\r\n                
file5.write(\"\\n\\nThe monthly upper bound was increased by \")\r\n file5.write(str(abs(userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2)))\r\n if userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif userMonthlyUpperBoundVacationAllotted - userMonthlyUpperBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n \r\n if monthlyLowerBoundVacationAllotted == monthlyLowerBoundVacationAllotted2:\r\n pass\r\n elif monthlyLowerBoundVacationAllotted > monthlyLowerBoundVacationAllotted2:\r\n file5.write(\"\\n\\nThe monthly lower bound was decreased by \")\r\n file5.write(str(monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2))\r\n if monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n else:\r\n file5.write(\"\\n\\nThe monthly lower bound was increased by \")\r\n file5.write(str(abs(monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2)))\r\n if monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif monthlyLowerBoundVacationAllotted - monthlyLowerBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n \r\n if userAnnualUpperBoundVacationAllotted == userAnnualUpperBoundVacationAllotted2:\r\n pass\r\n elif userAnnualUpperBoundVacationAllotted > userAnnualUpperBoundVacationAllotted2:\r\n file5.write(\"\\n\\nThe annual upper bound was decreased by \")\r\n file5.write(str(userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2))\r\n if userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n else:\r\n file5.write(\"\\n\\nThe annual upper bound was increased by \")\r\n file5.write(str(abs(userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2)))\r\n if userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif userAnnualUpperBoundVacationAllotted - userAnnualUpperBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n \r\n if annualLowerBoundVacationAllotted == annualLowerBoundVacationAllotted2:\r\n pass\r\n elif annualLowerBoundVacationAllotted > annualLowerBoundVacationAllotted2:\r\n file5.write(\"\\n\\nThe annual lower bound was decreased by \")\r\n file5.write(str(annualLowerBoundVacationAllotted - annualLowerBoundVacationAllotted2))\r\n if annualLowerBoundVacationAllotted - annualLowerBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif annualLowerBoundVacationAllotted - annualLowerBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n else:\r\n file5.write(\"\\n\\nThe annual lower bound was increased by \")\r\n file5.write(str(abs(annualLowerBoundVacationAllotted - annualLowerBoundVacationAllotted2)))\r\n if annualLowerBoundVacationAllotted - 
annualLowerBoundVacationAllotted2 == 1:\r\n file5.write(\" vacation day.\")\r\n elif annualLowerBoundVacationAllotted - annualLowerBoundVacationAllotted2 == -1:\r\n file5.write(\" vacation day.\")\r\n else:\r\n file5.write(\" vacation days.\")\r\n \r\n \r\n def Exit(self):\r\n os._exit(1)\r\n \r\nclass Histogram(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n label = tk.Label(self, text=\"Comparison Chart\")\r\n label.pack(side = 'top', pady=10, padx=10)\r\n \r\n self.MenuButton = ttk.Button(self, text='Menu', command=lambda: controller.show_frame(StartPage))\r\n self.MenuButton.pack(side='top')\r\n \r\n f = Figure(figsize = (8,5), dpi = 100)\r\n a = f.add_subplot(111)\r\n \r\n xList = [1,3,5,7,9,11,13,15,17,19,21,23]\r\n yList = []\r\n for m in Months:\r\n yList.append(int(optimalValuesMonths[m]))\r\n a.clear()\r\n \r\n xList2 = [2,4,6,8,10,12,14,16,18,20,22,24]\r\n yList2 = []\r\n for m in Months:\r\n yList2.append(int(optimalValuesMonths2[m]))\r\n a.clear()\r\n a.bar(xList, yList, label = \"Optimization One\", color = 'r')\r\n a.bar(xList2, yList2, label = \"Secondary Optimization\", color = 'c')\r\n \r\n a.legend(bbox_to_anchor=(0, 1.08, 1, .102), loc=3,\r\n ncol=2, borderaxespad=0)\r\n \r\n canvas = FigureCanvasTkAgg(f, self)\r\n canvas.show()\r\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n toolbar = NavigationToolbar2TkAgg(canvas, self)\r\n toolbar.update()\r\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n a.set_title(\"Vacation Days per Month\")\r\n a.set_xlabel(\"Month\")\r\n a.set_ylabel(\"Vacation Days\")\r\n \r\nclass Scatterplot(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n label = tk.Label(self, text=\"Scatter Plot\")\r\n label.pack(side = 'top', pady=10, padx=10)\r\n \r\n self.MenuButton = ttk.Button(self, text='Menu', command=lambda: controller.show_frame(StartPage))\r\n self.MenuButton.pack(side='top')\r\n \r\n f = Figure(figsize = (8,5), dpi = 100)\r\n a = f.add_subplot(111)\r\n \r\n a.clear()\r\n a.scatter(standRelyLevels, scatterPlotValues, color = 'r', label = \"Initial Optimization\")\r\n a.scatter(standRelyLevels, scatterPlotValues2, color = 'c', label = \"Secondary Optimization\")\r\n \r\n a.legend(bbox_to_anchor=(0, 1.08, 1, .102), loc=3,\r\n ncol=2, borderaxespad=0)\r\n \r\n canvas = FigureCanvasTkAgg(f, self)\r\n canvas.show()\r\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n toolbar = NavigationToolbar2TkAgg(canvas, self)\r\n toolbar.update()\r\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n \r\n a.set_title(\"Annual Vacation Days vs. 
Reliability Level\")\r\n a.set_xlabel(\"Reliability Level\")\r\n a.set_ylabel(\"Annual Vacation Days\") \r\n \r\nroot = tk.Tk\r\nApp = SaberApp()\r\nApp.mainloop()\r\n ","repo_name":"jiadongwang/CapstoneProject","sub_path":"Sabre Code_Team Python.py","file_name":"Sabre Code_Team Python.py","file_ext":"py","file_size_in_byte":74359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"39317095558","text":"#!/usr/bin/python\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nDOCUMENTATION = '''\n---\nmodule: as_group\nshort_description: Create/Update/Remove AutoScaling group from the OTC\nextends_documentation_fragment: opentelekomcloud.cloud.otc\nversion_added: \"0.2.0\"\nauthor:\n - \"Polina Gubina (@Polina-Gubina)\"\n - \"Irina Pereiaslavskaia (@irina-pereiaslavskaia)\"\ndescription:\n - Create/Update/Remove AutoScaling group from the OTC.\noptions:\n scaling_group:\n description:\n - Name or ID of the AS Group.\n required: true\n type: dict\n suboptions:\n id:\n description:\n - Specifies the AS Group ID.\n - Mandatory for updating and deleting AS Group.\n type: str\n name:\n description:\n - Specifies the AS Group name.\n - Mandatory for creating AS Group.\n type: str\n scaling_configuration:\n description:\n - The AS configuration ID or name.\n type: str\n desire_instance_number:\n description:\n - Specifies the expected number of instances.\n - The default value is the minimum number of instances.\n type: int\n default: 0\n min_instance_number:\n description:\n - Specifies the minimum number of instances.\n - The default value is 0.\n type: int\n default: 0\n max_instance_number:\n description:\n - Specifies the maximum number of instances.\n - The default value is 0.\n type: int\n default: 0\n cool_down_time:\n description:\n - Specifies the cooldown period (in seconds).\n - The value ranges from 0 to 86400 and is 300 by default.\n - After a scaling action is triggered, the system starts the cooldown \\\n period. During the cooldown period, scaling actions triggered by alarms \\\n will be denied. Scheduled, periodic, and manual scaling actions are not \\\n affected.\n type: int\n default: 300\n lb_listener:\n description:\n - Specifies ID or name of a classic load balancer listener. The system \\\n supports the binding of up to six load balancer listeners, the IDs of \\\n which are separated using a comma (,).\n - Mandatory when 'lbaas_listeners' is not specified.\n type: str\n lbaas_listeners:\n description:\n - Specifies information about an enhanced load balancer.\n - Mandatory when 'lb_listener' is not specified.\n type: list\n elements: dict\n suboptions:\n pool_id:\n description:\n - Specifies the backend ECS group ID.\n type: str\n required: true\n protocol_port:\n description:\n - Specifies the backend protocol ID, which is the port on which a \\\n backend ECS listens for traffic. 
The port ID ranges from 1 to 65535.\n type: int\n required: true\n weight:\n description:\n - Specifies the weight, which determines the portion of requests a \\\n backend ECS processes when being compared to other backend ECSs \\\n added to the same listener.\n type: int\n required: true\n availability_zones:\n description:\n - Specifies the AZ information. The ECS associated with a scaling \\\n action will be created in a specified AZ.If you do not specify an AZ, \\\n the system automatically specifies one.\n type: list\n elements: str\n networks:\n description:\n - Specifies network information. The system supports up to five subnets.\\\n The first subnet transferred serves as the primary NIC of the ECS by \\\n default.\n - Mandatory for creation of AS group.\n type: list\n elements: dict\n suboptions:\n id:\n description:\n - Specifies the network ID.\n type: str\n required: true\n security_groups:\n description:\n - A maximum of one security group can be selected.\n - Specifies the security group. If the security group is specified both \\\n in the AS configuration and AS group, the security group specified in \\\n the AS configuration prevails.\n - If the security group is not specified in either of them, the default \\\n security group is used.\n type: list\n elements: dict\n suboptions:\n id:\n description:\n - Specifies the security group ID.\n type: str\n required: true\n router:\n description:\n - The router ID or name.\n - Mandatory for creating AS group.\n type: str\n health_periodic_audit_method:\n description:\n - Specifies the health check method for instances in the AS group.\\\n When load balancing is configured for an AS group, the default value \\\n is ELB_AUDIT. Otherwise, the default value is NOVA_AUDIT.\n - ELB_AUDIT indicates the ELB health check, which takes effect in an \\\n AS group with a listener.\n - NOVA_AUDIT indicates the ECS health check, which is the health check \\\n method delivered with AS.\n choices: [elb_audit, nova_audit]\n type: str\n health_periodic_audit_time:\n description:\n - Specifies the instance health check period.\n - The value can be 1, 5, 15, 60, or 180 in the unit of minutes.\n - If this parameter is not specified, the default value is 5.\n - If the value is set to 0, health check is performed every 10 seconds.\n type: int\n default: 5\n health_periodic_audit_grace_period:\n description:\n - Specifies the grace period for instance health check.\n - The unit is second and value range is 0-86400.\n - The default value is 600.\n - The health check grace period starts after an instance is added to an \\\n AS group and is enabled.The AS group will start checking the instance \\\n status only after the grace period ends.\n - This parameter is valid only when the instance health check method \\\n of the AS group is ELB_AUDIT.\n type: int\n default: 600\n instance_terminate_policy:\n description:\n - Specifies the instance removal policy.\n - OLD_CONFIG_OLD_INSTANCE (default). The earlier-created instances \\\n based on the earlier-created AS configurations are removed first.\n - OLD_CONFIG_NEW_INSTANCE. The later-created instances based on the \\\n earlier-created AS configurations are removed first.\n - OLD_INSTANCE. The earlier-created instances are removed first.\n - NEW_INSTANCE. 
The later-created instances are removed first.\n choices: [old_config_old_instance, old_config_new_instance,\n old_instance, new_instance]\n type: str\n default: 'old_config_old_instance'\n notifications:\n description:\n - Specifies the notification mode.\n type: list\n elements: str\n delete_publicip:\n description:\n - Specifies whether to delete the EIP bound to the ECS when \\\n deleting the ECS.\n - The default value is false.\n type: bool\n default: 'no'\n delete_volume:\n description:\n - Specifies whether to delete the data disks attached to the \\\n ECS when deleting the ECS.\n - The default value is false.\n type: bool\n default: 'no'\n force_delete:\n description:\n - Specifies whether to forcibly delete an AS group, remove the ECS \\\n instances and release them when the AS group is running instances or \\\n performing scaling actions.\n type: bool\n default: 'no'\n multi_az_priority_policy:\n description:\n - Specifies the priority policy used to select target AZs when \\\n adjusting the number of instances in an AS group.\n - EQUILIBRIUM_DISTRIBUTE (default). When adjusting the number of \\\n instances, ensure that instances in each AZ in the available_zones list \\\n is evenly distributed. If instances cannot be added in the target AZ, \\\n select another AZ based on the PICK_FIRST policy.\n - PICK_FIRST. When adjusting the number of instances, target AZs are \\\n determined in the order in the available_zones list.\n choices: [equilibrium_distribute, pick_first]\n type: str\n default: 'equilibrium_distribute'\n action:\n description:\n - Specifies a flag for enabling or disabling an AS group.\n type: str\n choices: [resume, pause]\n state:\n description:\n - Whether resource should be present or absent.\n choices: [present, absent]\n type: str\n default: 'present'\n wait:\n description:\n - If the module should wait for the AS Group to be created or deleted.\n type: bool\n default: 'yes'\n timeout:\n description:\n - The duration in seconds that module should wait.\n default: 200\n type: int\nrequirements: [\"openstacksdk\", \"otcextensions\"]\n'''\n\nRETURN = '''\nas_group:\n description: AS groups object.\n type: complex\n returned: On Success.\n contains:\n id:\n description: Specifies the AS group ID.\n type: str\n sample: \"39007a7e-ee4f-4d13-8283-b4da2e037c69\"\n'''\n\nEXAMPLES = '''\n#Create AS Group\n - opentelekomcloud.cloud.as_group:\n scaling_group:\n name: \"as_group_test\"\n networks:\n - id: \"a64b4561-af18-4440-9976-b2398ed39ce5\"\n router: \"5d1ac1f4-bec6-4b8c-aae0-7c4345c68f5d\"\n scaling_configuration: \"as_config_test\"\n desire_instance_number: 1\n max_instance_number: 1\n action: \"resume\"\n state: \"present\"\n wait: true\n timeout: 360\n register: result\n\n#Delete AS Group\n - opentelekomcloud.cloud.as_group:\n scaling_group:\n name: \"as_group_test\"\n state: \"absent\"\n force_delete: true\n wait: true\n timeout: 360\n register: result\n\n'''\n\nfrom ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule\n\n\ndef is_value_changed(old: list, new: list):\n \"\"\"Compare two lists of parameters.\n\n This function compares two lists and returns True, if the two lists\n contain different elements.\n\n :param old: The list of initial parameters.\n :param new: The list of new parameters.\n\n :returns: Result of comparison\n :rtype: bool\n \"\"\"\n result = [x for x in old + new if x not in old or x not in new]\n return True if result else False\n\n\ndef new_list_with_dict_ids(old: list):\n \"\"\"Create new list with 
dicts.\n\n This function aggregates dict elements with only the key \"id\" into a new list.\n\n :param old: The initial list with dicts.\n\n :returns: New list with dicts that contain only id.\n :rtype: list\n \"\"\"\n new_list = []\n for elem in old:\n if isinstance(elem, dict):\n new_elem = {\"id\": elem.get(\"id\")}\n new_list.append(new_elem)\n return new_list\n\n\nclass ASGroupModule(OTCModule):\n argument_spec = dict(\n scaling_group=dict(\n required=True, type='dict', options=dict(\n id=dict(type='str'),\n name=dict(type='str')\n )\n ),\n scaling_configuration=dict(required=False),\n desire_instance_number=dict(required=False, type='int', default=0),\n min_instance_number=dict(required=False, type='int', default=0),\n max_instance_number=dict(required=False, type='int', default=0),\n cool_down_time=dict(required=False, type='int', default=300),\n lb_listener=dict(required=False, type='str'),\n lbaas_listeners=dict(\n required=False, type='list', elements='dict', options=dict(\n pool_id=dict(required=True, type='str'),\n protocol_port=dict(required=True, type='int'),\n weight=dict(required=True, type='int')\n )\n ),\n availability_zones=dict(required=False, type='list', elements='str'),\n networks=dict(\n required=False, type='list', elements='dict', options=dict(\n id=dict(required=True, type='str')\n )\n ),\n security_groups=dict(\n required=False, type='list', elements='dict', options=dict(\n id=dict(required=True, type='str')\n )\n ),\n router=dict(required=False, type='str'),\n health_periodic_audit_method=dict(\n required=False, type='str', choices=['elb_audit', 'nova_audit']\n ),\n health_periodic_audit_time=dict(required=False, type='int', default=5),\n health_periodic_audit_grace_period=dict(\n required=False, type='int', default=600\n ),\n instance_terminate_policy=dict(\n required=False,\n choices=['old_config_old_instance', 'old_config_new_instance',\n 'old_instance', 'new_instance'],\n default='old_config_old_instance'),\n notifications=dict(required=False, type='list', elements='str'),\n delete_publicip=dict(required=False, type='bool', default=False),\n delete_volume=dict(required=False, type='bool', default=False),\n force_delete=dict(required=False, type='bool', default=False),\n multi_az_priority_policy=dict(\n required=False, choices=['equilibrium_distribute', 'pick_first'],\n default='equilibrium_distribute'\n ),\n action=dict(required=False, type='str', choices=['resume', 'pause']),\n state=dict(\n type='str', choices=['present', 'absent'], default='present'\n ),\n wait=dict(type='bool', default=True),\n timeout=dict(type='int', default=200)\n\n )\n module_kwargs = dict(\n supports_check_mode=True\n )\n\n def _is_as_config_find(self, as_config):\n return self.conn.auto_scaling.find_config(as_config)\n\n def _attrs_id_config(self, attrs, as_config):\n config = self._is_as_config_find(as_config)\n if config:\n attrs['scaling_configuration_id'] = config.id\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"Scaling configuration {0} not found\".format(as_config)\n )\n\n def _attrs_lb_listeners(self, attrs, lb_listener):\n lb_listener_list = lb_listener.split(',')\n if 0 < len(lb_listener_list) <= 6:\n attrs['lb_listener_id'] = ','.join(lb_listener_list)\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"More than 6 classical load balancers are specified\"\n )\n\n def _attrs_id_router(self, attrs, router):\n rtr = self.conn.network.find_router(router)\n if rtr:\n attrs['router_id'] = rtr.id\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"Router {0} not found\".format(router)\n )\n\n def _attrs_lbaas_listeners(self, attrs, lbaas_listeners):\n if 0 < len(lbaas_listeners) <= 6:\n lb_listeners = []\n for listener in lbaas_listeners:\n lstnr = {}\n pool = self.conn.network.find_pool(listener['pool_id'])\n if pool:\n lstnr['pool_id'] = pool.id\n else:\n self.fail(\n changed=False,\n msg=\"Pool {0} not found\".format(listener['pool_id'])\n )\n lstnr['protocol_port'] = listener['protocol_port']\n lstnr['weight'] = listener['weight']\n lb_listeners.append(lstnr)\n attrs['lbaas_listeners'] = lb_listeners\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"More than 6 enhanced load balancers are specified\"\n )\n\n def _attrs_networks(self, attrs, networks):\n networks = new_list_with_dict_ids(networks)\n if 0 < len(networks) <= 5:\n netwrks = []\n for network in networks:\n netwrk = {}\n net = self.conn.network.find_network(network['id'])\n if net:\n netwrk['id'] = net.id\n netwrks.append(netwrk)\n else:\n self.fail(\n changed=False,\n msg=\"Network {0} not found\".format(network['id'])\n )\n attrs['networks'] = netwrks\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"More than 5 networks are specified\"\n )\n\n def _attrs_security_groups(self, attrs, security_groups, as_config=None):\n security_groups = new_list_with_dict_ids(security_groups)\n if as_config:\n config = self._is_as_config_find(as_config)\n if config and config.security_groups:\n attrs['security_groups'] = config.security_groups\n return attrs\n else:\n if len(security_groups) == 1:\n sec_groups = []\n sec_group = {}\n security_group = self.conn.network.find_security_group(\n name_or_id=security_groups[0][\"id\"]\n )\n if security_group:\n sec_group['id'] = security_group.id\n sec_groups.append(sec_group)\n attrs['security_groups'] = sec_groups\n return attrs\n else:\n self.fail(\n changed=False,\n msg=\"The number of security groups in the AS group \"\n \"exceeds the upper limit.\"\n )\n\n def _find_as_group(self, as_group):\n if as_group.get('id'):\n return self.conn.auto_scaling.find_group(\n name_or_id=as_group.get('id')\n )\n elif as_group.get('name'):\n return self.conn.auto_scaling.find_group(\n name_or_id=as_group.get('name')\n )\n\n def _attrs_for_as_group_create(\n self, as_group, as_configuration, desire_instance_number,\n min_instance_number, max_instance_number, cool_down_time,\n lb_listener, lbaas_listeners, availability_zones, networks,\n security_groups, router, hp_audit_method, hp_audit_time,\n hp_audit_grace_period, instance_terminate_policy, notifications,\n delete_publicip, delete_volume, multi_az_priority_policy\n ):\n attrs = {}\n if as_group.get('name') and not as_group.get('id'):\n attrs['scaling_group_name'] = as_group.get('name')\n else:\n self.fail(\n changed=False,\n msg=\"Name is mandatory for creating AS Group.\"\n )\n\n if networks:\n attrs = self._attrs_networks(attrs, networks)\n else:\n self.fail(\n changed=False,\n msg=\"'networks' is mandatory for creating an AS Group.\"\n )\n\n if router:\n attrs = self._attrs_id_router(attrs, router)\n else:\n self.fail(\n changed=False,\n msg=\"'router' is mandatory for creating an AS group.\"\n )\n\n if as_configuration:\n attrs = self._attrs_id_config(attrs, as_configuration)\n\n if desire_instance_number:\n attrs['desire_instance_number'] = desire_instance_number\n\n if min_instance_number:\n attrs['min_instance_number'] = min_instance_number\n\n if max_instance_number:\n attrs['max_instance_number'] = max_instance_number\n\n if cool_down_time:\n attrs['cool_down_time'] = 
cool_down_time\n\n if lb_listener and lbaas_listeners:\n self.fail(\n changed=False,\n msg=\"Either 'lb_listener' or 'lbaas_listener' \"\n \"can be specified\"\n )\n\n if lb_listener:\n attrs = self._attrs_lb_listeners(attrs, lb_listener)\n\n if lbaas_listeners:\n attrs = self._attrs_lbaas_listeners(attrs, lbaas_listeners)\n\n if not hp_audit_method:\n if lb_listener or lbaas_listeners:\n attrs['health_periodic_audit_method'] = \"elb_audit\".upper()\n else:\n attrs['health_periodic_audit_method'] = \"nova_audit\".upper()\n else:\n if not lb_listener and not lbaas_listeners:\n if hp_audit_method == 'elb_audit':\n self.fail(\"Without LB only 'nova_audit' is available\")\n else:\n attrs['health_periodic_audit_method'] = \\\n hp_audit_method.upper()\n else:\n attrs['health_periodic_audit_method'] = \\\n hp_audit_method.upper()\n\n if availability_zones:\n attrs['availability_zones'] = availability_zones\n\n if security_groups:\n attrs = self._attrs_security_groups(attrs, security_groups)\n\n if hp_audit_time:\n attrs['health_periodic_audit_time'] = hp_audit_time\n\n if delete_publicip:\n attrs['delete_publicip'] = delete_publicip\n\n if delete_volume:\n attrs['delete_volume'] = delete_volume\n\n if hp_audit_grace_period:\n attrs['health_periodic_audit_grace_period'] = \\\n hp_audit_grace_period\n\n if instance_terminate_policy:\n attrs['instance_terminate_policy'] = \\\n instance_terminate_policy.upper()\n\n if notifications:\n attrs['notifications'] = notifications\n\n if multi_az_priority_policy:\n attrs['multi_az_priority_policy'] = \\\n multi_az_priority_policy.upper()\n\n return attrs\n\n def _attrs_for_as_group_update(\n self, as_group, as_configuration, desire_instance_number,\n min_instance_number, max_instance_number, cool_down_time,\n lb_listener, lbaas_listeners, availability_zones, networks,\n security_groups, hp_audit_method, hp_audit_time,\n hp_audit_grace_period, instance_terminate_policy, notifications,\n delete_publicip, delete_volume, multi_az_priority_policy, group\n ):\n attrs = {}\n if (as_group.get('id')) and as_group.get('name'):\n if (as_group.get('id') == group.id\n and group.name != as_group.get('name')):\n attrs['scaling_group_name'] = as_group.get('name')\n\n if (as_configuration\n and as_configuration != group.scaling_configuration_id\n and as_configuration != group.scaling_configuration_name):\n attrs = self._attrs_id_config(attrs, as_configuration)\n\n if (desire_instance_number\n and (group.desire_instance_number != desire_instance_number)):\n attrs['desire_instance_number'] = desire_instance_number\n\n if (min_instance_number\n and (group.min_instance_number != min_instance_number)):\n attrs['min_instance_number'] = min_instance_number\n\n if (max_instance_number\n and (group.max_instance_number != max_instance_number)):\n attrs['max_instance_number'] = max_instance_number\n\n if cool_down_time and group.cool_down_time != cool_down_time:\n attrs['cool_down_time'] = cool_down_time\n\n if lb_listener and lbaas_listeners:\n self.fail(\n changed=False,\n msg=\"Either 'lb_listener' or 'lbaas_listener' \"\n \"can be specified\"\n )\n\n if lb_listener and group.lb_listner_id != lb_listener:\n attrs = self._attrs_lb_listeners(attrs, lb_listener)\n\n if (lbaas_listeners\n and is_value_changed(group.lbaas_listeners, lbaas_listeners)):\n attrs = self._attrs_lbaas_listeners(attrs, lbaas_listeners)\n\n if (availability_zones\n and is_value_changed(\n group.availability_zones, availability_zones\n )):\n attrs['availability_zones'] = availability_zones\n\n if (networks\n and 
is_value_changed(\n new_list_with_dict_ids(group.networks), networks\n )):\n attrs = self._attrs_networks(attrs, networks)\n\n if (security_groups\n and is_value_changed(\n new_list_with_dict_ids(group.security_groups),\n security_groups\n )):\n attrs = self._attrs_security_groups(attrs, security_groups)\n\n if hp_audit_method\\\n and group.health_periodic_audit_method != \\\n hp_audit_method.upper():\n\n if (not group.lb_listener_id\n and not group.lbaas_listeners\n and hp_audit_method == 'elb_audit'.upper()):\n self.fail_json(\n msg=\"Without LB only 'nova_audit' is available\"\n )\n\n attrs['health_periodic_audit_method'] = hp_audit_method.upper()\n\n if (hp_audit_time\n and group.health_periodic_audit_time != hp_audit_time):\n attrs['health_periodic_audit_time'] = hp_audit_time\n\n if hp_audit_grace_period\\\n and group.health_periodic_audit_grace_period != \\\n hp_audit_grace_period:\n attrs['health_periodic_audit_grace_period'] = hp_audit_grace_period\n\n if instance_terminate_policy\\\n and group.instance_terminate_policy != \\\n instance_terminate_policy.upper():\n attrs['instance_terminate_policy'] = \\\n instance_terminate_policy.upper()\n\n if notifications and group.notifications != notifications:\n attrs['notifications'] = notifications\n\n if delete_publicip and group.delete_publicip != delete_publicip:\n attrs['delete_publicip'] = delete_publicip\n\n if delete_volume and group.delete_volume != delete_volume:\n attrs['delete_volume'] = delete_volume\n\n if multi_az_priority_policy and group.multi_az_priority_policy != \\\n multi_az_priority_policy.upper():\n attrs['multi_az_priority_policy'] = multi_az_priority_policy.upper()\n\n return attrs\n\n def _wait_for_instances(self, as_group, timeout, desire_instance_number=0):\n for count in self.sdk.utils.iterate_timeout(\n timeout=timeout,\n message=\"Timeout waiting for AS Instances\"\n ):\n instances = list(self.conn.auto_scaling.instances(\n group=as_group\n ))\n instances_with_id = [instance.id for instance in instances\n if instance.id]\n if (len(instances) == len(instances_with_id) == desire_instance_number):\n for instance in instances:\n self.conn.auto_scaling.wait_for_instance(instance=instance)\n return\n\n def _resume_group(self, group, wait, timeout, desire_instance_number=0):\n result_group = group\n self.conn.auto_scaling.resume_group(group=group)\n if wait:\n try:\n if desire_instance_number > 0:\n self._wait_for_instances(\n as_group=group,\n timeout=timeout,\n desire_instance_number=desire_instance_number\n )\n result_group = self.conn.auto_scaling.wait_for_group(\n group=group,\n wait=timeout\n )\n except self.sdk.exceptions.ResourceTimeout:\n self.fail(\n msg=\"Timeout failure waiting for AS Group\"\n )\n return result_group\n\n def _pause_group(self, group, wait, timeout):\n result_group = group\n self.conn.auto_scaling.pause_group(group=group)\n if wait:\n try:\n result_group = self.conn.auto_scaling.wait_for_group(\n group=group,\n status='PAUSED',\n wait=timeout\n )\n except self.sdk.exceptions.ResourceTimeout:\n self.fail(\n msg=\"Timeout failure waiting for AS Group\"\n )\n return result_group\n\n def _action_group(\n self, action, group, wait, timeout, desire_instance_number=0\n ):\n if action == 'resume':\n return self._resume_group(group, wait, timeout,\n desire_instance_number)\n elif action == 'pause':\n return self._pause_group(group, wait, timeout)\n\n def _needs_update(\n self, as_group, as_configuration, desire_instance_number,\n min_instance_number, max_instance_number, cool_down_time,\n 
lb_listener, lbaas_listeners, availability_zones, networks,\n security_groups, hp_audit_method, hp_audit_time,\n hp_audit_grace_period, instance_terminate_policy, notifications,\n delete_publicip, delete_volume, multi_az_priority_policy, group\n ):\n if as_group.get('id') and as_group.get('name'):\n if (as_group.get('id') == group.id\n and group.name != as_group.get('name')):\n return True\n\n if (as_configuration\n and group.scaling_configuration_id != as_configuration\n and group.scaling_configuration_name != as_configuration):\n return True\n\n if (desire_instance_number\n and group.desire_instance_number != desire_instance_number):\n return True\n\n if (min_instance_number\n and group.min_instance_number != min_instance_number):\n return True\n\n if (max_instance_number\n and group.max_instance_number != max_instance_number):\n return True\n\n if (cool_down_time\n and group.cool_down_time != cool_down_time):\n return True\n\n if (lb_listener\n and group.lb_listner_id != lb_listener):\n return True\n\n if (lbaas_listeners\n and is_value_changed(group.lbaas_listeners, lbaas_listeners)):\n return True\n\n if (availability_zones\n and is_value_changed(\n group.availability_zones, availability_zones\n )):\n return True\n\n if (networks\n and is_value_changed(\n new_list_with_dict_ids(group.networks), networks\n )):\n return True\n\n if (security_groups and is_value_changed(new_list_with_dict_ids(\n group.security_groups), security_groups)):\n return True\n\n if hp_audit_method \\\n and group.health_periodic_audit_method != \\\n hp_audit_method.upper():\n return True\n\n if (hp_audit_time\n and group.health_periodic_audit_time != hp_audit_time):\n return True\n\n if hp_audit_grace_period \\\n and group.health_periodic_audit_grace_period != \\\n hp_audit_grace_period:\n return True\n\n if instance_terminate_policy \\\n and group.instance_terminate_policy != \\\n instance_terminate_policy.upper():\n return True\n\n if notifications and group.notifications != notifications:\n return True\n\n if delete_publicip and group.delete_publicip != delete_publicip:\n return True\n\n if delete_volume and group.delete_volume != delete_volume:\n return True\n\n if multi_az_priority_policy \\\n and group.multi_az_priority_policy != \\\n multi_az_priority_policy.upper():\n return True\n\n return False\n\n def _is_group_can_be_deleted(self, as_group):\n as_instances = list(self.conn.auto_scaling.instances(as_group))\n return False if as_instances else True\n\n def _delete_as_group(self, as_group, force_delete, wait, timeout):\n self.conn.auto_scaling.delete_group(\n group=as_group,\n force_delete=force_delete\n )\n if wait:\n try:\n self.conn.auto_scaling.wait_for_delete_group(\n group=as_group,\n wait=timeout\n )\n except self.sdk.exceptions.ResourceTimeout:\n self.fail(\n msg=\"Timeout failure waiting for delete AS Group\"\n )\n\n def _system_state_change(\n self, as_group, as_configuration, desire_instance_number,\n min_instance_number, max_instance_number, cool_down_time,\n lb_listener, lbaas_listeners, availability_zones, networks,\n security_groups, hp_audit_method, hp_audit_time,\n hp_audit_grace_period, instance_terminate_policy, notifications,\n delete_publicip, delete_volume, multi_az_priority_policy, group\n ):\n state = self.params['state']\n if state == 'present':\n if not group:\n return True\n return self._needs_update(\n as_group=as_group, as_configuration=as_configuration,\n desire_instance_number=desire_instance_number,\n min_instance_number=min_instance_number,\n 
max_instance_number=max_instance_number,\n cool_down_time=cool_down_time,\n lb_listener=lb_listener, lbaas_listeners=lbaas_listeners,\n availability_zones=availability_zones, networks=networks,\n security_groups=security_groups,\n hp_audit_method=hp_audit_method,\n hp_audit_time=hp_audit_time,\n hp_audit_grace_period=hp_audit_grace_period,\n instance_terminate_policy=instance_terminate_policy,\n notifications=notifications, delete_publicip=delete_publicip,\n delete_volume=delete_volume,\n multi_az_priority_policy=multi_az_priority_policy, group=group\n )\n elif state == 'absent' and group:\n return True\n return False\n\n def run(self):\n\n as_group = self.params['scaling_group']\n as_configuration = self.params['scaling_configuration']\n desire_instance_number = self.params['desire_instance_number']\n min_instance_number = self.params['min_instance_number']\n max_instance_number = self.params['max_instance_number']\n cool_down_time = self.params['cool_down_time']\n lb_listener = self.params['lb_listener']\n lbaas_listeners = self.params['lbaas_listeners']\n availability_zones = self.params['availability_zones']\n networks = self.params['networks']\n security_groups = self.params['security_groups']\n router = self.params['router']\n hp_audit_method = self.params['health_periodic_audit_method']\n hp_audit_time = self.params['health_periodic_audit_time']\n hp_audit_gr_period = self.params['health_periodic_audit_grace_period']\n instance_terminate_policy = self.params['instance_terminate_policy']\n notifications = self.params['notifications']\n delete_publicip = self.params['delete_publicip']\n delete_volume = self.params['delete_volume']\n force_delete = self.params['force_delete']\n multi_az_priority_policy = self.params['multi_az_priority_policy']\n action = self.params['action']\n wait = self.params['wait']\n timeout = self.params['timeout']\n state = self.params['state']\n\n changed = False\n\n if as_group:\n group = self._find_as_group(as_group)\n\n if self.ansible.check_mode:\n self.exit(\n changed=self._system_state_change(\n as_group=as_group,\n as_configuration=as_configuration,\n desire_instance_number=desire_instance_number,\n min_instance_number=min_instance_number,\n max_instance_number=max_instance_number,\n cool_down_time=cool_down_time,\n lb_listener=lb_listener,\n lbaas_listeners=lbaas_listeners,\n availability_zones=availability_zones,\n networks=networks,\n security_groups=security_groups,\n hp_audit_method=hp_audit_method,\n hp_audit_time=hp_audit_time,\n hp_audit_grace_period=hp_audit_gr_period,\n instance_terminate_policy=instance_terminate_policy,\n notifications=notifications,\n delete_publicip=delete_publicip,\n delete_volume=delete_volume,\n multi_az_priority_policy=multi_az_priority_policy,\n group=group)\n )\n\n if group:\n\n if state == 'present':\n\n if self._needs_update(\n as_group=as_group,\n as_configuration=as_configuration,\n desire_instance_number=desire_instance_number,\n min_instance_number=min_instance_number,\n max_instance_number=max_instance_number,\n cool_down_time=cool_down_time,\n lb_listener=lb_listener,\n lbaas_listeners=lbaas_listeners,\n availability_zones=availability_zones,\n networks=networks,\n security_groups=security_groups,\n hp_audit_method=hp_audit_method,\n hp_audit_time=hp_audit_time,\n hp_audit_grace_period=hp_audit_gr_period,\n instance_terminate_policy=instance_terminate_policy,\n notifications=notifications,\n delete_publicip=delete_publicip,\n delete_volume=delete_volume,\n multi_az_priority_policy=multi_az_priority_policy,\n 
group=group\n ):\n attrs = self._attrs_for_as_group_update(\n as_group=as_group,\n as_configuration=as_configuration,\n desire_instance_number=desire_instance_number,\n min_instance_number=min_instance_number,\n max_instance_number=max_instance_number,\n cool_down_time=cool_down_time,\n lb_listener=lb_listener,\n lbaas_listeners=lbaas_listeners,\n availability_zones=availability_zones,\n networks=networks, security_groups=security_groups,\n hp_audit_method=hp_audit_method,\n hp_audit_time=hp_audit_time,\n hp_audit_grace_period=hp_audit_gr_period,\n instance_terminate_policy=instance_terminate_policy,\n notifications=notifications,\n delete_publicip=delete_publicip,\n delete_volume=delete_volume,\n multi_az_priority_policy=multi_az_priority_policy,\n group=group\n )\n group = self.conn.auto_scaling.update_group(\n group=group, **attrs\n )\n changed = True\n if action:\n group = self._action_group(\n action=action,\n group=group,\n wait=wait,\n timeout=timeout,\n desire_instance_number=desire_instance_number\n )\n self.exit(\n changed=changed,\n as_group=group,\n msg=\"AS Group {0} was updated\".format(group.id)\n )\n elif action:\n group = self._action_group(\n action=action,\n group=group,\n wait=wait,\n timeout=timeout,\n desire_instance_number=desire_instance_number\n )\n changed = True\n self.exit(\n changed=changed,\n as_group=group,\n msg=\"Action {0} for AS Group {1} was done\".format(\n action, group.id\n )\n )\n else:\n self.fail(\n changed=changed,\n msg=\"AS Group {0} exists\".format(group.id)\n )\n\n else:\n if force_delete or self._is_group_can_be_deleted(group):\n self._delete_as_group(\n as_group=group,\n force_delete=force_delete,\n wait=wait,\n timeout=timeout\n )\n changed = True\n self.exit(\n changed=changed,\n msg=\"AS Group {0} was deleted\".format(group.id)\n )\n else:\n changed = False\n self.fail(\n changed=changed,\n msg=\"AS Group {0} can not be deleted due to \"\n \"AS Instances presence\".format(group.id)\n )\n\n else:\n\n if state == 'present':\n attrs = self._attrs_for_as_group_create(\n as_group=as_group,\n as_configuration=as_configuration,\n desire_instance_number=desire_instance_number,\n min_instance_number=min_instance_number,\n max_instance_number=max_instance_number,\n cool_down_time=cool_down_time,\n lb_listener=lb_listener,\n lbaas_listeners=lbaas_listeners,\n availability_zones=availability_zones,\n networks=networks, security_groups=security_groups,\n router=router, hp_audit_method=hp_audit_method,\n hp_audit_time=hp_audit_time,\n hp_audit_grace_period=hp_audit_gr_period,\n instance_terminate_policy=instance_terminate_policy,\n notifications=notifications,\n delete_publicip=delete_publicip,\n delete_volume=delete_volume,\n multi_az_priority_policy=multi_az_priority_policy\n )\n group = self.conn.auto_scaling.create_group(**attrs)\n changed = True\n if (\n as_configuration\n and self._is_as_config_find(as_configuration)\n and action\n ):\n group = self._action_group(\n action=action,\n group=group,\n wait=wait,\n timeout=timeout,\n desire_instance_number=desire_instance_number\n )\n self.exit(\n changed=changed,\n as_group=group,\n msg=\"AS Group {0} was created\".format(as_group.get(\n \"name\"))\n )\n else:\n self.fail(\n changed=changed,\n msg=\"AS Group {0} not found\".format(as_group.get('id'))\n )\n\n else:\n self.fail(\n changed=changed,\n msg=\"Name or/and ID should be specified\"\n )\n\n\ndef main():\n module = ASGroupModule()\n module()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"opentelekomcloud/ansible-collection-cloud","sub_path":"plugins/modules/as_group.py","file_name":"as_group.py","file_ext":"py","file_size_in_byte":44853,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"67"}
{"seq_id":"9989165983","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'sales_record.views.display_results', name='results'),\n #url(r'^chart/', 'chart_test.views.chart'),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"BlakeLawson/Beachglow","sub_path":"Inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
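# The gp.py record below builds Gram matrices by nesting jax.vmap: an inner map
# sweeps one argument of the pairwise kernel while the outer map sweeps the other.
# A minimal self-contained sketch of that pattern, assuming only jax is installed
# (the kernel and grids here are illustrative, not taken from the repo):

import jax.numpy as jnp
from jax import vmap

def rbf(x, z, var=1.0, ls=0.5):
    # squared-exponential kernel evaluated on a single pair of points
    return var * jnp.exp(-0.5 * jnp.sum((x - z) ** 2) / ls ** 2)

xs = jnp.linspace(0.0, 1.0, 5).reshape(-1, 1)
zs = jnp.linspace(0.0, 1.0, 4).reshape(-1, 1)

row = vmap(rbf, in_axes=(0, None))                        # sweep x for a fixed z
gram = vmap(row, in_axes=(None, 0), out_axes=1)(xs, zs)   # sweep z along columns
print(gram.shape)  # (5, 4)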
{"seq_id":"40371102120","text":"# JAX\nimport jax.numpy as jnp\nfrom jax import random, jit, ops, vmap\nfrom jax.experimental import stax\n\n# Numpyro\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro import optim\nfrom numpyro.infer import SVI, Trace_ELBO, Predictive\nfrom numpyro.diagnostics import hpdi\n\nfrom functools import partial\nimport numpy as np\n\n\n# kernel functions\n# @jit\n# def exp_kernel1(x, z, d, var, ls, noise, jitter=1.0e-6):\n\n# deltaXsq = jnp.power((x[:, None] - z), 2.0)\n# k = var * jnp.exp(-0.5 * deltaXsq / ls)\n# k += (noise + jitter) * jnp.eye(x.shape[0])\n\n# return k\n\n# check spatial dimension\ndef check_d(func):\n \"\"\"Check spatial dimension of input, transform 1d array to column vector.\n \"\"\"\n def reshape(x, z, *args, **kwargs):\n assert len(x.shape)==len(z.shape)\n # reshape to column vectors\n if len(x.shape)==1:\n # reshape to column vector if d==1\n x = jnp.reshape(x, (x.shape[0], 1))\n z = jnp.reshape(z, (z.shape[0], 1))\n return func(x, z, *args, **kwargs)\n return reshape\n\n# reshape x,z before apply to kernel\n@check_d\n# @jit\ndef exp_kernel2(x, z, \n # d,\n var, \n ls, \n # noise, \n jitter=1.0e-5\n ):\n \"\"\"Exponential kernel for 1D and 2D spatial-temporal data.\n\n Args: \n x, z (ndarray) - spatial-temporal data.\n var (float) - marginal variance.\n ls (float) - lengthscale of the kernel.\n jitter (float) - tiny noise added to diagonal for numerical stability.\n\n Returns:\n kernel gram matrix.\n \"\"\"\n assert len(x.shape)==len(z.shape)\n assert len(x.shape)==1 or len(x.shape)==2\n\n # print(\"exp kernel\", x.shape)\n # print(z.shape)\n\n # squared norm on the spatial dim\n deltaX = jnp.linalg.norm(x[:, None] - z, ord=2, axis=2) \n k = var * jnp.exp(-0.5 * jnp.power(deltaX/ls, 2.0) )\n\n # check if kernel matrix is a square matrix -- \n # stablise inversion with jitter on the diagonal\n if k.shape[0] == k.shape[1]:\n k += jitter * jnp.eye(x.shape[0])\n return k\n\n\n# approximate k^* over [0,1] grid\ndef agg_kernel_grid(rng_key,\n d, # spatial dim\n m, # number of MC sample\n # n, # number of intervals on each axis\n kernel, \n var, \n ls, \n # noise,\n grid1,\n grid2=None,\n jitter=1.0e-6,\n ):\n\n xloc = dist.Uniform()\n n = grid1.shape[0]\n # grid = jnp.arange(0, 1, 1/n)\n\n # note that we must have a column vector for spatial locs\n x = xloc.sample(rng_key, (n**d, m, d)) \n\n # 1D: sample from [0, 0.1], [0.1, 0.2] etc. uniformly\n if d==1:\n _x = x/n + jnp.expand_dims(grid1, axis=(1, 2))\n\n elif d==2:\n if grid2 is None:\n grid2 = grid1\n u, v = jnp.meshgrid(grid1, grid2)\n _x = x/n + jnp.array([[u.flatten()], [v.flatten()]]).transpose((2, 1, 0))\n print(_x)\n\n else:\n raise Warning(\"Function is only implemented for d=1,2\")\n\n _kernel = partial(kernel, var=var, ls=ls, jitter=jitter)\n __kernel = lambda x, z: jnp.sum(_kernel(x, z))\n\n # the first dim of sample gives the batch dim, i.e. n**d\n agg__kernel_v1 = vmap(__kernel, (0, None), 0)\n agg__kernel_v2 = vmap(agg__kernel_v1, (None, 0), 1)\n\n # print(agg__kernel_v1(_x, _x[0]))\n return agg__kernel_v2(_x, _x) / (m ** 2)\n\n# may want to rewrite this to include spatial dim 2\nclass GP():\n \"\"\"Class for GP.\n\n Attributes:\n kernel - kernel function.\n jitter (float) - small positive noise on diagonal entries.\n d (int) - spatial dimension 1 or 2.\n \"\"\"\n def __init__(\n self, \n kernel=exp_kernel2, \n # var=1,\n # noise=0,\n # ls=0.01, # this is default\n jitter=1.0e-5,\n d=1\n ):\n\n self.kernel = kernel\n # self.var = var\n # self.noise = noise\n # self.ls = ls\n self.jitter = jitter\n self.d = d\n \n # update the function with user defined variance\n def sample(self, x, y=None, ls=None, var=None, sigma=None):\n \"\"\"Sample from GP with a given lengthscale and marginal variance.\n\n Args:\n ls (float) - lengthscale of kernel.\n var (float) - marginal variance of kernel.\n sigma (float) - standard deviation of the additive observation noise.\n x (ndarray) - spatial location.\n y (ndarray) - (function) value at x.\n \n Returns:\n sampler for y.\n \"\"\"\n\n if ls is None:\n ls = numpyro.sample(\"length\", dist.InverseGamma(1,0.1))\n if var is None:\n var = numpyro.sample(\"var\", dist.LogNormal(0.0, 0.1))\n if sigma is None:\n sigma = numpyro.sample(\"noise\", dist.HalfNormal(0.1))\n\n\n ## Sanity check: if length/dx ->1: OK, if length/dx -> Inf: covariance becomes degenerate\n # logdetK = np.linalg.slogdet(np.asarray(k))[0] * np.linalg.slogdet(np.asarray(k))[1]\n # dx = 1/k.shape[0]\n # print(k[0:5, 0:5])\n # print(\"dx =\" + str(dx))\n # print(\"log(det(K)) = \" + str(logdetK))\n # print(\"length / dx = \" + str(ls/dx))\n\n # sample Y according to the standard gaussian process formula\n k = self.kernel(x, x, var, ls, self.jitter)\n\n f = numpyro.sample(\n \"f\",\n dist.MultivariateNormal(loc=jnp.zeros(x.shape[0]), covariance_matrix=k)\n )\n numpyro.sample(\"y\", dist.Normal(f, sigma), obs=y)\n\n
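# A hedged usage sketch (illustrative, not part of the original file): the GP
# model above is a plain numpyro model, so it can be handed to NUTS directly,
# with x assumed to be an array of shape (N, d) and y of shape (N,):
#
#     from numpyro.infer import MCMC, NUTS
#
#     gp = GP(d=1)
#     mcmc = MCMC(NUTS(gp.sample), num_warmup=500, num_samples=1000)
#     mcmc.run(random.PRNGKey(0), x, y=y)
#     posterior = mcmc.get_samples()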
class PoiGP(GP):\n \"\"\"GP with Poisson observations (a log-Gaussian Cox process).\n\n Attributes:\n kernel - kernel function.\n jitter (float) - small positive noise on diagonal entries.\n d (int) - spatial dimension 1 or 2.\n \"\"\"\n \n # note that, there is no noise in the current implementation\n def sample(self, m, x1, x2=None, y=None, ls=None, var=None, sigma=None, seed=0):\n \"\"\"Sample from LGCP with given grid(s) over [0,1], lengthscale and marginal variance.\n\n Args:\n m (int) - the number of MC samples to draw from Uniform distribution.\n x1 (ndarray) - spatial grid over [0,1].\n x2 (ndarray) - spatial grid over [0,1].\n ls (float) - lengthscale of kernel.\n var (float) - marginal variance of kernel.\n sigma (float) - variance for additive noise of GP.\n y (ndarray) - (function) value at x.\n \n Returns:\n sampler for y.\n \"\"\"\n if x2 is None:\n x2 = x1\n if ls is None:\n ls = numpyro.sample(\"length\", dist.InverseGamma(1,0.1))\n if var is None:\n var = numpyro.sample(\"var\", dist.LogNormal(0.0, 0.1))\n # if sigma is None:\n # sigma = numpyro.sample(\"noise\", dist.HalfNormal(0.1))\n\n rng_key = random.PRNGKey(seed)\n\n # sample Y according to the standard gaussian process formula\n k = agg_kernel_grid(rng_key,\n self.d, # spatial dim\n m, # number of MC sample\n self.kernel, \n var, \n ls, \n x1, x2,\n jitter=self.jitter\n )\n\n f = numpyro.sample(\n \"f\",\n dist.MultivariateNormal(loc=jnp.zeros(x1.shape[0]), covariance_matrix=k)\n )\n # note that there is no noise\n rate = numpyro.deterministic(\"rate\", jnp.exp(f))\n numpyro.sample(\"y\", dist.Poisson(rate), obs=y)","repo_name":"edenx/gp-vae","sub_path":"src/model/gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
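# The linked-list record below follows the convention of handing the (possibly
# new) head back to the caller. A short usage sketch under that convention,
# assuming the record's definitions are in scope (sample values are illustrative):

head = None
for value in [9, 5, 1]:          # prepend, yielding the sorted list 1 -> 5 -> 9
    node = LinkedList(value)
    node.next = head
    head = node

head = addSorted(head, 7)        # insert keeps the list sorted: 1 -> 5 -> 7 -> 9
head = deleteSorted(head, 5)     # unlink 5: 1 -> 7 -> 9
traversal(head)                  # prints 1, 7, 9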
{"seq_id":"11080649619","text":"class LinkedList:\n def __init__(self, data):\n self.data = data\n self.next = None\n\ndef traversal( head ):\n curNode = head\n while curNode is not None:\n print (curNode.data)\n curNode = curNode.next\n\n return \"end\"\n\ndef unorderedSearch(head, target):\n curNode = head\n n = 0\n while curNode is not None and curNode.data != target:\n n +=1\n curNode = curNode.next\n\n return curNode is not None, n\n\n\n# Given the head reference, remove a target from the linked list\ndef deleteNode(head, target):\n prenode = None\n curNode = head\n while curNode is not None and curNode.data != target:\n prenode = curNode\n curNode = curNode.next\n\n if curNode is not None:\n if target == head.data:\n head = curNode.next\n\n else:\n prenode.next = curNode.next\n return head\n\n\nhead = LinkedList(\"a\")\nhead.next = LinkedList(\"g\")\nhead.next.next = LinkedList(0)\nNewNode = LinkedList(4)\nNewNode.next = head\nhead = NewNode\n\ntraversal(head)\nprint(unorderedSearch(head, 4))\nprint(\"\\n\")\ntraversal(deleteNode(head, \"a\"))\n\n# Add a Node when Head and Tail is referenced\ndef add(head, tail, target):\n NewNode = LinkedList( target )\n if head is None:\n head = NewNode\n else:\n tail.next = NewNode\n tail = NewNode\n\n# Add with only head referenced\ndef addUnsorted(head, target):\n NewNode = LinkedList(target)\n NewNode.next = head\n head = NewNode\n return head\n\n# Remove a Node when Tail and Head is Given\ndef remove(head, tail, target):\n prenode = None\n curNode = head\n while curNode is not None and curNode.data != target:\n prenode = curNode\n curNode = curNode.next\n\n if curNode is not None:\n if curNode == head:\n head = curNode.next\n if curNode == tail:\n tail = prenode\n else:\n prenode.next = curNode.next\n\n# Search Sorted LinkedList\ndef SearchSorted(head, target):\n curNode = head\n while curNode is not None and curNode.data < target:\n curNode = curNode.next\n return curNode is not None and curNode.data == target\n\n# Add in a Sorted Linked List\ndef addSorted(head, target):\n preNode = None\n curNode = head\n while curNode is not None and curNode.data < target:\n preNode = curNode\n curNode = curNode.next\n \n NewNode = LinkedList( target )\n NewNode.next = curNode\n if curNode is head:\n head = NewNode\n else:\n preNode.next = NewNode\n\n return head\n\ndef deleteSorted(head, target):\n preNode = None\n curNode = head\n\n while curNode is not None and curNode.data < target:\n preNode = curNode\n curNode = curNode.next\n\n if curNode is not None:\n if head.data == target:\n head = curNode.next\n elif curNode.data == target:\n preNode.next = curNode.next\n\n return head\n\ndef removeall(head):\n curNode = head\n preNode = None\n while curNode is not None:\n preNode = curNode.next\n curNode = preNode\n\n return curNode\n\ndef splitInHalf(head):\n n = length(head)\n curNode = head\n n = n//2\n m = 0\n while m != n :\n m += 1\n curNode = curNode.next\n\n return curNode\n\ndef length(head):\n curNode = head\n n = 0\n if curNode is None:\n return 0\n\n while curNode is not None:\n n +=1\n curNode = curNode.next\n\n return n\n \n","repo_name":"Predstan/Algorithm-and-Data-Structure","sub_path":"ch6/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"7775153480","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\nfrom . import views\n\nurlpatterns = patterns('myproject.myapp.views',\n url(r'^list/$', 'list', name='list'),\n url(r'^mail/$', 'mail', name='mail'),\n url(r'^viewer/$', 'viewer', name='viewer'),\n url(r'^aboutme/$', 'aboutme', name='aboutme'),\n url(r'^aboutapp/$', 'aboutapp', name='aboutapp'),\n url(r'^userguide/$', 'userguide', name='userguide'),\n url(r'^contact/$', 'contact', name='contact'), \n url(r'^single_helix_stats/$', 'single_helix_stats', name='single_helix_stats'),\n url(r'^helix_pair_stats/$', 'helix_pair_stats', name='helix_pair_stats'),\n url(r'^helix_triplet_stats/$', 'helix_triplet_stats', name='helix_triplet_stats'),\n url(r'^embedding/$', 'embedding', name='embedding'),\n url(r'^t(?P[0-9]+)/$', views.triplet, name='triplet'),\n url(r'^p(?P[0-9]+)/$', views.pair, name='pair'),\n url(r'^h(?P[0-9]+)/$', views.helix, name='helix'),\n\n\n url(r'^list/Clear/$', view='Clear', name='Clear')\n)\n","repo_name":"michalstepniewski/TMProteins","sub_path":"myproject/myproject/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
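# The cache-simulator record below formats a random 11-bit address as a binary
# string and lets an Address class split it into tag, index and offset. A minimal
# sketch of that split done with integer arithmetic instead, assuming
# (illustratively) 4-byte blocks and 8 sets -- the real field widths live in
# the repo's AddressClass:

def split_address(addr: int, index_bits: int = 3, offset_bits: int = 2):
    # low bits pick the byte within the block, middle bits pick the set,
    # and whatever remains is the tag
    offset = addr & ((1 << offset_bits) - 1)
    index = (addr >> offset_bits) & ((1 << index_bits) - 1)
    tag = addr >> (offset_bits + index_bits)
    return tag, index, offset

print(split_address(0b10110011010))  # -> (44, 6, 2)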
\" + c[1].data)\n cacheFile.write(line)\n \n totalRates = cache.cntHits+cache.cntMiss\n if totalRates > 0:\n line = \"- - - - -\\n\"\n totalRates = cache.cntHits+cache.cntMiss\n hitsRate = (cache.cntHits*100)/totalRates\n line += \"Hits rate: {:.2f}% | Miss rate: {:.2f}%\\n\".format( hitsRate, 100-hitsRate)\n line += \"- - - - -\\n\"\n cacheFile.write(line)\n\n cacheFile.close()\n\n ##Guardar los datos en txt\n RAMoutput = open(\"RAMoutput.txt\", \"w\")\n\n for c in mainMemo.content:\n line = \"{}\\n\".format(c)\n RAMoutput.write(line)\n\n RAMoutput.close()","repo_name":"rohaquinlop/CacheMemory","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38404628589","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n \"\"\"\n Decision tree Rob House1 -> can either Rob House3/House4\n Two variables: Rob1 Rob2\n \"\"\"\n rob1 = 0 # House before that\n rob2 = 0 # Last house we robbed\n\n for num in nums:\n current = max(num + rob1, rob2)\n rob1 = rob2\n rob2 = current\n\n return rob2","repo_name":"JohnMerlino1235/LeetCodePractice","sub_path":"House Robber/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32203683","text":"from sortedcontainers import SortedDict, SortedSet\nfrom collections import OrderedDict\nfrom sortedcontainers import SortedListWithKey\n\n\nclass ElementWrapper:\n def __init__(self, value):\n self.value = value\n self.count = 1\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return self.value == other.value\n\n def __gt__(self, other):\n return self.value > other.value\n\n def __lt__(self, other):\n return self.value < other.value\n\n def __repr__(self):\n return '{} {}'.format(self.value, self.count)\n\n\nclass ElementOrderWrapper:\n def __init__(self, value, occurence):\n self.value = value\n self.occurence = occurence\n\n def __hash__(self):\n return hash(str(self.value) + str(self.occurence))\n\n def __eq__(self, other):\n return (str(self.value) + str(self.occurence)) == (str(other.value) + str(other.occurence))\n\n # def __gt__(self, other):\n # return self.overall_occurence > other.overall_occurence\n #\n # def __lt__(self, other):\n # return self.overall_occurence < other.overall_occurence\n #\n def __repr__(self):\n return '{} {}'.format(self.value, self.occurence)\n\n\nclass FirstLastList:\n def __init__(self):\n self.sorted_elements = SortedSet()\n self.ordered_elements = OrderedDict()\n\n def count(self):\n return len(self.ordered_elements)\n\n def clear(self):\n self.sorted_elements = SortedSet()\n self.ordered_elements = OrderedDict()\n\n def add(self, element):\n # add in the sorted container\n sorted_element = ElementWrapper(element)\n if sorted_element in self.sorted_elements:\n # increment his count\n sorted_element_idx = self.sorted_elements.index(sorted_element)\n self.sorted_elements[sorted_element_idx].count += 1\n else:\n self.sorted_elements.add(sorted_element)\n\n element_occurence = self.sorted_elements[self.sorted_elements.index(sorted_element)].count\n # add in the ordered container\n ordered_element = ElementOrderWrapper(element, element_occurence)\n self.ordered_elements[ordered_element] = True\n\n def min(self, count):\n count_left = count\n min_items = []\n to_break = False\n for i in 
range(len(self.sorted_elements)):\n min_obj = self.sorted_elements[i]\n for _ in range(min_obj.count):\n count_left -= 1\n min_items.append(min_obj.value)\n if count_left == 0:\n to_break = True\n break\n if to_break:\n break\n\n return min_items\n\n def max(self, count):\n count_left = count\n max_items = []\n to_break = False\n for i in range(1, len(self.sorted_elements)+1):\n max_obj = self.sorted_elements[-i]\n for _ in range(max_obj.count):\n count_left -= 1\n max_items.append(max_obj.value)\n if count_left == 0:\n to_break = True\n break\n if to_break:\n break\n\n return max_items\n\n def first(self, count):\n return list(self.ordered_elements.keys())[:count]\n\n def last(self, count):\n start = len(self.ordered_elements) - count\n if start < 0:\n start = 0\n keys = list(self.ordered_elements.keys())\n return [keys[i] for i in reversed(range(start, len(keys)))]\n\n def remove_all(self, element):\n el_obj = ElementWrapper(element)\n if el_obj not in self.sorted_elements:\n return 0\n # remove from the sorted collection\n el_idx = self.sorted_elements.index(el_obj)\n element_obj = self.sorted_elements[el_idx]\n self.sorted_elements.remove(element_obj)\n\n # remove from the order collection\n for occurence in range(1, element_obj.count + 1):\n el_wrapper = ElementOrderWrapper(element, occurence)\n del self.ordered_elements[el_wrapper]\n\n return element_obj.count\n\n\nls = FirstLastList()\nls.add(5)\nls.add(1)\nls.add(2.5)\nls.add(2)\nls.add(2)\nls.add(2)\nls.add(2)\nls.add(2)\n\n\nprint(ls.min(5))\nprint(ls.max(4))\n\nprint(ls.first(2))\nprint(ls.last(2))\n\nprint('BEFORE REMOVE' + '-'*100)\nprint(ls.remove_all(2))\n\n\nprint(ls.min(5))\nprint(ls.max(2))\n\nprint(ls.first(2))\nprint(ls.last(2))","repo_name":"stanislavkozlovski/data_structures_feb_2016","sub_path":"SoftUni/Exam Preparation/First-Last-List/first_last_list.py","file_name":"first_last_list.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"2611677027","text":"\n# import pygame\nimport random\nfrom sys import exit\n# import os\n# import copy\nimport json\n\nimport pygame.sprite\n\nimport modules.graphics_module as graphics_module\nimport modules.pilot_module as pilot_module\nimport modules.mission_module as mission_module\nfrom settings import *\nimport modules.shop as module_shop\nimport modules.ui_module as ui_module\nimport modules.navigation_module as nav\nfrom modules.vfx_module import VisualEffect\nimport modules.sound_module as sound_module\n\n# press q to toggle debug mode\n# press s to toggle shop if debug mode is enabled\nprint(\"Welcome to the Mission Control Demo\")\nprint(\"To toggle debug mode, press 'q'\")\nprint(\"To toggle shop while debug is enabled, press 's'\")\nprint(\"To toggle combat while debug is enabled, press 'x'\")\nprint(\"To set combat to the testing setup press 'z' while combat is enabled.\")\nprint(\"To return to cockpit while debug is enabled, press 'c'\")\nprint(\"To pause or unpause combat press space\")\nprint(\"To toggle the menu press 'o'\")\nprint(\"To toggle pilot frames press 'p'\")\n\n\n# def create_rect(x, y, width, height):\n# rect = pygame.Rect(screen_width * x, screen_height * y, screen_width * width, screen_height * height)\n# return rect\n\ndef toggle_hidden(manager, name):\n if manager.hidden:\n manager.hidden = False\n print(f\"{name} is now active\")\n else:\n manager.hidden = True\n print(f\"{name} is now hidden\")\n\n\ndef update_fps():\n fps = 
str(int(clock.get_fps()))\n fps_text = text_font.render(fps, 1, pygame.Color(\"coral\"))\n return fps_text\n\n\nclass GameManager:\n def __init__(self):\n self.mode = \"cockpit\"\n self.scene_id = \"intro\"\n self.debug_mode = False\n self.paused = False\n\n # load managers\n self.ui = ui_module.InterfaceManager(self)\n self.mission = mission_module.MissionManager(self)\n self.sound = sound_module.SoundController()\n\n # self.ui.create_button(\"default_button\", \"test_button\", screen_width*0.5, screen_height*0.5)\n\n self.pilots = pygame.sprite.Group()\n\n self.player_inventory = []\n self.player_resources = {\n \"Credits\": 2000,\n \"Fuel\": 10,\n \"Scrap\": 10,\n \"Meds\": 10\n }\n\n self.vfx_group = pygame.sprite.Group()\n\n def spawn_explosion(self, origin):\n explosion = VisualEffect(\"explosion\", None, origin, origin)\n self.vfx_group.add(explosion)\n self.sound.play_sound(\"explosion\")\n\n def spawn_lightning(self, origin):\n lightning = VisualEffect(\"lightning\", None, origin, origin)\n self.vfx_group.add(lightning)\n\n def draw_vfx_group(self):\n self.vfx_group.draw(screen)\n\n def update_graphics(self):\n graphics.draw_black()\n if self.mode == \"shop\":\n graphics.draw_window_frames()\n graphics.draw_shop_headers()\n # shop.highlight_shop_items()\n # shop.draw_shop_item_text()\n shop.update_shop_items()\n elif self.mode == \"cockpit\":\n graphics.draw_cockpit()\n elif self.mode == \"combat\":\n # draw terrain\n graphics.draw_terrain(mission.terrain)\n\n # draw objectives\n game.mission.objectives.draw(screen)\n\n # draw mobile entities\n game.pilots.draw(screen)\n game.mission.enemies.draw(screen)\n\n # draw vfx\n for pilot in game.pilots:\n pilot.draw_vfx()\n for enemy in game.mission.enemies:\n enemy.draw_vfx()\n\n # draw crosshair\n if self.paused and self.ui.selected_pilot is not None:\n try:\n target = self.ui.selected_pilot.target[\"move\"]\n graphics.draw_crosshair(target.pos_x, target.pos_y)\n except(Exception,):\n pass\n\n # draw explosions\n # temp\n try:\n if self.mode == \"combat\":\n if self.paused is False:\n self.vfx_group.update()\n self.vfx_group.draw(screen)\n except(Exception,):\n pass\n\n # self.ui.buttons.draw(screen)\n # self.ui.button_labels.draw(screen)\n\n if game.mode != \"cockpit\":\n graphics.draw_green()\n\n # draw the menu if it's not hidden\n if not game.ui.menu.hidden:\n self.ui.menu.draw_menu()\n\n # draw a character frame on the left side of the screen for each pilot if not hidden\n if not game.ui.status_frame_manager.hidden:\n self.ui.status_frame_manager.draw_status_frames()\n\n def explode_mobs(self):\n for mob in game.pilots:\n if not mob.alive:\n self.spawn_explosion(mob)\n mob.kill()\n for mob in game.mission.enemies:\n if not mob.alive:\n self.spawn_explosion(mob)\n mob.kill()\n\n def update(self):\n if game.mode == \"combat\":\n self.mission.run_combat()\n self.explode_mobs()\n\n # update ui\n self.ui.update()\n\n # render graphics on screen\n self.update_graphics()\n\n\ngame = GameManager()\nmission = game.mission\nshop = module_shop.ShopManager()\ngraphics = graphics_module.GraphicsManager()\ngame.sound.play_music(\"me_map\")\n\n# pilots\npilot_data = json.load(open(\"data/pilot_data.json\", \"r\"))\n\nrose = pilot_module.PilotCharacter(\"Rose\")\nrose.target[\"move\"] = nav.Waypoint(screen_width*0.5, screen_height*0.5)\n# rose.targeting_mode = \"manual\"\nnasha = pilot_module.PilotCharacter(\"Nasha\")\n# roger = pilot_module.PilotCharacter(\"Roger\")\ngame.pilots.add(rose)\ngame.pilots.add(nasha)\n# 
game.mission.load_enemy_for_test_mission(roger)\ngame.mission.spawn_enemy(\"drone\", 500, 500)\nrose.pos_y = 500\nrose.pos_x = 500\n\nwhile True: # game Cycle\n mouse_pos = pygame.mouse.get_pos()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # Quit\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYDOWN:\n # toggle debug mode\n if event.key == pygame.K_q:\n if not game.debug_mode:\n game.debug_mode = True\n print(\"debug_mode enabled\")\n elif game.debug_mode:\n game.debug_mode = False\n print(\"debug_mode disabled\")\n if game.debug_mode:\n if event.key == pygame.K_s:\n # toggle shop\n if game.mode != \"shop\":\n game.mode = \"shop\"\n print(\"shop enabled with debug_mode\")\n else:\n game.mode = \"cockpit\"\n print(\"shop disabled with debug_mode\")\n # toggle combat\n elif event.key == pygame.K_x:\n if game.mode != \"combat\":\n game.mode = \"combat\"\n # switch to combat music\n game.sound.play_music(\"me_rude_awakening\")\n print(\"combat enabled with debug_mode\")\n else:\n game.mode = \"cockpit\"\n # switch to map music\n game.sound.play_music(\"me_map\")\n print(\"combat disabled with debug_mode\")\n # return to cockpit\n elif event.key == pygame.K_c:\n game.mode = \"cockpit\"\n # switch to map music\n game.sound.play_music(\"me_map\")\n # toggle menu\n elif event.key == pygame.K_o:\n toggle_hidden(game.ui.menu, \"menu\")\n game.ui.menu.update_menu_options(game.ui.selected_pilot)\n\n # toggle pilot_status_frames\n elif event.key == pygame.K_p:\n toggle_hidden(game.ui.status_frame_manager, \"pilot_status_frames\")\n\n # load combat test\n if game.debug_mode and game.mode == \"combat\":\n if event.key == pygame.K_z:\n mission.load_combat_test()\n\n # temp function to test teleport\n if event.key == pygame.K_t:\n game.mission.teleport_pilot(rose)\n\n # pause and unpause combat\n if event.key == pygame.K_SPACE and game.mode == \"combat\":\n if not game.paused:\n game.paused = True\n game.ui.selected_pilot = None\n print(\"Combat has been paused\")\n else:\n game.paused = False\n game.ui.selected_pilot = None\n for pilot in game.pilots:\n pilot.deselect()\n print(\"Combat has been unpaused\")\n\n # click to select\n if event.type == pygame.MOUSEBUTTONDOWN and game.ui.click_cooldown == 0:\n # reset cooldown\n game.ui.reset_cooldown()\n\n # select button by clicking\n for button in game.ui.buttons:\n if button.rect.collidepoint(event.pos) and button.status != \"hidden\":\n button.click_button()\n\n # toggle overcharge lights\n for pilot_frame in game.ui.status_frame_manager.status_frame_group:\n if not game.ui.status_frame_manager.hidden:\n for button in pilot_frame.light_group:\n if button.rect.collidepoint(event.pos):\n button.click_button()\n print(pilot_frame.pilot.overcharge_system)\n for button in pilot_frame.toggle_group:\n if button.rect.collidepoint(event.pos):\n button.click_button()\n\n # toggle automatic control mode\n\n # select pilot by clicking\n if game.mode == \"combat\" and game.paused:\n for pilot in game.pilots:\n # check if mouse is on pilot\n if pilot.rect.collidepoint(event.pos):\n # select the pilot if not currently selected\n if pilot != game.ui.selected_pilot:\n game.ui.select_pilot(pilot)\n # deselect the pilot if already selected\n else:\n game.ui.deselect_pilot(pilot)\n # deselect all but the most recently selected pilot\n for pilot in game.pilots:\n if game.ui.selected_pilot != pilot:\n pilot.selected = False\n\n # issue orders to waypoint if pilot is selected\n if game.ui.selected_pilot is not None:\n pilot = 
game.ui.selected_pilot\n mouse_pos = pygame.mouse.get_pos()\n if pilot.targeting_mode == \"manual\":\n print(\"issuing orders to\", pilot.name)\n ui_module.issue_orders(game.ui.selected_pilot, \"waypoint\", mouse_pos)\n # reset cooldown\n # game.ui.click_cooldown = game.ui.click_cooldown_max\n\n # if event.type == pygame.MOUSEBUTTONDOWN and game.ui.click_cooldown == 0:\n # for item in shop.shop_items:\n # item.add_to_cart(game, shop)\n\n game.update()\n\n pygame.display.update()\n clock.tick(60)\n\n\n","repo_name":"DiceCold/Mission_Control","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"18094515268","text":"from django import template\n\nregister = template.Library()\n\n#заменяет отдельные параметры запроса\n@register.simple_tag(takes_context=True)\ndef url_replace(context, **kwargs):\n d = context['request'].GET.copy()\n for k, v in kwargs.items():\n d[k] = v\n return d.urlencode()\n","repo_name":"EvgeniyaKruglova/Callboard","sub_path":"Callboard/Сallboardproject/Callboardapp/templatetags/custom_tags.py","file_name":"custom_tags.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11221051556","text":"__author__ = ''\n\n# HEADER BLOCK: See naturaldocs examples and info: http://www.naturaldocs.org/features/output/\nimport logging\nimport webbrowser\n\nfrom GUI_defs import window\nfrom scandir_index_maker import scandir_index_maker\nfrom show_images import show_images\nfrom upload_images import upload_images\n\n\"\"\"\nfunction: do_local_demo(window)\n\nPurpose:\n When we can't access the sona_server we can provide a way to DEMO the program using stored images\n\nParameters: \n\n window (window_handle): -- the GUI window object to display the DEMO on\n\nReturns: None\n\nDescription: \n\n When we can't access the sona_server we can provide a way to DEMO the program using stored images\n\nRaises: None\n\n\nUsage: do_local_demo(window)\n\nDependencies: Any libraries, devices, etc. 
that must be present.\n\n src.GUI_defs -- library of GUI definitions\n\nInheritance: None\n\nTesting: TODO: To be determined\n\nWarnings: None\n\nUpdates: \n\n Scott McGregor, modified 22-Nov-2021, Added header documentation\n\nNotes: None\n\nTO DO: None\n\n\"\"\"\n\n\nimport os\n\nfrom time import sleep\nimport glob\n\nimport PySimpleGUI as sg\n\nfrom image_processing import convert_to_bytes\nfrom sonascan_file_paths import demo_dir, demo_stl\n\n\ndef update_window_on_scan(scan_image_name, full_scan_id, image_counter, max_cameras, main_window):\n logging.debug(f'\\nENTERING: update_window_on_scan({scan_image_name}, {full_scan_id}, {image_counter}, {max_cameras}, {main_window})')\n\n image_name = os.path.basename(scan_image_name)\n main_window['_ACTION_STATUS_LINE_3_'].update(full_scan_id + \"/\" + image_name + \" new scan image.\")\n main_window['_ACTION_STATUS_LINE_2_'].update(scan_image_name)\n main_window['_PROGRESS_BAR_'].update(100 * image_counter / max_cameras)\n thumbnail = convert_to_bytes(scan_image_name, [96, 54])\n main_window['_IMAGE_ELEMENT_'].update(data=thumbnail)\n # window['_IMAGE_ELEMENT_'].update(data=camera_file_path)\n main_window.Refresh()\n image_counter = image_counter + 1\n\n logging.debug(f'EXITING: update_window_on_scan() RETURNS: {image_counter}\\n')\n logging.debug(\n f'EXITING: update_window_on_scan({scan_image_name}, {full_scan_id}, {image_counter}, {max_cameras}, {main_window})\\n')\n\n return image_counter\n\n\ndef demo(the_scan_data):\n print(f'\\n\\tENTERING: demo({the_scan_data})')\n\n main_window = the_scan_data['window']\n # Do this if there are not enough cameras responding:\n full_scan_id = \"DEMO\"\n the_scan_data['scanner_id'] = 'DEMO'\n the_scan_data['long_scan_id'] = 'DEMO'\n the_scan_data['scan_dir'] = demo_dir\n the_scan_data['scan_id'] = 'DEMO'\n demo_images_list = glob.glob(demo_dir+\"/*.jpg\")\n max_cameras = len(demo_images_list)\n # print(f'\\t{max_cameras}, {demo_images_list}')\n sorted_images_list = sorted(demo_images_list)\n # print(f'\\t{max_cameras}, {sorted_images_list}')\n image_counter = 0\n main_window['_ACTION_STATUS_LINE_1_'].update(\"SCANNING (DEMO MODE)\")\n i = 1\n for image_file in sorted_images_list:\n update_window_on_scan(image_file, full_scan_id, image_counter, max_cameras, window)\n main_window['_PROGRESS_BAR_'].update(100 * i / 18)\n sleep(.5) # delay to simulate actual camera capture time per image\n i = i + 1\n image_counter = image_counter + 1\n sleep(1)\n main_window['_ACTION_STATUS_LINE_1_'].update(\"SCANNING COMPLETE (DEMO MODE)\")\n main_window.Refresh()\n sleep(1)\n\n # No need to simulate, We will actually upload it!\n the_scan_data['images_list'] = sorted_images_list\n the_scan_data['scanner_id'] = 'DEMO'\n the_scan_data['long_scan_id'] = 'DEMO'\n the_scan_data['scan_dir'] = demo_dir\n the_scan_data['scan_id'] = 'DEMO'\n print(f'\\tCALLING scandir_index_maker()')\n scandir_index_maker(the_scan_data)\n print(f'\\tRETURNING FROM scandir_index_maker()')\n # print(f'\\t(Demo) scan_dir = {the_scan_data[\"scan_dir\"]}')\n # print(f'\\t{the_scan_data}')\n\n scan_dir = the_scan_data['scan_dir']\n # print(f'\\tshow_images(): scan_dir = {scan_dir}')\n # print(f'\\tshow_images(): show_images() = {the_scan_data}' )\n\n if the_scan_data['Show images html']:\n # print(f'\\tShow images html = {the_scan_data[\"scan_dir\"]}')\n # print(f'\\t{the_scan_data}')\n\n print(f'\\n\\t\\tENTERING show_images({the_scan_data})')\n\n url_to_show = 'file://' + os.path.join(os.path.realpath(scan_dir), 'index.html')\n # 
print(f'\\t\\tshow_images(): scan_dir = {os.path.realpath(scan_dir)}')\n webbrowser.open(url_to_show)\n\n print(f'\\t\\tEXITING show_images({the_scan_data})\\n')\n\n # window.perform_long_operation(lambda: show_images(the_scan_data), '-END SHOW IMAGES-')\n\n print(f'\\tsimulate_unloading_flag = {the_scan_data[\"Simulate uploading\"]}')\n\n if not the_scan_data['Simulate uploading']:\n\n # window.write_event_value('_UPLOAD_IMAGES_', the_scan_data)\n print(f'\\n\\tCALLING upload_images()')\n upload_images(the_scan_data)\n print(f'\\tRETURNING FROM upload_images()\\n')\n\n else:\n\n # If we get here, we aren't going to actually upload the DEMO files, we'll just simulate it.\n main_window['_ACTION_STATUS_LINE_1_'].update(\"UPLOADING TO SERVER (DEMO MODE)\")\n main_window['_ACTION_STATUS_LINE_3_'].update(\"\")\n image_counter = 0\n i = 1\n for image_file in sorted_images_list:\n # scan_image_name = os.path.basename(image_file)\n update_window_on_scan(image_file, full_scan_id, image_counter, max_cameras, window)\n main_window['_PROGRESS_BAR_'].update(100 * i / 18)\n i = i + 1\n sleep(.5) # delay to simulate file upload time\n\n sleep(1)\n main_window['_ACTION_STATUS_LINE_1_'].update(\"UPLOADING COMPLETE (DEMO MODE)\")\n main_window.Refresh()\n sleep(1)\n\n if not the_scan_data['Simulate modeling']:\n\n print(f'\\tEXITING 2: demo() RETURNS: {the_scan_data})\\n')\n return the_scan_data\n\n # If we get here, we aren't going to actually process the DEMO files, we'll just simulate it.\n main_window['_ACTION_STATUS_LINE_1_'].update(\"PROCESSING MODEL (DEMO MODE)\")\n main_window.Refresh()\n\n for i in range(1, 11):\n main_window['_PROGRESS_BAR_'].update(100 * i / 10)\n sleep(.5)\n\n # Display a message that model is complete\n sleep(1)\n main_window['_ACTION_STATUS_LINE_1_'].update(\"PROCESSING COMPLETE (DEMO MODE)\")\n main_window.Refresh()\n sleep(1)\n\n if not the_scan_data['Show 3D model']:\n return the_scan_data\n if sg.running_mac():\n sg.execute_command_subprocess('/Applications/meshlab.app/Contents/MacOS/meshlab', demo_stl,\n wait=False, cwd=None, pipe_output=False)\n else: # sg.running_linux:\n sg.execute_command_subprocess('/usr/bin/meshlab', demo_stl,\n wait=False, cwd=None, pipe_output=False)\n sg.execute_command_subprocess('wmctrl', '-r', 'MeshLab v1.3', '-e', '1', '50', '50', '500', '500',\n wait=False, cwd=None, pipe_output=False)\n print(\"\\tlaunched meshlab to view 3D model for photoscene:\", \"photoscene-DEMO\")\n\n print(f'\\tEXITING 3: demo() RETURNS: {the_scan_data})\\n')\n return the_scan_data\n\n\nif __name__ == '__main__': # use for unit testing\n window_x = sg.Window(\"\")\n scan_data = {'window': window_x}\n new_scan_data = demo(scan_data)\n","repo_name":"mcgregor94086/long_op_src","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7808614462","text":"\"\"\"Converter for LLaMa checkpoints in the original format from Meta.\"\"\"\r\n\r\nimport argparse\r\nimport gc\r\nimport glob\r\nimport os\r\nimport json\r\n\r\nimport ctranslate2\r\nimport numpy as np\r\nimport sentencepiece as spm\r\nimport torch\r\n\r\nfrom ctranslate2.converters.utils import permute_for_sliced_rotary\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\r\n )\r\n parser.add_argument(\r\n \"--model_dir\", required=True, help=\"Path to the model directory.\"\r\n )\r\n parser.add_argument(\r\n 
\"--tokenizer_model\", required=True, help=\"Path to the tokenizer model.\"\r\n )\r\n ctranslate2.converters.Converter.declare_arguments(parser)\r\n args = parser.parse_args()\r\n converter = LlamaConverter(args.model_dir, args.tokenizer_model)\r\n converter.convert_from_args(args)\r\n\r\n\r\nclass LlamaConverter(ctranslate2.converters.Converter):\r\n def __init__(self, model_dir, tokenizer_model_path):\r\n self._model_dir = model_dir\r\n self._tokenizer_model_path = tokenizer_model_path\r\n\r\n def _load(self):\r\n sp = spm.SentencePieceProcessor(self._tokenizer_model_path)\r\n tokens = [sp.id_to_piece(i) for i in range(len(sp))]\r\n\r\n params_path = os.path.join(self._model_dir, \"params.json\")\r\n with open(params_path, encoding=\"utf-8\") as params_file:\r\n params = json.load(params_file)\r\n\r\n spec = ctranslate2.specs.TransformerDecoderModelSpec.from_config(\r\n params[\"n_layers\"],\r\n params[\"n_heads\"],\r\n activation=ctranslate2.specs.Activation.SWISH,\r\n pre_norm=True,\r\n ffn_glu=True,\r\n rms_norm=True,\r\n rotary_dim=0,\r\n rotary_interleave=False,\r\n )\r\n\r\n spec.register_vocabulary(tokens)\r\n spec.register_file(self._tokenizer_model_path)\r\n\r\n pattern = os.path.join(self._model_dir, \"consolidated.0*.pth\")\r\n\r\n for path in sorted(glob.glob(pattern)):\r\n model = torch.load(path, map_location=\"cpu\")\r\n\r\n self.set_decoder_spec(spec.decoder, model)\r\n\r\n del model\r\n\r\n # Finalize fused self attention input projection.\r\n for layer_spec in spec.decoder.layer:\r\n linear_spec = layer_spec.self_attention.linear[0]\r\n\r\n wi = linear_spec.weight\r\n wi = wi.reshape(wi.shape[0] * wi.shape[1], wi.shape[2])\r\n\r\n wq, wk, wv = np.split(wi, 3)\r\n\r\n wq = permute_for_sliced_rotary(wq, params[\"n_heads\"])\r\n wk = permute_for_sliced_rotary(wk, params[\"n_heads\"])\r\n\r\n linear_spec.weight = np.concatenate([wq, wk, wv])\r\n\r\n return spec\r\n\r\n def set_decoder_spec(self, spec, model):\r\n spec.scale_embeddings = False\r\n\r\n spec.layer_norm.gamma = model.pop(\"norm.weight\").clone().numpy()\r\n spec.embeddings.weight = append(\r\n spec.embeddings.weight,\r\n model.pop(\"tok_embeddings.weight\").clone().numpy(),\r\n axis=1,\r\n )\r\n spec.projection.weight = append(\r\n spec.projection.weight, model.pop(\"output.weight\").clone().numpy(), axis=0\r\n )\r\n\r\n for i, layer_spec in enumerate(spec.layer):\r\n self.set_decoder_layer_spec(i, layer_spec, model)\r\n gc.collect()\r\n\r\n def set_decoder_layer_spec(self, layer, spec, model):\r\n prefix = \"layers.%d\" % layer\r\n\r\n spec.self_attention.layer_norm.gamma = (\r\n model.pop(\"%s.attention_norm.weight\" % prefix).clone().numpy()\r\n )\r\n spec.ffn.layer_norm.gamma = (\r\n model.pop(\"%s.ffn_norm.weight\" % prefix).clone().numpy()\r\n )\r\n\r\n wq = model.pop(\"%s.attention.wq.weight\" % prefix).clone().numpy()\r\n wk = model.pop(\"%s.attention.wk.weight\" % prefix).clone().numpy()\r\n wv = model.pop(\"%s.attention.wv.weight\" % prefix).clone().numpy()\r\n wo = model.pop(\"%s.attention.wo.weight\" % prefix).clone().numpy()\r\n\r\n spec.self_attention.linear[0].weight = append(\r\n spec.self_attention.linear[0].weight, np.stack([wq, wk, wv]), axis=1\r\n )\r\n spec.self_attention.linear[1].weight = append(\r\n spec.self_attention.linear[1].weight, wo, axis=1\r\n )\r\n\r\n w1 = model.pop(\"%s.feed_forward.w1.weight\" % prefix).clone().numpy()\r\n w2 = model.pop(\"%s.feed_forward.w2.weight\" % prefix).clone().numpy()\r\n w3 = model.pop(\"%s.feed_forward.w3.weight\" % prefix).clone().numpy()\r\n\r\n 
spec.ffn.linear_0.weight = append(spec.ffn.linear_0.weight, w1, axis=0)\r\n spec.ffn.linear_0_noact.weight = append(\r\n spec.ffn.linear_0_noact.weight, w3, axis=0\r\n )\r\n spec.ffn.linear_1.weight = append(spec.ffn.linear_1.weight, w2, axis=1)\r\n\r\n\r\ndef append(current, weight, axis):\r\n if isinstance(current, np.ndarray):\r\n weight = np.concatenate([current, weight], axis=axis)\r\n return weight\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"moseshu/deeplearning","sub_path":"llama-finetune/llama_converter.py","file_name":"llama_converter.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"7085268011","text":"\"\"\"\nProject Euler Problem 7: 10,001st prime\n\"\"\"\n\n# What is the 10,001st prime number?\n\n# NOTE: this solution implements the pseudocode explained in the posted solutions on projecteuler.net\n# url: (https://projecteuler.net/overview=007)\n\n\ndef is_prime(n):\n if n == 1:\n return False\n elif n < 4:\n return True\n elif n%2 == 0:\n return False\n elif n < 9:\n return True\n elif n%3 == 0:\n return False\n else:\n r = int(n**(1/2))\n f = 5\n while f <= r:\n if n%f == 0:\n return False\n elif n%(f+2) == 0:\n return False\n f += 6\n return True\n\n\nlimit = 10001\ncount = 1\ncandidate = 1\nwhile count < limit:\n candidate += 2\n if is_prime(candidate):\n count = count + 1\n\nprint(candidate)\n","repo_name":"wandrewjam/project-euler","sub_path":"python/problem7_soln.py","file_name":"problem7_soln.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37849536471","text":"def minNumberOfCoinsForChange(n, denoms):\n minCoins = [float('inf') for amount in range(n+1)]\n minCoins[0] = 0\n\n for denom in denoms:\n for money in range(1, n+1):\n if money >= denom:\n minCoins[money] = min((minCoins[ money - denom] + 1), minCoins[money])\n \n return minCoins[n] if minCoins[n] != float('inf') else -1","repo_name":"MinaKhamesi/Problem-solving-questions","sub_path":"Medium/MinNumberOfCoinsToChange.py","file_name":"MinNumberOfCoinsToChange.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6341768219","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch import optim, autograd\r\n\r\nfrom dolfin import *\r\nimport mshr\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport os\r\nimport time\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nimport pylab as pylt\r\n\r\nparameters['allow_extrapolation'] = True\r\nparameters[\"form_compiler\"][\"optimize\"] = True # optimize compiler options\r\n# optimize code when compiled in c++\r\nparameters[\"form_compiler\"][\"cpp_optimize\"] = True\r\nset_log_active(False) # handling of log messages, warnings and errors.\r\n\r\n\r\n# Try to solve the poisson equation:\r\n''' Solve the following PDE\r\n-\\Delta u(x) = 1, x\\in \\Omega,\r\nu(x) = 0, x\\in \\partial \\Omega\r\n\\Omega = (-1,1) * (-1,1) \\ [0,1) *{0}\r\n'''\r\n\r\n# Expression for exact solution\r\n\r\n\r\nclass Expression_u(UserExpression):\r\n\r\n def __init__(self, omega, **kwargs):\r\n super().__init__(**kwargs) # This part is new!\r\n self.omega = omega\r\n\r\n def eval(self, value, x):\r\n\r\n r = sqrt(x[0]*x[0] + x[1]*x[1])\r\n theta = 
np.arctan2(abs(x[1]), abs(x[0]))\r\n\r\n if x[0] < 0 and x[1] > 0:\r\n theta = pi - theta\r\n\r\n elif x[0] <= 0 and x[1] <= 0:\r\n theta = pi + theta\r\n\r\n if r == 0.0:\r\n value[0] = 0.0\r\n else:\r\n value[0] = pow(r, pi/self.omega)*sin(theta*pi/self.omega)\r\n\r\n def eval_at_point(self, x):\r\n\r\n r = np.sqrt(x[0]*x[0] + x[1]*x[1])\r\n theta = np.arctan2(abs(x[1]), abs(x[0]))\r\n\r\n if x[0] < 0 and x[1] > 0:\r\n theta = pi - theta\r\n\r\n elif x[0] <= 0 and x[1] <= 0:\r\n theta = pi + theta\r\n\r\n if r == 0.0:\r\n value = 0.0\r\n else:\r\n value = pow(r, pi/self.omega)*sin(theta*pi/self.omega)\r\n\r\n return value\r\n\r\n def value_shape(self):\r\n return ()\r\n\r\n\r\nclass PowerReLU(nn.Module):\r\n \"\"\"\r\n Implements sigma(x)^(power)\r\n Applies a power of the rectified linear unit element-wise.\r\n\r\n NOTE: inplace may not be working.\r\n Can set inplace for inplace operation if desired.\r\n BUT I don't think it is working now.\r\n\r\n INPUT:\r\n x -- size (N,*) tensor where * is any number of additional\r\n dimensions\r\n OUTPUT:\r\n y -- size (N,*)\r\n \"\"\"\r\n\r\n def __init__(self, inplace=False, power=3):\r\n super(PowerReLU, self).__init__()\r\n self.inplace = inplace\r\n self.power = power\r\n\r\n def forward(self, input):\r\n y = F.relu(input, inplace=self.inplace)\r\n return torch.pow(y, self.power)\r\n\r\n\r\ndef swish(x):\r\n return x*torch.sigmoid(x)\r\n\r\n\r\nclass Block(nn.Module):\r\n \"\"\"\r\n Implementation of the block used in the Deep Ritz\r\n Paper\r\n\r\n Parameters:\r\n in_N -- dimension of the input\r\n width -- number of nodes in the interior middle layer\r\n out_N -- dimension of the output\r\n phi -- activation function used\r\n \"\"\"\r\n\r\n def __init__(self, in_N, width, out_N, phi=PowerReLU()):\r\n super(Block, self).__init__()\r\n # create the necessary linear layers\r\n self.L1 = nn.Linear(in_N, width)\r\n self.L2 = nn.Linear(width, out_N)\r\n # choose appropriate activation function\r\n self.phi = nn.Tanh()\r\n #self.phi = phi\r\n #self.phi = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n return self.phi(self.L2(self.phi(self.L1(x)))) + x\r\n\r\n\r\nclass drrnn(nn.Module):\r\n \"\"\"\r\n drrnn -- Deep Ritz Residual Neural Network\r\n\r\n Implements a network with the architecture used in the\r\n deep ritz method paper\r\n\r\n Parameters:\r\n in_N -- input dimension\r\n out_N -- output dimension\r\n m -- width of layers that form blocks\r\n depth -- number of blocks to be stacked\r\n phi -- the activation function\r\n \"\"\"\r\n\r\n def __init__(self, in_N, m, out_N, depth=4, phi=PowerReLU()):\r\n super(drrnn, self).__init__()\r\n # set parameters\r\n self.in_N = in_N\r\n self.m = m\r\n self.out_N = out_N\r\n self.depth = depth\r\n self.phi = nn.Tanh()\r\n # list for holding all the blocks\r\n self.stack = nn.ModuleList()\r\n\r\n # add first layer to list\r\n self.stack.append(nn.Linear(in_N, m))\r\n\r\n # add middle blocks to list\r\n for i in range(depth):\r\n self.stack.append(Block(m, m, m))\r\n\r\n # add output linear layer\r\n self.stack.append(nn.Linear(m, out_N))\r\n\r\n def forward(self, x):\r\n # first layer\r\n for i in range(len(self.stack)):\r\n x = self.stack[i](x)\r\n return x\r\n\r\n\r\ndef weights_init(m):\r\n if isinstance(m, (nn.Conv2d, nn.Linear)):\r\n nn.init.xavier_normal_(m.weight)\r\n nn.init.constant_(m.bias, 0.0)\r\n\r\n\r\ndef get_interior_points(N=150, d=2):\r\n \"\"\"\r\n randomly sample N points from the interior of the L-shaped domain (-1,1)^2 \\ [0,1)x(-1,0)\r\n \"\"\"\r\n # return points for each block\r\n n = 
N//3\r\n\r\n x = (torch.rand(2*n)*2 - 1).unsqueeze(-1)\r\n y = torch.rand(2*n).unsqueeze(-1)\r\n X1 = torch.cat((x, y), dim=1)\r\n\r\n X2 = -torch.rand(n, 2)\r\n X_final = torch.cat((X1, X2), dim=0)\r\n\r\n return X_final\r\n\r\n\r\ndef get_boundary_points(N=33):\r\n index = torch.rand(N, 1)\r\n index1 = torch.rand(N, 1) * 2 - 1\r\n # x in (0,1) y = 0\r\n xb1 = torch.cat((index, torch.zeros_like(index)), dim=1)\r\n # x = 1 y in (0,1)\r\n xb2 = torch.cat((torch.ones_like(index1), index), dim=1)\r\n # x in (-1,1) y = 1\r\n xb3 = torch.cat((index1, torch.ones_like(index)), dim=1)\r\n # x = -1 y in (-1,1)\r\n xb4 = torch.cat((torch.full_like(index1, -1), index1), dim=1)\r\n # x in (-1,0) y = -1\r\n xb5 = torch.cat((-index, torch.full_like(index1, -1)), dim=1)\r\n # x = 0 y in (-1,0)\r\n xb6 = torch.cat((torch.full_like(index1, 0), -index), dim=1)\r\n xb = torch.cat((xb1, xb2, xb3, xb4, xb5, xb6), dim=0)\r\n\r\n return xb\r\n\r\n\r\ndef get_interior_boundary_mesh(coords):\r\n\r\n xr = []\r\n xb = []\r\n\r\n for z in coords:\r\n if z[0] >= 0.99 or z[1] >= 0.99 or z[0] <= -0.99 or z[1] <= -0.99:\r\n xb.append(z)\r\n\r\n elif(near(z[1], 0) and z[0] >= 0):\r\n xb.append(z)\r\n\r\n elif(near(z[0], 0) and z[1] <= 0):\r\n xb.append(z)\r\n else:\r\n xr.append(z)\r\n\r\n xr = np.array(xr)\r\n xb = np.array(xb)\r\n\r\n xr = torch.tensor(xr).float()\r\n xb = torch.tensor(xb).float()\r\n\r\n return xr, xb\r\n\r\n\r\ndef inside_domain(Z):\r\n Z_f = Z.clone()\r\n\r\n for i, z in enumerate(Z):\r\n if (z[0] > 1e-16) & (z[1] < 1e-16):\r\n Z_f[i] = np.nan\r\n\r\n return Z_f\r\n\r\n\r\n# 65,225,833,3201,12545,49665,197633,225,833\r\ndef main(beta_, dof_):\r\n\r\n torch.cuda.empty_cache()\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n print('device used is: ', device)\r\n\r\n plt_exact = False\r\n plt_pred = True\r\n OT_mesh = True\r\n eval_OT = False\r\n save_mesh = False\r\n save_sol = True\r\n save_coll_points = True\r\n training = False\r\n\r\n omega = 3/2*pi\r\n dof_OT = dof_\r\n domain_vertices = [Point(0.0, 0.0),\r\n Point(1.0, 0.0),\r\n Point(1.0, 1.0),\r\n Point(-1.0, 1.0),\r\n Point(-1.0, -1.0), Point(0.0, -1.0)]\r\n\r\n geometry = mshr.Polygon(domain_vertices)\r\n\r\n max_epochs = 50000\r\n lr = 1e-4\r\n in_N = 2\r\n m = 20\r\n out_N = 1\r\n depth = 4\r\n full_path = os.path.realpath(__file__)\r\n path, filename = os.path.split(full_path)\r\n\r\n string_mesh = f'mesh_OT/mesh_OT_0.44_{dof_OT}.xml.gz'\r\n mesh_OT = Mesh(string_mesh)\r\n \r\n geometry = mshr.Polygon(domain_vertices)\r\n df = pd.read_csv('coords_to_res.csv')\r\n if dof_OT in df['coords'].values:\r\n res = df[df['coords'] == dof_OT]['res']\r\n else:\r\n while dof_OT not in df['coords'].values:\r\n dof_OT += 1\r\n res = df[df['coords'] == dof_OT]['res']\r\n mesh_del = mshr.generate_mesh(geometry, res)\r\n\r\n V = FunctionSpace(mesh_OT, \"CG\", 1) # function space for solution u\r\n u_exp = Expression_u(omega, degree=5)\r\n coords = V.tabulate_dof_coordinates()\r\n xr_mesh, xb_mesh = get_interior_boundary_mesh(coords)\r\n \r\n if not eval_OT:\r\n V = FunctionSpace(mesh_del, \"CG\", 1) # function space for solution u\r\n u = Function(V)\r\n u_best = Function(V)\r\n coords = V.tabulate_dof_coordinates()\r\n mesh = mesh_del\r\n \r\n Nr = len(xr_mesh)\r\n Nb = len(xb_mesh)\r\n \r\n if OT_mesh:\r\n xr = xr_mesh\r\n xb = xb_mesh\r\n else:\r\n xr = get_interior_points(Nr)\r\n xb = get_boundary_points(Nb)\r\n\r\n if plt_exact:\r\n u = interpolate(u_exp, V)\r\n pylt.figure(figsize=(10, 10))\r\n p = plot(u)\r\n # set 
colormap\r\n p.set_cmap(\"coolwarm\")\r\n # pylt.colorbar(p)\r\n pylt.xlabel('x')\r\n pylt.ylabel('y')\r\n pylt.show()\r\n\r\n var_name = f'/Nets_DGM/dof_{dof_OT}_m_{m}_depth_{depth}_beta_{beta}_lr_{lr}_epochs_{max_epochs}/Var/vars_coll_{OT_mesh}'\r\n net_name = f'/Nets_DGM/dof_{dof_OT}_m_{m}_depth_{depth}_beta_{beta}_lr_{lr}_epochs_{max_epochs}/Net_coll_{OT_mesh}'\r\n var_model = path + var_name\r\n net_model = path + net_name\r\n os.makedirs(var_model, exist_ok=True)\r\n os.makedirs(net_model, exist_ok=True)\r\n\r\n if save_mesh:\r\n File(net_model + f'/Mesh/mesh_{OT_mesh}.pvd') << mesh\r\n\r\n model = drrnn(in_N, m, out_N, depth).to(device)\r\n model.apply(weights_init)\r\n optimizer = optim.Adam(model.parameters(), lr=lr)\r\n\r\n best_epoch = 0\r\n best_loss = 1e5\r\n\r\n t0 = time.time()\r\n XB = xb.detach().cpu().numpy()\r\n values = torch.tensor([u_exp.eval_at_point(x) for x in XB]).to(device)\r\n rel_tol = 1\r\n loss_prev = 1\r\n epoch = 0\r\n\r\n while True:\r\n\r\n if not training or (rel_tol <= 1e-5 or epoch == max_epochs+1):\r\n break\r\n\r\n # Sample random points at each iteration\r\n #xb = get_boundary_points(N=Nb)\r\n #xr = get_interior_points(N=Nr)\r\n\r\n # save collocation of points once\r\n if epoch == 2 and save_coll_points:\r\n np.save(net_model + f'/collocation_points.npy',\r\n xr.detach().cpu().numpy())\r\n np.save(net_model + f'/boundary_points.npy',\r\n xb.detach().cpu().numpy())\r\n\r\n xr = xr.to(device)\r\n xb = xb.to(device)\r\n\r\n XB = xb.detach().cpu().numpy()\r\n values = torch.tensor([u_exp.eval_at_point(x) for x in XB]).to(device)\r\n output_b = model(xb)\r\n\r\n # loss function for the inside of the domain\r\n xr.requires_grad_()\r\n output_r = model(xr)\r\n\r\n grad = autograd.grad(outputs=output_r, inputs=xr, grad_outputs=torch.ones_like(output_r),\r\n create_graph=True, retain_graph=True, only_inputs=True)[0]\r\n\r\n laplacian = autograd.grad(outputs=grad, inputs=xr, grad_outputs=torch.ones_like(grad),\r\n create_graph=True, retain_graph=True, only_inputs=True)[0]\r\n\r\n # array to store values of laplacian\r\n loss_r = torch.sum(torch.square(laplacian), dim=1)\r\n loss_r = torch.mean(loss_r)\r\n loss_b = beta*torch.mean(torch.square(output_b.squeeze() - values))\r\n loss = loss_r + loss_b\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if epoch % 1000 == 0:\r\n\r\n u.vector()[:] = model(torch.tensor(\r\n coords).float().to(device)).detach().cpu().squeeze().numpy()\r\n\r\n L2_err = np.sqrt(assemble((u - u_exp)*(u - u_exp)\r\n * dx(mesh))/assemble(u_exp*u_exp*dx(mesh)))\r\n\r\n if loss.item() < best_loss:\r\n best_loss = loss\r\n best_epoch = epoch\r\n best_err = L2_err\r\n torch.save(model.state_dict(), net_model +\r\n f'/deep_ritz.mdl')\r\n\r\n rel_tol = abs(loss - loss_prev)/loss_prev\r\n loss_prev = loss\r\n np.savez(var_model + f'/vars_{epoch}.npz', time=time.time() - t0, loss_r=loss_r.item(\r\n ), loss_b=loss_b.item(), loss=loss_r.item() + loss_b.item(), err=L2_err)\r\n\r\n print('epoch:', epoch, 'loss:', loss.item(), 'loss_r:',\r\n loss_r.item(), 'loss_b:', loss_b.item())\r\n\r\n epoch += 1\r\n\r\n #print('best epoch:', best_epoch, 'best loss:',\r\n # best_loss, 'L2_err', best_err)\r\n \r\n\r\n # plot figure\r\n if plt_pred:\r\n model.load_state_dict(torch.load(net_model + f'/deep_ritz.mdl'))\r\n \r\n u_best.vector()[:] = model(torch.tensor(\r\n coords).float().to(device)).detach().cpu().squeeze().numpy()\r\n if save_sol:\r\n File(net_model + f'/u.pvd') << u_best\r\n with torch.no_grad():\r\n\r\n x = 
torch.linspace(-1, 1, 1001)\r\n y = torch.linspace(-1, 1, 1001)\r\n\r\n X, Y = torch.meshgrid(x, y)\r\n Z = torch.cat(\r\n (Y.flatten()[:, None], Y.T.flatten()[:, None]), dim=1)\r\n Z_f = inside_domain(Z)\r\n Z_f = Z_f.to(device)\r\n pred = model(Z_f)\r\n\r\n plt.figure()\r\n pred = pred.cpu().numpy()\r\n pred = pred.reshape(1001, 1001)\r\n\r\n ax = plt.subplot(1, 1, 1)\r\n plt.imshow(pred, interpolation='nearest', cmap='coolwarm',\r\n extent=[-1, 1, -1, 1],\r\n origin='lower', aspect='auto')\r\n plt.scatter(xr[:, 0].detach().cpu().numpy(), xr[:, 1].detach(\r\n ).cpu().numpy(), c='black', marker='o', s=0.5)\r\n plt.scatter(xb[:, 0].detach().cpu().numpy(), xb[:, 1].detach(\r\n ).cpu().numpy(), c='green', marker='x', s=5, alpha=0.7)\r\n\r\n #divider = make_axes_locatable(ax)\r\n #cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\r\n #plt.colorbar(h, cax=cax)\r\n plt.savefig(net_model + f'/sol.png')\r\n # plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n #dof_to_Nr = {65:33, 225: 161, 833: 705, 3201:2945}\r\n #beta_vec = [1,10,100,500,1000]\r\n beta_vec = [1000]\r\n dof_vec = [833]\r\n for beta in beta_vec:\r\n for dof in dof_vec:\r\n main(beta, dof)\r\n","repo_name":"Zarasim/Poisson_equation_2D_dGM_dRM","sub_path":"DGM.py","file_name":"DGM.py","file_ext":"py","file_size_in_byte":14153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13934373857","text":"import numpy as np\n\nfrom utils import ts2windows, l1norm, nearest_neighbor\n\n\ndef smoothness(win_dist, neighbr_idx, mappedw):\n map_dist = l1norm(mappedw, mappedw[neighbr_idx.flatten()])\n win_dist = np.where(win_dist == 0, 1e-9, win_dist)\n return np.sum(map_dist / win_dist) / len(win_dist)\n\n\ndef smoothness_drop(arr, mapped_arr, time_delay=1, patience=0, min_inc=0.01):\n argmin_win = time_delay\n since_last_min = 0\n i = 1\n while i < len(arr) // time_delay:\n win_sz = max((1, i * time_delay))\n windows = ts2windows(arr, win_sz, time_delay)\n mappedw = ts2windows(mapped_arr, win_sz, time_delay)\n win_dist, neighbr_idx = nearest_neighbor(windows)\n smooth = smoothness(win_dist, neighbr_idx, mappedw)\n smooth = np.round(smooth, 1)\n if i > 1:\n smooth_inc = (smooth - smooth_old) / smooth_old\n print(\"{}\\t{} pts: smoothness {:.1f} ({:.1f}%)\".format(\n since_last_min, win_sz, smooth, smooth_inc * 100))\n if smooth < smooth_old and abs(smooth_inc) >= min_inc:\n smooth_old = smooth\n argmin_win = win_sz\n since_last_min = 0\n else:\n since_last_min += 1\n\n if since_last_min > patience:\n break\n else:\n smooth_old = smooth\n\n i += 1 + since_last_min\n\n return argmin_win\n\n","repo_name":"procter-gamble-tech/window-size-NN","sub_path":"smoothness_mapping.py","file_name":"smoothness_mapping.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37578629194","text":"from django.urls import path\nfrom .views import statistic, rating, upload_rating_ajax\n\napp_name = 'rating'\n\nurlpatterns = [\n path('statistic/', statistic, name='statistic'),\n path('rating/', rating, name='rating'),\n path('uploadRating/', upload_rating_ajax),\n]","repo_name":"karamzi/bowl_spb","sub_path":"rating/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44009967341","text":"def cancel(a,b):\n val = a/b\n for i in [0,1]:\n for j in [0,1]:\n if str(b)[j] == 
\"0\":\n continue\n if int(str(a)[i])/int(str(b)[j]) == val and str(b)[1-j] == str(a)[1-i]:\n return True\n\n return False\n\nnume = 1\ndenom = 1\n\nfor i in range(11,99):\n for j in range(i+1,99):\n if not (i%10 == 0 and j%10 == 0) and cancel(i,j):\n nume *= i\n denom *= j\n\nfrom fractions import Fraction\n\nprint(Fraction(nume, denom))\n","repo_name":"polybeandip/project-euler","sub_path":"p33.py","file_name":"p33.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31036142933","text":"#coding=utf-8\nimport os\nimport sys\nimport random\n\n\"\"\"\n排序思想:\n它重复地走访要排序的数列,一次比较两个元素,如果他们的顺序错误就把他们交换过来。\n走访数列的工作是重复地进行直到没有再需要交换,也就是说该数列已经排序完成。\n例如:源数据为:[36, 98, 23, 8, 98, 33, 60, 7, 75, 96] 共10个数据\n第一次比较的数据个数为0-9个数据,他们两两比较,最后把10个数据中最大的数据存储到数据的最后一位\n结果为:[36, 23, 8, 98, 33, 60, 7, 75, 96, 98]\n第二次比较的数据个数为0-8,因为第一次比较已经把最带的数据放到了最后一位,所以只比较前9个数据\n比较的结果为:[23, 8, 36, 33, 60, 7, 75, 96, 98, 98]\n第三次比较的数据个数为0-7,因为第二次比较已经把最带的数据放到了最后两位,所以只比较前8个数据\n比较的结果为:[8, 23, 33, 36, 7, 60, 75, 96, 98, 98]\n依次类推:结果为\n[36, 23, 8, 98, 33, 60, 7, 75, 96, 98]\n[23, 8, 36, 33, 60, 7, 75, 96, 98, 98]\n[8, 23, 33, 36, 7, 60, 75, 96, 98, 98]\n[8, 23, 33, 7, 36, 60, 75, 96, 98, 98]\n[8, 23, 7, 33, 36, 60, 75, 96, 98, 98]\n[8, 7, 23, 33, 36, 60, 75, 96, 98, 98]\n[7, 8, 23, 33, 36, 60, 75, 96, 98, 98]\n[7, 8, 23, 33, 36, 60, 75, 96, 98, 98]\n冒泡排序的平均时间复杂度为O(N^2)\n算法优化:\n对冒泡排序常见的改进方法是加入标志性变量Bchange,用于标志某一趟排序过程中是否有数据交换。\n如果进行某一趟排序时并没有进行数据交换,则说明所有数据已经有序,可立即结束排序,避免不必要的比较过程。\n\"\"\"\n\ndataList = [random.randint(1,100) for x in range(10)]\n\n\ndef Bubble(data)->list:\n dataTemp = data.copy()\n size = len(dataTemp)\n for outer_loop in range(size):\n Bchange = False\n for inner_loop in range(size-outer_loop-1):\n if dataTemp[inner_loop] > dataTemp[inner_loop+1]:\n dataTemp[inner_loop], dataTemp[inner_loop+1] = dataTemp[inner_loop+1], dataTemp[inner_loop]\n Bchange = True\n print(dataTemp)\n if not Bchange:\n break\n return dataTemp\n\ndef main(argc, argv, envp)->None:\n print(\"源数据为: \", dataList)\n sortdata = Bubble(dataList)\n print(\"排序后为: \", sortdata)\n\nif __name__ == \"__main__\":\n main(len(sys.argv), sys.argv, os.environ)\n\n\n\n\n","repo_name":"chenganmin2017/Sort","sub_path":"Bubble.py","file_name":"Bubble.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36334525712","text":"#creating a node class\r\nclass Node:\r\n def __init__(self,val):\r\n self.childleft= None\r\n self.childright= None\r\n self.nodedata= val\r\n\r\n# creating an instance to construct tree\r\nroot = Node(1)\r\nroot.childleft= Node(2)\r\nroot.childright= Node(3)\r\nroot.childleft.childleft= Node(4)\r\nroot.childleft.childright= Node(5)\r\n\"\"\" This is the tree for reference\r\n 1\r\n 2 3\r\n 4 5 \"\"\"\r\ndef InOrd(root):\r\n if root:\r\n InOrd(root.childleft)\r\n print(root.nodedata)\r\n InOrd(root.childright)\r\nInOrd(root)\r\n\r\ndef Preord(root):\r\n if root:\r\n print (root.nodedata)\r\n Preord(root.childleft)\r\n Preord(root.childright)\r\nPreord(root)\r\n\r\ndef Postord(root):\r\n if root:\r\n Postord(root.childright)\r\n print(root.nodedata)\r\n Postord(root.childleft)\r\nPostord(root)\r\n \r\n\r\n\r\n\r\n 
\r\n","repo_name":"DharmilRvl/Tree_traverse_algorithm","sub_path":"Tree_traversal_algorithms.py","file_name":"Tree_traversal_algorithms.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12719505223","text":"from typing import Dict, Optional, Tuple, cast\n\nimport torch\nfrom torch.distributions import constraints\n\nfrom gluonts.core.component import validated\nfrom gluonts.torch.distributions import BinnedUniforms, GeneralizedPareto\n\nfrom .distribution_output import DistributionOutput\n\n\nclass SplicedBinnedPareto(BinnedUniforms):\n r\"\"\"\n Spliced Binned-Pareto univariate distribution.\n\n Arguments\n ----------\n bins_lower_bound: The lower bound of the bin edges\n bins_upper_bound: The upper bound of the bin edges\n numb_bins: The number of equidistance bins to allocate between\n `bins_lower_bound` and `bins_upper_bound`. Default value is 100.\n tail_percentile_gen_pareto: The percentile of the distribution that is\n each tail. Default value is 0.05. NB: This symmetric percentile\n can still represent asymmetric upper and lower tails.\n \"\"\"\n arg_constraints = {\n \"logits\": constraints.real,\n \"lower_gp_xi\": constraints.positive,\n \"lower_gp_beta\": constraints.positive,\n \"upper_gp_xi\": constraints.positive,\n \"upper_gp_beta\": constraints.positive,\n }\n support = constraints.real\n has_rsample = False\n\n def __init__(\n self,\n bins_lower_bound: float,\n bins_upper_bound: float,\n logits: torch.Tensor,\n upper_gp_xi: torch.Tensor,\n upper_gp_beta: torch.Tensor,\n lower_gp_xi: torch.Tensor,\n lower_gp_beta: torch.Tensor,\n numb_bins: int = 100,\n tail_percentile_gen_pareto: float = 0.05,\n validate_args=None,\n ):\n assert (\n tail_percentile_gen_pareto > 0 and tail_percentile_gen_pareto < 0.5\n ), \"tail_percentile_gen_pareto must be between (0,1)\"\n self.tail_percentile_gen_pareto = torch.tensor(\n tail_percentile_gen_pareto\n )\n\n device = logits.device\n self.tail_percentile_gen_pareto = self.tail_percentile_gen_pareto.to(\n device\n )\n\n self.lower_gp_xi = lower_gp_xi\n self.lower_gp_beta = lower_gp_beta\n self.lower_gen_pareto = GeneralizedPareto(\n self.lower_gp_xi, self.lower_gp_beta\n )\n\n self.upper_gp_xi = upper_gp_xi\n self.upper_gp_beta = upper_gp_beta\n self.upper_gen_pareto = GeneralizedPareto(\n self.upper_gp_xi, self.upper_gp_beta\n )\n\n setattr(self, \"lower_gp_xi\", self.lower_gp_xi)\n setattr(self, \"lower_gp_beta\", self.lower_gp_beta)\n setattr(self, \"upper_gp_xi\", self.upper_gp_xi)\n setattr(self, \"upper_gp_beta\", self.upper_gp_beta)\n\n super(SplicedBinnedPareto, self).__init__(\n bins_lower_bound,\n bins_upper_bound,\n logits,\n numb_bins,\n validate_args,\n )\n\n # TODO:\n # - need another implementation of the mean dependent on the tails\n\n def log_prob(self, x: torch.Tensor, for_training=True):\n \"\"\"\n Arguments\n ----------\n x: a tensor of size 'batch_size', 1\n for_training: boolean to indicate a return of the log-probability, or\n of the loss (which is an adjusted log-probability)\n \"\"\"\n\n # Compute upper and lower tail thresholds at current time from\n # their percentiles\n upper_percentile = self._icdf_binned(\n torch.ones_like(x) * (1 - self.tail_percentile_gen_pareto)\n )\n lower_percentile = self._icdf_binned(\n torch.ones_like(x) * self.tail_percentile_gen_pareto\n )\n # upper_percentile.shape: (*batch_shape)\n # lower_percentile.shape: (*batch_shape)\n\n upper_percentile = upper_percentile.detach()\n 
lower_percentile = lower_percentile.detach()\n\n # Log-prob given binned distribution\n logp_bins = self.log_binned_p(x)\n logp = logp_bins.double()\n # logp.shape: (*batch_shape)\n\n # We obtain the log probabilities under the tail distributions:\n upper_gen_pareto_log_prob = self.upper_gen_pareto.log_prob(\n torch.abs(x.squeeze(dim=-1) - upper_percentile)\n ) + torch.log(self.tail_percentile_gen_pareto)\n lower_gen_pareto_log_prob = self.lower_gen_pareto.log_prob(\n torch.abs(lower_percentile - x.squeeze(dim=-1))\n ) + torch.log(self.tail_percentile_gen_pareto)\n # For the two log prob calls above, we adjust the value so that it\n # corresponds to the value in the tail. We take the absolute value of\n # what we give to the gen pareto because else the gradients are nan.\n # The torch,where select the correct ones and so the values lower than\n # zero are ignored, but the backward pass of pytorch has an issue with\n # nans in where even if they are not selected\n\n # By default during training we want to optimise the log-prob of both\n # the binned and the gen pareto at the tails\n # if not for training, we want to only have the gen pareto at the tails\n if for_training:\n # Log-prob given upper tail distribution\n logp += torch.where(\n x > upper_percentile,\n upper_gen_pareto_log_prob,\n torch.zeros_like(logp),\n )\n\n # Log-prob given upper tail distribution\n logp += torch.where(\n x < lower_percentile,\n lower_gen_pareto_log_prob,\n torch.zeros_like(logp),\n )\n else:\n # Log-prob given upper tail distribution\n logp = torch.where(\n x > upper_percentile,\n upper_gen_pareto_log_prob,\n logp,\n )\n\n # Log-prob given upper tail distribution\n logp = torch.where(\n x < lower_percentile,\n lower_gen_pareto_log_prob,\n logp,\n )\n return logp\n\n def pdf(self, x):\n \"\"\"\n Probability for a tensor of data points `x`.\n 'x' is to have shape (*batch_shape)\n \"\"\"\n # By default we put the for training parameter of the pdf on false as\n # one tends to train with the log-prob\n return torch.exp(self.log_prob(x, for_training=False))\n\n def _inverse_cdf(self, quantiles: torch.Tensor):\n \"\"\"\n Inverse cdf of a tensor of quantile `quantiles`\n 'quantiles' is of shape (*batch_shape) with values between (0.0, 1.0)\n \"\"\"\n\n # The quantiles for the body of the distribution:\n icdf_body = self._icdf_binned(quantiles)\n\n # The quantiles if they are in the lower tail:\n adjusted_percentile_for_lower = 1 - (\n quantiles / self.tail_percentile_gen_pareto\n )\n icdf_lower = self._icdf_binned(\n torch.ones_like(quantiles) * self.tail_percentile_gen_pareto\n ) - self.lower_gen_pareto.icdf(adjusted_percentile_for_lower)\n\n # The quantiles if they are in the upper tail:\n adjusted_percentile_for_upper = (\n quantiles - (1.0 - self.tail_percentile_gen_pareto)\n ) / self.tail_percentile_gen_pareto\n icdf_upper = self.upper_gen_pareto.icdf(\n adjusted_percentile_for_upper\n ) + self._icdf_binned(\n torch.ones_like(quantiles) * (1 - self.tail_percentile_gen_pareto)\n )\n\n # Putting them together:\n icdf_value = icdf_body\n\n icdf_value = torch.where(\n quantiles < self.tail_percentile_gen_pareto, icdf_lower, icdf_value\n )\n\n icdf_value = torch.where(\n quantiles > 1 - self.tail_percentile_gen_pareto,\n icdf_upper,\n icdf_value,\n )\n\n return icdf_value\n\n def cdf(self, x: torch.Tensor):\n \"\"\"\n Cumulative density tensor for a tensor of data points `x`.\n 'x' is expected to be of shape (*batch_shape)\n \"\"\"\n for i in range(0, len(x.shape)):\n assert (\n x.shape[i] == self.batch_shape[i]\n 
), \"We expect the input to be a tensor of size batch_shape\"\n\n lower_percentile_value = self.icdf(self.tail_percentile_gen_pareto)\n upper_percentile_value = self.icdf(1 - self.tail_percentile_gen_pareto)\n\n # The cdf of the main distribution body:\n cdf_body = self._cdf_binned(x)\n\n # The cdf for the lower tail:\n adjusted_x_for_lower = lower_percentile_value - x\n cdf_lower = (\n 1.0 - self.lower_gen_pareto.cdf(adjusted_x_for_lower)\n ) * self.tail_percentile_gen_pareto\n\n # The cdf for the upper tail:\n adjusted_x_for_upper = x - upper_percentile_value\n cdf_upper = self.upper_gen_pareto.cdf(\n adjusted_x_for_upper\n ) * self.tail_percentile_gen_pareto + (\n 1 - self.tail_percentile_gen_pareto\n )\n\n # Putting them together:\n cdf_value = cdf_body\n\n cdf_value = torch.where(\n x < lower_percentile_value, cdf_lower, cdf_value\n )\n\n cdf_value = torch.where(\n x > upper_percentile_value,\n cdf_upper,\n cdf_value,\n )\n return cdf_value\n\n\nclass SplicedBinnedParetoOutput(DistributionOutput):\n distr_cls: type = SplicedBinnedPareto\n\n @validated()\n def __init__(\n self,\n bins_lower_bound: float,\n bins_upper_bound: float,\n num_bins: int,\n tail_percentile_gen_pareto: float,\n ) -> None:\n super().__init__(self)\n\n assert (\n tail_percentile_gen_pareto > 0 and tail_percentile_gen_pareto < 0.5\n ), \"tail_percentile_gen_pareto must be between (0,0.5)\"\n assert (\n isinstance(num_bins, int) and num_bins > 1\n ), \"num_bins should be an integer and greater than 1\"\n assert bins_lower_bound < bins_upper_bound, (\n f\"bins_lower_bound {bins_lower_bound} needs to less than \"\n f\"bins_upper_bound {bins_upper_bound}\"\n )\n\n self.num_bins = num_bins\n self.bins_lower_bound = bins_lower_bound\n self.bins_upper_bound = bins_upper_bound\n\n self.tail_percentile_gen_pareto = tail_percentile_gen_pareto\n\n self.args_dim = cast(\n Dict[str, int],\n {\n \"logits\": num_bins,\n \"upper_gp_xi\": 1,\n \"upper_gp_beta\": 1,\n \"lower_gp_xi\": 1,\n \"lower_gp_beta\": 1,\n },\n )\n\n @classmethod\n def domain_map( # type: ignore\n cls,\n logits: torch.Tensor,\n upper_gp_xi: torch.Tensor,\n upper_gp_beta: torch.Tensor,\n lower_gp_xi: torch.Tensor,\n lower_gp_beta: torch.Tensor,\n ) -> Tuple[\n torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor\n ]:\n logits = torch.abs(logits)\n\n upper_gp_xi = torch.abs(upper_gp_xi)\n upper_gp_beta = torch.abs(upper_gp_beta)\n lower_gp_xi = torch.abs(lower_gp_xi)\n lower_gp_beta = torch.abs(lower_gp_beta)\n\n return logits, upper_gp_xi, upper_gp_beta, lower_gp_xi, lower_gp_beta\n\n def distribution(\n self,\n distr_args,\n loc: Optional[torch.Tensor] = None,\n scale: Optional[torch.Tensor] = None,\n ) -> BinnedUniforms:\n return self.distr_cls(\n self.bins_lower_bound,\n self.bins_upper_bound,\n *distr_args,\n self.num_bins,\n self.tail_percentile_gen_pareto,\n )\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n","repo_name":"awslabs/gluonts","sub_path":"src/gluonts/torch/distributions/spliced_binned_pareto.py","file_name":"spliced_binned_pareto.py","file_ext":"py","file_size_in_byte":11529,"program_lang":"python","lang":"en","doc_type":"code","stars":3904,"dataset":"github-code","pt":"67"} +{"seq_id":"11008479681","text":"import re\nimport typing\n\nimport numpy as np\n\nfrom deeplearning.clgen.preprocessors import opencl\nfrom gpu.cldrive.legacy import args\nfrom labm8.py import app\nfrom labm8.py import fmt\n\nFLAGS = app.FLAGS\n\n\nclass OpenClFunction(object):\n \"\"\"Representation of an OpenCL function.\n\n Can be either: 
a kernel definition, a function definition, or a function\n declaration.\n \"\"\"\n\n def __init__(self, src: str, is_kernel: bool = True):\n self.src = src\n self.is_kernel = is_kernel\n\n def SetFunctionName(self, new_name: str):\n \"\"\"Set function name to a new name.\n\n Args:\n new_name: The new name to set.\n\n Raises:\n ValueError: If the name could not be set.\n \"\"\"\n if self.is_kernel:\n self.src, num_replacements = re.subn(\n r\"^kernel void ([A-Z]+)\\(\", f\"kernel void {new_name}(\", self.src\n )\n else:\n self.src, num_replacements = re.subn(\n r\"^void ([A-Z]+)\\(\", f\"void {new_name}(\", self.src\n )\n if num_replacements != 1:\n raise ValueError(\n f\"{num_replacements} substitutions made when trying to \"\n f\"set function name to '{new_name}'\"\n )\n\n def InsertBlockIntoKernel(\n self, rand: np.random.RandomState, block_to_insert: str\n ) -> None:\n \"\"\"Insert a code block at a random position in the kernel.\n\n Args:\n rand: A random seed.\n block_to_insert: The code block to insert, as a string.\n \"\"\"\n if not self.is_kernel:\n raise TypeError(\"Cannot insert block into non-kernel.\")\n\n lines = self.src.split(\"\\n\")\n if len(lines) < 2:\n raise ValueError(\"OpenCL kernel is less than two lines long.\")\n # Try and find a point to\n indices = list(range(1, len(lines)))\n rand.shuffle(indices)\n for insertion_line_idx in indices:\n previous_line = lines[insertion_line_idx - 1]\n if previous_line[-1] == \";\" or previous_line[-1] == \"{\":\n # The previous line was either a statement or the start of a new block: we\n # can insert the block here.\n break\n else:\n app.Log(\n 2,\n 'Previous line \"%s\" not valid as a code block insertion ' \"point\",\n previous_line,\n )\n else:\n raise ValueError(\n f\"Failed to find a position to insert block in function '{self.src}'\"\n )\n\n pre = lines[:insertion_line_idx]\n post = lines[insertion_line_idx:]\n\n indendation_at_point_of_insertion = 0\n for c in pre[-1]:\n if c == \" \":\n indendation_at_point_of_insertion += 1\n else:\n break\n else:\n raise ValueError(f\"Line contains nothing but whitespace: '{pre[-1]}'\")\n\n if previous_line[-1] == \"{\":\n # Inserting block at the start of a new block, increase indentation.\n indendation_at_point_of_insertion += 2\n\n if indendation_at_point_of_insertion < 2:\n raise ValueError(\n \"Line has insufficient indentation \"\n f\"({indendation_at_point_of_insertion}): '{pre[-1]}'\"\n )\n\n block = fmt.Indent(indendation_at_point_of_insertion, block_to_insert)\n\n self.src = \"\\n\".join(pre + [block] + post)\n\n\ndef GetKernelArguments(kernel: str):\n try:\n # Extract everything up to the function body, and use an empty function\n # body for parsing. This means that errors that are in the function body\n # will not cause this to fail. E.g. 
given kernel:\n #\n # kernel void A(const int a, global float* b) {\n # b[0] += a;\n # }\n #\n # This will parse:\n #\n # kernel void A(const int a, global float* b) {}\n kernel_declaration = kernel[: kernel.index(\"{\")] + \"{}\"\n return args.GetKernelArguments(kernel_declaration)\n except ValueError as e:\n app.Error(\"Failure processing kernel: '%s'\", kernel)\n raise e\n\n\ndef KernelToFunctionDeclaration(kernel: str) -> OpenClFunction:\n \"\"\"Build a function declaration for an OpenCL kernel.\n\n Args:\n kernel: The kernel function to declare.\n\n Returns:\n A single line function declaration.\n\n Raises:\n ValueError: If kernel is invalid.\n \"\"\"\n match = re.match(r\"^(kernel )?void ([A-Z]+)\\(\", kernel)\n if not match:\n raise ValueError(\"Not a valid OpenCL function\")\n name = match.group(2)\n args_string = \", \".join(str(a) for a in GetKernelArguments(kernel))\n return OpenClFunction(f\"void {name}({args_string});\", is_kernel=False)\n\n\ndef KernelArgumentToVariableDeclaration(arg: args.KernelArg) -> str:\n s = []\n for qual in arg.quals:\n if (\n qual != \"global\"\n and qual != \"local\"\n and qual != \"constant\"\n and qual != \"const\"\n ):\n s.append(qual)\n s.append(arg.typename)\n if arg.is_pointer:\n s.append(\"*\")\n if arg.name:\n s.append(arg.name)\n return \" \".join(s) + \";\"\n\n\ndef KernelToDeadCodeBlock(kernel: str) -> str:\n # Convert arguments to variable declarations.\n declarations = [\n KernelArgumentToVariableDeclaration(a) for a in GetKernelArguments(kernel)\n ]\n # The block header is the list of argument variable declarations.\n header = \"\\n\".join(fmt.IndentList(2, declarations))\n # The block body is the kernel body. The kernel body is already indented, no\n # need to indent further.\n body = \"\\n\".join(kernel.split(\"\\n\")[1:-1])\n # Wrap block in `if (0) { ... }` conditional.\n return f\"if (0) {{\\n{header}\\n{body}\\n}}\"\n\n\ndef KernelToFunction(kernel: str) -> OpenClFunction:\n if not kernel.startswith(\"kernel void \"):\n raise ValueError(\"Invalid kernel\")\n else:\n return OpenClFunction(kernel[len(\"kernel \") :], is_kernel=False)\n\n\nclass UniqueNameSequence(object):\n \"\"\"A unique name sequence generator.\n\n Generates name sequences from a base characeter.\n E.g. 'a', 'b', 'c', ... 'aa', 'ab', ...\n \"\"\"\n\n def __init__(self, base_char: str, prefix: str = \"\", suffix: str = \"\"):\n \"\"\"Instantiate a unique name sequence.\n\n Args:\n base_char: The first character in the sequence. 
Must be 'a' or 'A'.\n prefix: An optional prefix to include in sequence names.\n suffix: An optional suffix to include in sequence names.\n\n Raises:\n ValueError: If base_char is not 'a' or 'A'.\n \"\"\"\n if base_char not in {\"a\", \"A\"}:\n raise ValueError(f\"Invalid base_char '{base_char}'\")\n self._base_ord = ord(base_char)\n self._prefix = prefix\n self._suffix = suffix\n self._i = 0\n\n def StringInSequence(self, i: int) -> str:\n \"\"\"Return the i-th string in the sequence.\n\n Args:\n i: The index into the name sequence.\n\n Returns:\n The i-th name in the sequence.\n\n Raises:\n ValueError: If i is out of range (negative).\n \"\"\"\n if i < 0:\n raise ValueError\n s = [self._prefix]\n\n while i > 25:\n k = i // 26\n i %= 26\n s.append(chr(self._base_ord - 1 + k))\n s.append(chr(self._base_ord + i))\n\n s.append(self._suffix)\n\n return \"\".join(s)\n\n def __iter__(self):\n return self\n\n def __next__(self) -> str:\n \"\"\"Generate the next name in the sequence.\"\"\"\n s = self.StringInSequence(self._i)\n self._i += 1\n return s\n\n\nclass OpenClDeadcodeInserter(object):\n \"\"\"A dead code OpenCL source mutator.\"\"\"\n\n def __init__(\n self,\n rand: np.random.RandomState,\n kernel: str,\n candidate_kernels: typing.List[str],\n ):\n \"\"\"Constructor.\n\n Args:\n rand: A random number state.\n kernel: An OpenCL kernel string.\n candidate_kernels: A list of OpenCL kernel strings to use for deadcode\n insertion.\n \"\"\"\n\n def _PreprocessKernel(src: str) -> str:\n \"\"\"Format a kernel for use and check that it meets requirements.\"\"\"\n src = opencl.StripDoubleUnderscorePrefixes(src.strip())\n if not src.startswith(\"kernel void \"):\n raise ValueError(\"Invalid kernel\")\n # Strip trailing whitespace, and exclude blank lines.\n return \"\\n\".join(ln.rstrip() for ln in src.split(\"\\n\") if ln.rstrip())\n\n self._rand = rand\n\n # A list of code blocks, where each code block is a function definition or\n # declaration.\n self._functions = [\n OpenClFunction(_PreprocessKernel(kernel), is_kernel=True)\n ]\n\n if not len(candidate_kernels):\n raise ValueError(\"Must have one or more candidate kernels.\")\n\n self._candidates = [_PreprocessKernel(k) for k in candidate_kernels]\n\n @property\n def opencl_source(self) -> str:\n \"\"\"Serialize the mutated source to a string.\"\"\"\n # Rename the functions.\n gen = UniqueNameSequence(base_char=\"A\")\n [cb.SetFunctionName(next(gen)) for cb in self._functions]\n\n return \"\\n\\n\".join([cb.src for cb in self._functions])\n\n def Mutate(self) -> None:\n \"\"\"Run a random mutation.\"\"\"\n mutator = self._rand.choice(\n [\n self.PrependUnusedFunction,\n self.AppendUnusedFunction,\n self.PrependUnusedFunctionDeclaration,\n self.AppendUnusedFunctionDeclaration,\n self.InsertBlockIntoKernel,\n ]\n )\n mutator()\n\n def PrependUnusedFunctionDeclaration(self) -> None:\n # Select a random function to declare.\n to_prepend = self._rand.choice(self._candidates)\n self._functions = [\n KernelToFunctionDeclaration(to_prepend)\n ] + self._functions\n\n def AppendUnusedFunctionDeclaration(self) -> None:\n # Select a random function to declare.\n to_append = self._rand.choice(self._candidates)\n self._functions.append(KernelToFunctionDeclaration(to_append))\n\n def PrependUnusedFunction(self) -> None:\n to_prepend = self._rand.choice(self._candidates)\n self._functions = [KernelToFunction(to_prepend)] + self._functions\n\n def AppendUnusedFunction(self) -> None:\n to_append = self._rand.choice(self._candidates)\n 
self._functions.append(KernelToFunction(to_append))\n\n def InsertBlockIntoKernel(self) -> None:\n to_modify = self._rand.choice([f for f in self._functions if f.is_kernel])\n to_insert = self._rand.choice(self._candidates)\n to_modify.InsertBlockIntoKernel(\n self._rand, KernelToDeadCodeBlock(to_insert)\n )\n\n\ndef GenerateDeadcodeMutations(\n kernels: typing.Iterator[str],\n rand: np.random.RandomState,\n num_permutations_of_kernel: int = 5,\n num_mutations_per_kernel: typing.Tuple[int, int] = (1, 5),\n) -> typing.Iterator[str]:\n \"\"\"Generate dead code mutations for a set of kernels.\n\n Args:\n rand: A random seed.\n kernels: The OpenCL kernels to mutate.\n num_permutations_of_kernel: The number of permutations of each kernel to\n generate.\n num_mutations_per_kernel: The minimum and maximum number of mutations to\n apply to each generated kernel.\n \"\"\"\n for kernel in kernels:\n for _ in range(num_permutations_of_kernel):\n # Apply random mutations to kernel and yield.\n rand_ = np.random.RandomState(rand.randint(0, int(1e9)))\n\n # Use all kernels (including the current one we're mutating) as candidates\n # for mutation.\n dci = OpenClDeadcodeInserter(rand_, kernel, candidate_kernels=kernels)\n\n # RandomState.randint() is in range [low,high), hence add one to max to\n # make it inclusive.\n num_mutations = rand.randint(\n num_mutations_per_kernel[0], num_mutations_per_kernel[1] + 1\n )\n\n for _ in range(num_mutations):\n dci.Mutate()\n yield dci.opencl_source\n","repo_name":"ChrisCummins/phd","sub_path":"deeplearning/deeptune/opencl/adversary/opencl_deadcode_inserter.py","file_name":"opencl_deadcode_inserter.py","file_ext":"py","file_size_in_byte":11041,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"20300500880","text":"\nfrom itertools import product\nfrom math import prod as multiply\nfrom typing import Iterable, Union\n\ninfile = open('day1-input', 'r')\n\n# our report is a list of all of the numbers in the file, with line breaks removed\nreport = [int(line.strip()) for line in infile]\n\ninfile.close()\n\n\n# part 1\n# I iterate through the cartesian product of the report with itself,\n# which is to say, I iterate through every possible pairing of a number\n# another in the report, and check that they sum to 2020, if so, return\n# their (arithmetic) product and exit the loop\nfor pair in product(report, repeat = 2):\n if sum(pair) == 2020:\n print(multiply(pair))\n break\n\n# part 2\n# same as part 1 but with 3 copies of report\nfor trio in product(report, repeat = 3):\n if sum(trio) == 2020:\n print(multiply(trio))\n break\n\n\n# for fun, here's a function that'll do the above for any number of elements n\ndef tuple_sum_to(l: Iterable, n: int, target) -> Union[tuple, None]:\n for t in product(l, repeat = n):\n if sum(t) == target:\n return t\n\n# the solutions using this function would look like:\n\nmultiply(tuple_sum_to(report, n = 2, target = 2020))\nmultiply(tuple_sum_to(report, n = 3, target = 2020))","repo_name":"le-birb/advent-of-code_2020","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28907578284","text":"class Solution:\n def isLongPressedName(self, name: str, typed: str) -> bool:\n name_dict = self.check([], name)\n name_typed = self.check([], typed)\n if len(name_dict) != len(name_typed):\n return False\n for i, j in zip(name_dict, name_typed):\n if 
j < i:\n return False\n return True\n \n def check(self, ndict, name):\n last = \"0\"\n for i in name:\n if i != last:\n ndict.append(1)\n last = i\n else:\n ndict[-1] += 1\n return ndict\n\n def faster(self, name: str, typed:str) -> bool:\n i = 0\n for j in range(len(typed)):\n if i < len(name) and name[i] == typed[j]:\n i += 1\n elif j == 0 or typed[j] != typed[j - 1]:\n return False\n return i == len(name)\n","repo_name":"longhao54/leetcode","sub_path":"easy/925.py","file_name":"925.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8468718801","text":"from pytube import YouTube\nimport argparse, concurrent.futures, threading\nfrom pathlib import Path\nfrom pytube.cli import on_progress\n\n\nclass customException(Exception):\n def __init__(self, message):\n self.message = message\n\n @property\n def __str__(self) -> str:\n return self.message\n\n\nclass get_StreamFile:\n\n def download_file(self, url: str, type: str, filename: str, res: str) -> None:\n try:\n yt = YouTube(url=url, on_progress_callback=on_progress)\n stream = None\n extension: str\n\n if type == \"audio\":\n stream = yt.streams.filter(only_audio=True, mime_type=\"audio/mp4\", type=\"audio\").first()\n extension = \"mp3\"\n\n if(stream is None):\n raise customException(\"No file format Stream is available\")\n \n elif type == \"video\":\n stream = yt.streams.filter(only_video=True, mime_type=\"video/mp4\", type=\"video\", res=res).first()\n extension = \"mp4\"\n\n if(stream is None):\n raise customException(\"No file format Stream is available\")\n \n else:\n raise customException(\"Unknown file type entered ..\")\n # quit()\n\n print(\"Downloading the file. please wait for few Seconds ...\")\n print(f\"Title: {stream.title}\")\n print(f\"filesize: {stream.filesize/(1024*1024)} MB\")\n\n if not filename:\n stream.download()\n else:\n stream.download(filename=f\"{filename}.{extension}\")\n\n print(f\"{type} downloaded successfully at Path: {Path.cwd()}/{stream.title}\")\n\n except customException as e:\n print(f\"An error occured: {str(e)}\")\n except Exception as e:\n print(f\"An error occured: {str(e)}\")\n\n\n def create_arge_parser(self):\n parser = argparse.ArgumentParser(description=\"\"\"Welcome Here to convert your\n favourate video to audio...\"\"\")\n \n parser.add_argument(\"--url\", nargs='*', help=\"Enter the Url(s) of the youtube video here\", default=None)\n parser.add_argument(\"--type\", type=str, help=\"Enter the Type format of the file (audio/video)\", default=None)\n parser.add_argument(\"--name\", type=str, help=\"Enter the name of file without .ext Example : 'audio' \", default=\"\")\n parser.add_argument(\"--res\", type=str, help=\"Enter the resolution type when downloading video Example: '1080p'. 
Default set to 480p\", default=\"480p\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n\n obj = get_StreamFile()\n args = obj.create_arge_parser()\n url: list = args.url\n filetype: str = args.type\n name: str = args.name\n res: str = args.res\n\n if(url is None or not url):\n print(\"Enter the url .\")\n quit()\n elif(filetype == \"audio\" and url) or (filetype == \"video\" and url):\n for i in url:\n threading.Thread(target=obj.download_file, args=(i, filetype, name, res,)).start()\n else:\n print(\"Please enter the correct input format\")","repo_name":"Devanshkanda/VAtuber","sub_path":"get_audio_video.py","file_name":"get_audio_video.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13676306017","text":"class Solution:\n def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n if not nums:\n return 0\n left, right = 0, 0\n current_sum = nums[left]\n min_length = float('inf')\n while right < len(nums):\n if current_sum >= s:\n min_length = min(min_length, right - left + 1)\n if min_length == 1:\n return 1\n current_sum -= nums[left]\n left += 1\n else:\n right += 1\n if right == len(nums):\n break\n current_sum += nums[right]\n return min_length if min_length != float('inf') else 0","repo_name":"yuansun86/leetcode","sub_path":"Code/209. Minimum Size Subarray Sum.py","file_name":"209. Minimum Size Subarray Sum.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27651412766","text":"import unittest\n\nfrom venus.verification.venus import Venus\nfrom venus.tests.verification.satisfaction_reader import SatisfactionReader\n\nclass TestMnistFC(unittest.TestCase):\n\n def setUp(self):\n self.true_results = SatisfactionReader('venus/tests/data/mnistfc/queries.csv').read_results()\n self.properties = {\n 1 : [(1, x, y) for x in range(1, 6) for y in range(1, 10)],\n 2 : [(2, x, y) for x in range(1, 6) for y in range(1, 10)],\n 3 : [(3, x, y) for x in range(1, 6) for y in range(1, 10)],\n 4 : [(4, x, y) for x in range(1, 6) for y in range(1, 10)],\n 5 : [(5, 1, 1)],\n 6 : [(6, 1, 1)],\n 7 : [(7, 1, 9)],\n 8 : [(8, 2, 9)],\n 9 : [(9, 3, 3)],\n 10 : [(10, 4, 5)]\n }\n\n\n def test_fc(self):\n \"\"\"\n Tests the verification results MNISTFC.\n \"\"\"\n for (nn, spec) in self.true_results:\n venus = Venus(\n nn='venus/tests/data/mnistfc/' + nn,\n spec='venus/tests/data/mnistfc/' + spec\n )\n report = venus.verify()\n self.assertTrue(self.true_results[(nn, spec)] == report.result)\n","repo_name":"vas-group-imperial/venus2","sub_path":"venus/tests/verification/mnistfc.py","file_name":"mnistfc.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"32623116502","text":"#!/usr/bin/env python3\nfrom bs4 import BeautifulSoup\nimport requests\nimport warnings\nimport time\nimport sys\nfrom requests.auth import HTTPBasicAuth\nwarnings.filterwarnings(\"ignore\")\n\n\ndef do_get(params):\n\tbaseurl = \"https://example.myliferayhost.com/api/jsonws\"\n\tproxies = {\"http\":\"http://127.0.0.1:8080\",\"https\":\"http://127.0.0.1:8080\"}\n\t#proxies = {}\n\ttry:\n\t\tresponse = requests.get(baseurl, verify=False, proxies=proxies, params=params,timeout=20,auth=HTTPBasicAuth('user','password'))\n\t\thtml = BeautifulSoup(response.text, \"html.parser\")\n\t\tif html.title == \"Burp Suite 
Professional\":\n\t\t\treturn do_get(params)\n\t\telse:\n\t\t\treturn response.text\n\texcept Exception as e:\n\t\tprint(f\"Error: {e}\")\n\t\ttime.sleep(1)\n\t\treturn do_get(params)\n\ncontent = do_get({})\n#print(content)\nsoup = BeautifulSoup(content, \"html.parser\")\n#Find all a with class of \"lfr-panel-title\"\nresults = soup.find_all('a',{'class':'lfr-api-service-result'})\nfor method in results:\n\t#add-user-group-roles\n\t#print(method.find_all('span')[0].text)\n\thref = method['href']\n\thref = href[1:]\n\tparameters = href.split('=')\n\tparams = {parameters[0]:parameters[1]}\n\tprint(params)\n\n\tmethodcontent = do_get(params)\n\tmethodsoup = BeautifulSoup(methodcontent,\"html.parser\")\n\tmethodresults = methodsoup.find_all('div',{'class':'lfr-api-parameters'})\n\n\tfor apiParamGroup in methodresults:\n\t#
\n\t\tif apiParamGroup.find('h3').text == \"Parameters\":\n\t\t\t#if the div contains an <h3> heading whose text is \"Parameters\"
\n\t\t\t# print(apiParamGroup)\n\t\t\t# print(type(apiParamGroup))\n\t\t\t#print(f\"apiParamGroup: {apiParamGroup} of type {type(apiParamGroup)}\")\n\t\t\tapiParamSpans = apiParamGroup.find_all('span')\n\t\t\t#print(f\"apiParamSpans: {apiParamSpans} of type {type(apiParamSpans)}\")\n\n\t\t\tnewline = 0\n\t\t\tfor span in apiParamSpans:\n\t\t\t\t#print(span)\n\t\t\t\tif newline == 0:\n\t\t\t\t\tprint(f\"{span.text} - \", end='')\n\t\t\t\t\tnewline = 1\n\t\t\t\telse:\n\t\t\t\t\tprint(f\"{span.text}\")\n\t\t\t\t\tnewline = 0\n\n","repo_name":"caryhooper/scripts","sub_path":"liferay-jsonws-scan.py","file_name":"liferay-jsonws-scan.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"72227969494","text":"import rdflib\nfrom model import check_fact_veracity\n\ndef write_fact_veracity_to_file(fact_id, veracity, result_file):\n \"\"\"Writes the veracity of a fact to a file.\"\"\"\n fact_iri = rdflib.URIRef(fact_id)\n predicate = rdflib.URIRef('http://swc2017.aksw.org/hasTruthValue')\n value = rdflib.Literal(str(veracity), datatype=rdflib.URIRef('http://www.w3.org/2001/XMLSchema#double'))\n\n triple = (fact_iri, predicate, value)\n result_file.write(triple[0].n3() + ' ' + triple[1].n3() + ' ' + triple[2].n3() + ' .\\n')\n\ndef generate_result_file(facts, graph, embeddings_model, scaler, model, result_file_path):\n \"\"\"Generates a result file containing the veracities of all facts.\"\"\"\n # Open the result file for writing\n result_file = open(result_file_path, 'w')\n\n # Iterate over the facts in the training dataset\n for fact_id, fact in facts.items():\n fact_subject = fact['subject']\n fact_predicate = fact['predicate']\n fact_object = fact['object']\n\n # Perform fact-checking for each fact\n veracity = check_fact_veracity(fact_subject, fact_predicate, fact_object, graph, embeddings_model, scaler, model)\n\n # Write fact veracity to result file\n if veracity is not None:\n write_fact_veracity_to_file(fact_id, veracity, result_file)\n\n # Close the result file\n result_file.close()\n\n","repo_name":"nehapokharel/fact-checking-engine","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18126260101","text":"import pytest\n\nfrom wasc import utils\n\n\nclass TestReadCheckers:\n def test_no_file(self):\n with pytest.raises(FileNotFoundError):\n utils.read_checkers(\"foo.txt\")\n def test_read_crit_example(self):\n expected_checkers = {\n \"HeadNbChecker\", \"HeadLvlChecker\",\n \"AccessChecker\", \"AccessLinkChecker\", \"AccessRateChecker\",\n \"LegalChecker\", \"LangChecker\", \"DoctypeChecker\"\n }\n read_checkers = utils.read_checkers(\"tests/data/checkers_example.csv\")\n assert isinstance(read_checkers, list)\n assert set(read_checkers) == expected_checkers\n\nclass TestReadWebsites:\n def test_no_file(self):\n with pytest.raises(FileNotFoundError):\n utils.read_websites(\"foo.txt\")\n def test_read_url_example(self):\n expected_url = [\n (\"Design Gouv\", \"https://design.numerique.gouv.fr/\"),\n (\"Example\", \"http://example.com\")\n ]\n url_example = utils.read_websites(\"tests/data/url_example.csv\")\n assert isinstance(url_example, list)\n for website in url_example:\n assert website in expected_url\n\nEXAMPLE_TEST_URL = \"https://www.example.com/fr/test\"\nEXAMPLE_ROOT = \"https://www.example.com/fr\"\nEXAMPLE_ROOT_SLASH = 
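# Illustrative sketch, not from the original output.py above: the same triples
# can be collected in an rdflib.Graph and serialized once with the N-Triples
# serializer, instead of hand-assembling each line with .n3(). The
# fact_veracities dict (fact IRI -> score) is an assumed input shape.
import rdflib

def serialize_veracities(fact_veracities):
    g = rdflib.Graph()
    predicate = rdflib.URIRef('http://swc2017.aksw.org/hasTruthValue')
    xsd_double = rdflib.URIRef('http://www.w3.org/2001/XMLSchema#double')
    for fact_id, veracity in fact_veracities.items():
        g.add((rdflib.URIRef(fact_id), predicate,
               rdflib.Literal(str(veracity), datatype=xsd_double)))
    return g.serialize(format='nt')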
\"https://www.example.com/fr/\"\n\nclass TestCheckAndCorrectUrl:\n def test_empty(self):\n assert not utils.check_and_correct_url(\"\", \"\")\n\n def test_correct(self):\n assert utils.check_and_correct_url(EXAMPLE_TEST_URL, \"https://www.example.com\") == EXAMPLE_TEST_URL\n assert utils.check_and_correct_url(\"/fr/test/\", EXAMPLE_ROOT_SLASH) == EXAMPLE_TEST_URL\n assert utils.check_and_correct_url(\"fr/test/\", EXAMPLE_ROOT_SLASH) == EXAMPLE_TEST_URL\n assert utils.check_and_correct_url(\"/fr/test/\", EXAMPLE_ROOT) == EXAMPLE_TEST_URL\n","repo_name":"atelierPartage/wasc","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"726336661","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport random\nimport itertools\nimport json\n\nauthor = 'Victor van Pelt'\n\ndoc = \"\"\"\nQuestionnaire\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'Questionnaire'\n players_per_group = None\n surveys = ['1', '2', '3', '4']\n num_rounds = len(surveys)\n # Choices=[\n # [1, 'Disagree strongly'],\n # [2, 'Disagree moderately'],\n # [3, 'Disagree a little'],\n # [4, 'Neither agree nor disagree'],\n # [5, 'Agree a little'],\n # [6, 'Agree moderately'],\n # [7, 'Agree strongly'],\n # ]\n Choices = [1, 2, 3, 4, 5]\n\nclass Subsession(BaseSubsession):\n pass\n # def creating_session(self):\n # if self.round_number == 1:\n # for p in self.get_players():\n # round_numbers = list(range(3, Constants.num_rounds + 3))\n # p.participant.vars['surveys_rounds'] = dict(zip(Constants.surveys, round_numbers))\n # p.participant.vars['final'] = Constants.num_rounds + 3\n # p.participant.vars['demographics'] = 2\n # print(p.participant.vars)\n\n # def creating_session(self):\n # from .pages import initial_page_sequence\n # aaa = [i.__name__.split('_') for i in initial_page_sequence]\n # page_blocks = [list(group) for key, group in itertools.groupby(aaa, key=lambda x: x[0])]\n # for p in self.get_players():\n # pb = page_blocks.copy()\n # random.shuffle(pb)\n # level1 = list(itertools.chain.from_iterable(pb))\n # level2 = ['_'.join(i) for i in level1]\n # p.participant.vars['initial_page_sequence'] = json.dumps(level2)\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n prolific_id = models.StringField(default=str(\" \"))\n #Demographics\n gender = models.IntegerField(\n label=\"Please select your gender.\",\n choices=[\n [1, 'Male'],\n [2, 'Female'],\n [3, 'Other'],\n [4, 'I prefer not to say.'],\n ]\n )\n\n age = models.IntegerField(label=\"Please enter your age.\", min=14, max=125, blank=True)\n\n # prolific_id = models.StringField(label=\"Please enter your ProlificID\")\n\n risk_attitude = models.IntegerField(\n label=\"How willing are you in general to take risks on a scale from 0 (not willing to take risks at all) to 10 \"\n \"(highly willing to take risks)?\",\n choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n widget=widgets.RadioSelectHorizontal\n )\n trust_degree = models.IntegerField(\n label=\"How much do you in general trust other people on a scale from 0 (I don’t trust other people at all) to \"\n \"10 (I fully trust other people)?\",\n choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n widget=widgets.RadioSelectHorizontal\n )\n bat_ball = models.FloatField(\n label=\"A bat and a ball cost 1.10 dollars in total. 
The bat costs 1.00 dollar more than the ball. How much \"\n \"does the ball cost? Your answer (in cents) e.g. $100 = 10000 cents:\",\n min=0\n )\n machine_widget = models.IntegerField(\n label=\"If it takes 5 machines 5 minutes to make 5 widgets, how long would it take 100 machines to make 100 \"\n \"widgets? Your answer (in minutes):\",\n min=0\n )\n lake_lily_pad = models.IntegerField(\n label=\"In a lake, there is a patch of lily pads. Every day, the patch doubles in size. If it takes 48 days for \"\n \"the patch to cover the entire lake, how long would it take for the patch to cover half of the lake? \"\n \"Your answer (in number of days):\",\n min=0\n )\n give_up = models.IntegerField(\n label=\"In comparison to others, are you a person who is generally willing to give up something today to \"\n \"benefit from that in the future or are you not willing to do so?\",\n choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n widget=widgets.RadioSelectHorizontal\n )\n share_others = models.IntegerField(\n label=\"How do you assess your willingness to share with others without expecting anything in return \"\n \"when it comes to charity?\",\n choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n widget=widgets.RadioSelectHorizontal\n )\n lost_way = models.IntegerField(\n label=\"Imagine the following situation: you are shopping in an unfamiliar city and realize you lost your way. \"\n \"You ask a stranger for directions. The stranger offers to take you with their car to your destination. \"\n \"The ride takes about 20 minutes and costs the stranger about 20 Euro in total. The stranger does not want money for it. \"\n \"You carry six bottles of wine with you. The cheapest bottle costs 5 Euro, the most expensive one 30 Euro. \"\n \"You decide to give one of the bottles to the stranger as a thank-you gift. 
Which bottle do you give?\",\n choices=[\n [5, '5 Euro'],\n [10, '10 Euro'],\n [15, '15 Euro'],\n [20, '20 Euro'],\n [25, '25 Euro'],\n [30, '30 Euro']\n ],\n widget=widgets.RadioSelectHorizontal\n )\n punish_unfair = models.IntegerField(\n label=\"4.\tHow do you see yourself: Are you a person who is generally willing to punish unfair behaviour even if this is costly?\",\n choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n widget=widgets.RadioSelectHorizontal\n )\n\n mach_1 = models.IntegerField(\n label = \"It’s not wise to tell your secrets.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_2 = models.IntegerField(\n label=\"I like to use clever manipulation to get my way.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_3 = models.IntegerField(\n label=\"Whatever it takes, you must get the important people on your side.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_4 = models.IntegerField(\n label=\"Avoid direct conflict with others because they maybe useful in the future.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_5 = models.IntegerField(\n label=\"It’s wise to keep track of information that you can use against people later.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_6 = models.IntegerField(\n label=\"You should wait for the right time to get back at people.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_7 = models.IntegerField(\n label=\"there are things you should hide from other people to preserve your reputation.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_8 = models.IntegerField(\n label=\"Make sure your plans benefit yourself, not others.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n mach_9 = models.IntegerField(\n label=\"Most people can be manipulated.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_1 = models.IntegerField(\n label=\"People see me as a natural leader.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_2 = models.IntegerField(\n label=\"I hate being the center of attention.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_3 = models.IntegerField(\n label=\"Many group activities tend to be dull without me.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_4 = models.IntegerField(\n label=\"I know that I am special because everyone keeps telling me so.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_5 = models.IntegerField(\n label=\"I like to get acquainted with important people.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_6 = models.IntegerField(\n label=\"I feel embarrassed if someone compliments me.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_7 = models.IntegerField(\n label=\"I have been compared to famous people.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_8 = models.IntegerField(\n label=\"I am an average person.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n narc_9 = models.IntegerField(\n label=\"I insist on getting the respect I deserve.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_1 = models.IntegerField(\n label=\"I like to get revenge on 
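# Illustrative scoring sketch (not part of the original oTree app): the nine
# mach_* / narc_* / psych_* items defined in this model can be averaged into a
# subscale score per player; reverse-coded items, if any, are ignored here.
def dark_triad_subscale(player, prefix, n_items=9):
    values = [getattr(player, f'{prefix}_{i}') for i in range(1, n_items + 1)]
    return sum(values) / len(values)

# e.g. dark_triad_subscale(player, 'mach') -> mean of mach_1 .. mach_9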
authorities.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_2 = models.IntegerField(\n label=\"I avoid dangerous situations.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_3 = models.IntegerField(\n label=\"Payback needs to be quick and nasty.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_4 = models.IntegerField(\n label=\"People often say I’m out of control.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_5 = models.IntegerField(\n label=\"It’s true that I can be mean to others.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_6 = models.IntegerField(\n label=\"People who mess with me always regret it.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_7 = models.IntegerField(\n label=\"I have never gotten into trouble with the law.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_8 = models.IntegerField(\n label=\"I enjoy having sex with people I hardly know.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )\n psych_9 = models.IntegerField(\n label=\"I’ll say anything to get what I want.\",\n choices=Constants.Choices,\n widget=widgets.RadioSelectHorizontal\n )","repo_name":"AAA0109/Prisoner-s-dilemma","sub_path":"question/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"16952984179","text":"from copy import deepcopy\n\n\nclass Stack:\n def __init__(self):\n self.datas=[]\n\n def push(self, item):\n self.datas.append(item)\n\n def pop(self):\n try:\n return self.datas.pop()\n except IndexError:\n print('Empty list, pop blocked')\n\n def peek(self):\n if not self.datas:\n return []\n\n print(self.datas[-1])\n return self.datas[-1] \n\n def __repr__(self):\n return f'{self.datas}'\n\n def __iter__(self):\n self.index = len(self.datas)\n return self\n \n def __next__(self):\n if self.index == 0:\n raise StopIteration\n\n self.index -= 1\n return self.datas[self.index]\n \n\nif __name__ == \"__main__\":\n stack = Stack()\n stack.push('A')\n stack.push(2)\n stack.push('Z')\n\n print('Peek: ', end=\"\")\n stack.peek()\n\n copy_stack= deepcopy(stack)\n copy_stack.pop()\n copy_stack.pop()\n copy_stack.pop()\n print('\\nPop exception because copy_stack is clean: ', end=\"\");\n \n copy_stack.pop()\n print('\\nElements in original Stack:')\n for element in stack:\n print (f'[ {element} ]')\n","repo_name":"MarcosMMarques/Logica-de-Programacao","sub_path":"Python/Stack_basic.py","file_name":"Stack_basic.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38412058512","text":"# note to self. 
Have BioTechTopics pickle the key objects and matricies in different files\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n#import nltk\nfrom nltk.stem.porter import *\n#from nltk.corpus import stopwords\nimport pickle\nfrom BioTechTopics import Topics\nfrom BTT_functions import tokenizeAndStemStrings,cleanString\n#from sklearn.decomposition import LatentDirichletAllocation\nimport time\n\nif __name__ == '__main__':\n print('executed main')\n\n t=Topics()\n t.getText(json_file_loc='/home/ryan/Dropbox/Code/DataIncubatorChallenge/BioTechTopics/data/all_reports.json')\n #t.processCorpus()\n \n # train and save tfidf representation: 20 minutes\n print('\\nTraining tf-idf Vectorizer')\n start = time.time()\n tfidf_vectorizer = TfidfVectorizer(tokenizer=tokenizeAndStemStrings, stop_words='english',ngram_range=(1,1), use_idf=True, smooth_idf = False, norm=None, min_df=0.002, max_df=0.2) \n tfidf = tfidf_vectorizer.fit_transform(t.text_df['text_body'].apply(cleanString))\n end = time.time()\n print('Done training after ' + str(end-start) + ' seconds')\n \n with open('./data/tfidf_vectorizer_maxdf0_2.p', 'wb') as f:\n pickle.dump(tfidf_vectorizer,f)\n with open('./data/tfidf_maxdf0_2.p', 'wb') as f:\n pickle.dump(tfidf,f)\n \n del tfidf_vectorizer,tfidf\n\nprint(\"done\")","repo_name":"ryanmdavis/BioTechTopics","sub_path":"tests/BTT_run_and_save.py","file_name":"BTT_run_and_save.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12128388557","text":"import os\nimport select\nimport ROOT\nimport sys\nsys.path.append('..')\nimport time\nfrom datetime import datetime\nfrom itertools import groupby, product\nfrom objects.Payload import Payload\nfrom utils.beamSpotMerge import cleanAndSort\nfrom utils.beamSpotMerge import splitByDrift\nfrom utils.beamSpotMerge import averageBeamSpot\nfrom utils.getFileList import get_files\nfrom utils.readJson import readJson\nfrom utils.fillRunDict import _createFillList\n\nfiles = get_files('/Users/manzoni/Documents/beamspot/Run1/fromDB_BeamSpotObjects_allRun1.txt', prependPath=True)\n\nprint ('start loading payloads ...')\nmyPayload = Payload(files)\nprint ('... 
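# Illustrative read-back for the pickling above: reload the fitted vectorizer
# and matrix from the same paths, then reuse the learned vocabulary on new text.
import pickle

def load_tfidf(vec_path='./data/tfidf_vectorizer_maxdf0_2.p',
               mat_path='./data/tfidf_maxdf0_2.p'):
    with open(vec_path, 'rb') as f:
        vectorizer = pickle.load(f)
    with open(mat_path, 'rb') as f:
        tfidf = pickle.load(f)
    return vectorizer, tfidf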
payloads loaded')\n\n# convert results into a dictionary { Run : {Lumi Range: BeamSpot Fit Object} }\nallBS = myPayload.fromTextToBS() \n\nfor irun, ivalues in allBS.items():\n allBS[irun] = cleanAndSort(ivalues, cleanBadFits = True, iov = True)\n\nbs_by_run = []\n\n# check drifts and create IOV\nfor irun, ibs in allBS.items():\n aveBeamSpot = averageBeamSpot(ibs.values())\n bs_by_run.append(aveBeamSpot)\n\n# keep only 3.8T\njson2010 = readJson(fileName = '/Users/manzoni/Desktop/BeamspotTools/data/json2010/json_DCSONLY.txt')\njson2011 = readJson(fileName = '/Users/manzoni/Desktop/BeamspotTools/data/json2011/json_DCSONLY.txt')\njson2012 = readJson(fileName = '/Users/manzoni/Desktop/BeamspotTools/data/json2012/json_DCSONLY.txt')\n\nruns3p8T = sorted([i for i in json2010.keys() + json2011.keys() + json2012.keys()])\nbs_by_run = [ibs for ibs in bs_by_run if ibs.Run in runs3p8T]\n\n# since these BS are from the database, they don't carry a timestamp.\n# Do the matching by hand through the Fill thingy\n# An approximation is assumed for simplicity sake: the time stamp of \n# the begin of stable beams is assigned to all runs in the fill.\n# Good enough for now\n\nfillrun = _createFillList()\n\nfor ibs, ifill in product(bs_by_run, fillrun.values()):\n if ibs.Run in ifill.Runs:\n ibs.IOVBeginTime = time.mktime(ifill.BeginTime.timetuple())\n\n# prune from runs which weren't in any stable fill (there are some in 2010)\nbs_by_run = [ibs for ibs in bs_by_run if ibs.IOVBeginTime > 0.]\n\n\n# create container for by run bs\nnewbs = []\n\n# create a container for runs to merge\ntomerge =[]\n\nmonth = -1\n\nfor ibs in bs_by_run:\n #print 'processing run', ibs.Run\n date = datetime.utcfromtimestamp(ibs.IOVBeginTime) \n imonth = date.month\n if (imonth != month and month > 0) or ibs == bs_by_run[-1]:\n print ('processing run %d year %d month %d' %(ibs.Run, date.year, imonth))\n aveBeamSpot = averageBeamSpot(tomerge, doNotCheck=['Run'])\n aveBeamSpot.Dump('beamspot_run1_bymonth.txt', 'a+')\n newbs.append(aveBeamSpot)\n tomerge = []\n run = irun\n tomerge.append(ibs)\n month = imonth\n\noutfile = open('run1_bs_xy_vs_month.csv', 'w+')\nprint ('year,month,x,xerr,y,yerr', file=outfile)\n\nfor ibs in newbs:\n date_start = datetime.utcfromtimestamp(ibs.IOVBeginTime)\n print ('year {}, month {}\\t'\\\n 'X = {:3.6f} +/- {:3.4E} [cm]\\t' \\\n 'Y = {:3.6f} +/- {:3.4E} [cm]' \\\n .format(date_start.year, date_start.month,\n ibs.X , ibs.Xerr ,\n ibs.Y , ibs.Yerr ,))\n print (','.join([str(date_start.year), str(date_start.month), str(ibs.X), str(ibs.Xerr), str(ibs.Y), str(ibs.Yerr)]),file=outfile)\n \noutfile.close()\n\n\n# year 2011, month 3\tX = 0.077839 +/- 9.7902E-07 [cm]\tY = 0.028113 +/- 9.7715E-07 [cm]\n# year 2011, month 4\tX = 0.073770 +/- 7.4586E-07 [cm]\tY = 0.031826 +/- 7.4746E-07 [cm]\n# year 2011, month 5\tX = 0.073375 +/- 6.7365E-07 [cm]\tY = 0.033227 +/- 6.7243E-07 [cm]\n# year 2011, month 6\tX = 0.073288 +/- 4.6439E-07 [cm]\tY = 0.035653 +/- 4.6361E-07 [cm]\n# year 2011, month 7\tX = 0.070141 +/- 8.5694E-07 [cm]\tY = 0.041313 +/- 8.5277E-07 [cm]\n# year 2011, month 8\tX = 0.069972 +/- 1.1458E-06 [cm]\tY = 0.042607 +/- 1.1408E-06 [cm]\n# year 2011, month 9\tX = 0.075704 +/- 4.5109E-07 [cm]\tY = 0.041030 +/- 4.4913E-07 [cm]\n# year 2011, month 10\tX = 0.075489 +/- 4.4468E-07 [cm]\tY = 0.041007 +/- 4.4240E-07 [cm]\n# year 2012, month 4\tX = 0.073277 +/- 4.1297E-07 [cm]\tY = 0.056441 +/- 4.1600E-07 [cm]\n# year 2012, month 5\tX = 0.072273 +/- 4.4371E-07 [cm]\tY = 0.061710 +/- 4.4601E-07 [cm]\n# year 2012, 
month 6\tX = 0.071984 +/- 4.7545E-07 [cm]\tY = 0.063612 +/- 4.7791E-07 [cm]\n# year 2012, month 7\tX = 0.071005 +/- 4.5817E-07 [cm]\tY = 0.060969 +/- 4.6010E-07 [cm]\n# year 2012, month 8\tX = 0.070873 +/- 4.4563E-07 [cm]\tY = 0.063639 +/- 4.4742E-07 [cm]\n# year 2012, month 9\tX = 0.069778 +/- 5.6908E-07 [cm]\tY = 0.063423 +/- 5.7218E-07 [cm]\n# year 2012, month 10\tX = 0.069772 +/- 4.1637E-07 [cm]\tY = 0.062735 +/- 4.1755E-07 [cm]\n# year 2012, month 11\tX = 0.069930 +/- 4.2480E-07 [cm]\tY = 0.062511 +/- 4.2626E-07 [cm]\n# year 2012, month 12\tX = 0.069043 +/- 9.4942E-07 [cm]\tY = 0.062314 +/- 9.5232E-07 [cm]\n\n\n","repo_name":"MilanoBicocca-pix/BeamspotTools","sub_path":"test/LHC_ring_floating/position_vs_time_run1.py","file_name":"position_vs_time_run1.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"18697640240","text":"import math\nfrom enum import Enum\n\nimport numpy as np\nimport cv2\n\n\nclass Environment:\n\n def __init__(self, settings):\n\n self.settings = settings\n\n if self.settings.inputSizeDeg:\n self.widthm = 2*self.settings.viewDist*math.tan((self.settings.inputSizeDeg*math.pi/180)/2)\n self.dotPitchMethod = 'FROM_VIS_ANGLE_SIZE'\n else:\n self.dotPitchMethod = 'FROM_DEG2PIX'\n\n self.scene = None\n self.sceneWithFixations = None\n self.width = -1\n self.height = -1\n self.wPadded = -1\n self.hPadded = -1\n self.prevFix = None\n self.dotPitch = -1\n\n\n def loadStaticStimulus(self, imgPath):\n self.scene = cv2.imread(imgPath)\n\n if self.scene is None:\n raise IOError('Cannot open image {0}!'.format(imgPath))\n\n self.sceneWithFixations = self.scene.astype(np.float32).copy()\n self.height, self.width, self.depth = self.scene.shape\n #self.prevFix = np.array([self.height/2, self.width/2], dtype=np.int32)\n\n self.padStaticStimulus()\n self.updateDotPitch()\n\n def padStaticStimulus(self):\n if self.settings.paddingRGB[0] < 0:\n self.settings.paddingRGB = self.scene.mean(axis=(0,1))\n\n self.scenePadded = cv2.copyMakeBorder(self.scene, round(self.height/2), round(self.height/2), round(self.width/2), round(self.width/2), cv2.BORDER_CONSTANT, value=self.settings.paddingRGB.astype(np.float64))\n\n self.wPadded = self.scenePadded.shape[1]\n self.hPadded = self.scenePadded.shape[0]\n\n\n def updateDotPitch(self):\n if self.dotPitchMethod == 'FROM_VIS_ANGLE_SIZE':\n self.dotPitch = self.widthm/self.width;\n self.settings.pix2deg = self.width/self.settings.inputSizeDeg;\n elif self.dotPitchMethod == 'FROM_DEG2PIX':\n self.settings.inputSizeDeg = self.width/self.settings.pix2deg;\n self.widthm = 2*self.settings.viewDist*math.tan(self.settings.inputSizeDeg/2)\n self.dotPitch = self.widthm/self.width\n else:\n raise ValueError('Unrecognized option for updateDotPitch!')\n\n def getEyeView(self, gazeCoords):\n return self.scenePadded[gazeCoords[0]:gazeCoords[0]+self.height, gazeCoords[1]:gazeCoords[1]+self.width, :]\n\n def drawFixation(self, newFix, prevFix):\n cv2.line(self.sceneWithFixations, (prevFix[1], prevFix[0]), (newFix[1], newFix[0]),(62, 62, 250), 2, cv2.LINE_AA)\n cv2.circle(self.sceneWithFixations, (prevFix[1], prevFix[0]), 1, (62, 62, 250), cv2.LINE_AA)\n cv2.circle(self.sceneWithFixations, (newFix[1], newFix[0]), 1, (62, 62, 250), cv2.LINE_AA)\n #self.prevFix = 
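# Worked example of the dot-pitch relation in updateDotPitch() above, with
# assumed values: physical screen width is 2*d*tan(theta/2) for viewing
# distance d and visual angle theta, and dot pitch is width over pixel count.
import math

view_dist = 0.57        # metres (assumed)
input_size_deg = 30.0   # degrees (assumed)
width_px = 1280
width_m = 2 * view_dist * math.tan(math.radians(input_size_deg) / 2)
dot_pitch = width_m / width_px
print(f'{width_m:.4f} m wide, {dot_pitch * 1e3:.4f} mm per pixel')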
newFix.copy();\n","repo_name":"ykotseruba/pySTAR-FC","sub_path":"src/Environment.py","file_name":"Environment.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"7037205784","text":"from typing import TYPE_CHECKING, Optional\n\nfrom ...account.models import Address\nfrom ...channel.models import Channel\n\nif TYPE_CHECKING:\n from ...graphql.account.types import AddressInput\n\n\ndef get_active_country(\n channel: \"Channel\",\n shipping_address: Optional[\"Address\"] = None,\n billing_address: Optional[\"Address\"] = None,\n address_data: Optional[\"AddressInput\"] = None,\n):\n \"\"\"Get country code for orders, checkouts and tax calculations.\n\n For checkouts and orders, there are following rules for determining the country\n code that should be used for tax calculations:\n - use country code from shipping address if it's provided in the first place\n - use country code from billing address if shipping address is not provided\n - if both shipping and billing addresses are not provided use the default country\n from channel\n\n To get country code from address data from mutation input use address_data parameter\n \"\"\"\n if address_data is not None:\n return address_data.country\n\n if shipping_address:\n return shipping_address.country.code\n\n if billing_address:\n return billing_address.country.code\n\n return channel.default_country.code\n","repo_name":"saleor/saleor","sub_path":"saleor/core/utils/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"31403212330","text":"## Parser functions\n## Karla Godinez, Jon Mohl\n\n\nimport datetime\n\n# Creates dictionary with genes as keys and a list of lists for values.\ndef geneToInfo(genefile):\n f = open(genefile, 'r')\n geneFile = f.readlines()\n f.close()\n genedict = {}\n for line in geneFile:\n pos = line.find('gene')\n if (line[0] != '#') and (pos != -1) and (pos < 10):\n chr = line[0:line.find('\\t')]\n loc = line[pos:line.find('.',pos)].split('\\t')\n pos = line.find('gene_name',pos)\n gene = line[pos:line.find(';',pos)].split('\"')[1]\n genedict[gene] = ['chr'+chr,loc[1]+':'+loc[2]]\n return genedict\n\n# Creates a consolidated txt file for use in getDict function\ndef createGeneInfoDict(infile,outfile):\n geneDict = geneToInfo(infile)\n f = open(outfile,'w')\n try:\n for key, value in geneDict.iteritems():\n temp = key + '\\t' + '\\t'.join(value) + '\\n'\n f.write(temp)\n except:\n for key, value in geneDict.items():\n temp = key + '\\t' + '\\t'.join(value) + '\\n'\n f.write(temp)\n f.close()\n return 'Complete'\n\n# Returns a dictionary based on list\ndef getGeneInfoDict(infile):\n d = {}\n read = open(infile,'r').readlines()\n for r in read:\n tmp = r.strip().split('\\t')\n d[tmp[0]] = tmp[1:]\n return d\n\n# Find index for corrected significance\ndef getCorrectedIndex(header):\n\tfor i in header:\n\t\tif (i == 'PValue') or (i == 'FDR') or (i == 'q_value') or (i == 'corrected') or (i == 'padj'):\n\t\t\treturn header.index(i)\n\treturn 'NaN'\n\n# Find index for Log Fold change\ndef getLogFold(header):\n\tfor i in header:\n\t\tif (i == 'log2(fold_change)') or (i == 'log2') or (i == 'logFC') or (i == 'log2FoldChange'):\n\t\t\treturn header.index(i)\n\treturn 'NaN'\n\n# Find index for name in header\ndef getColumnHeader(name,header):\n\tfor i in header:\n\t\tif (i == 'fold') or (i == 
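# Illustrative run of the geneToInfo() parsing above on one made-up GTF-style
# annotation line (coordinates and gene name are invented for the example).
line = '1\thavana\tgene\t11869\t14409\t.\t+\t.\tgene_name "DDX11L1";'
chrom = line[:line.find('\t')]
pos = line.find('gene')
loc = line[pos:line.find('.', pos)].split('\t')
pos = line.find('gene_name', pos)
gene = line[pos:line.find(';', pos)].split('"')[1]
print('chr' + chrom, loc[1] + ':' + loc[2], gene)  # chr1 11869:14409 DDX11L1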
'Fold'):\n\t\t\treturn header.index(i)\n\t\tif (i == '-1/fold') or (i == '-1/Fold'):\n\t\t\treturn header.index(i)\n\tif name in header:\n\t\treturn header.index(name)\n\treturn 'NaN'\n\n# Find index for Numerical values in header\ndef getStats(header):\n\tfor i in header:\n\t\tif (i != 'gene') and (i != 'chromosome') and (i != 'position') and (i != 'sample_1') and (i != 'sample_2') and (i != 'status'):\n\t\t\treturn header.index(i)\n\treturn 'NaN'\n\n\n# Return ok/significant data for Cuffdiff\ndef getDataCD(header,reads,sigthreshold):\n\t# Output new list: gene chromosome position sample_1 sample_2 status value_1 value_2 log2,test_stat,p_value,q_value,significant\n\theader = header.split()\n\theader = header[2:]\n\theader[1] = 'chromosome'\n\theader.insert(2,'position')\n\n\t# Get significant and OK elements\n\tsig_list = []\n\tok_list = []\n\n\t# Find index for significance\n\tindex = getCorrectedIndex(header)\n\tif index == 'NaN':\n\t\treturn False,header,ok_list,sig_list\n\n\t# Get data\n\tfor read in reads:\n\t\ttemp = read.split('\\t')\n\t\tif(temp[2] != '-' and temp[6] == 'OK'):\n\t\t\t# Get significance status\n\t\t\tif (float(temp[-2]) < sigthreshold):\n\t\t\t\tsignificance = 'yes'\n\t\t\telse:\n\t\t\t\tsignificance = 'no'\n\n\t\t\t# Get chromosome/position\n\t\t\tlocus = temp[3].split(':')\n\t\t\tlocus[1] = locus[1].replace('-',':')\n\t\t\tok_list.append([temp[2]]+locus+temp[4:-1]+[significance])\n\t\t\t# Get SIGNIFICANT reads\n\t\t\tif (significance == 'yes'):\n\t\t\t\tsig_list.append([temp[2]]+locus+temp[4:-1]+[significance])\n\treturn True,header,ok_list,sig_list\n\n# Return ok/significant data for R\ndef getDataR(header,reads,sigthreshold):\n\t# Minimum output header: gene, chr, position, log2, corrected, significant\n\theader = header.strip().replace('\"','').split(',') + ['significant']\n\n\t# Get significant and OK elements\n\tsig_list = []\n\tok_list = []\n\n\t# Find index for significance\n\tindex = getCorrectedIndex(header)\n\tif index == 'NaN':\n\t\treturn False,header,ok_list,sig_list\n\n\t# Get data\n\tfor read in reads:\n\t\ttemp = read.replace('\"','').strip().split(',')\n\t\tif (temp[0] != '-') or (temp[0] != ''):\n\t\t\tif temp[index] == 'NA':\n\t\t\t\tsignificance = 'no'\n\t\t\telif (float(temp[index]) < sigthreshold):\n\t\t\t\tsignificance = 'yes'\n\t\t\telse:\n\t\t\t\tsignificance = 'no'\n\n\t\t\tok_list.append(temp + [significance])\n\t\t\tif(significance == 'yes'):\n\t\t\t\tsig_list.append(temp + [significance])\n\treturn True,header,ok_list,sig_list\n\n# Append chromosome and position data into every read\ndef addGeneInfo(header,reads,geneInfo):\n\theader = 'gene,chromosome,position'+header\n\ttmp_reads = []\n\tfor read in reads:\n\t\ttmp_read = read.split(',')\n\t\ttmp_gene = tmp_read[0].replace('\"','').split('_')[0]\n\t\ttmp_data = ','.join(geneInfo.get(tmp_gene,['N/A','N/A']))\n\t\ttmp_reads.append(tmp_gene+','+tmp_data+','+','.join(tmp_read[1:]))\n\n\treturn header,tmp_reads\n\n# Get summary data\ndef getSummary(summary_sheets,threshold,species,filetype,sigthreshold,fileVersions):\n\tsummary = []\n\t# Date\n\tsummary.append(['Date',datetime.datetime.now().strftime(\"%Y-%m-%d\")])\n\t# File Versions\n\tfor f in fileVersions:\n\t\tsummary.append(f.split('\\t'))\n\t# Species\n\tif species == 'hs':\n\t\tsummary.append(['Species','Human'])\n\telif species == 'mm':\n\t\tsummary.append(['Species','Mouse'])\n\telif species == 'dr':\n\t\tsummary.append(['Species','Zebrafish'])\n\telse:\n\t\tsummary.append(['Species','Other'])\n\t# Input file type\n\tif 
filetype == 'cd':\n\t\tsummary.append(['Input file type','CuffDiff'])\n\telse:\n\t\tsummary.append(['Input file type','R'])\n\t# Thresholds\n\tsummary.append(['Significance threshold value',+sigthreshold])\n\tsummary.append(['Log > threshold ',threshold[0]])\n\tsummary.append(['Log < threshold ',threshold[1]])\n\tsummary.append(['Fold > threshold ',threshold[2]])\n\tsummary.append(['Fold < threshold ',threshold[3]])\n\t# Sheets\n\tfor s in summary_sheets:\n\t\tsummary.append(s.split(':'))\n\n\treturn summary\n\n\n\n\n\n","repo_name":"godinezmaciask/RNASeq-PF","sub_path":"BaseFunctions/parseFunctions.py","file_name":"parseFunctions.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70567779733","text":"\ndef reverse_complement(sequence):\n complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n return ''.join(complement[base] for base in reversed(sequence))\n\ndef graphconvey(gfafile,selectref):\n from collections import defaultdict\n with open(gfafile, 'r') as file:\n lines = file.readlines()\n selectref = selectref\n # 使用字典存储每个节点的个体\n node_individuals = defaultdict(set)\n # 存储每个个体的节点顺序\n individual_nodes = defaultdict(list)\n dicseq = {}\n diclength = {}\n\n for line in lines:\n if line.startswith('S'):\n line =line.strip()\n i = line.split()\n dicseq[i[1]] = i[2]\n diclength[i[1]] = len(i[2])\n\n dicacc = {}\n for line in lines:\n if line.startswith('P'): \n line =line.strip()\n i = line.split()\n dicacc[i[1]] = {}\n acc = 0\n for node in i[2].split(\",\"):\n acc += diclength[node[:-1]]\n direaction = node[-1]\n node = node[:-1]\n dicacc[i[1]][node] = [acc,acc-diclength[node],direaction]\n reflist = list(dicacc[selectref].keys())\n reflisting = list(dicacc[selectref].keys())\n\n runlist = [selectref]\n print(\">\" + selectref)\n for i in list(dicacc.keys()):\n\n uniqlist = []\n if i != selectref:\n runlist.append(i) \n nowlist = list(dicacc[i].keys())\n for j in list(dicacc[i].keys()):\n if j not in reflisting:\n uniqlist.append(j)\n # print(i,uniqlist) \n reflisting = reflisting+uniqlist\n wholeacc = []\n wholeaccname = []\n\n for j in uniqlist:\n wholeacc.append([dicacc[i][j][1],dicacc[i][j][0]]) \n wholeaccname.append(j)\n merged_data = []\n merged_row_names = []\n\n while wholeacc:\n start, end = wholeacc.pop(0)\n row_name = [wholeaccname.pop(0)]\n changed = True\n while changed:\n changed = False\n for l, (a, b) in enumerate(wholeacc[:]):\n if a == end:\n end = b\n row_name.append(wholeaccname.pop(l))\n wholeacc.pop(l)\n changed = True\n break\n if b == start:\n start = a\n row_name.insert(0, wholeaccname.pop(l))\n wholeacc.pop(l)\n changed = True\n break\n merged_data.append([start, end])\n merged_row_names.append(row_name)\n # print(merged_data,merged_row_names)\n for j in range(len(merged_row_names)):\n nodepoi = merged_data[j]\n nodename = merged_row_names[j]\n\n headnodes = nodename[0]\n endnodes = nodename[-1]\n\n if dicacc[i][headnodes][2] == \"+\":\n prevheadnodes = nowlist[nowlist.index(headnodes) -1]\n nextendnodes = nowlist[nowlist.index(endnodes) +1]\n else:\n prevheadnodes = nowlist[nowlist.index(endnodes) +1]\n nextendnodes = nowlist[nowlist.index(headnodes) -1]\n # print(nodename,headnodes,endnodes,prevheadnodes,nextendnodes)\n if nowlist.index(headnodes) -1 < 0 or nowlist.index(headnodes) -1 < 0:\n continue\n print(\">\"+i,str(nodepoi[0]),str(nodepoi[1]),sep = \"_\",end = \"\\t\")\n for k in runlist:\n\n if prevheadnodes in dicacc[k].keys():\n if 
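# Standalone sketch of the segment-chaining loop above: spans whose end matches
# another span's start are fused, mirroring how merged_data collects
# consecutive non-reference node regions into single intervals.
def chain_intervals(spans):
    spans = [list(s) for s in spans]
    merged = []
    while spans:
        start, end = spans.pop(0)
        changed = True
        while changed:
            changed = False
            for k, (a, b) in enumerate(spans):
                if a == end:
                    end = b
                    spans.pop(k)
                    changed = True
                    break
                if b == start:
                    start = a
                    spans.pop(k)
                    changed = True
                    break
        merged.append((start, end))
    return merged

print(chain_intervals([(0, 5), (5, 9), (20, 30)]))  # [(0, 9), (20, 30)]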
dicacc[k][prevheadnodes][2] == \"+\":\n print(\">\"+k+\":\"+str(dicacc[k][prevheadnodes][0]),end = \"\\t\")\n break\n if dicacc[k][prevheadnodes][2] == \"-\":\n print(\">\"+k+\":\"+str(dicacc[k][prevheadnodes][1]),end = \"\\t\")\n break\n\n for k in runlist: \n if nextendnodes in dicacc[k].keys():\n print(\">\"+k+\":\"+str(dicacc[k][nextendnodes][1]),end = \"\\t\")\n break\n if dicacc[k][nextendnodes][2] == \"+\":\n print(\">\"+k+\":\"+str(dicacc[k][nextendnodes][0]),end = \"\\t\")\n if dicacc[k][nextendnodes][2] == \"-\": \n print(\">\"+k+\":\"+str(dicacc[k][nextendnodes][1]),end = \"\\t\")\n print()\n","repo_name":"lipingfangs/VAP","sub_path":"src/graphconvey.py","file_name":"graphconvey.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"22096947511","text":"\nclass Solution(object):\n def findWords(self, words):\n\n # Step1 - Create output variable\n output = []\n\n # Steo2 - Create sets manually\n row1Set = {'q','w','e','r','t','y','u','i','o','p'}\n row2Set = {'a','s','d','f','g','h','j','k','l'}\n row3Set = {'z','x','c','v','b','n','m'}\n\n # Step3 - Traverse words in wordArray\n for word in words:\n print(\"Current Word: \" + word)\n\n # Step4 - Create a flag which checks if the letters exists within the set\n isInSet = True\n\n # Step5 - Fetch the current set\n currentSet = self.fetchSet(word, row1Set, row2Set, row3Set)\n\n # Step6 - Traverse each letter and check if exists in appropriate set\n for letter in word:\n letter = letter.lower()\n if letter not in currentSet:\n isInSet = False\n break\n\n\n # Append only if all letters exists in set\n if isInSet:\n output.append(word)\n\n\n return output\n\n def fetchSet(self, word, set1, set2, set3):\n\n if word[0].lower() in set1:\n return set1\n\n if word[0].lower() in set2:\n return set2\n\n if word[0].lower() in set3:\n return set3\n\n\ndef main():\n words = [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\n solution = Solution()\n output = solution.findWords(words)\n print(\"Words which can be typed in one row of keyboard: \", output)\n\n\nif __name__ == '__main__':\n main()\n\n'''\nNotes\n- Super easy\n\nSolution\n1. Create sets for each row\n2. Traverse the words array one by one\n3. Check if the letters in current word are in the respective set\n4. Add to outputlist list of all letters exists in th set \n\n'''","repo_name":"timManas/Practice","sub_path":"Python/project/src/Misc/KeyboardRow/KeyboardRow.py","file_name":"KeyboardRow.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19432198613","text":"##########\n#### This Package contains components to build optical setups. \n#### Absorption grating class\n####Author: Lionel Fiske \n####Last update 9/15/2021\n#####\n\n\nimport torch\nimport numpy as np\nimport math \nimport warnings\nfrom Helper_Functions import * \n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \nwarnings.filterwarnings(\"ignore\", category=UserWarning) \n\n\nclass Absorption_Mask(torch.nn.Module):\n \n def __init__(self, transmission , device = torch.device(\"cpu\") , fixed_pattern = False, dtype=torch.double):\n \"\"\"\n Applies ideal absorption mask to the incident field. \n \n \n transmission : float Tensor\n transmission coefficients to be multiplied in. 
Internally clamped to between 0-1.\n \n \n device : torch device\n \n fixed_pattern : Bool\n If True the phase delay will not be set to an nn.parameter to be optimized for \n \n \n \"\"\"\n super().__init__()\n \n \n if fixed_pattern== False:\n self.transmission = torch.nn.Parameter(transmission.to(device),requires_grad=True).type(dtype)\n else:\n self.transmission = transmission.to(device).type(dtype)\n \n \n #Set internal variables\n self.device = device\n self.fixed_pattern = fixed_pattern\n \n \n \n def forward(self, field):\n \"\"\"\n Takes in complex tensor and applies an absorption mask to it. \n \n Parameters\n ==========\n field : torch.complex128\n Complex field (MxN).\n \"\"\"\n Eout_trans = field*self.transmission.clamp(0,1)\n \n if field.ndim == 2:\n return Eout_trans.squeeze()\n else:\n return Eout_trans \n\n return Eout_trans \n\n","repo_name":"mich-lee/SWH-Code","sub_path":"OTHER/Tocohpy Test/Tocohpy/Optical_Components/.ipynb_checkpoints/Absorption_Mask-checkpoint.py","file_name":"Absorption_Mask-checkpoint.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11153613322","text":"class TrackingInfo :\n \"\"\"\n tracking할 비디오 정보\n 이전에 나온 사람 정보, frame 번호, 부여된 아이디 수를 가짐\n \"\"\"\n def __init__(self, \n frame_id=-1, # frame id\n tracked_stracks=[], # tracking에 사용하는 STrack list\n lost_stracks=[], # tracking에 사용하는 STrack list\n removed_stracks=[], # tracking에 사용하는 STrack list\n count=0, # 부여한 아이디 개수\n max_time_lost = 30, # track을 제거하기 위한 객체가 검출 되지 않은 최소 횟수\n size = (1280, 720), # 원본 이미지 크기(height, width)\n track_thres = 0.6, # 기억할 사람의 최소 점수\n match_thres = 0.9, # 이전 정보와 비교할 점수 기준\n **params # 추가 정보\n ) :\n self.frame_id = frame_id\n self.tracked_stracks = tracked_stracks\n self.lost_stracks = lost_stracks\n self.removed_stracks = removed_stracks\n self.count = count\n self.max_time_lost = max_time_lost\n self.size = size\n self.track_thres = track_thres\n self.match_thres = match_thres\n self.det_thres = self.track_thres + 0.1\n \n for key, value in params.items() :\n setattr(self, key, value)\n\n def __str__(self) :\n return f\"<\\n\\tframe_id : {self.frame_id}\\n\\tlenght tracked_strakcs :{len(self.tracked_stracks)}\\n\\tlength lost_stracks : {len(self.lost_stracks)}\\n\\tcount : {self.count}\\n>\"\n\n\n","repo_name":"injung1008/workspace","sub_path":"pangyo/tracker/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4209031244","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 21 18:35:04 2023\n\n@author: solene\n\"\"\"\n\n\n\n\n####This sccript takes as inputs the spectral decomposition and the diffusion communities made at time tau\n####It outputs the Cheeger mixing for each community\n\nimport numpy as np\nimport scipy\nimport networkx as nx\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import inv\nfrom scipy import sparse\nfrom sklearn.cluster import KMeans\nimport time\nimport mat73\nfrom scipy.io import savemat, 
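# Illustrative usage of the TrackingInfo class above: extra keyword arguments
# are bound to the instance via setattr, so callers can attach ad-hoc fields
# (camera_id is an invented example). Note also that the mutable list defaults
# in __init__ are shared across instances that rely on them.
info = TrackingInfo(frame_id=0, size=(1920, 1080), camera_id='cam-03')
print(info.det_thres)   # track_thres + 0.1 (0.6 + 0.1)
print(info.camera_id)   # cam-03
print(info)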
loadmat\n\n\n\nstr_network='config_model_2p8'\npath_adj=\"/home/solene/Documents/Solene/DataMarseille/ganglion_lymph/paper_draft4/diffusion_pipeline/repository_github/Adjacency_matrices/\"\npath_com='/home/solene/Documents/Solene/DataMarseille/ganglion_lymph/paper_draft4/diffusion_pipeline/repository_github/Diffusion_communities/'\npath_save='/home/solene/Documents/Solene/DataMarseille/ganglion_lymph/paper_draft4/diffusion_pipeline/repository_github/output/'\nA=scipy.sparse.load_npz(path_adj+'A_config_model_2p8.npz')\nidx=np.load(path_com+'clusters_'+str_network+'.npy')\n\ndef find_Cheeger_mix(A,idx,ic,d):\n ind=np.where(idx==ic)[0]\n not_ind=np.where(idx!=ic)[0]\n sub_A=A[ind,:]\n sub_A=sub_A[:,not_ind]\n E_inter=np.sum(sub_A)\n V_ind=np.sum(d[ind])\n V_not_ind=np.sum(d[not_ind])\n hG=E_inter/np.min([V_ind,V_not_ind])\n return hG\n\n###########################################\ng=nx.from_numpy_array(A)\nd=nx.degree(g)\nd=np.array(d)\nd=d[:,1]\n\n\n#######################################################\nstart_time = time.time()\nprint('starting analysis of '+str_network)\nk=100\nhGs=np.zeros((k,1))\nfor ic in range(k):\n hG=find_Cheeger_mix(A,idx,ic,d)\n hGs[ic]=hG\nnp.save(path_save+'hGs_'+str_network+'.npy',hGs)\n\n","repo_name":"SoleneS/RW_lymph_node","sub_path":"Workflow4_Cheeger_mixing.py","file_name":"Workflow4_Cheeger_mixing.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10206929690","text":"\"\"\"\n@brief test log(time=4s)\n\"\"\"\nimport os\nimport json\nimport unittest\nimport pandas\nfrom pyquickhelper.pycode import ExtTestCase, get_temp_folder\nfrom pyquickhelper.filehelper.compression_helper import unzip_files\nfrom mlprodict.asv_benchmark import export_asv_json, create_asv_benchmark\nfrom mlprodict.asv_benchmark.asv_exports import (\n _figures2dict, _coor_to_str, _dict2str)\nfrom mlprodict.asv_benchmark._create_asv_helper import _display_code_lines\n\n\nclass TestAsvJsonText(ExtTestCase):\n\n data = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')\n\n def test__display_code_lines(self):\n code = \"import h\\nh(4)\"\n res = _display_code_lines(code)\n self.assertIn('001', res)\n self.assertIn('002', res)\n\n def test_update_obs(self):\n values = list(range(0, 18))\n coor = [[\"'skl'\", \"'pyrt'\", \"'ort'\"], [\n '1', '100', '10000'], ['4', '20']]\n res = _figures2dict(values, coor)\n exp = {'M-skl-1-4': 0, 'M-skl-1-20': 1, 'M-skl-100-4': 2, 'M-skl-100-20': 3,\n 'M-skl-10000-4': 4, 'M-skl-10000-20': 5, 'M-pyrt-1-4': 6, 'M-pyrt-1-20': 7,\n 'M-pyrt-100-4': 8, 'M-pyrt-100-20': 9, 'M-pyrt-10000-4': 10,\n 'M-pyrt-10000-20': 11, 'M-ort-1-4': 12, 'M-ort-1-20': 13, 'M-ort-100-4': 14,\n 'M-ort-100-20': 15, 'M-ort-10000-4': 16, 'M-ort-10000-20': 17}\n self.assertEqual(res, exp)\n res = _figures2dict(values, coor, baseline='skl')\n self.assertIn('R-ort-10000-20', res)\n\n def test_asv_json_simplify(self):\n temp = get_temp_folder(__file__, 'temp_asv_json_simplify')\n filenames = [\n os.path.join(TestAsvJsonText.data, 'benchmarks.json'),\n os.path.join(r\"C:\\temp\\results\", 'benchmarks.json'),\n ]\n for i, filename in enumerate(filenames):\n if not os.path.exists(filename):\n continue\n with open(filename, 'r', encoding='utf-8') as f:\n content = json.load(f)\n\n for _, v in content.items():\n if isinstance(v, dict) and 'code' in v:\n v['code'] = \"\"\n\n res = os.path.join(temp, 'benchmarks_%d.json' % i)\n with open(res, 'w', encoding='utf-8') as f:\n 
json.dump(content, f)\n\n with open(res, \"r\", encoding='utf-8') as f:\n content = f.read()\n self.assertIn('{', content)\n\n def test_unzip_and_convert(self):\n file_zip = os.path.join(TestAsvJsonText.data, 'results.zip')\n temp = get_temp_folder(__file__, 'temp_unzip_and_convert')\n unzip_files(file_zip, temp)\n data = os.path.join(temp, 'results')\n exp = export_asv_json(data, baseline=\"skl\")\n self.assertIsInstance(exp, list)\n self.assertTrue(all(map(lambda x: isinstance(x, dict), exp)))\n cc = 0\n for e in exp:\n ms = [k for k in e if k.startswith(\"M-\")]\n rs = [k for k in e if k.startswith(\"R-\")]\n if len(ms) > 0 and len(rs) > 0:\n cc += 1\n if cc == 0:\n raise AssertionError(\"No rs\")\n\n def test_unzip_and_convert2(self):\n file_zip = os.path.join(TestAsvJsonText.data, 'results2.zip')\n temp = get_temp_folder(__file__, 'temp_unzip_and_convert2')\n unzip_files(file_zip, temp)\n data = os.path.join(temp, 'results')\n exp = export_asv_json(data, baseline=\"skl\")\n self.assertIsInstance(exp, list)\n self.assertTrue(all(map(lambda x: isinstance(x, dict), exp)))\n cc = 0\n for e in exp:\n ms = [k for k in e if k.startswith(\"M-\")]\n rs = [k for k in e if k.startswith(\"R-\")]\n if len(ms) > 0 and len(rs) > 0:\n cc += 1\n if cc == 0:\n raise AssertionError(\"No rs\")\n df = export_asv_json(data, baseline=\"skl\", as_df=True)\n df.to_excel(os.path.join(temp, \"res.xlsx\"))\n\n def test_unzip_and_convert_metadata(self):\n file_zip = os.path.join(TestAsvJsonText.data, 'results2.zip')\n temp = get_temp_folder(__file__, 'temp_unzip_and_convert_metadata')\n create_asv_benchmark(\n location=temp, models={'LogisticRegression', 'LinearRegression'})\n unzip_files(file_zip, temp)\n data = os.path.join(temp, 'results')\n conf = os.path.join(temp, 'asv.conf.json')\n exp = export_asv_json(data, baseline=\"skl\", conf=conf)\n par_problem = []\n par_scenario = []\n for row in exp:\n if 'par_problem' in row:\n par_problem.append(row['par_problem'])\n if 'par_scenario' in row:\n par_scenario.append(row['par_scenario'])\n s = set(par_scenario)\n self.assertEqual(s, {'default', 'liblinear'})\n s = set(par_problem)\n self.assertEqual(s, {'m-cl', '~m-reg-64', 'b-cl',\n 'm-reg', 'b-reg', '~b-cl-64',\n '~b-reg-64'})\n out = os.path.join(temp, \"df.xlsx\")\n df = pandas.DataFrame(exp)\n df.to_excel(out)\n\n def test_to_str_coordinates(self):\n val = ['ort', '1', '20', '11', 'double',\n \"{: \"\n \"{'optim': 'cdist'}}\"]\n sval = _coor_to_str(val)\n self.assertEqual(sval, \"M-ort-1-20-11-double-optimcdist\")\n val = ['ort', '1', '20', '11', 'double',\n \"{: \"\n \"{'optim': 'cdist', 'return_std': True}}\"]\n sval = _coor_to_str(val)\n self.assertEqual(sval, \"M-ort-1-20-11-double-optimcdist-return_std1\")\n\n def test__dict2str(self):\n d1 = {'e': 2, 'r': 4}\n self.assertEqual('e2-r4', _dict2str(d1))\n d2 = {'e': 2, 'd1': d1}\n self.assertEqual('e2-d1e2-r4', _dict2str(d2))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"sdpython/mlprodict","sub_path":"_unittests/ut_asv_benchmark/test_asv_json_text.py","file_name":"test_asv_json_text.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"67"} +{"seq_id":"15536952184","text":"import pickle\nimport numpy as np\n\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nimport pytest\n\nfrom oddt.scoring.models import classifiers, regressors\n\n\n@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')\n@pytest.mark.parametrize('cls',\n 
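# Illustrative reduction of the key scheme checked in test_update_obs above:
# _figures2dict lays values out in row-major order over the coordinate axes,
# so each name is the "M-" prefix joined with one label per axis.
from itertools import product

coor = [['skl', 'pyrt'], ['1', '100']]
named = {'M-' + '-'.join(c): v for v, c in enumerate(product(*coor))}
print(named)  # {'M-skl-1': 0, 'M-skl-100': 1, 'M-pyrt-1': 2, 'M-pyrt-100': 3}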
[classifiers.svm(probability=True),\n classifiers.neuralnetwork(random_state=42)])\ndef test_classifiers(cls):\n # toy data\n X = np.concatenate((np.zeros((5, 2)), np.ones((5, 2))))\n Y = np.concatenate((np.ones(5), np.zeros(5)))\n\n np.random.seed(42)\n\n cls.fit(X, Y)\n\n assert_array_equal(cls.predict(X), Y)\n assert cls.score(X, Y) == 1.0\n\n prob = cls.predict_proba(X)\n assert_array_almost_equal(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)\n log_prob = cls.predict_log_proba(X)\n assert_array_almost_equal(np.log(prob), log_prob)\n\n pickled = pickle.dumps(cls)\n reloaded = pickle.loads(pickled)\n prob_reloaded = reloaded.predict_proba(X)\n assert_array_almost_equal(prob, prob_reloaded)\n\n\n@pytest.mark.parametrize('reg',\n [regressors.svm(C=10),\n regressors.randomforest(random_state=42),\n regressors.neuralnetwork(solver='lbfgs',\n random_state=42,\n hidden_layer_sizes=(20, 20)),\n regressors.mlr()])\ndef test_regressors(reg):\n X = np.vstack((np.arange(30, 10, -2, dtype='float64'),\n np.arange(100, 90, -1, dtype='float64'))).T\n\n Y = np.arange(10, dtype='float64')\n\n np.random.seed(42)\n\n reg.fit(X, Y)\n\n pred = reg.predict(X)\n assert (np.abs(pred.flatten() - Y) < 1).all()\n assert reg.score(X, Y) > 0.9\n\n pickled = pickle.dumps(reg)\n reloaded = pickle.loads(pickled)\n pred_reloaded = reloaded.predict(X)\n assert_array_almost_equal(pred, pred_reloaded)\n","repo_name":"oddt/oddt","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"67"} +{"seq_id":"29212944458","text":"import xlsxwriter\nimport openpyxl\n# workbook = xlsxwriter.Workbook('dictExcel.xlsx')\n# worksheet = workbook.add_worksheet()\ndict_perguntas = {\n \"nome\": \"Qual é o seu nome?\", \n \"idade\": \"Qual é a sua idade?\",\n \"sexo\": \"Qual é o seu sexo?\",\n \"cidade\": \"Onde você mora?\"\n}\n\ndict_respostas = dict.fromkeys(dict_perguntas.keys(), \"\") # e se fosse um valor com string cheia?\n\nfor k,v in dict_perguntas.items():\n r = input(v)\n dict_respostas[k]= r\n# def mergeDict(dict_perguntas, dict_respostas):\n# dict_combined = {**dict_perguntas, **dict_respostas}\n# for key, value in dict_combined.items():\n# if key in dict_perguntas and key in dict_respostas:\n# dict_combined[key] = [value , dict_perguntas[key]]\n \n# return dict_combined\n \n# dict_combined = mergeDict(dict_perguntas, dict_respostas)\nwb = openpyxl.load_workbook(filename='dictExcel.xlsx')\nws = wb.active\nrow = 1\ncol = 1\n\nfor key,value in dict_respostas.items():\n ws.cell(row, col, key)\n ws.cell(row, col+1, value)\n row+=1\nfor row in dict_respostas:\n ws.append(row)\n\n \nwb.save('dictExcel.xlsx')\n# workbook.close()\n\n'''\nhttps://thispointer.com/how-to-merge-two-or-more-dictionaries-in-python/\nhttps://stackoverflow.com/questions/33575376/using-user-input-to-create-dictionaries-in-python\nhttps://stackoverflow.com/questions/45201288/how-to-create-header-in-excel-from-a-python-dictionary-keys\n'''\n'''\nMake Excel read + write\nStudy list comprehension\nRotate list and keep only one key\n'''\n\n","repo_name":"edwardkonig/Aulas_de_python","sub_path":"Criar_Excel.py","file_name":"Criar_Excel.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10360894458","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\npokemons = {}\nfor i in range(1, N + 
1):\n p = input().strip()\n pokemons[i] = p\n pokemons[p] = i\n\nfor i in range(M):\n q = input().strip()\n if q.isdigit():\n print(pokemons[int(q)])\n else:\n print(pokemons[q])\n","repo_name":"702criticcal/1Day1Commit","sub_path":"Baekjoon/1620.py","file_name":"1620.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11009832818","text":"try:\n set\nexcept NameError:\n from sets import Set as set\n\nimport kusu.core.database as kusudb\nfrom kusu.shell import Status\n\n# NOTE:\n# The code below can be a bit confusing because the term 'node' is overloaded.\n# It can mean both a node in the cluster as well as a node in the XML DOM. Be\n# careful!\nclass TestStatus(object):\n def test_nodegroups(self):\n db = kusudb.DB('sqlite', ':memory:')\n db.bootstrap()\n\n status = Status(db)\n nodegroups_xml = status.nodegroups()\n\n expected_ngname_set = set([\"installer\", \"compute\", \"unmanaged\"])\n ngname_nodes = nodegroups_xml.getElementsByTagName(\"ngname\")\n ngname_set = set([ngname.firstChild.nodeValue for ngname in ngname_nodes])\n\n assert status._db == db, \"Setting status._db failed\"\n assert expected_ngname_set == ngname_set, \\\n \"XML document contains the following nodes: %s, expected: %s\" % (ngname_set, expected_ngname_set)\n\n expected_column_set = set(db.NodeGroups.cols)\n for ng in nodegroups_xml.getElementsByTagName(\"nodegroup\"):\n ngname_nodes = ng.getElementsByTagName(\"ngname\")\n assert len(ngname_nodes) == 1, \"More than 1 'ngname' node in nodegroup\"\n\n ngname = ngname_nodes[0].firstChild.nodeValue\n column_set = set([n.tagName for n in ng.childNodes])\n assert expected_column_set == column_set, \\\n \"Nodegroup '%s' in XML document contains the following columns: %s, expected: %s\" % (ngname, column_set, expected_column_set)\n\n def test_nodes_summary(self):\n db = kusudb.DB('sqlite', ':memory:')\n db.bootstrap()\n\n status = Status(db)\n nodes_summary_xml = status.nodes_summary()\n\n expected_node_count = '1'\n node_count_nodes = nodes_summary_xml.getElementsByTagName(\"node_count\")\n assert len(node_count_nodes) == 1, \"More than 1 'node_count' node in nodes summary\"\n\n node_count = node_count_nodes[0].firstChild.nodeValue\n\n assert expected_node_count == node_count, \"Node summary reports %s nodes, expected %s\" % (node_count, expected_node_count)\n\n db.Nodes(name='test-00', ngid=1).flush()\n db.Nodes(name='test-01', ngid=1).flush()\n\n nodes_summary_xml = status.nodes_summary()\n\n expected_node_count = '3'\n node_count_nodes = nodes_summary_xml.getElementsByTagName(\"node_count\")\n assert len(node_count_nodes) == 1, \"More than 1 'node_count' node in nodes summary\"\n\n node_count = node_count_nodes[0].firstChild.nodeValue\n\n assert expected_node_count == node_count, \"Node summary reports %s nodes, expected %s\" % (node_count, expected_node_count)\n","repo_name":"georgegoh/CLUMP","sub_path":"src/kits/base/packages/kusu-shell/src/test/test_status.py","file_name":"test_status.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"4089901831","text":"import os\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom graph_pkg_core.algorithm.graph_edit_distance import GED\nfrom graph_pkg_core.edit_cost.edit_cost_vector import EditCostVector\nfrom graph_pkg_core.graph.edge import Edge\nfrom graph_pkg_core.graph.graph import Graph\nfrom 
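# Illustrative note on the lookup table built above: a single dict stores both
# directions (index -> name and name -> index), so a query string that is all
# digits resolves by number and anything else resolves by name.
pokemons = {}
for idx, name in enumerate(['Bulbasaur', 'Ivysaur'], start=1):  # sample names
    pokemons[idx] = name
    pokemons[name] = idx
print(pokemons[1], pokemons['Ivysaur'])  # Bulbasaur 2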
graph_pkg_core.graph.label.label_edge import LabelEdge\nfrom graph_pkg_core.graph.label.label_node_vector import LabelNodeVector\nfrom graph_pkg_core.graph.node import Node\nfrom graph_pkg_core.loader.loader_vector import LoaderVector\n\nFOLDER_DATA = os.path.join(os.path.dirname(__file__),\n '../test_data/proteins_old')\n\n\n@pytest.fixture()\ndef test_graphs():\n loader = LoaderVector(FOLDER_DATA)\n graphs = loader.load()\n\n return graphs\n\n\n@pytest.fixture()\ndef defined_graph():\n ged = GED(EditCostVector(1., 1., 1., 1., 'euclidean'))\n\n n, m = 4, 3\n graph_source = Graph('gr_source', 'gr_source.gxl', n)\n graph_target = Graph('gr_target', 'gr_targe.gxl', m)\n\n # Init graph source: add nodes and edges\n graph_source.add_node(Node(0, LabelNodeVector(np.array([1.]))))\n graph_source.add_node(Node(1, LabelNodeVector(np.array([2.]))))\n graph_source.add_node(Node(2, LabelNodeVector(np.array([1.]))))\n graph_source.add_node(Node(3, LabelNodeVector(np.array([3.]))))\n\n graph_source.add_edge(Edge(0, 1, LabelEdge(0)))\n graph_source.add_edge(Edge(1, 2, LabelEdge(0)))\n graph_source.add_edge(Edge(1, 3, LabelEdge(0)))\n graph_source.add_edge(Edge(2, 3, LabelEdge(0)))\n\n # Init graph target: add nodes and edges\n graph_target.add_node(Node(0, LabelNodeVector(np.array([3.]))))\n graph_target.add_node(Node(1, LabelNodeVector(np.array([2.]))))\n graph_target.add_node(Node(2, LabelNodeVector(np.array([2.]))))\n graph_target.add_edge(Edge(0, 1, LabelEdge(0)))\n graph_target.add_edge(Edge(1, 2, LabelEdge(0)))\n\n return ged, graph_source, graph_target\n\n\ndef test_simple_ged(defined_graph):\n ged, graph_source, graph_target = defined_graph\n\n cost = ged.compute_edit_distance(graph_source, graph_target)\n\n expected_cost = 4.\n\n expected_C = np.array([[2., 1., 1., 1., np.inf, np.inf, np.inf],\n [1., 0., 0., np.inf, 1., np.inf, np.inf],\n [2., 1., 1., np.inf, np.inf, 1., np.inf],\n [0., 1., 1., np.inf, np.inf, np.inf, 1.],\n [1., np.inf, np.inf, 0., 0., 0., 0.],\n [np.inf, 1., np.inf, 0., 0., 0., 0.],\n [np.inf, np.inf, 1., 0., 0., 0., 0.]])\n\n expected_C_star = np.array([[2., 2., 1., 2., np.inf, np.inf, np.inf],\n [3., 1., 2., np.inf, 4., np.inf, np.inf],\n [3., 1., 2., np.inf, np.inf, 3., np.inf],\n [1., 1., 2., np.inf, np.inf, np.inf, 3.],\n [2., np.inf, np.inf, 0., 0., 0., 0.],\n [np.inf, 3., np.inf, 0., 0., 0., 0.],\n [np.inf, np.inf, 2., 0., 0., 0., 0.]])\n # np.set_printoptions(precision=2)\n # print('c')\n # print(ged.C.base)\n # print('c_star')\n # print(ged.C_star.base)\n\n assert np.array_equal(np.asarray(ged.C), expected_C)\n assert np.array_equal(np.asarray(ged.C_star), expected_C_star)\n assert len(graph_source) == 4\n assert len(graph_target) == 3\n assert cost == expected_cost\n\n\ndef test_ged_same_graph(defined_graph):\n ged, graph_source, graph_target = defined_graph\n\n cost = ged.compute_edit_distance(graph_source, graph_source)\n\n expected_cost = 0.\n\n assert cost == expected_cost\n\n\ndef test_heuristic_size(defined_graph):\n ged, graph_source, graph_target = defined_graph\n\n cost_1 = ged.compute_edit_distance(graph_source, graph_target, heuristic=True)\n cost_2 = ged.compute_edit_distance(graph_target, graph_source, heuristic=True)\n\n assert cost_1 == cost_2\n\n\n@pytest.mark.parametrize('idx_tr, idx_te, expected_dist',\n [\n ([0, 1, 2, 3],\n [4, 5, 6],\n np.array([[22.4, 43.6, 98.6],\n [44.0391919, 43.44507935, 84.4],\n [24.45685425, 40.4, 96.2],\n [26.3254834, 42.2, 97.6]]))\n\n ])\ndef test_real_graphs(test_graphs, idx_tr, idx_te, expected_dist):\n ged = 
GED(EditCostVector(1., 1., 1., 1., 'euclidean', alpha=0.8))\n dists = []\n for idx1, idx2 in product(idx_tr, idx_te):\n gr_1 = test_graphs[idx1]\n gr_2 = test_graphs[idx2]\n dists.append(ged.compute_edit_distance(gr_1, gr_2, heuristic=True))\n\n results = np.array(dists).reshape(len(idx_tr), len(idx_te))\n\n assert np.linalg.norm(results - expected_dist) < 1e-8\n","repo_name":"Icewater1337/graph-matching-core","sub_path":"tests/unit_ged/test_ged.py","file_name":"test_ged.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"70094430293","text":"\"\"\"Test suite for kmer.\"\"\"\n\nfrom unittest import TestCase\n\n\nfrom gimmebio.sample_seqs import EcoliGenome\n\nfrom gimmebio.kmers import make_kmers\nfrom gimmebio.kmers.clustering import GreedyRadialCover, hamming_distance\n\nECOLI = EcoliGenome()\n\n\nclass TestKmerClustering(TestCase):\n \"\"\"Test suite for wasabi.\"\"\"\n\n def test_make_kmers(self):\n \"\"\"Idiot check myself.\"\"\"\n make_kmers(ECOLI.longest_contig()[:1000], 31, canon=True)\n\n def test_init_cover(self):\n GreedyRadialCover(hamming_distance, 2)\n\n def test_add_to_cover(self):\n kmers = make_kmers(ECOLI.longest_contig()[:1000], 31, canon=True)\n radial_cover = GreedyRadialCover(hamming_distance, 2)\n for kmer in kmers:\n radial_cover.add(kmer)\n\n def test_kmer_stats(self):\n kmers = make_kmers(ECOLI.longest_contig()[:1000], 31, canon=True)\n radial_cover = GreedyRadialCover(hamming_distance, 2)\n for kmer in kmers:\n radial_cover.add(kmer)\n radial_cover.stats()\n for kmer in kmers[:10]:\n radial_cover.search(kmer, 1)\n","repo_name":"dcdanko/gimmebio","sub_path":"gimmebio/kmers/tests/test_kmer_clustering.py","file_name":"test_kmer_clustering.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"72988253012","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:\n\n def check(root, subRoot):\n if root==None or subRoot ==None:\n return root==subRoot\n # print(root.val,subRoot.val)\n if root.val != subRoot.val:\n return False \n return check(root.left, subRoot.left) and check(root.right, subRoot.right) \n \n #traversing the main tree \n def preorder(root):\n if root ==None:\n return False\n if root.val == subRoot.val:\n if check(root, subRoot):\n return True \n \n return preorder(root.left) or preorder(root.right)\n\n return preorder(root)\n \n \n\n ","repo_name":"prbln/Leetcode","sub_path":"572-subtree-of-another-tree/subtree-of-another-tree.py","file_name":"subtree-of-another-tree.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13752529777","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc, animation, rcParams\nimport sys\nfont = {'weight': 'bold',\n 'size': 22}\nrc('font', **font)\nrcParams['animation.convert_path'] = '/usr/bin/convert'\nrcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'\nrpm_per_rps = 60/(2*np.pi)\n\n\n\nclass ArmSegment:\n def __init__(self, length, constraints):\n self.length = length\n self.constraints = constraints\n self._angle = 0\n self._last_angle = 0\n 
self.angle = 0\n\n @property\n def angle(self):\n return self._angle\n\n @angle.setter\n def angle(self, angle):\n self._last_angle = self._angle\n self._angle = np.clip(angle, self.constraints[0], self.constraints[1])\n\n def forwardKinematics(self):\n return self.length * np.array([np.cos(self.angle), np.sin(self.angle)])\n\n def getDelta(self):\n return self._angle - self._last_angle\n\n def __str__(self):\n return f\"({self.length}, {self.angle})\"\n\n\nclass Arm:\n def __init__(self, length, num_segments):\n num_segments = 2\n self.segment_0 = ArmSegment(length, [-np.pi*10, np.pi*10])\n self.segment_1 = ArmSegment(length, [-np.pi*10, np.pi*10])\n self.max_radius = length * num_segments\n\n def inverseKinematics(self, point):\n point = np.array(point)\n radius = np.linalg.norm(point)\n\n if radius >= self.max_radius:\n point = (point / radius) * self.max_radius\n # raise ValueError(\n # f\"{radius} is further than arm can reach ({self.max_radius})\")\n\n cos_alpha = (point[0] ** 2 + point[1] ** 2 - self.segment_0.length**2 -\n self.segment_1.length**2)/(2 * self.segment_0.length * self.segment_1.length)\n alpha = -np.arccos(cos_alpha)\n\n beta = np.arctan2(point[1], point[0]) - np.arctan2(self.segment_1.length * np.sin(\n alpha), self.segment_0.length + self.segment_1.length*np.cos(alpha))\n\n self.segment_1.angle = beta + alpha\n self.segment_0.angle = beta\n print(f'{rpm_per_rps*self.segment_0.getDelta()/dt} {rpm_per_rps*self.segment_1.getDelta()/dt}')\n\n def forwardKinematics(self):\n p0 = self.segment_0.forwardKinematics()\n p1 = p0 + self.segment_1.forwardKinematics()\n return [p0, p1]\n\n def __str__(self):\n return \"(\" + \", \".join([str(s) for s in (self.segment_0, self.segment_1)]) + \")\"\n\n\narm = Arm(5, 2)\n2\nfig = plt.figure()\nax = plt.axes(xlim=(-12, 12), ylim=(-12, 12), aspect='equal')\n# plt.gca().set_aspect('equal', adjustable='box')\nline, = ax.plot([], [], color='k', linewidth=3)\nt0 = 0\nt1 = 1\ndt = 0.02\n\n\ndef init():\n line.set_data([], [])\n return line,\n\n\nstates = np.zeros((int((t1-t0)/dt), 2, 2))\n\nfor i in range(0, int((t1-t0)/dt)):\n angle = np.deg2rad(i * 360/((t1-t0)/dt))\n end_point = np.array([np.cos(angle), np.sin(angle)]\n ) * (i/int((t1-t0)/dt)) * 10\n arm.inverseKinematics(end_point)\n forward = arm.forwardKinematics()\n states[i][0] = forward[0]\n states[i][1] = forward[1]\nstates = np.append(states, np.flip(states, 0), axis=0)\n\n\ndef animate(i):\n joints = np.array([\n [0, 0],\n states[i][0],\n states[i][1],\n ])\n # print(joints)\n x, y = joints.T\n drawn = []\n line.set_data(x, y)\n drawn.append(line)\n # print(i)\n return drawn\n\n\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=2*int(t1/dt), interval=dt * 1000, blit=True)\n_writer = animation.writers['ffmpeg']\nwriter = _writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\nplt.show()\n","repo_name":"grantpauker/arm19","sub_path":"sim/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24941762323","text":"import os\nimport stat\nimport unittest\nimport tempfile\n\nimport qibuild\n\nclass InstallTestCase(unittest.TestCase):\n def setUp(self):\n self.tmp = tempfile.mkdtemp(prefix=\"tmp-install-test\")\n\n def tearDown(self):\n qibuild.sh.rm(self.tmp)\n\n def test_install_ro(self):\n src = os.path.join(self.tmp, \"src\")\n os.mkdir(src)\n ro = os.path.join(src, \"ro\")\n with open(ro, \"w\") as fp:\n fp.write(\"ro\\n\")\n # 
200:\n os.chmod(ro, stat.S_IRUSR)\n dest = os.path.join(self.tmp, \"dest\")\n qibuild.sh.install(src, dest)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","repo_name":"ysuga/qibuild","sub_path":"python/qibuild/test/test_install.py","file_name":"test_install.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"45265158933","text":"import requests\n\n\ndef get_auth_url():\n weibo_auth_url = \"https://api.weibo.com/oauth2/authorize\"\n redirect_url = \"http://192.168.1.5:8000/weibo/\"\n auth_url = weibo_auth_url + \"?client_id={client_id}&redirect_url={redirect_url}\".format(client_id=\"2326635018\",\n redirect_url=redirect_url)\n return auth_url\n\n\ndef get_access_token(code):\n weibo_access_token_url = \"https://api.weibo.com/oauth2/access_token\"\n redirect_url = \"http://192.168.1.5:8000/weibo/\"\n\n re_url = requests.post(weibo_access_token_url, data={\n \"client_id\": \"2326635018\",\n \"client_secret\": \"eb310df011e1830e807e492b5ba9deb9\",\n \"code\": code,\n \"redirect_url\": redirect_url,\n })\n # return the token response so callers can actually use it\n return re_url\n\n\n","repo_name":"maverick-zhang/Shop","sub_path":"Shop/apps/utils/weibo_login.py","file_name":"weibo_login.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5679990490","text":"def get_post_info(name, post_qty):\r\n info = f\"{name} read a book by {post_qty}\"\r\n return info\r\n\r\n\r\ninfo = get_post_info('I', 'Svyatoslav Kulikov')\r\nprint(info)\r\n\r\n\r\ndef get_post_info(**person):\r\n print(person)\r\n\r\n print(type(person))\r\n info = (\r\n f\"{person['name']} wrote \"\r\n f\"{person['post_qty']} posts\"\r\n )\r\n return info\r\n","repo_name":"Vlstee/MyProjects","sub_path":"Project_4.py","file_name":"Project_4.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"38832613107","text":"import pygame\nimport datetime\n\nnow = datetime.datetime.now()\n\ncurrent_time = now.strftime(\"%H:%M:%S\")\nm = int(current_time[3] + current_time[4])\ns = int(current_time[6] + current_time[7])\n\npygame.init()\nscreen = pygame.display.set_mode((800, 800))\nclock = pygame.time.Clock()\nimage1 = pygame.image.load(\"C:/Users/amina/Downloads/main-clock.png\")\nimage2 = pygame.image.load(\"C:/Users/amina/Downloads/right-hand.png\")\nimage3 = pygame.image.load(\"C:/Users/amina/Downloads/left-hand.png\")\n\ndef blitRotate(surf, image, pos, originPos, angle):\n\n # offset from pivot to center\n image_rect = image.get_rect(topleft = (pos[0] - originPos[0], pos[1]-originPos[1]))\n offset_center_to_pivot = pygame.math.Vector2(pos) - image_rect.center\n \n # rotated offset from pivot to center\n rotated_offset = offset_center_to_pivot.rotate(-angle)\n\n # rotated image center\n rotated_image_center = (pos[0] - rotated_offset.x, pos[1] - rotated_offset.y)\n\n # get a rotated image\n rotated_image = pygame.transform.rotate(image, angle)\n rotated_image_rect = rotated_image.get_rect(center = rotated_image_center)\n\n # rotate and blit the image\n surf.blit(rotated_image, rotated_image_rect)\n \n # draw rectangle around the image\nw1, h1 = image1.get_size()\nw2, h2 = image2.get_size()\nw3, h3 = image3.get_size()\nanglemin = 90 - m*6\nanglesec = 90 - s*6\ndone = False\nwhile not done:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n \n screen.fill(0)\n 
screen.blit(image1, (0, 0))\n blitRotate(screen, image2, (w1/2, h1/2), (w2/2, h2/2), anglemin)\n blitRotate(screen, image3, (w1/2, h1/2), (w3/2, h3/2), anglesec)\n # anglemin -= 1/600\n anglesec -= 1/10\n \n pygame.display.flip()","repo_name":"aminanursaitova/pp2","sub_path":"week7/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"25562148944","text":"import asyncio, os, sys\nsys.path.append(os.getcwd())\n\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\nfrom contextlib import asynccontextmanager\n\nfrom src.configs.config import DATABASE_URI\nfrom src.models.base import Base\n\n\nlocal_db_engine = create_async_engine(DATABASE_URI, echo=False, pool_pre_ping=True)\nSession = sessionmaker(bind=local_db_engine, class_=AsyncSession, expire_on_commit=False) # session factory\n\n@asynccontextmanager\nasync def session_scope():\n    async with Session() as session:\n        async with session.begin(): \n            try:\n                yield session\n                await session.commit()\n            except Exception:\n                await session.rollback()\n                raise\n            finally:\n                await session.close()\n\n\nasync def recreate_database(conn: object) -> None:\n    '''Recreates the tables in the DB'''\n\n    await conn.run_sync(Base.metadata.drop_all) # drop everything in local_db_engine\n    await conn.run_sync(Base.metadata.create_all) # create everything in local_db_engine\n\n\nasync def main() -> None:\n    async with local_db_engine.begin() as conn: \n        await recreate_database(conn)\n    await local_db_engine.dispose()\n\n\nif __name__ == \"__main__\":\n    asyncio.run(main())\n\n","repo_name":"Dadoxr/wg_and_ol_bot","sub_path":"src/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"32607304674","text":"import numpy\nimport pytest\n\nfrom grunnur import dtypes\nfrom grunnur.modules import render_with_modules\n\n\ndef test_ctype_builtin():\n    assert dtypes.ctype(numpy.int32) == \"int\"\n\n\ndef test_is_complex():\n    assert dtypes.is_complex(numpy.complex64)\n    assert dtypes.is_complex(numpy.complex128)\n    assert not dtypes.is_complex(numpy.float64)\n\n\ndef test_is_double():\n    assert dtypes.is_double(numpy.float64)\n    assert dtypes.is_double(numpy.complex128)\n    assert not dtypes.is_double(numpy.complex64)\n\n\ndef test_is_integer():\n    assert dtypes.is_integer(numpy.int32)\n    assert not dtypes.is_integer(numpy.float32)\n\n\ndef test_is_real():\n    assert dtypes.is_real(numpy.float32)\n    assert not dtypes.is_real(numpy.complex64)\n    assert not dtypes.is_real(numpy.int32)\n\n\ndef test_promote_type():\n    assert dtypes._promote_type(numpy.dtype(\"int8\")) == numpy.int32\n    assert dtypes._promote_type(numpy.dtype(\"uint8\")) == numpy.uint32\n    assert dtypes._promote_type(numpy.dtype(\"float16\")) == numpy.float32\n    assert dtypes._promote_type(numpy.dtype(\"csingle\")) == numpy.complex64\n    assert dtypes._promote_type(numpy.dtype(\"int32\")) == numpy.int32\n    assert dtypes._promote_type(numpy.dtype(\"int64\")) == numpy.int64\n\n\ndef test_result_type():\n    assert dtypes.result_type(numpy.int32, numpy.float32) == numpy.float64\n\n\ndef test_min_scalar_type():\n    assert dtypes.min_scalar_type(1) == numpy.uint32\n    assert dtypes.min_scalar_type(-1) == numpy.int32\n    assert dtypes.min_scalar_type(1.0) == numpy.float32\n    assert dtypes.min_scalar_type(1 + 2j) == numpy.complex64\n\n\ndef test_complex_for():\n    assert 
dtypes.complex_for(numpy.float32) == numpy.complex64\n assert dtypes.complex_for(numpy.float64) == numpy.complex128\n with pytest.raises(ValueError):\n assert dtypes.complex_for(numpy.complex64)\n with pytest.raises(ValueError):\n assert dtypes.complex_for(numpy.int32)\n\n\ndef test_real_for():\n assert dtypes.real_for(numpy.complex64) == numpy.float32\n assert dtypes.real_for(numpy.complex128) == numpy.float64\n with pytest.raises(ValueError):\n assert dtypes.real_for(numpy.float32)\n with pytest.raises(ValueError):\n assert dtypes.real_for(numpy.int32)\n\n\ndef test_complex_ctr():\n assert dtypes.complex_ctr(numpy.complex64) == \"COMPLEX_CTR(float2)\"\n\n\ndef test_c_constant():\n # scalar values\n assert dtypes.c_constant(1) == \"1\"\n assert dtypes.c_constant(numpy.uint64(1)) == \"1UL\"\n assert dtypes.c_constant(numpy.int64(-1)) == \"-1L\"\n assert dtypes.c_constant(numpy.float64(1.0)) == \"1.0\"\n assert dtypes.c_constant(numpy.float32(1.0)) == \"1.0f\"\n assert dtypes.c_constant(numpy.complex64(1 + 2j)) == \"COMPLEX_CTR(float2)(1.0f, 2.0f)\"\n assert dtypes.c_constant(numpy.complex128(1 + 2j)) == \"COMPLEX_CTR(double2)(1.0, 2.0)\"\n\n # array\n assert dtypes.c_constant(numpy.array([1, 2, 3], numpy.float32)) == \"{1.0f, 2.0f, 3.0f}\"\n\n # struct type\n dtype = numpy.dtype([(\"val1\", numpy.int32), (\"val2\", numpy.float32)])\n val = numpy.empty((), dtype)\n val[\"val1\"] = 1\n val[\"val2\"] = 2\n assert dtypes.c_constant(val) == \"{1, 2.0f}\"\n\n # custom dtype\n assert dtypes.c_constant(1, numpy.float32) == \"1.0f\"\n\n message = r\"Cannot render a value of type as a C constant\"\n with pytest.raises(TypeError, match=message):\n dtypes.c_constant(numpy.array([\"a\", \"b\"]))\n\n\ndef test__align_simple():\n dtype = numpy.dtype(\"int32\")\n res = dtypes._align(dtype)\n ref = dtypes.WrappedType.non_struct(dtype, dtype.itemsize)\n assert res == ref\n\n\ndef test__align_array():\n dtype = numpy.dtype(\"int32\")\n dtype_arr = numpy.dtype((dtype, 3))\n res = dtypes._align(dtype_arr)\n ref = dtypes.WrappedType.non_struct(dtype_arr, dtype.itemsize)\n assert res == ref\n\n\ndef test__align_non_aligned_struct():\n dtype = numpy.dtype(dict(names=[\"x\", \"y\", \"z\"], formats=[numpy.int8, numpy.int16, numpy.int32]))\n res = dtypes._align(dtype)\n\n dtype_aligned = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 2, 4],\n itemsize=8,\n aligned=True,\n )\n )\n\n wt_x = dtypes.WrappedType.non_struct(numpy.dtype(\"int8\"), 1)\n wt_y = dtypes.WrappedType.non_struct(numpy.dtype(\"int16\"), 2)\n wt_z = dtypes.WrappedType.non_struct(numpy.dtype(\"int32\"), 4)\n ref = dtypes.WrappedType(\n dtype_aligned,\n 4,\n explicit_alignment=None,\n wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),\n field_alignments=dict(x=None, y=None, z=None),\n )\n assert res == ref\n\n\ndef test__align_aligned_struct():\n dtype_aligned = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 2, 4],\n itemsize=8,\n aligned=True,\n )\n )\n\n res = dtypes._align(dtype_aligned)\n\n wt_x = dtypes.WrappedType.non_struct(numpy.dtype(\"int8\"), 1)\n wt_y = dtypes.WrappedType.non_struct(numpy.dtype(\"int16\"), 2)\n wt_z = dtypes.WrappedType.non_struct(numpy.dtype(\"int32\"), 4)\n ref = dtypes.WrappedType(\n dtype_aligned,\n 4,\n explicit_alignment=None,\n wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),\n field_alignments=dict(x=None, y=None, z=None),\n )\n assert res == ref\n\n\ndef 
test__align_aligned_struct_custom_itemsize():\n dtype_aligned = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 2, 4],\n itemsize=16,\n aligned=True,\n )\n )\n\n res = dtypes._align(dtype_aligned)\n\n wt_x = dtypes.WrappedType.non_struct(numpy.dtype(\"int8\"), 1)\n wt_y = dtypes.WrappedType.non_struct(numpy.dtype(\"int16\"), 2)\n wt_z = dtypes.WrappedType.non_struct(numpy.dtype(\"int32\"), 4)\n ref = dtypes.WrappedType(\n dtype_aligned,\n 16,\n explicit_alignment=16,\n wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),\n field_alignments=dict(x=None, y=None, z=None),\n )\n assert res == ref\n\n\ndef test__align_custom_field_offsets():\n dtype = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 4, 16],\n itemsize=32,\n )\n )\n\n dtype_aligned = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 4, 16],\n itemsize=32,\n aligned=True,\n )\n )\n\n res = dtypes._align(dtype_aligned)\n\n wt_x = dtypes.WrappedType.non_struct(numpy.dtype(\"int8\"), 1)\n wt_y = dtypes.WrappedType.non_struct(numpy.dtype(\"int16\"), 2)\n wt_z = dtypes.WrappedType.non_struct(numpy.dtype(\"int32\"), 4)\n ref = dtypes.WrappedType(\n dtype_aligned,\n 16,\n explicit_alignment=None,\n wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),\n field_alignments=dict(x=None, y=4, z=16),\n )\n assert res == ref\n\n\ndef test__align_aligned_struct_invalid_itemsize():\n dtype_aligned = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int8, numpy.int16, numpy.int32],\n offsets=[0, 2, 4],\n itemsize=20, # not a power of 2, an error should be raised\n aligned=True,\n )\n )\n\n with pytest.raises(ValueError):\n dtypes._align(dtype_aligned)\n\n\ndef test_align_nested():\n dtype_nested = numpy.dtype(dict(names=[\"val1\", \"pad\"], formats=[numpy.int8, numpy.int8]))\n\n dtype = numpy.dtype(\n dict(\n names=[\"pad\", \"struct_arr\", \"regular_arr\"],\n formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))],\n )\n )\n\n dtype_ref = numpy.dtype(\n dict(\n names=[\"pad\", \"struct_arr\", \"regular_arr\"],\n formats=[numpy.int32, (dtype_nested, (2,)), (numpy.int16, (3,))],\n offsets=[0, 4, 8],\n itemsize=16,\n )\n )\n\n dtype_aligned = dtypes.align(dtype)\n\n assert dtype_aligned.isalignedstruct\n assert dtype_aligned == dtype_ref\n\n\ndef test_align_preserve_nested_aligned():\n dtype_int3 = numpy.dtype(\n dict(names=[\"x\"], formats=[(numpy.int32, 3)], itemsize=16, aligned=True)\n )\n\n dtype = numpy.dtype(dict(names=[\"x\", \"y\", \"z\"], formats=[numpy.int32, dtype_int3, numpy.int32]))\n\n dtype_ref = numpy.dtype(\n dict(\n names=[\"x\", \"y\", \"z\"],\n formats=[numpy.int32, dtype_int3, numpy.int32],\n offsets=[0, 16, 32],\n itemsize=48,\n aligned=True,\n )\n )\n\n dtype_aligned = dtypes.align(dtype)\n\n assert dtype_aligned.isalignedstruct\n assert dtype_aligned == dtype_ref\n\n\ndef test_lcm():\n assert dtypes._lcm(10) == 10\n assert dtypes._lcm(15, 20) == 60\n assert dtypes._lcm(16, 32, 24) == 96\n\n\ndef test_find_minimum_alignment():\n # simple case: base alignment is enough because 12 is the next multiple of 4 after 9\n assert dtypes._find_minimum_alignment(12, 4, 9) == 4\n # the next multiple of 4 is 12, but we want offset 16 - this means we need to set\n # the alignment equal to 8, because 16 is the next multiple of 8 after 9.\n assert dtypes._find_minimum_alignment(16, 4, 9) == 8\n\n # incorrect 
offset (not a multiple of the base alignment)\n    with pytest.raises(ValueError):\n        dtypes._find_minimum_alignment(13, 4, 9)\n\n    # offset too large and not a power of 2 - cannot achieve that with alignment only,\n    # will need explicit padding\n    with pytest.raises(ValueError):\n        dtypes._find_minimum_alignment(24, 4, 9)\n\n\ndef test_wrapped_type_repr():\n    dtype_aligned = numpy.dtype(\n        dict(\n            names=[\"x\", \"y\", \"z\"],\n            formats=[numpy.int8, numpy.int16, numpy.int32],\n            offsets=[0, 4, 16],\n            itemsize=32,\n            aligned=True,\n        )\n    )\n    wt_x = dtypes.WrappedType.non_struct(numpy.dtype(\"int8\"), 1)\n    wt_y = dtypes.WrappedType.non_struct(numpy.dtype(\"int16\"), 2)\n    wt_z = dtypes.WrappedType.non_struct(numpy.dtype(\"int32\"), 4)\n    wt = dtypes.WrappedType(\n        dtype_aligned,\n        16,\n        explicit_alignment=None,\n        wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),\n        field_alignments=dict(x=None, y=4, z=16),\n    )\n\n\ndef test_ctype_struct():\n    dtype = dtypes.align(numpy.dtype([(\"val1\", numpy.int32), (\"val2\", numpy.float32)]))\n    ctype = dtypes.ctype(dtype)\n    src = render_with_modules(\"${ctype}\", render_globals=dict(ctype=ctype)).strip()\n\n    assert src == (\n        \"typedef struct _mod__module_0__ {\\n\"\n        \"    int val1;\\n\"\n        \"    float val2;\\n\"\n        \"} _mod__module_0_;\\n\\n\\n\"\n        \"_mod__module_0_\"\n    )\n\n\ndef test_ctype_struct_nested():\n    dtype_nested = numpy.dtype(dict(names=[\"val1\", \"pad\"], formats=[numpy.int8, numpy.int8]))\n\n    dtype = numpy.dtype(\n        dict(\n            names=[\"pad\", \"struct_arr\", \"regular_arr\"],\n            formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))],\n        )\n    )\n\n    dtype = dtypes.align(dtype)\n    ctype = dtypes.ctype(dtype)\n    src = render_with_modules(\"${ctype}\", render_globals=dict(ctype=ctype)).strip()\n\n    assert src == (\n        \"typedef struct _mod__module_1__ {\\n\"\n        \"    char val1;\\n\"\n        \"    char pad;\\n\"\n        \"} _mod__module_1_;\\n\\n\\n\"\n        \"typedef struct _mod__module_0__ {\\n\"\n        \"    int pad;\\n\"\n        \"    _mod__module_1_ struct_arr[2];\\n\"\n        \"    short regular_arr[3];\\n\"\n        \"} _mod__module_0_;\\n\\n\\n\"\n        \"_mod__module_0_\"\n    )\n\n\ndef test_ctype_to_ctype_struct():\n    # Checks that ctype() on an unknown type calls ctype_struct()\n    dtype = dtypes.align(numpy.dtype([(\"val1\", numpy.int32), (\"val2\", numpy.float32)]))\n    ctype = dtypes.ctype(dtype)\n    src = render_with_modules(\"${ctype}\", render_globals=dict(ctype=ctype)).strip()\n\n    assert src == (\n        \"typedef struct _mod__module_0__ {\\n\"\n        \"    int val1;\\n\"\n        \"    float val2;\\n\"\n        \"} _mod__module_0_;\\n\\n\\n\"\n        \"_mod__module_0_\"\n    )\n\n\ndef test_ctype_struct_aligned():\n    dtype = numpy.dtype(\n        dict(\n            names=[\"x\", \"y\", \"z\"],\n            formats=[numpy.int8, numpy.int16, numpy.int32],\n            offsets=[0, 4, 16],\n            itemsize=64,\n            aligned=True,\n        )\n    )\n    ctype = dtypes.ctype_struct(dtype)\n    src = render_with_modules(\"${ctype}\", render_globals=dict(ctype=ctype)).strip()\n    assert src == (\n        \"typedef struct _mod__module_0__ {\\n\"\n        \"    char x;\\n\"\n        \"    short ALIGN(4) y;\\n\"\n        \"    int ALIGN(16) z;\\n\"\n        \"} ALIGN(64) _mod__module_0_;\\n\\n\\n\"\n        \"_mod__module_0_\"\n    )\n\n\ndef test_ctype_struct_ignore_alignment():\n    dtype = numpy.dtype(\n        dict(\n            names=[\"x\", \"y\", \"z\"],\n            formats=[numpy.int8, numpy.int16, numpy.int32],\n            offsets=[0, 4, 16],\n            itemsize=64,\n            aligned=True,\n        )\n    )\n    ctype = dtypes.ctype_struct(dtype, ignore_alignment=True)\n    src = render_with_modules(\"${ctype}\", render_globals=dict(ctype=ctype)).strip()\n    assert src == (\n        \"typedef struct _mod__module_0__ {\\n\"\n        \"    char x;\\n\"\n        \"    short 
y;\\n\"\n        \"    int z;\\n\"\n        \"} _mod__module_0_;\\n\\n\\n\"\n        \"_mod__module_0_\"\n    )\n\n\ndef test_ctype_struct_checks_alignment():\n    dtype = numpy.dtype(dict(names=[\"x\", \"y\", \"z\"], formats=[numpy.int8, numpy.int16, numpy.int32]))\n    with pytest.raises(ValueError):\n        dtypes.ctype_struct(dtype)\n\n\ndef test_ctype_struct_for_non_struct():\n    dtype = numpy.dtype((numpy.int32, 3))\n    with pytest.raises(ValueError):\n        dtypes.ctype_struct(dtype)\n\n    # ctype_struct() is not applicable for simple types\n    with pytest.raises(ValueError):\n        dtypes.ctype_struct(numpy.int32)\n\n\ndef test_flatten_dtype():\n    dtype_nested = numpy.dtype(dict(names=[\"val1\", \"pad\"], formats=[numpy.int8, numpy.int8]))\n\n    dtype = numpy.dtype(\n        dict(\n            names=[\"pad\", \"struct_arr\", \"regular_arr\"],\n            formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))],\n        )\n    )\n\n    res = dtypes.flatten_dtype(dtype)\n    ref = [\n        ([\"pad\"], numpy.dtype(\"int32\")),\n        ([\"struct_arr\", 0, \"val1\"], numpy.dtype(\"int8\")),\n        ([\"struct_arr\", 0, \"pad\"], numpy.dtype(\"int8\")),\n        ([\"struct_arr\", 1, \"val1\"], numpy.dtype(\"int8\")),\n        ([\"struct_arr\", 1, \"pad\"], numpy.dtype(\"int8\")),\n        ([\"regular_arr\", 0], numpy.dtype(\"int16\")),\n        ([\"regular_arr\", 1], numpy.dtype(\"int16\")),\n        ([\"regular_arr\", 2], numpy.dtype(\"int16\")),\n    ]\n\n    assert dtypes.flatten_dtype(dtype) == ref\n\n\ndef test_c_path():\n    assert dtypes.c_path([\"struct_arr\", 0, \"val1\"]) == \"struct_arr[0].val1\"\n\n\ndef test_extract_field():\n    dtype_nested = numpy.dtype(dict(names=[\"val1\", \"pad\"], formats=[numpy.int8, numpy.int8]))\n\n    dtype = numpy.dtype(\n        dict(\n            names=[\"pad\", \"struct_arr\", \"regular_arr\"],\n            formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))],\n        )\n    )\n\n    a = numpy.empty(16, dtype)\n    a[\"struct_arr\"][\"val1\"][:, 1] = numpy.arange(16)\n    assert (dtypes.extract_field(a, [\"struct_arr\", 1, \"val1\"]) == numpy.arange(16)).all()\n\n    b = numpy.empty(16, dtype_nested)\n    b[\"val1\"] = numpy.arange(16)\n    assert (dtypes.extract_field(b, [\"val1\"]) == numpy.arange(16)).all()\n","repo_name":"fjarri/grunnur","sub_path":"tests/test_dtypes.py","file_name":"test_dtypes.py","file_ext":"py","file_size_in_byte":15555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
{"seq_id":"16199770799","text":"# basic BFS\n\nfrom collections import deque\n\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\n\n\ndef bfs(n, m, grid):\n    check = [[0]*m for _ in range(n)]\n    check[0][0] = 1\n    q = deque()\n    q.append((0, 0))\n    cnt = 0\n\n    while q:\n        for _ in range(len(q)):\n            curr = q.popleft()\n            if (curr[0], curr[1]) == (n-1, m-1):\n                return cnt+1\n            for s in range(4):\n                nx = curr[0]+dx[s]\n                ny = curr[1]+dy[s]\n                if 0 <= nx <= n-1 and 0 <= ny <= m-1 and check[nx][ny] == 0 and grid[nx][ny] == 1:\n                    q.append((nx, ny))\n                    check[nx][ny] = 1\n        cnt += 1\n    return -1\n\n\ndef solution(maps):\n\n    return bfs(len(maps), len(maps[0]), maps)\n","repo_name":"zinozino1/Algorithm_PS","sub_path":"프로그래머스/Py/LV2. 게임 맵 최단거리.py","file_name":"LV2. 
게임 맵 최단거리.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27105509690","text":"import pickle\nimport sys\nimport os\n#sys.path.append(\"../data_generation\")\n#import normalize_data\n#sys.path.append(\"../evaluation\")\n#import rmse\nimport collections\nimport random\n\n\ndef create_neighborhood(data, player):\n relevant_stats = ['DRBPM', '2PP', 'FGPM', '3PP', 'DWSPM', 'TRBP', 'PER', 'FTPM', '3PAPM', 'DRBP', 'USGP', 'TSP', 'PFPM', 'eFGP', 'STLPM', 'DBPM', '3PPM', 'GSPG', 'FGP', 'PPM', 'FTAPM', 'OBPM', 'TOVP', 'WSP48', 'MP', 'FTP', 'GS', 'BLKPM', 'G', 'BPM', 'VORP', 'ORBPM', 'TRBPM', '3PAr', 'ASTP', '2PPM', 'MPG', 'FTr', 'ORBP', 'BLKP', '2PAPM', 'STLP', 'FGAPM', 'TOVPM', 'ASTPM', 'OWSPM', 'WSPM']\n relevant_college_stats = [\"G\", \"FGP\", \"3PP\", \"FTP\", \"MP\", \"FGPM\", \"FGAPM\", \"3PPM\", \"3PAPM\", \"2PPM\", \"2PAPM\", \"FTPM\", \"FTAPM\", \"TRBPM\", \"ASTPM\", \"STLPM\", \"BLKPM\", \"TOVPM\", \"PPM\", \"MPG\", \"2PP\"]\n\n neighborhood = {}\n neighborhood[\"x\"] = collections.defaultdict(float)\n neighborhood[\"y\"] = collections.defaultdict(float)\n \n neighborhood[\"x\"][\"drafted\"] = data[player][\"drafted\"]\n neighborhood[\"x\"][\"height\"] = data[player][\"height\"]\n neighborhood[\"x\"][\"weight\"] = data[player][\"weight\"]\n neighborhood[\"x\"][\"hand\"] = data[player][\"hand\"]\n \n # draft year binary\n draft_year_feature_name = \"drafted\" + str(data[player][\"draft_year\"])\n neighborhood[\"x\"][draft_year_feature_name] = 1.\n\n # position binary\n positions = []\n for season in data[player][\"professional\"].keys():\n if data[player][\"professional\"][season][\"num_seasons\"] == -1.:\n positions = data[player][\"professional\"][season][\"position\"]\n num_positions = len(positions)\n for pos in positions:\n pos_feat_name = \"playing\" + str(pos)\n neighborhood[\"x\"][pos_feat_name] = 1. / float(num_positions)\n\n # only applicable if attended college\n if \"college\" in data[player].keys():\n\n # college attended binary\n # reducing size because pull is too strong\n college_teams = []\n for season in data[player][\"college\"].keys():\n college_teams.append(data[player][\"college\"][season][\"team\"])\n for team in set(college_teams):\n counter = 0\n for colteam in college_teams:\n if colteam == team:\n counter += 1\n college_feat_name = \"went\" + str(team)\n neighborhood[\"x\"][college_feat_name] = (float(counter) / float(len(college_teams))) #/ 10. 
# reducing pull *** revisit?\n \n # add college stats to neighborhood\n # weigh each more recent season 2x as much as the last\n season_weight = []\n counter = 1.\n for season in data[player][\"college\"].keys():\n season_weight.append(counter)\n counter *= 2\n total = sum(season_weight)\n for i in range(len(season_weight)):\n season_weight[i] = season_weight[i] / float(total)\n\n counter = 0\n for season in sorted(data[player][\"college\"].keys()):\n for stat in relevant_college_stats:\n neighborhood[\"x\"][\"college\" + stat] += season_weight[counter] * data[player][\"college\"][season][stat]\n counter += 1\n \n last_season = sorted(data[player][\"college\"].keys())[-1]\n # college number of seasons played\n neighborhood[\"x\"][\"college_seasons\"] = data[player][\"college\"][last_season][\"num_seasons\"]\n \n # college age\n neighborhood[\"x\"][\"draft_age\"] = data[player][\"college\"][last_season][\"age\"]\n \n first_season = sorted(data[player][\"professional\"].keys())[0]\n neighborhood[\"x\"][\"rookie_age\"] = data[player][\"professional\"][first_season][\"age\"]\n\n # add rookie stats to y component\n season = sorted(data[player][\"professional\"].keys())[0]\n for stat in relevant_stats:\n neighborhood[\"y\"][stat] = data[player][\"professional\"][season][stat]\n\n return neighborhood\n\n\ndef compute_distance(address1, address2):\n distance_vector = []\n original_keys1 = address1.keys()\n original_keys2 = address2.keys()\n\n # subtraction\n for key in address1.keys():\n distance_vector.append(address1[key] - address2[key])\n for key in address2.keys():\n if key not in address1.keys():\n distance_vector.append(address1[key] - address2[key])\n \n # prevent overwhelming dict growth\n for key in address1.keys():\n if key not in original_keys1:\n del address1[key]\n for key in address2.keys():\n if key not in original_keys2:\n del address2[key]\n\n # square\n for i in range(len(distance_vector)):\n distance_vector[i] = distance_vector[i] ** 2.\n\n # sum\n total = sum(distance_vector)\n\n # root\n return total ** 0.5\n\n\ndef find_my_neighbors(data, player, neighborhood):\n neighbors = {}\n neighbors[\"neighborhood\"] = create_neighborhood(data, player)\n neighbors[\"num_neighbors\"] = 0\n neighbor_list = [0,1,2,3,4,5,6,7]\n for neighbor in neighbor_list:\n neighbors[neighbor] = {}\n\n # search neighborhood\n for other_player in neighborhood.keys():\n if neighbors[\"num_neighbors\"] < 8:\n neighbors[neighbors[\"num_neighbors\"]][\"name\"] = other_player\n neighbors[neighbors[\"num_neighbors\"]][\"distance\"] = compute_distance(neighbors[\"neighborhood\"][\"x\"], neighborhood[other_player][\"x\"]) \n neighbors[\"num_neighbors\"] += 1\n else:\n distances = []\n for neighbor in neighbor_list:\n distances.append(neighbors[neighbor][\"distance\"])\n max_distance = max(distances)\n worst_neighbor = distances.index(max_distance)\n\n new_distance = compute_distance(neighbors[\"neighborhood\"][\"x\"], neighborhood[other_player][\"x\"]) \n \n if max_distance > new_distance:\n neighbors[worst_neighbor][\"distance\"] = new_distance\n neighbors[worst_neighbor][\"name\"] = other_player\n\n return neighbors\n\n\ndef get_rookie_comparison(data_type=2, validation_trials=1):\n \n rookie_comparison = {}\n rookie_comparison[\"validation\"] = {}\n rookie_comparison[\"test\"] = {}\n rookie_comparison[\"validation\"][\"predicted\"] = {}\n rookie_comparison[\"validation\"][\"true\"] = {}\n rookie_comparison[\"test\"][\"predicted\"] = {}\n rookie_comparison[\"test\"][\"true\"] = {}\n\n relevant_stats = 
['DRBPM', '2PP', 'FGPM', '3PP', 'DWSPM', 'TRBP', 'PER', 'FTPM', '3PAPM', 'DRBP', 'USGP', 'TSP', 'PFPM', 'eFGP', 'STLPM', 'DBPM', '3PPM', 'GSPG', 'FGP', 'PPM', 'FTAPM', 'OBPM', 'TOVP', 'WSP48', 'MP', 'FTP', 'GS', 'BLKPM', 'G', 'BPM', 'VORP', 'ORBPM', 'TRBPM', '3PAr', 'ASTP', '2PPM', 'MPG', 'FTr', 'ORBP', 'BLKP', '2PAPM', 'STLP', 'FGAPM', 'TOVPM', 'ASTPM', 'OWSPM', 'WSPM']\n\n fnames = [\"data.pkl\", \"data_removeoutliers.pkl\", \"data_-1to1.pkl\", \"data_-1to1_removeoutliers.pkl\", \"data_0to1.pkl\", \"data_0to1_removeoutliers.pkl\"]\n\n # intialize as empty lists\n for value in relevant_stats:\n rookie_comparison[\"validation\"][\"predicted\"][value] = []\n rookie_comparison[\"validation\"][\"true\"][value] = []\n rookie_comparison[\"test\"][\"predicted\"][value] = []\n rookie_comparison[\"test\"][\"true\"][value] = []\n \n \n # nearest neighbors for validation set\n # load validation data\n fin = open(os.path.dirname(\"/Users/dliedtka/Documents/stanford/cs221/project/files/data_generation/\") + \"/\" + fnames[data_type][:-4] + \"_validation.pkl\", \"rb\")\n val_data = pickle.load(fin)\n fin.close()\n\n # new stuff starting here\n complete_neighborhood = {}\n for player in val_data.keys():\n complete_neighborhood[player] = create_neighborhood(val_data, player)\n\n for trial in range(validation_trials):\n #print trial\n\n neighborhood = {}\n val_players = []\n for player in val_data.keys():\n if random.randint(0,9) == 0:\n val_players.append(player)\n else:\n neighborhood[player] = complete_neighborhood[player]\n\n # find neighborhood for each player in validation set\n rookie_neighbors = {}\n for player in val_players:\n rookie_neighbors[player] = find_my_neighbors(val_data, player, neighborhood)\n\n # predict\n for player in rookie_neighbors.keys():\n #print player\n\n true_stats = rookie_neighbors[player][\"neighborhood\"][\"y\"]\n pred_stats = {}\n for stat in relevant_stats:\n pred_stats[stat] = 0.\n\n neighbor_factor = []\n for i in range(8):\n neighbor_factor.append(1/rookie_neighbors[player][i][\"distance\"])\n total = sum(neighbor_factor)\n for i in range(8):\n neighbor_factor[i] /= total\n for i in range(8):\n neighbor_name = rookie_neighbors[player][i][\"name\"]\n for stat in relevant_stats:\n pred_stats[stat] += neighbor_factor[i] * neighborhood[neighbor_name][\"y\"][stat]\n\n # add to comparison\n for stat in relevant_stats:\n rookie_comparison[\"validation\"][\"predicted\"][stat].append(pred_stats[stat])\n rookie_comparison[\"validation\"][\"true\"][stat].append(true_stats[stat])\n\n '''\n neighborhood = {}\n for player in val_data.keys():\n # create a list of veteran neighbors\n if len(val_data[player][\"professional\"].keys()) != 1 or \"2016-17\" not in val_data[player][\"professional\"].keys():\n neighborhood[player] = create_neighborhood(val_data, player)\n\n # find nearest 8 neighbors for each rookie\n rookie_neighbors = {}\n for player in val_data.keys():\n if len(val_data[player][\"professional\"].keys()) == 1 and \"2016-17\" in val_data[player][\"professional\"].keys():\n rookie_neighbors[player] = find_my_neighbors(val_data, player, neighborhood)\n \n # predict\n # validation\n for player in rookie_neighbors.keys():\n #print player\n\n true_stats = rookie_neighbors[player][\"neighborhood\"][\"y\"]\n pred_stats = {}\n for stat in relevant_stats:\n pred_stats[stat] = 0.\n\n neighbor_factor = []\n for i in range(8):\n neighbor_factor.append(1/rookie_neighbors[player][i][\"distance\"])\n total = sum(neighbor_factor)\n for i in range(8):\n neighbor_factor[i] /= total\n for 
i in range(8):\n neighbor_name = rookie_neighbors[player][i][\"name\"]\n for stat in relevant_stats:\n pred_stats[stat] += neighbor_factor[i] * neighborhood[neighbor_name][\"y\"][stat]\n\n # add to comparison\n for stat in relevant_stats:\n rookie_comparison[\"validation\"][\"predicted\"][stat].append(pred_stats[stat])\n rookie_comparison[\"validation\"][\"true\"][stat].append(true_stats[stat])\n '''\n\n \n # nearest neighbors for test set\n # load test data\n fin = open(os.path.dirname(\"/Users/dliedtka/Documents/stanford/cs221/project/files/data_generation/\") + \"/\" + fnames[data_type], \"rb\")\n test_data = pickle.load(fin)\n fin.close()\n neighborhood = {}\n for player in test_data.keys():\n # create a list of veteran neighbors\n if len(test_data[player][\"professional\"].keys()) != 1 or \"2017-18\" not in test_data[player][\"professional\"].keys():\n neighborhood[player] = create_neighborhood(test_data, player)\n\n # find nearest 8 neighbors for each rookie\n rookie_neighbors = {}\n for player in test_data.keys():\n if len(test_data[player][\"professional\"].keys()) == 1 and \"2017-18\" in test_data[player][\"professional\"].keys():\n rookie_neighbors[player] = find_my_neighbors(test_data, player, neighborhood)\n\n # predict\n rookie_preds = {}\n for player in rookie_neighbors.keys():\n #print player\n rookie_preds[player] = {}\n\n true_stats = rookie_neighbors[player][\"neighborhood\"][\"y\"]\n pred_stats = {}\n for stat in relevant_stats:\n pred_stats[stat] = 0.\n\n neighbor_factor = []\n for i in range(8):\n neighbor_factor.append(1/rookie_neighbors[player][i][\"distance\"])\n total = sum(neighbor_factor)\n for i in range(8):\n neighbor_factor[i] /= total\n for i in range(8):\n neighbor_name = rookie_neighbors[player][i][\"name\"]\n for stat in relevant_stats:\n pred_stats[stat] += neighbor_factor[i] * neighborhood[neighbor_name][\"y\"][stat]\n\n # add to comparison\n for stat in relevant_stats:\n rookie_comparison[\"test\"][\"predicted\"][stat].append(pred_stats[stat])\n rookie_comparison[\"test\"][\"true\"][stat].append(true_stats[stat])\n rookie_preds[player][stat] = pred_stats[stat]\n\n \n fout = open(\"rookie_preds.pkl\", \"wb\")\n pickle.dump(rookie_preds, fout)\n fout.close()\n \n return rookie_comparison\n\n\n#print get_rookie_comparison()\n","repo_name":"dliedtka/basketball","sub_path":"221_code/code/algorithm/stats/rookies.py","file_name":"rookies.py","file_ext":"py","file_size_in_byte":12915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30651900784","text":"from request import get_with_ua\nfrom lxml import html\n\n\ndef get_detail():\n url = 'https://detail.1688.com/offer/564393823842.html'\n text = get_with_ua(url).text\n # print(text)\n with open('./raw/detail.html', 'w') as f:\n f.write(text)\n xpath = '//div[@class=\"tab-pane\"]//img/@src'\n selector = html.fromstring(text)\n res = selector.xpath(xpath)\n src = (res[0] if len(res) else '').replace('.400x400', '')\n # print(src)\n img = get_with_ua(src)\n with open('./data/{}'.format(src.split('/')[-1]), 'wb') as f:\n f.write(img.content)\n\n\nif __name__ == '__main__':\n get_detail()","repo_name":"caoxiemeihao/spider-1688","sub_path":"spider_1688_detail.py","file_name":"spider_1688_detail.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70895206614","text":"import numpy as np\nfrom test import TestModule, TestFunction\nfrom import_config import 
configs,type\n\ntotal_num = 0\nfor i, cfg in enumerate(configs):\n\n print(\"\\033[1;31;40mTest \\033[0m\", i, \": \" + cfg['config']['name'])\n if type == 'module':\n test = TestModule(cfg)\n elif type == 'function':\n test = TestFunction(cfg)\n outputs = test.diff()\n\n for output in outputs:\n output = np.array(output)\n indices = np.where(output > 0.0001)\n values = output[indices]\n print(\"\\033[1;33;40m Indices: \\033[0m\", indices)\n print(\"\\033[1;33;40m Values: \\033[0m\", values)\n total_num += len(values)\nprint(\"total failed: \", total_num)\n","repo_name":"rubblesky/base_module","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15342580711","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 15:49:24 2018\n\n@author: berend\n\"\"\"\nimport numpy as np\n\ndef create_grid_pcolormesh(x,y):\n \n if len(x.shape) > 1:\n x = x[0,:]\n y = y[:,0]\n \n xnew = np.zeros(x.shape[0]+1)\n ynew = np.zeros(y.shape[0]+1)\n \n xnew[1:-1] = (x[1:] + x[:-1])/2\n ynew[1:-1] = (y[1:] + y[:-1])/2\n \n #extrapolate the end values:\n xnew[0] = xnew[1] - x[1] + x[0]\n ynew[0] = ynew[1] - y[1] + y[0]\n\n xnew[-1] = xnew[-2] + x[-1] - x[-2]\n ynew[-1] = ynew[-2] + y[-1] - y[-2]\n \n return xnew,ynew\n\n\n\ndef to_sym(A, vmin, vmax, zero = 0.0):\n \"\"\"Convert to symmetrical colorscale, this means if a divergence map\n goes from -0.5 to 1.0, you can represent it through the full colorscale\n but still keep 0.0 at 0.0.\n Args:\n A: the array to transform\n vmin: the value in array that will be transformed to -1.\n vmax: the value in the array that will be transformed to 1.\n zero: the value that will be transformed to zero\"\"\"\n \n \n return A/np.piecewise(A, [A < zero], [-vmin,vmax])\n\n\ndef sym_colorbar(ax, vmin,vmax, cmap, zero=0.0, inside_labels=[],\n textprops={}, horizontal=False):\n \n y = np.linspace(vmin,vmax, 200)\n x = np.linspace(-1,1,2)\n if horizontal:\n x, y = y, x\n \n X,Y = np.meshgrid(x,y)\n\n if horizontal:\n Z = to_sym(X,vmin,vmax,zero = zero)\n else:\n Z = to_sym(Y,vmin,vmax,zero = zero)\n\n X,Y = create_grid_pcolormesh(X,Y)\n\n ax.pcolormesh(X,Y,Z,cmap = cmap, vmin = -1, vmax = 1., rasterized=True)\n if horizontal:\n ax.set_yticks([])\n ax.axis([vmin, vmax, -1,1])\n else:\n ax.set_xticks([])\n ax.axis([-1,1,vmin,vmax])\n\n if inside_labels:\n ylen = vmax - vmin\n \n ax.text(0., vmin + 0.02*ylen, inside_labels[0],ha = 'center', va = 'bottom',rotation = 90, **textprops)\n ax.text(0., vmax - 0.02*ylen, inside_labels[1],ha = 'center', va = 'top',rotation = 90, **textprops)\n \n \n \ndef reg_colorbar(ax, vmin,vmax, cmap, zero = 0.0, inside_labels = [],\n textprops = {}, norm=None, horizontal=False):\n \n y = np.linspace(vmin,vmax, 200)\n x = np.linspace(-1,1,2)\n if horizontal:\n x, y = y, x\n \n X,Y = np.meshgrid(x,y)\n\n if horizontal:\n Z = np.array(X)\n else:\n Z = np.array(Y)\n \n X,Y = create_grid_pcolormesh(X,Y)\n \n ax.pcolormesh(X,Y,Z,cmap = cmap, vmin = vmin, vmax = vmax, norm=norm, rasterized=True)\n if horizontal:\n ax.set_yticks([])\n ax.axis([vmin, vmax, -1,1])\n else:\n ax.set_xticks([])\n ax.axis([-1,1,vmin,vmax])\n\n if inside_labels:\n ylen = vmax - vmin\n \n ax.text(0., vmin + 0.02*ylen, inside_labels[0],ha = 'center', va = 'bottom',rotation = 90, **textprops)\n ax.text(0., vmax - 0.02*ylen, inside_labels[1],ha = 'center', va = 'top',rotation = 90, 
**textprops)\n","repo_name":"bzwartsenberg/subfigplot","sub_path":"subfigplot/colormap_helpers.py","file_name":"colormap_helpers.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43780999782","text":"from . import BadRequestError\n\n\nclass ApiIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The api_id/api_hash combination is invalid.'\n )\n\n\nclass BotMethodInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The API access for bot users is restricted. The method you '\n 'tried to invoke cannot be executed as a bot.'\n )\n\n\nclass ChannelInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Invalid channel object. Make sure to pass the right types.'\n )\n\n\nclass ChatAdminRequiredError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Chat admin privileges are required to do that in the specified '\n 'chat (for example, to send a message in a channel which is not '\n 'yours).'\n )\n\n\nclass ChatIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Invalid object ID for a chat. Make sure to pass the right types.'\n )\n\n\nclass ConnectionLangPackInvalid(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The specified language pack is not valid.'\n )\n\n\nclass ConnectionLayerInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The very first request must always be InvokeWithLayerRequest.'\n )\n\n\nclass DcIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'This occurs when an authorization is tried to be exported for '\n 'the same data center one is currently connected to.'\n )\n\n\nclass FieldNameEmptyError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The field with the name FIELD_NAME is missing.'\n )\n\n\nclass FieldNameInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The field with the name FIELD_NAME is invalid.'\n )\n\n\nclass FilePartsInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The number of file parts is invalid.'\n )\n\n\nclass FilePartMissingError(BadRequestError):\n def __init__(self, **kwargs):\n self.which = kwargs['extra']\n super(Exception, self).__init__(\n self,\n 'Part {} of the file is missing from storage.'.format(self.which)\n )\n\n\nclass FilePartInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The file part number is invalid.'\n )\n\n\nclass FirstNameInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The first name is invalid.'\n )\n\n\nclass InputMethodInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The invoked method does not exist anymore or has never existed.'\n )\n\n\nclass LastNameInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The last name is invalid.'\n )\n\n\nclass Md5ChecksumInvalidError(BadRequestError):\n def 
__init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The MD5 check-sums do not match.'\n )\n\n\nclass MessageEmptyError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Empty or invalid UTF-8 message was sent.'\n )\n\n\nclass MessageIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The specified message ID is invalid.'\n )\n\n\nclass MessageTooLongError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Message was too long. Current maximum length is 4096 UTF-8 '\n 'characters.'\n )\n\n\nclass MsgWaitFailedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'A waiting call returned an error.'\n )\n\n\nclass PasswordHashInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The password (and thus its hash value) you entered is invalid.'\n )\n\n\nclass PeerIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'An invalid Peer was used. Make sure to pass the right peer type.'\n )\n\n\nclass PhoneCodeEmptyError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone code is missing.'\n )\n\n\nclass PhoneCodeExpiredError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The confirmation code has expired.'\n )\n\n\nclass PhoneCodeHashEmptyError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone code hash is missing.'\n )\n\n\nclass PhoneCodeInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone code entered was invalid.'\n )\n\n\nclass PhoneNumberBannedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The used phone number has been banned from Telegram and cannot '\n 'be used anymore. Maybe check https://www.telegram.org/faq_spam.'\n )\n\n\nclass PhoneNumberInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone number is invalid.'\n )\n\n\nclass PhoneNumberOccupiedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone number is already in use.'\n )\n\n\nclass PhoneNumberUnoccupiedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The phone number is not yet being used.'\n )\n\n\nclass PhotoInvalidDimensionsError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The photo dimensions are invalid.'\n )\n\n\nclass TypeConstructorInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The type constructor is invalid.'\n )\n\n\nclass UsernameInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Unacceptable username. 
Must match r\"[a-zA-Z][\\w\\d]{4,32}\"'\n )\n\n\nclass UsernameNotModifiedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The username is not different from the current username'\n )\n\n\nclass UsernameNotOccupiedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'See issue #96 for Telethon - try upgrading the library.'\n )\n\n\nclass UsernameOccupiedError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The username is already taken.'\n )\n\n\nclass UsersTooFewError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Not enough users (to create a chat, for example).'\n )\n\n\nclass UsersTooMuchError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'The maximum number of users has been exceeded (to create a '\n 'chat, for example).'\n )\n\n\nclass UserIdInvalidError(BadRequestError):\n def __init__(self, **kwargs):\n super(Exception, self).__init__(\n self,\n 'Invalid object ID for an user. Make sure to pass the right types.'\n )\n\n\nrpc_errors_400_all = {\n 'API_ID_INVALID': ApiIdInvalidError,\n 'BOT_METHOD_INVALID': BotMethodInvalidError,\n 'CHANNEL_INVALID': ChannelInvalidError,\n 'CHAT_ADMIN_REQUIRED': ChatAdminRequiredError,\n 'CHAT_ID_INVALID': ChatIdInvalidError,\n 'CONNECTION_LAYER_INVALID': ConnectionLayerInvalidError,\n 'DC_ID_INVALID': DcIdInvalidError,\n 'FIELD_NAME_EMPTY': FieldNameEmptyError,\n 'FIELD_NAME_INVALID': FieldNameInvalidError,\n 'FILE_PARTS_INVALID': FilePartsInvalidError,\n 'FILE_PART_(\\d+)_MISSING': FilePartMissingError,\n 'FILE_PART_INVALID': FilePartInvalidError,\n 'FIRSTNAME_INVALID': FirstNameInvalidError,\n 'INPUT_METHOD_INVALID': InputMethodInvalidError,\n 'LASTNAME_INVALID': LastNameInvalidError,\n 'MD5_CHECKSUM_INVALID': Md5ChecksumInvalidError,\n 'MESSAGE_EMPTY': MessageEmptyError,\n 'MESSAGE_ID_INVALID': MessageIdInvalidError,\n 'MESSAGE_TOO_LONG': MessageTooLongError,\n 'MSG_WAIT_FAILED': MsgWaitFailedError,\n 'PASSWORD_HASH_INVALID': PasswordHashInvalidError,\n 'PEER_ID_INVALID': PeerIdInvalidError,\n 'PHONE_CODE_EMPTY': PhoneCodeEmptyError,\n 'PHONE_CODE_EXPIRED': PhoneCodeExpiredError,\n 'PHONE_CODE_HASH_EMPTY': PhoneCodeHashEmptyError,\n 'PHONE_CODE_INVALID': PhoneCodeInvalidError,\n 'PHONE_NUMBER_BANNED': PhoneNumberBannedError,\n 'PHONE_NUMBER_INVALID': PhoneNumberInvalidError,\n 'PHONE_NUMBER_OCCUPIED': PhoneNumberOccupiedError,\n 'PHONE_NUMBER_UNOCCUPIED': PhoneNumberUnoccupiedError,\n 'PHOTO_INVALID_DIMENSIONS': PhotoInvalidDimensionsError,\n 'TYPE_CONSTRUCTOR_INVALID': TypeConstructorInvalidError,\n 'USERNAME_INVALID': UsernameInvalidError,\n 'USERNAME_NOT_MODIFIED': UsernameNotModifiedError,\n 'USERNAME_NOT_OCCUPIED': UsernameNotOccupiedError,\n 'USERNAME_OCCUPIED': UsernameOccupiedError,\n 'USERS_TOO_FEW': UsersTooFewError,\n 'USERS_TOO_MUCH': UsersTooMuchError,\n 'USER_ID_INVALID': UserIdInvalidError,\n}\n","repo_name":"crack00r/BitBot","sub_path":"telethon/errors/rpc_errors_400.py","file_name":"rpc_errors_400.py","file_ext":"py","file_size_in_byte":10850,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"67"} +{"seq_id":"17495398469","text":"import pandas_datareader as pdr\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.regression.rolling import RollingOLS\nfrom statsmodels.api import OLS, add_constant\nimport matplotlib.pyplot as 
plt\nimport seaborn\nseaborn.set_style('darkgrid')\npd.plotting.register_matplotlib_converters()\n\n#%%\nfactors = pdr.get_data_famafrench('F-F_Research_Data_Factors', start='1-1-2020')[0]\n# print(factors.head())\nindustries = pdr.get_data_famafrench('17_Industry_Portfolios', start='1-1-2020')[0]\n# print(industries.head())\n\n#%%\nexog_vars = ['Mkt-RF', 'SMB', 'HML','RF']\nexog = sm.add_constant(factors[exog_vars])\nreturns = industries.sub(factors.RF, axis=0)\n\n\n#%%\nbetas = []\nrsquared = []\n#%%\nfor industry in returns.columns:\n # print(returns.loc[returns.index, industry])\n endog = returns.loc[returns.index, industry]\n rols = RollingOLS(endog, exog, window=12)\n rres = rols.fit()\n params = rres.params.mean()\n betas.append(params.drop('const'))\n rsquared.append(rres.rsquared.mean())\n\n\nbetas = pd.DataFrame(betas,\n columns=factors.columns,\n index=industries.columns)\n# betas = betas.drop(columns='RF')\n# betas = betas.T\nbetas.info()\n\n\n#%%\nlambdas = []\n#%%\n# Second Stage regression\n\n\nfor period in industries.index:\n step2 = OLS(endog=industries.loc[period, betas.index],\n exog=betas).fit()\n lambdas.append(step2.params)\n\n\n#%%\nlambdas = pd.DataFrame(lambdas,\n index=industries.index,\n columns=betas.columns.tolist())\n\n#%%\nlambdas.mean()\n\n\n#%%\nax1 = plt.subplot2grid((1, 3), (0, 0))\nax2 = plt.subplot2grid((1, 3), (0, 1), colspan=2)\nax2.margins(0.01)\nlambdas.mean().plot.barh(ax=ax1)\nlambdas0 = lambdas.rolling(6).mean().dropna()\nlambdas0.plot(lw=2, figsize=(17,8), ax=ax2)\nax2.legend(bbox_to_anchor=(1.025, 1.05))\nplt.show()\n\nlambdas.rolling(12).mean().dropna().plot(lw=2, figsize=(14,20), subplots=True, sharey=True, sharex=True)\nplt.show()","repo_name":"MortenWillendrup/CreditRiskModelling","sub_path":"Applied_Finance/Rolling-example.py","file_name":"Rolling-example.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"46510332956","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Reaction( ):\n def __init__( self ):\n self.stop = self.readData( \"data/c12.dat\" )\n self.stopUM = self.readData( \"data/c12_um.dat\" )\n self.sFactor = self.readData( \"data/sfactor.dat\" )\n self.sFactorGraph = self.createGraph( self.sFactor )\n self.stopGraph = self.createGraph( self.stop )\n self.stopGraphUM = self.createGraph( self.stopUM )\n\n M0 = 12\n M1 = 1.00727647\n Z = 6\n\n def readData( self, dataDir ):\n fIn = open( dataDir, \"r\" )\n Lines = fIn.readlines( )\n data = np.zeros( shape=( len( Lines ), 3 ) )\n for idx in range( len( Lines ) ):\n l = Lines[idx].split( )\n data[idx][0] = float( l[0] )\n data[idx][1] = float( l[1] )\n return data\n\n def createGraph( self, data ):\n fig = plt.figure( )\n graph = plt.plot( data[:,0], data[:,1] )\n plt.close( fig )\n return [graph[0].get_xdata( ), graph[0].get_ydata( )]\n\n def getValue( self, graph, value ):\n idx = (np.abs(graph[0] - value)).argmin()\n return graph[1][idx] \n\n def getCM( self, Lab ):\n CM = Lab*( self.M0 )/( self.M1 + self.M0 )\n return CM\n\n def getLab( self, CM ):\n Lab = CM*( self.M1 + self.M0 )/( self.M0 )\n return Lab\n\n def getCross( self, energy ):\n energyCM = self.getCM( energy )\n Mr = self.M0*self.M1/( self.M0 + self.M1 )\n sFactor = self.getValue( self.sFactorGraph, energyCM )\n cross = pow(10, -6)*sFactor*np.exp( -0.989534*self.Z*np.sqrt( Mr/( energyCM/1000 ) ) )\n cross /= energyCM\n return cross\n\n def convertDeltaE( self, deltaE, energy ):\n return 
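The rolling-regression script above ends its second stage with `lambdas.mean()`, i.e. one row of risk-premium estimates per period averaged over time. Standard Fama-MacBeth inference goes one step further and tests that mean against its standard error over the time series. A hedged sketch of that step, using synthetic data in place of the script's `lambdas` DataFrame:

```python
import numpy as np
import pandas as pd

# Synthetic stand-in for the `lambdas` DataFrame built above:
# one row of risk-premium estimates per period.
rng = np.random.default_rng(0)
lambdas = pd.DataFrame(
    rng.normal(0.5, 2.0, size=(45, 3)),
    columns=['Mkt-RF', 'SMB', 'HML'],
)

# Fama-MacBeth inference: mean estimate over time, with
# standard error = std / sqrt(T) and the implied t-statistic.
T = len(lambdas)
fm_mean = lambdas.mean()
fm_se = lambdas.std() / np.sqrt(T)
t_stats = fm_mean / fm_se
print(pd.DataFrame({'lambda': fm_mean, 'se': fm_se, 't': t_stats}))
```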
deltaE*self.getValue( self.stopGraph, energy )/self.getValue( self.stopGraph, 380 )\n\n    def run( self, deltaE, energy ):\n        integral = 0\n        nSteps = 1000\n        step = deltaE/nSteps\n        EStep = energy - deltaE + step/2\n        deltaE = self.convertDeltaE( deltaE, energy ) \n        for idx in range( nSteps ):\n            stop = self.getValue( self.stopGraph, EStep )\n            stopCM = self.getCM( stop )\n            stepCM = self.getCM( step )\n            cross = self.getCross( EStep )\n            integral += stepCM*cross/stopCM\n            EStep += step\n        self.Yield = integral\n        \n","repo_name":"skowrons94/Nuclear_Reaction","sub_path":"Reaction.py","file_name":"Reaction.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"6867638939","text":"from i2c import i2c_device\nimport time\n\nADDRESS = 0x68\nADDRESS_AK8963 = 0x0c\n\nDEVICE_ID = 0x71\n\n# REGISTER \n\nSMPLRT_DIV = 0x19\nCONFIG = 0x1A\nGYRO_CONFIG = 0x1B\nACCEL_CONFIG = 0x1C\nACCEL_CONFIG_2 = 0x1D\nLP_ACCEL_ODR = 0x1E\nWOM_THR = 0x1F\nFIFO_EN = 0x23\nI2C_MST_CTRL = 0x24\nI2C_MST_STATUS = 0x36\nINT_PIN_CFG = 0x37\nINT_ENABLE = 0x38\nINT_STATUS = 0x3A\nACCEL_OUT = 0x3B\nTEMP_OUT = 0x41\nGYRO_OUT = 0x43\n\nI2C_MST_DELAY_CTRL = 0x67\nSIGNAL_PATH_RESET = 0x68\nMOT_DETECT_CTRL = 0x69\nUSER_CTRL = 0x6A\nPWR_MGMT_1 = 0x6B\nPWR_MGMT_2 = 0x6C\nFIFO_R_W = 0x74\nWHO_AM_I = 0x75\n\nGFS_250 = 0x00\nGFS_500 = 0x01\nGFS_1000 = 0x02\nGFS_2000 = 0x03\nAFS_2G = 0x00\nAFS_4G = 0x01\nAFS_8G = 0x02\nAFS_16G = 0x03\n\nAK8963_ST1 = 0x02\nAK8963_MAGNET_OUT = 0x03\nAK8963_CNTL1 = 0x0A\nAK8963_CNTL2 = 0x0B\nAK8963_ASAX = 0x10\n\nAK8963_MODE_DOWN = 0x00\nAK8963_MODE_ONE = 0x01\nAK8963_MODE_C8HZ = 0x02\nAK8963_MODE_C100HZ = 0x06\n\nAK8963_BIT_14 = 0x00\nAK8963_BIT_16 = 0x01\n\ndef dataConv(data1, data2):\n    # combine low and high byte into a signed 16-bit value\n    value = data1 | (data2 << 8)\n    if value & (1 << 15):  # sign bit set: apply two's-complement correction\n        value -= (1 << 16)\n    return value\n\nclass AK8963(i2c_device.i2c_device):\n    \n    def __init__(self):\n        super().__init__(addr=ADDRESS_AK8963)\n        self.configAK8963(AK8963_MODE_C8HZ, AK8963_BIT_16)\n\n    def configAK8963(self, mode, mfs):\n        if mfs == AK8963_BIT_14:\n            self.mres = 4912.0/8190.0\n        else:\n            self.mres = 4912.0/32760.0\n        self.write_cmd_arg(AK8963_CNTL1, 0x00)\n        self.write_cmd_arg(AK8963_CNTL1, 0x0f)\n        data = self.read_i2c_block_data(AK8963_ASAX, 3)\n\n        self.magXcoef = (data[0] - 128) / 256 + 1.0\n        self.magYcoef = (data[1] - 128) / 256 + 1.0\n        self.magZcoef = (data[2] - 128) / 256 + 1.0\n        \n        self.write_cmd_arg(AK8963_CNTL1, 0x00)\n        self.write_cmd_arg(AK8963_CNTL1, (mfs << 4 | mode))\n        \n    def readMagnet(self):\n        x = 0\n        y = 0\n        z = 0\n        drdy = self.read_data(AK8963_ST1)\n        if drdy & 0x01:\n            data = self.read_i2c_block_data(AK8963_MAGNET_OUT, 7)\n            if (data[6] & 0x08) != 0x08:\n                x = dataConv(data[1], data[0])\n                y = dataConv(data[3], data[2])\n                z = dataConv(data[5], data[4])\n                \n                x = round(x * self.mres * self.magXcoef, 3)\n                y = round(y * self.mres * self.magYcoef, 3)\n                z = round(z * self.mres * self.magZcoef, 3)\n\n        return {'x': x, 'y': y, 'z': z}\n\nclass MPU9250(i2c_device.i2c_device):\n    \n    def __init__(self):\n        super().__init__(ADDRESS)\n        self.configMPU9250(GFS_250, AFS_2G)
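`dataConv` above stitches two bytes into a signed 16-bit reading by hand: combine the bytes, then subtract 2^16 when the sign bit is set. A standalone sketch (not part of the driver) that cross-checks the same logic against the standard-library decoder:

```python
def data_conv(low, high):
    # Same logic as dataConv above: combine bytes, then apply the
    # two's-complement correction if the sign bit (bit 15) is set.
    value = low | (high << 8)
    if value & (1 << 15):
        value -= 1 << 16
    return value

# Cross-check against int.from_bytes for a few edge cases.
for low, high in [(0x00, 0x00), (0xFF, 0x7F), (0x00, 0x80), (0xFF, 0xFF)]:
    expected = int.from_bytes(bytes([low, high]), 'little', signed=True)
    assert data_conv(low, high) == expected
print('dataConv matches int.from_bytes for little-endian signed 16-bit')
```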
    def searchDevice(self):\n        who_am_i = self.read_data(WHO_AM_I)\n        return who_am_i == DEVICE_ID\n\n    def configMPU9250(self, gfs, afs):\n        if gfs == GFS_250:\n            self.gres = 250.0 / 32768.0\n        elif gfs == GFS_500:\n            self.gres = 500.0 / 32768.0\n        elif gfs == GFS_1000:\n            self.gres = 1000.0 / 32768.0\n        else:\n            self.gres = 2000.0 / 32768.0\n\n        if afs == AFS_2G:\n            self.ares = 2.0 / 32768.0\n        elif afs == AFS_4G:\n            self.ares = 4.0 / 32768.0\n        elif afs == AFS_8G:\n            self.ares = 8.0 / 32768.0\n        else:\n            self.ares = 16.0 / 32768.0\n\n        self.write_cmd_arg(PWR_MGMT_1, 0x00)\n        time.sleep(0.1)\n        self.write_cmd_arg(PWR_MGMT_1, 0x01)\n        time.sleep(0.1)\n        self.write_cmd_arg(CONFIG, 0x03)\n        self.write_cmd_arg(SMPLRT_DIV, 0x04)\n        self.write_cmd_arg(GYRO_CONFIG, gfs << 3)\n        self.write_cmd_arg(ACCEL_CONFIG, afs << 3)\n        self.write_cmd_arg(ACCEL_CONFIG_2, 0x03)\n        self.write_cmd_arg(INT_PIN_CFG, 0x02)\n        time.sleep(0.1)\n\n    def checkDataReady(self):\n        drdy = self.read_data(INT_STATUS)\n        return drdy & 0x01\n\n    def readAccel(self):\n        data = self.read_i2c_block_data(ACCEL_OUT, 6)\n        x = dataConv(data[1], data[0])\n        y = dataConv(data[3], data[2])\n        z = dataConv(data[5], data[4])\n\n        x = round(x * self.ares, 3)\n        y = round(y * self.ares, 3)\n        z = round(z * self.ares, 3)\n\n        return {\"x\":x, \"y\":y, \"z\":z}\n\n    def readGyro(self):\n        data = self.read_i2c_block_data(GYRO_OUT, 6)\n        \n        x = dataConv(data[1], data[0])\n        y = dataConv(data[3], data[2])\n        z = dataConv(data[5], data[4])\n        x = round(x * self.gres, 3)\n        y = round(y * self.gres, 3)\n        z = round(z * self.gres, 3)\n        \n        return {\"x\":x, \"y\":y, \"z\":z}\n","repo_name":"Hutchby/Cheaposity","sub_path":"src/driver/i2c/gyrodriver.py","file_name":"gyrodriver.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"40647394570","text":"import io\nimport json\nimport os\nimport os.path\nimport re\nimport subprocess\nimport time\nimport zipfile\nfrom datetime import datetime, timedelta\nfrom json.decoder import JSONDecodeError\nfrom time import sleep\n\nimport click\nimport requests\nfrom rich import print\nfrom rich.columns import Columns\nfrom rich.console import Console\nfrom rich.markdown import Markdown\nfrom rich.pretty import pprint\nfrom rich.table import Table\n\nfrom uyuni_health_check.util import HealthException, podman, ssh_call\n\n# Update this number if adding more targets to the promtail config\nPROMTAIL_TARGETS = 5\n\n\nconsole = Console()\n_hints = []\n\n\ndef show_data(metrics: dict):\n    \"\"\"\n    Gather the data from the exporter and loki and display them\n    \"\"\"\n    console.print(Markdown(\"## Uyuni server and Salt Master stats\"))\n    console.print()\n    if metrics:\n        tables = []\n        tables.append(show_salt_jobs_summary(metrics))\n        tables.append(show_salt_master_stats(metrics))\n        tables.append(show_uyuni_summary(metrics))\n        console.print(Columns(tables), justify=\"center\")\n    else:\n        console.print(\n            \"[yellow]Some metrics are still missing. Wait some seconds and execute again\",\n            justify=\"center\",\n        )\n\n\ndef show_relevant_hints():\n    console.print(Markdown(\"## Relevant hints. 
Please take a look!\"))\n console.print()\n\n if not _hints:\n console.print(\"[italic]There are no relevant hints\", justify=\"center\")\n else:\n for hint in _hints:\n console.print(hint, justify=\"center\")\n\n console.print()\n\n\ndef wait_loki_init(server):\n \"\"\"\n Try to figure out when loki is ready to answer our requests.\n There are two things to wait for:\n - loki to be up\n - promtail to have read the logs and the loki ingester having handled them\n \"\"\"\n metrics = None\n\n # Wait for promtail to be ready\n # TODO Add a timeout here in case something went really bad\n # TODO checking the lags won't work when working on older logs,\n # we could try to compare the positions with the size of the files in such a case\n while (\n not metrics\n or metrics[\"active\"] < PROMTAIL_TARGETS\n or not metrics[\"lags\"]\n or any([v >= 10 for v in metrics[\"lags\"].values()])\n ):\n sleep(1)\n response = requests.get(f\"http://{server}:9081/metrics\")\n if response.status_code == 200:\n content = response.content.decode()\n active = re.findall(\"promtail_targets_active_total ([0-9]+)\", content)\n lags = re.findall(\n 'promtail_stream_lag_seconds{filename=\"([^\"]+)\".*} ([0-9.]+)', content\n )\n metrics = {\n \"lags\": {row[0]: float(row[1]) for row in lags},\n \"active\": int(active[0]) if active else 0,\n }\n\n\ndef show_error_logs_stats(loki):\n \"\"\"\n Get and show the error logs stats\n \"\"\"\n loki_url = loki or \"http://loki:3100\"\n process = podman(\n [\n \"run\",\n \"-ti\",\n \"--rm\",\n \"--pod\",\n \"uyuni-health-check\",\n \"--name\",\n \"logcli\",\n \"logcli\",\n \"--quiet\",\n f\"--addr={loki_url}\",\n \"instant-query\",\n 'count_over_time({job=~\".+\"} |~ `(?i)error` [7d])',\n ]\n )\n response = process.stdout.read()\n try:\n data = json.loads(response)\n except JSONDecodeError:\n raise HealthException(f\"Invalid logcli response: {response}\")\n\n print(Markdown(\"- Errors in logs over the last 7 days\"))\n print()\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"File\")\n table.add_column(\"Errors\")\n\n for metric in data:\n table.add_row(metric[\"metric\"][\"filename\"], metric[\"value\"][1])\n\n print(table)\n\n\ndef show_full_error_logs(loki):\n \"\"\"\n Get and show the error logs\n \"\"\"\n loki_url = loki or \"http://loki:3100\"\n from_time = (datetime.utcnow() - timedelta(days=7)).isoformat()\n print(Markdown(\"- Error logs of the last 7 days\"))\n podman(\n [\n \"run\",\n \"-ti\",\n \"--pod\",\n \"uyuni-health-check\",\n \"--name\",\n \"logcli\",\n \"logcli\",\n \"--quiet\",\n f\"--addr={loki_url}\",\n \"query\",\n f\"--from={from_time}Z\",\n \"--limit=100\",\n '{job=~\".+\"} |~ `(?i)error`',\n ],\n console=console,\n )\n\n\ndef show_salt_jobs_summary(metrics: dict):\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"Salt function name\")\n table.add_column(\"Total\")\n\n for metric, value in sorted(\n metrics[\"salt_jobs\"].items(), reverse=True, key=lambda item: item[1]\n ):\n table.add_row(metric, str(int(value)))\n\n return table\n\n\ndef show_salt_master_stats(metrics: dict):\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"Name\")\n table.add_column(\"Total\")\n\n for metric, value in sorted(\n metrics[\"salt_master_stats\"].items(), key=lambda item: item[0]\n ):\n table.add_row(metric, str(int(value)))\n\n return table\n\n\ndef show_uyuni_summary(metrics: dict):\n table = Table(show_header=True, header_style=\"bold magenta\")\n 
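The three `show_*_summary` helpers share one pattern: sort a metrics dict and pour it into a `rich.table.Table`, one row per entry, counts rendered as integers. A standalone sketch of that pattern, runnable outside the health-check tool:

```python
from rich.console import Console
from rich.table import Table

def dict_to_table(title: str, data: dict) -> Table:
    # Same pattern as the show_*_summary helpers above: one row per
    # metric, sorted by value descending, values shown as integers.
    table = Table(title=title, show_header=True, header_style="bold magenta")
    table.add_column("Name")
    table.add_column("Total")
    for name, value in sorted(data.items(), reverse=True, key=lambda kv: kv[1]):
        table.add_row(name, str(int(value)))
    return table

Console().print(dict_to_table("salt_jobs", {"state.apply": 12, "test.ping": 40}))
```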
table.add_column(\"Name\")\n table.add_column(\"Total\")\n\n for metric, value in sorted(\n metrics[\"uyuni_summary\"].items(), key=lambda item: item[0]\n ):\n table.add_row(metric, str(int(value)))\n\n return table\n\n\ndef build_image(name, image_path=None, verbose=False, server=None):\n \"\"\"\n Build a container image\n \"\"\"\n expanded_path = os.path.join(os.path.dirname(__file__), image_path or name)\n process = podman(\n [\"build\", \"-t\", name, expanded_path],\n console=console if verbose else None,\n server=server,\n )\n if process.returncode != 0:\n raise HealthException(f\"Failed to build {name} image\")\n\n\ndef pod_exists(pod, server=None):\n \"\"\"\n Check if the image pod is up and running\n \"\"\"\n return (\n podman([\"pod\", \"list\", \"--quiet\", f\"-fname={pod}\"], server=server)\n .stdout.read()\n .strip()\n != \"\"\n )\n\n\ndef image_exists(image, server=None):\n \"\"\"\n Check if the image is present in podman images result\n \"\"\"\n return (\n podman([\"images\", \"--quiet\", \"-f\", f\"reference={image}\"], server=server)\n .stdout.read()\n .strip()\n != \"\"\n )\n\n\ndef check_postgres_service(server):\n \"\"\"\n Check that postgresql service is running\n \"\"\"\n try:\n process = ssh_call(server, [\"systemctl\", \"status\", \"postgresql\"])\n if process.returncode != 0:\n msg = \"[bold red]WARNING: 'postgresql' service is NOT running!\"\n _hints.append(msg)\n console.log(msg)\n else:\n console.log(\"[green]The postgresql service is running\")\n except OSError:\n raise HealthException(\n f\"The specified server '{server}' is not and Uyuni / SUSE Manager server!\"\n )\n\n\ndef check_spacewalk_services(server, verbose=False):\n \"\"\"\n Check that spacewalk services are running\n \"\"\"\n try:\n process = ssh_call(server, [\"spacewalk-service\", \"list\"])\n if process.returncode != 0:\n raise HealthException(\"Failed to check spacewalk services\")\n\n services = re.findall(r\"(.+)\\.service .*\", process.stdout.read())\n if verbose:\n console.log(f\"Spacewalk services: {services}\")\n all_running = True\n for service in services:\n process = ssh_call(server, [\"systemctl\", \"status\", service])\n if process.returncode != 0:\n msg = f\"[bold red]WARNING: '{service}' service is NOT running!\"\n console.log(msg)\n _hints.append(msg)\n all_running = False\n if all_running:\n console.log(\"[green]All spacewalk services are running\")\n\n except OSError:\n raise HealthException(\n f\"The specified server '{server}' is not and Uyuni / SUSE Manager server!\"\n )\n\n\ndef container_is_running(name, server=None):\n \"\"\"\n Check if a container with a given name is running in podman\n \"\"\"\n process = podman([\"ps\", \"--quiet\", \"-f\", f\"name={name}\"], server=server)\n return process.stdout.read() != \"\"\n\n\ndef build_loki_image(image, verbose=False, server=None):\n if image_exists(image, server=server):\n console.log(f\"[yellow]Skipped as the {image} image is already present\")\n return\n\n # Fetch the logcli binary from the latest release\n url = f\"https://github.com/grafana/loki/releases/download/v2.5.0/{image}-linux-amd64.zip\"\n dest_dir = os.path.join(os.path.dirname(__file__), image)\n response = requests.get(url)\n zip = zipfile.ZipFile(io.BytesIO(response.content))\n zip.extract(f\"{image}-linux-amd64\", dest_dir)\n build_image(image, verbose=verbose, server=server)\n console.log(f\"[green]The {image} image was built successfully\")\n\n\ndef transfer_image(server, image):\n \"\"\"\n Copy a container image over to the server\n\n :param server: the server 
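`pod_exists` and `image_exists` above both rely on one podman convention: listing with `--quiet` prints only IDs, so empty stdout means "absent". A hedged equivalent using plain `subprocess` instead of the project's `podman()` wrapper (assumes a local `podman` binary is installed):

```python
import subprocess

def image_exists(image: str) -> bool:
    # Empty --quiet output means no image matched the reference filter.
    result = subprocess.run(
        ["podman", "images", "--quiet", "-f", f"reference={image}"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip() != ""

if __name__ == "__main__":
    print(image_exists("docker.io/grafana/loki"))
```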
to transfer the image to\n \"\"\"\n # Save, deploy and load the image\n # TODO Handle errors\n local_image_path = f\"/tmp/{image}.tar\"\n if os.path.exists(local_image_path):\n # podman doesn't like if the image is already present\n os.unlink(local_image_path)\n\n console.log(f\"[bold]Saving the {image} image...\")\n podman([\"save\", \"--output\", local_image_path, image])\n\n console.log(f\"[bold]Transfering the {image} image to {server}...\")\n subprocess.run([\"scp\", \"-q\", local_image_path, f\"{server}:/tmp/\"])\n\n console.log(f\"[bold]Loading the {image} image on {server}...\")\n podman([\"load\", \"--input\", f\"/tmp/{image}.tar\"], server)\n\n\ndef prepare_exporter(server, verbose=False):\n \"\"\"\n Build the prometheus exporter image and deploy it on the server\n\n :param server: the Uyuni server to deploy the exporter on\n \"\"\"\n console.log(\"[bold]Building uyuni-health-exporter image\")\n if image_exists(\"uyuni-health-exporter\"):\n console.log(\n \"[yellow]Skipped as the uyuni-health-exporter image is already present\"\n )\n else:\n build_image(\"uyuni-health-exporter\", \"exporter\", verbose=verbose)\n console.log(\"[green]The uyuni-health-exporter image was built successfully\")\n\n # Run the container\n console.log(\"[bold]Deploying uyuni-health-exporter container\")\n if container_is_running(\"uyuni-health-exporter\", server=server):\n console.log(\n \"[yellow]Skipped as the uyuni-health-exporter container is already running\"\n )\n return\n\n # Transfering the image\n if server:\n transfer_image(server, \"uyuni-health-exporter\")\n\n # Get the Salt UID/GID\n id_process = ssh_call(server, [\"id\", \"salt\"])\n if id_process.returncode != 0:\n err = id_process.stderr.read()\n if \"no such user\" in err:\n raise HealthException(\n \"Salt is not installed... 
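`build_loki_image` above fetches a release archive and extracts a single binary without ever touching a temporary file, by wrapping the response body in `io.BytesIO`. A self-contained sketch of that download-and-extract step (with `raise_for_status()` added, which the original skips; the commented call reuses the same release URL as above):

```python
import io
import zipfile
import requests

def fetch_binary_from_zip(url: str, member: str, dest_dir: str) -> str:
    # Download the archive into memory and extract one member from it.
    response = requests.get(url)
    response.raise_for_status()
    archive = zipfile.ZipFile(io.BytesIO(response.content))
    return archive.extract(member, dest_dir)

# Example (same release as used above):
# fetch_binary_from_zip(
#     "https://github.com/grafana/loki/releases/download/v2.5.0/logcli-linux-amd64.zip",
#     "logcli-linux-amd64",
#     "/tmp",
# )
```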
is the tool running on an Uyuni server?\"\n )\n else:\n raise HealthException(f\"Failed to get Salt GID on server: {err}\")\n id_out = id_process.stdout.read()\n salt_uid = re.match(\".*uid=([0-9]+)\", id_out).group(1)\n salt_gid = re.match(\".*gid=([0-9]+)\", id_out).group(1)\n\n # Run the container\n podman(\n [\n \"run\",\n \"--pod\",\n \"uyuni-health-check\",\n \"-u\",\n f\"{salt_uid}:{salt_gid}\",\n \"-d\",\n \"--network=host\",\n \"-v\",\n \"/etc/salt:/etc/salt:ro\",\n \"-v\",\n \"/var/cache/salt/:/var/cache/salt\",\n \"--name\",\n \"uyuni-health-exporter\",\n \"uyuni-health-exporter\",\n ],\n server,\n console=console,\n )\n\n\ndef prepare_grafana(server, verbose=False):\n if container_is_running(\"uyuni-health-check-grafana\", server=server):\n console.log(\n \"[yellow]Skipped as the uyuni-health-check-grafana container is already running\"\n )\n else:\n # Copy the grafana config\n grafana_cfg = os.path.join(os.path.dirname(__file__), \"grafana\")\n\n if server:\n try:\n subprocess.run(\n [\"scp\", \"-rq\", grafana_cfg, f\"{server}:/tmp/\"], check=True\n )\n grafana_cfg = \"/tmp/grafana\"\n except Exception:\n raise HealthException(\n f\"Failed to copy grafana configuration to {server}\"\n )\n\n # Run the container\n podman(\n [\n \"run\",\n \"--pod\",\n \"uyuni-health-check\",\n \"-d\",\n \"-v\",\n f\"{grafana_cfg}/datasources.yaml:/etc/grafana/provisioning/datasources/ds.yaml\",\n \"-v\",\n f\"{grafana_cfg}/dashboard.yaml:/etc/grafana/provisioning/dashboards/main.yaml\",\n \"-v\",\n f\"{grafana_cfg}/dashboards:/var/lib/grafana/dashboards\",\n \"-e\",\n \"GF_PATHS_PROVISIONING=/etc/grafana/provisioning\",\n \"-e\",\n \"GF_AUTH_ANONYMOUS_ENABLED=true\",\n \"-e\",\n \"GF_AUTH_ANONYMOUS_ORG_ROLE=Admin\",\n \"--name\",\n \"uyuni-health-check-grafana\",\n \"docker.io/grafana/grafana:9.2.1\",\n \"run.sh\",\n ],\n server,\n console=console,\n )\n\n\ndef prepare_prometheus(server, verbose=False):\n if container_is_running(\"uyuni-health-check-prometheus\", server=server):\n console.log(\n \"[yellow]Skipped as the uyuni-health-check-prometheus container is already running\"\n )\n else:\n # Copy the prometheus config\n prometheus_cfg = os.path.join(\n os.path.dirname(__file__), \"prometheus\", \"prometheus.yml\"\n )\n\n if server:\n try:\n subprocess.run(\n [\"scp\", \"-rq\", prometheus_cfg, f\"{server}:/tmp/\"], check=True\n )\n prometheus_cfg = \"/tmp/prometheus.yml\"\n except Exception:\n raise HealthException(\n f\"Failed to copy prometheus configuration to {server}\"\n )\n\n # Run the container\n podman(\n [\n \"run\",\n \"--pod\",\n \"uyuni-health-check\",\n \"-d\",\n \"-v\",\n f\"{prometheus_cfg}:/etc/prometheus/prometheus.yml\",\n \"--name\",\n \"uyuni-health-check-prometheus\",\n \"docker.io/prom/prometheus\",\n ],\n server,\n console=console,\n )\n\n\ndef create_pod(server):\n \"\"\"\n Create uyuni-health-check pod where we run the containers\n\n :param server: the Uyuni server to create the pod on or localhost\n \"\"\"\n if pod_exists(\"uyuni-health-check\", server=server):\n console.log(\"[yellow]Skipped as the uyuni-health-check pod is already running\")\n else:\n podman(\n [\n \"pod\",\n \"create\",\n \"-p\",\n \"3100:3100\",\n \"-p\",\n \"9081:9081\",\n \"-p\",\n \"3000:3000\",\n \"-p\",\n \"9090:9090\",\n \"--replace\",\n \"-n\",\n \"uyuni-health-check\",\n ],\n server=server,\n console=console,\n )\n\n\ndef run_loki(server):\n \"\"\"\n Run promtail and loki to aggregate the logs\n\n :param server: the Uyuni server to deploy the exporter on or localhost\n \"\"\"\n if 
container_is_running(\"loki\", server=server):\n        console.log(\"[yellow]Skipped as the loki container is already running\")\n    else:\n\n        # TODO Prepare config to tune the oldest message allowed\n        podman(\n            [\n                \"run\",\n                \"--pod\",\n                \"uyuni-health-check\",\n                \"--replace\",\n                \"-d\",\n                \"--name\",\n                \"loki\",\n                \"docker.io/grafana/loki\",\n            ],\n            server,\n            console=console,\n        )\n\n    # Copy the promtail config\n    promtail_cfg = os.path.join(\n        os.path.dirname(__file__), \"promtail\", \"promtail.yaml\"\n    )\n    if server:\n        try:\n            subprocess.run(\n                [\"scp\", \"-q\", promtail_cfg, f\"{server}:/tmp/\"], check=True\n            )\n            promtail_cfg = \"/tmp/promtail.yaml\"\n        except Exception:\n            raise HealthException(\n                f\"Failed to copy promtail configuration to {server}\"\n            )\n\n    # Run promtail only now since it pushes data to loki\n    console.log(\"[bold]Building promtail image\")\n    build_loki_image(\"promtail\")\n    if server:\n        transfer_image(server, \"promtail\")\n    podman(\n        [\n            \"run\",\n            \"--replace\",\n            \"-d\",\n            \"-v\",\n            f\"{promtail_cfg}:/etc/promtail/config.yml\",\n            \"-v\",\n            \"/var/log/:/var/log/\",\n            \"--name\",\n            \"promtail\",\n            \"--pod\",\n            \"uyuni-health-check\",\n            \"promtail\",\n        ],\n        server,\n        console=console,\n    )\n\n\ndef clean_server(server):\n    \"\"\"\n    Remove the containers we spawned on the server now that everything is finished\n\n    :param server: server to clean\n    \"\"\"\n    with console.status(status=None):\n        console.log(\"[bold]Cleaning up containers after execution\")\n        if not pod_exists(\"uyuni-health-check\", server=server):\n            console.log(\"[yellow]Skipped as the uyuni-health-check pod is not running\")\n        else:\n            podman(\n                [\n                    \"pod\",\n                    \"rm\",\n                    \"-f\",\n                    \"uyuni-health-check\",\n                ],\n                server,\n                console=console,\n            )\n            console.log(\"[green]Containers have been removed\")\n\n\n@click.group()\n@click.option(\n    \"-s\",\n    \"--server\",\n    default=None,\n    help=\"Uyuni Server to connect to if not running directly on the server\",\n)\n@click.option(\n    \"-v\",\n    \"--verbose\",\n    is_flag=True,\n    help=\"Show more stdout, including image building\",\n)\n@click.pass_context\ndef cli(ctx, server, verbose):\n    # ensure that ctx.obj exists and is a dict (in case `cli()` is called\n    # by means other than the `if` block below)\n    ctx.ensure_object(dict)\n    ctx.obj[\"server\"] = server\n    ctx.obj[\"verbose\"] = verbose\n\n    try:\n        console.log(\"[bold]Checking connection with podman:\")\n        ssh_call(server, [\"podman\", \"--version\"], console=console, quiet=False)\n    except HealthException as err:\n        console.log(\"[red bold]\" + str(err))\n        console.print(Markdown(\"# Execution Finished\"))\n        exit(1)\n\n\n@cli.command()\n@click.pass_context\ndef clean(ctx):\n    \"\"\"\n    Remove all the containers we spawned on the server\n\n    :param server: server where containers are running\n    \"\"\"\n    server = ctx.obj[\"server\"]\n    clean_server(server)\n    console.print(Markdown(\"# Execution Finished\"))\n\n\n@cli.command()\n@click.pass_context\ndef stop(ctx):\n    \"\"\"\n    Stop the containers on the server if already present\n\n    :param server: server where containers are running\n    \"\"\"\n    server = ctx.obj[\"server\"]\n    with console.status(status=None):\n        console.log(\"[bold]Stopping uyuni-health-check containers\")\n        if not pod_exists(\"uyuni-health-check\", server=server):\n            console.log(\"[yellow]Skipped as the uyuni-health-check pod does not exist\")\n        else:\n            podman(\n                [\n                    \"pod\",\n                    \"stop\",\n                    \"uyuni-health-check\",\n                ],\n                server,\n                console=console,\n            )\n            console.log(\"[green]Containers have been stopped\")\n    console.print(Markdown(\"# 
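The CLI above uses click's group/context pattern: the top-level group parses shared options into `ctx.obj`, and every subcommand reads them back through `@click.pass_context`. A minimal standalone sketch of that pattern:

```python
import click

@click.group()
@click.option("-s", "--server", default=None, help="Remote server to operate on")
@click.option("-v", "--verbose", is_flag=True)
@click.pass_context
def cli(ctx, server, verbose):
    # ensure_object gives every subcommand a shared dict to read from.
    ctx.ensure_object(dict)
    ctx.obj["server"] = server
    ctx.obj["verbose"] = verbose

@cli.command()
@click.pass_context
def status(ctx):
    click.echo(f"server={ctx.obj['server']} verbose={ctx.obj['verbose']}")

if __name__ == "__main__":
    cli()
```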
Execution Finished\"))\n\n\n@cli.command()\n@click.pass_context\ndef start(ctx):\n \"\"\"\n Start the containers on the server if already present\n\n :param server: server where to start the containers\n \"\"\"\n server = ctx.obj[\"server\"]\n with console.status(status=None):\n console.log(\"[bold]Starting uyuni-health-check containers\")\n if not pod_exists(\"uyuni-health-check\", server=server):\n console.log(\"[yellow]Skipped as the uyuni-health-check pod does not exist\")\n else:\n podman(\n [\n \"pod\",\n \"start\",\n \"uyuni-health-check\",\n ],\n server,\n console=console,\n )\n console.log(\"[green]Containers have been started\")\n console.print(Markdown(\"# Execution Finished\"))\n\n\n@cli.command()\n@click.option(\n \"-ep\",\n \"--exporter-port\",\n type=int,\n default=9000,\n help=\"uyuni health exporter metrics port\",\n)\n@click.option(\n \"--loki\",\n default=None,\n help=\"URL of an existing loki instance to use to fetch the logs\",\n)\n@click.option(\n \"--logs\",\n is_flag=True,\n help=\"Show the error logs\",\n)\n@click.option(\n \"-c\",\n \"--clean\",\n is_flag=True,\n help=\"Remove containers after execution\",\n)\n@click.pass_context\ndef run(ctx, exporter_port, loki, logs, clean):\n \"\"\"\n Start execution of Uyuni Health Check\n\n Build the necessary containers, deploy them, get the metrics and display them\n\n :param server: the server to connect to\n :param exporter_port: uyuni health exporter metrics port\n :param loki: URL to a loki instance. Setting it will skip the promtail and loki deployments\n \"\"\"\n server = ctx.obj[\"server\"]\n verbose = ctx.obj[\"verbose\"]\n try:\n with console.status(status=None):\n console.log(\"[bold]Creating POD for containers\")\n create_pod(server)\n\n console.log(\"[bold]Building logcli image\")\n build_loki_image(\"logcli\", server=server)\n\n console.log(\"[bold]Deploying promtail and Loki\")\n if not loki:\n run_loki(server)\n else:\n console.log(f\"[yellow]Skipped to use Loki at {loki}\")\n\n console.log(\"[bold]Preparing uyuni-health-exporter\")\n prepare_exporter(server, verbose=verbose)\n\n console.log(\"[bold]Preparing grafana\")\n prepare_grafana(server, verbose=verbose)\n\n console.log(\"[bold]Preparing prometheus\")\n prepare_prometheus(server, verbose=verbose)\n\n # Fetch metrics from uyuni-health-exporter\n console.log(\"[bold]Fetching metrics from uyuni-health-exporter\")\n metrics = fetch_metrics_exporter(server, exporter_port)\n\n # Check spacewalk services\n console.log(\"[bold]Checking spacewalk services\")\n check_spacewalk_services(server, verbose=verbose)\n\n # Check spacewalk services\n console.log(\"[bold]Checking postgresql service\")\n check_postgres_service(server)\n\n console.log(\"[bold]Waiting for loki to be ready\")\n host = server or \"localhost\"\n wait_loki_init(host)\n\n # Gather and show the data\n console.print(Markdown(\"# Results\"))\n show_data(metrics)\n\n console.print(Markdown(\"## Relevant Errors\"))\n loki_url = loki if loki else f\"http://{host}:3100\"\n show_error_logs_stats(loki_url)\n if logs:\n show_full_error_logs(loki_url)\n except HealthException as err:\n console.log(\"[red bold]\" + str(err))\n finally:\n if clean:\n clean_server(server)\n console.print(Markdown(\"# Execution Finished\"))\n\n\ndef fetch_metrics_exporter(host=\"localhost\", port=9000, max_retries=5):\n if not host:\n host = \"localhost\"\n\n for i in range(max_retries):\n try:\n metrics_raw = requests.get(f\"http://{host}:{port}\").content.decode()\n salt_metrics = re.findall(\n 
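`fetch_metrics_exporter` below wraps its HTTP call in a bounded retry loop: transient `RequestException`s are swallowed and retried after a pause, and only the final failure is surfaced. A generic sketch of that shape (the exporter URL in the comment matches the default port used above):

```python
import time
import requests

def fetch_with_retries(url: str, max_retries: int = 5, delay: float = 1.0) -> str:
    # Retry transient network errors; re-raise once the budget is spent.
    for attempt in range(max_retries):
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return response.content.decode()
        except requests.exceptions.RequestException:
            if attempt == max_retries - 1:
                raise
            time.sleep(delay)

# fetch_with_retries("http://localhost:9000")  # exporter endpoint used above
```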
r'salt_jobs{fun=\"(.+)\",name=\"(.+)\"} (.+)', metrics_raw\n )\n uyuni_metrics = re.findall(r'uyuni_summary{name=\"(.+)\"} (.+)', metrics_raw)\n salt_master_metrics = re.findall(\n r'salt_master_stats{name=\"(.+)\"} (.+)', metrics_raw\n )\n break\n except requests.exceptions.RequestException as exc:\n if i < max_retries - 1:\n time.sleep(1)\n console.log(\"[italic]retrying...\")\n else:\n console.log(\n \"[italic red]There was an error while fetching metrics from uyuni-health-exporter[/italic red]\"\n )\n print(f\"{exc}\")\n exit(1)\n\n if not salt_metrics or not uyuni_metrics or not salt_master_metrics:\n console.log(\n \"[yellow]Some metrics are still missing. Wait some seconds and execute again\"\n )\n return {}\n\n metrics = {\n \"salt_jobs\": {},\n \"salt_master_stats\": {},\n \"uyuni_summary\": {},\n }\n\n for m in salt_metrics:\n metrics[\"salt_jobs\"][m[0]] = float(m[2])\n\n for m in salt_master_metrics:\n metrics[\"salt_master_stats\"][m[0]] = float(m[1])\n\n for m in uyuni_metrics:\n metrics[\"uyuni_summary\"][m[0]] = float(m[1])\n\n console.log(\"[green]metrics have been successfully collected\")\n return metrics\n\n\ndef main():\n print(Markdown(\"# Uyuni Health Check\"))\n cli()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"uyuni-project/poc-uyuni-health-check","sub_path":"src/uyuni_health_check/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25689,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"22598898823","text":"# Sample code implementing LeNet-5 from Liu Liu\n\nimport tensorflow as tf\nimport numpy as np\nfrom CIFAR10 import CIFAR10\n\n# from tensorflow.examples.tutorials.mnist import input_data\n# mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nclass cnnCIFAR10(object):\n def __init__(self, data):\n self.lr = 1e-3\n self.epochs = 100\n self.batch_size = 100 \n self.data = data\n self.num_channels = 3\n self.pixel_width = int(np.sqrt(self.data.input_size / self.num_channels))\n # self.pixel_width = self.data.input_size\n self.test_batch, self.test_labels = self.data.get_test_data()\n self.build_graph()\n\n def build_graph(self):\n num_kernels_1 = 32\n num_kernels_2 = 64\n num_neurons_final = 1024\n\n self.x = tf.placeholder(tf.float32, shape=[None, self.data.input_size])\n\n self.y_ = tf.placeholder(tf.float32, shape=[None, self.data.output_size])\n\n # define conv-layer variables\n W_conv1 = self.weight_variable([5, 5, self.num_channels, num_kernels_1]) # first conv-layer has 32 kernels, size=5\n # W_conv1 = self.weight_variable([5, 5, 32, self.num_channels])\n b_conv1 = self.bias_variable([num_kernels_1])\n W_conv2 = self.weight_variable([5, 5, num_kernels_1, num_kernels_2])\n b_conv2 = self.bias_variable([num_kernels_2])\n\n # print(self.x.shape)\n x_image = tf.reshape(self.x, [-1, self.pixel_width, self.pixel_width, self.num_channels])\n\n h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = self.max_pool_2x2(h_conv1)\n h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = self.max_pool_2x2(h_conv2)\n # print(h_pool2.shape)\n\n # densely/fully connected layer\n W_fc1 = self.weight_variable([int(h_pool2.shape[1] * h_pool2.shape[1]) * num_kernels_2, num_neurons_final])\n b_fc1 = self.bias_variable([num_neurons_final])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, int(h_pool2.shape[1] * h_pool2.shape[1]) * num_kernels_2])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout regularization\n 
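The dropout step below hinges on `keep_prob` being a placeholder: the script feeds 0.5 during training and 1.0 at evaluation, which turns dropout into the identity at test time. A tiny demonstration of that idiom, assuming TensorFlow 1.x as in this script:

```python
import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, as in the surrounding code

x = tf.placeholder(tf.float32, shape=[None, 4])
keep_prob = tf.placeholder(tf.float32)
dropped = tf.nn.dropout(x, keep_prob)  # kept units are scaled by 1/keep_prob

with tf.Session() as sess:
    data = np.ones((1, 4), dtype=np.float32)
    # Training-style pass: roughly half the units zeroed, survivors scaled by 2.
    print(sess.run(dropped, feed_dict={x: data, keep_prob: 0.5}))
    # Evaluation pass: keep_prob=1.0 makes dropout a no-op.
    print(sess.run(dropped, feed_dict={x: data, keep_prob: 1.0}))
```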
self.keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n # linear classifier\n W_fc2 = self.weight_variable([1024, 10])\n b_fc2 = self.bias_variable([10])\n\n self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))\n self.train_step = tf.train.AdamOptimizer(self.lr).minimize(cross_entropy)\n\n def train(self):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n init = tf.global_variables_initializer()\n self.sess.run(init)\n self.eval() # creating evaluation\n batch = self.data.get_batch(self.batch_size)\n for i in range(self.epochs):\n # batch = mnist.train.next_batch(self.batch_size)\n if i % 100 == 0:\n train_acc = self.sess.run(self.accuracy, feed_dict={self.x: self.test_batch, self.y_: self.test_labels, self.keep_prob: 1.0})\n print('step %d, training accuracy %g' % (i, train_acc))\n try:\n x = next(batch)\n y = next(batch)\n except:\n batch = self.data.get_batch(self.batch_size)\n x = next(batch)\n y = next(batch)\n # print(x.shape, y.shape)\n # stop\n self.sess.run([self.train_step], feed_dict={self.x: x, self.y_: y, self.keep_prob: 0.5})\n\n def eval(self):\n correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n def test_eval(self):\n self.eval()\n # test_acc = self.sess.run(self.accuracy, feed_dict={\n # self.x: mnist.test.images, self.y_: mnist.test.labels, self.keep_prob: 1.0})\n print(self.test_batch.shape, self.test_labels.shape)\n test_acc = self.sess.run(self.accuracy, feed_dict={self.x: self.test_batch,\n self.y_: self.test_labels,\n self.keep_prob: 1.0})\n print('test accuracy %g' % test_acc)\n\n def weight_variable(self, shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(self, shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def conv2d(self, x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n def deconv2d(self, x, W):\n return tf.nn.conv2d_transpose(x, W, strides=[1, 1, 1, 1,], padding='SAME')\n\n def max_pool_2x2(self, x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef main():\n data = CIFAR10()\n cnn = cnnCIFAR10(data)\n cnn\n cnn.train()\n cnn.test_eval()\n return\n\nmain()\n","repo_name":"HoliestCow/ece692_deeplearning","sub_path":"project3/lenet5liu_CIFAR10.py","file_name":"lenet5liu_CIFAR10.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17170911911","text":"import pickle\nfrom re import X\nimport pandas as pd\nimport os, sys\npath_name = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))\nsys.path.append(path_name)\nfrom sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, classification_report\nfrom utils.utilities import generate_roc_curve, generate_goal_rate_curve, generate_cumu_goal_curve\nfrom utils.utilities import get_shot_array, get_goal_rate_shot_percentile\nfrom sklearn.calibration import calibration_curve, CalibrationDisplay\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n# from matplotlib.gridspec import GridSpec\nplt.rc(\"font\", 
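The fully connected layer size in the CNN above follows from simple shape arithmetic: SAME-padded stride-1 5x5 convolutions preserve spatial size, and each 2x2/stride-2 max-pool halves it, so a 32x32 CIFAR-10 image ends up as an 8x8 map with 64 channels before flattening. A quick check of that arithmetic, assuming the 32x32 input size:

```python
def same_conv_pool_width(width: int, pools: int) -> int:
    # SAME-padded stride-1 conv keeps the width; each 2x2/stride-2 pool halves it.
    for _ in range(pools):
        width //= 2
    return width

width = same_conv_pool_width(32, pools=2)   # 32 -> 16 -> 8
flat = width * width * 64                   # 64 kernels in the second conv layer
print(width, flat)                          # 8 4096
```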
size=14)\nimport seaborn as sns\nsns.set(style=\"white\")\nsns.set(style=\"whitegrid\", color_codes=True)\n\n\nRANDOM_SEED = 1337\n\n\n\ndatafile_std = '../data/ms2Q4_STD.csv'\n\n\ndef data_load(dataset_name, columns=None):\n    dataset=pd.read_csv(dataset_name)\n    if columns is not None:\n        dataset = dataset[columns]\n\n    dataset.dropna(axis = 0, inplace = True)\n    X = dataset.drop(['Is goal'], axis=1)\n    y = dataset[['Is goal']]\n    # print(X.head())\n    # print(y.head())\n\n    return train_test_split(X, y)\n\ndef generate_calibration_display(clf, arr_x_test, arr_y_test, arr_y_pred, arr_y_prob, plot_name, label_names, save = False, show = True): \n    fig, ax1 = plt.subplots()\n    # gs = GridSpec(4, 2)\n    # ax_calibration_curve = fig.add_subplot(111)\n    for i in range(len(clf)):\n        display = CalibrationDisplay.from_estimator(\n            clf[i],\n            arr_x_test[i],\n            arr_y_test[i],\n            n_bins=20,\n            name=label_names[i],\n            ax=ax1,\n        )\n    # ax1.grid()\n    ax1.set_title(\"Reliability Curve\")\n    plt.legend(loc=\"upper left\")\n\n    # plt.title('Calibration curves')\n\n    if save:\n        try: \n            # plt.savefig(\"../../figs/\" + plot_name)\n            # plt.savefig(\"../../figs/\" + 'calibration_curve.png')\n            plt.savefig(plot_name)\n        except: \n            print(f\"failed to save plot {plot_name}\")\n    if show: \n        plt.show()\n\nmodels = ['rbf_svc.sav', 'random_forest_300_6_10.sav', \"random_forest_300_9.sav\", \"random_forest_500_6.sav\"]\nlabels = [x[:-4] for x in models]\n\narr_y_test, arr_x_test, arr_y_pred, arr_prob, arr_shot_percentile, arr_goal_rate, arr_goal_cumu, arr_clf = [], [], [], [], [], [], [], []\nfilepath = '../models/saved/'\n\nfor i, model in enumerate(models): \n\n    file = filepath + model\n    loaded_model = pickle.load(open(file, 'rb'))\n\n    _, x_test, _, y_test = data_load(datafile_std)\n    y_pred = loaded_model.predict(x_test)\n\n    y_prob = loaded_model.predict_proba(x_test)\n    arr_y_test.append(y_test)\n    arr_x_test.append(x_test)\n    arr_y_pred.append(y_pred)\n    arr_prob.append(y_prob)\n    arr_clf.append(loaded_model)\n    shots_array = get_shot_array(y_prob, y_test)\n    shot_percentile, goal_rate, goal_cumu = get_goal_rate_shot_percentile(shots_array, bin_size=1)\n\n    arr_shot_percentile.append(shot_percentile)\n    arr_goal_rate.append(goal_rate)\n    arr_goal_cumu.append(goal_cumu)\n\n\n##generate_roc_curve(arr_y_test, arr_y_pred, arr_prob, label_names = labels, plot_title = 'roc_curve', save = False)\n#generate_goal_rate_curve(arr_shot_percentile, arr_goal_rate, label_names = labels, plot_title='Goal_Rate', save = False)\n#generate_cumu_goal_curve(arr_shot_percentile, arr_goal_cumu, label_names = labels, plot_title='Cumulative_goals', save = False)\ngenerate_calibration_display(arr_clf, arr_x_test, arr_y_test, y_pred, y_prob, plot_name='Reliability_curve', label_names = labels, save = False) \n","repo_name":"Alexia0328/NHL_Hockey_Game_Prediction","sub_path":"NHL-milestone-2/figs/part6_plots.py","file_name":"part6_plots.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
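`CalibrationDisplay.from_estimator` in the plotting script above re-predicts probabilities on the held-out split and bins them against the observed positive rate, which is what makes the reliability curve. A self-contained sketch with synthetic data instead of the project's saved models (assumes scikit-learn >= 1.0, where `CalibrationDisplay` was introduced):

```python
import matplotlib.pyplot as plt
from sklearn.calibration import CalibrationDisplay
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, random_state=1337)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1337)

clf = RandomForestClassifier(n_estimators=100, random_state=1337).fit(X_train, y_train)

# Reliability curve: predicted probability bins vs. observed frequency.
fig, ax = plt.subplots()
CalibrationDisplay.from_estimator(clf, X_test, y_test, n_bins=20, name="rf", ax=ax)
ax.set_title("Reliability Curve")
plt.show()
```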
{"seq_id":"26563490828","text":"#Secret Message\n'''\nProgram to encrypt the text given by the user.\nEncryption mode: the alphabet is reversed.\nEXAMPLE: Abcd -> zyxw\n\n'''\n\n\nimport string \nmsg=input().lower()\n#get the reversed alphabet\nalp=string.ascii_lowercase\nralp=alp[::-1]\nsmsg=list()\n#compare user text & reversed list to encrypt\nfor m in msg:\n    if m.isalpha():\n        #reset the counters for each character so every letter maps correctly\n        n=0\n        ni=0\n        for al in alp:\n            if al==m:\n                break\n            n+=1\n        for ral in ralp:\n            if ni==n:\n                smsg.append(ral)\n                break\n            ni+=1\n    else:\n        smsg.append(m)\nsm=\"\".join(smsg)\nprint(sm)","repo_name":"vino4d/pyProgram","sub_path":"secretMsg.py","file_name":"secretMsg.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39805064301","text":"import json\nfrom umqttsimple import MQTTClient\nimport ubinascii\nfrom machine import unique_id, reset\nfrom errorlog import ErrorLogger\nfrom led import Led\n\nclass MQTT:\n    def __init__(self):\n        with open('./config.json', 'r') as f:\n            self.__data = json.loads(f.read())\n\n        self.__host = self.__data['mqtt']['host']\n        self.__port = self.__data['mqtt']['port']\n        self.__topic = self.__data['mqtt']['topic']\n        self.__username = self.__data['mqtt']['username']\n        self.__password = self.__data['mqtt']['password']\n        self.__keepalive = 60\n\n        # connect to the MQTT broker\n        self.__mqtt = MQTTClient(\n            client_id=ubinascii.hexlify(unique_id()),\n            server=self.__host,\n            port=self.__port,\n            user=self.__username,\n            password=self.__password,\n            keepalive=self.__keepalive\n        )\n\n        self.__mqtt.connect()\n\n    def send_message(self, msg):\n        self.__mqtt.publish(self.__topic, msg)\n\n    def send_topic_message(self, topic, msg):\n        self.__mqtt.publish(topic, msg)\n\n    def set_on_message(self, f):\n        self.__mqtt.set_callback(f)\n\n    def listen(self):\n        self.__mqtt.subscribe(self.__topic)\n        while True:\n            try:\n                self.__mqtt.check_msg()\n            except OSError as e:\n                led = Led()\n                led.flash(3)\n                print(f\"MQTT error {e}\")\n                self.__mqtt.disconnect()\n                self.__mqtt.connect()\n                self.__mqtt.subscribe(self.__topic)\n                print(\"MQTT connection restored\")\n                led.turn_off()\n            except MemoryError as e:\n                print(e)\n                self.send_message(\"ERROR\")\n                error_logger = ErrorLogger()\n                error_times = error_logger.add_error('Memory error')\n                error_logger.retry(error_times)\n","repo_name":"jimchen5209-trunk/einkImageReceiver","sub_path":"mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"28419485919","text":"import math\nimport copy\nimport random\nfrom Representations.Point import Point\nfrom Representations.Circle import Circle\nimport Problems.ClassicalProblems as classics\n\n\n# Algorithm based on https://www.nayuki.io/page/smallest-enclosing-circle\n\ndef make_circle(points: list):\n    shuffled = copy.deepcopy(points)\n    random.shuffle(shuffled)\n\n    # Progressively add points to the circle, or recompute it\n    c = None\n    for (i, p) in enumerate(shuffled):\n        # side of circle: 1 = inside, 0 = on the boundary, -1 = outside\n        if c is None or classics.side_of_circle(c, p) == -1:\n            c = _make_circle_one_point(shuffled[: i + 1], p)\n    return c\n\n\n# One known boundary point\ndef _make_circle_one_point(points: list, p: Point):\n    c = Circle(p, 0)\n    for (i, q) in enumerate(points):\n        # side of circle: 1 = inside, 0 = on the boundary, -1 = outside\n        if classics.side_of_circle(c, q) == -1:\n            if c.get_radius() == 0.0:\n                c = make_diameter(p, q)\n            else:\n                c = _make_circle_two_points(points[: i + 1], p, q)\n    return c\n
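The reversed-alphabet substitution in secretMsg.py is the classical Atbash cipher; once the fixed mapping is built, `str.translate` replaces the whole nested-loop scan with a single call. A compact alternative sketch, not part of the original script:

```python
import string

# Atbash: map each letter to its mirror (a<->z, b<->y, ...).
alphabet = string.ascii_lowercase
table = str.maketrans(alphabet, alphabet[::-1])

def atbash(text: str) -> str:
    # Non-letters pass through untouched, matching the script above.
    return text.lower().translate(table)

assert atbash("Abcd") == "zyxw"
print(atbash("hello, world"))  # prints "svool, dliow"
```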
# Two known boundary points\ndef _make_circle_two_points(points: list, p: Point, q: Point):\n    circ = make_diameter(p, q)\n    left = None\n    right = None\n    px = p.get_x()\n    py = p.get_y()\n    qx = q.get_x()\n    qy = q.get_y()\n\n    # for each point that is not inside the circle\n    for r in points:\n        if classics.side_of_circle(circ, r) >= 0:\n            continue\n\n        # Form a circumcircle and classify it on the left or right side\n        cross = _cross_product(px, py, qx, qy, r.get_x(), r.get_y())\n        c = make_circumcircle(p, q, r)\n        if c is None:\n            continue\n        elif cross > 0.0 and (\n                left is None or _cross_product(px, py, qx, qy, c.get_centre().get_x(), c.get_centre().get_y()) >\n                _cross_product(px, py, qx, qy, left.get_centre().get_x(), left.get_centre().get_y())):\n            left = c\n        elif cross < 0.0 and (\n                right is None or _cross_product(px, py, qx, qy, c.get_centre().get_x(), c.get_centre().get_y()) <\n                _cross_product(px, py, qx, qy, right.get_centre().get_x(), right.get_centre().get_y())):\n            right = c\n\n    # select which circle to return\n    if left is None and right is None:\n        return circ\n    elif left is None:\n        return right\n    elif right is None:\n        return left\n    else:\n        return left if (left.get_radius() <= right.get_radius()) else right\n\n\ndef make_circumcircle(p0: Point, p1: Point, p2: Point):\n    ax = p0.get_x()\n    ay = p0.get_y()\n    bx = p1.get_x()\n    by = p1.get_y()\n    cx = p2.get_x()\n    cy = p2.get_y()\n    ox = (min(ax, bx, cx) + max(ax, bx, cx)) / 2.0\n    oy = (min(ay, by, cy) + max(ay, by, cy)) / 2.0\n    ax -= ox\n    ay -= oy\n    bx -= ox\n    by -= oy\n    cx -= ox\n    cy -= oy\n    d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0\n    if d == 0.0:\n        return None\n    x = ox + ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (ay - by)) / d\n    y = oy + ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (bx - ax)) / d\n    ra = math.hypot(x - p0.get_x(), y - p0.get_y())\n    rb = math.hypot(x - p1.get_x(), y - p1.get_y())\n    rc = math.hypot(x - p2.get_x(), y - p2.get_y())\n    return Circle(Point(x, y), max(ra, rb, rc))\n\n\ndef make_diameter(p0: Point, p1: Point):\n    cx = (p0.get_x() + p1.get_x()) / 2.0\n    cy = (p0.get_y() + p1.get_y()) / 2.0\n    r0 = math.hypot(cx - p0.get_x(), cy - p0.get_y())\n    r1 = math.hypot(cx - p1.get_x(), cy - p1.get_y())\n    return Circle(Point(cx, cy), max(r0, r1))\n\n\n# Returns twice the signed area of the triangle defined by (x0, y0), (x1, y1), (x2, y2).\ndef _cross_product(x0, y0, x1, y1, x2, y2):\n    return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)\n\n\nif __name__ == '__main__':\n    points = [Point(1, 1), Point(4, 3), Point(5, 3), Point(3, 1)]\n    c = make_circle(points)\n","repo_name":"arthurmteodoro/computational-geometry","sub_path":"Problems/Complex_Problems/Smallest_Circle.py","file_name":"Smallest_Circle.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"26563490828","text":"#!/usr/bin/python3\nimport time\nimport pyautogui\nimport cv2\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom image_recognition import find_button\nfrom config_parser import get_config_string\nfrom config_parser import get_config_bool\nfrom check_alfaview import alfaview_is_opened\n\n# ------Config---------\ndisplayName = get_config_string('Alfaview', 'username')\ndriverPath = get_config_string('DEFAULT', 'chromedriverPath')\nroom_number = get_config_string('Alfaview', 'room')\nroom_link = get_config_string('rooms', 
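A randomized construction like `make_circle` above is easy to sanity-check by brute force: every input point must end up inside (or on) the returned circle, up to a tolerance. A hedged checker using plain tuples instead of the project's `Point`/`Circle` classes:

```python
import math

def circle_contains_all(cx, cy, r, points, eps=1e-9):
    # Brute-force validity check for a smallest-enclosing-circle result:
    # no point may lie farther than r (plus tolerance) from the centre.
    return all(math.hypot(px - cx, py - cy) <= r + eps for px, py in points)

points = [(1, 1), (4, 3), (5, 3), (3, 1)]   # same sample as the __main__ above
# For these points, the circle on the diameter (1,1)-(5,3) is a plausible answer:
cx, cy = 3.0, 2.0
r = math.hypot(5 - cx, 3 - cy)
print(circle_contains_all(cx, cy, r, points))  # True
```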
room_number)\n# --------------------\n\n\ndef open_browser():\n print('Opening browser..')\n driver = webdriver.Chrome(driverPath)\n driver.get(room_link)\n\n time.sleep(2)\n\n name_field = driver.find_element_by_name('displayName')\n name_field.clear()\n name_field.send_keys(displayName)\n name_field.send_keys(Keys.ENTER)\n\n time.sleep(2)\n\n # find checkbox an click it\n terms_checkbox_list = driver.find_elements_by_name('acceptedTerms')\n for terms_checkbox in terms_checkbox_list:\n if terms_checkbox.get_attribute('type') == 'checkbox':\n terms_checkbox.click()\n\n # find second checkbox and click it\n privacy_checkbox_list = driver.find_elements_by_name('acceptedPrivacy')\n for privacy_checkbox in privacy_checkbox_list:\n if privacy_checkbox.get_attribute('type') == 'checkbox':\n privacy_checkbox.click()\n\n enter_button = driver.find_element_by_xpath(\"/html/body/div[@id='app']/div[@class='v-dialog__content v-dialog__content--active']/div[@class='v-dialog v-dialog--active']/div[@class='av-panel-wrapper']/div[@class='av-panel radius-large']/div[@class='av-panel__content']/div[@class='px-5 pt-4 av-guest-join__dialog-bottom-padding text-xs-center']/div[@class='px-3']/a[@class='button-min-width-x-large mt-4 v-btn v-btn--depressed v-btn--round theme--light av-button av-button--primary']\")\n enter_button.click()\n\n print('Opened browser..')\n\n time.sleep(2)\n open_alfaview()\n\n # close browser\n time.sleep(5)\n driver.quit()\n\n\n\n\ndef open_alfaview():\n print(\"opening alfaview\")\n\n click_skip_update = get_config_bool('Alfaview', 'alfaviewSkipUpdate')\n open_button = cv2.imread(get_config_string('Buttons','open_alfaview_button'))\n skip_update_button = cv2.imread(get_config_string('Buttons', 'skip_update_button'))\n join_rooom_button = cv2.imread(get_config_string('Buttons', 'join_room_button'))\n\n\n pyautogui.screenshot(\"screenshot1.png\")\n screenshot1 = cv2.imread('screenshot1.png')\n cord1 = find_button(screenshot1, open_button)\n pyautogui.click(cord1[0], cord1[1])\n time.sleep(1)\n\n while(not alfaview_is_opened('alfaview')):\n print('waiting for alfaview to open')\n time.sleep(0.2)\n\n if(click_skip_update):\n pyautogui.screenshot(\"screenshot2.png\")\n screenshot2 = cv2.imread('screenshot2.png')\n cord2 = find_button(screenshot2, skip_update_button)\n pyautogui.click(cord2[0], cord2[1])\n time.sleep(1)\n\n pyautogui.screenshot(\"screenshot3.png\")\n screenshot3 = cv2.imread('screenshot3.png')\n cord3 = find_button(screenshot3, join_rooom_button)\n pyautogui.click(cord3[0], cord3[1])\n time.sleep(1)\n\n print('Opened alfaview..')\n\n\n\ndef main():\n open_browser()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xAquaCulinaris/alfaview_recorder","sub_path":"open_alfaview.py","file_name":"open_alfaview.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30160174096","text":"\"\"\"\n# Game development with Pygame - Python Tutorial\n# https://pythonspot.com/game-development-with-pygame/\n\"\"\"\n# initialize()\n# while running():\n# game_logic()\n# get_input()\n# update_screen()\n# deinitialize()\n\nimport pygame\nfrom pygame.locals import *\n\nclass MyApp(object):\n posXY = (10, 10)\n winSize = (640, 480)\n filename_img = \"statics\\\\pygame.png\"\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self._image_surf = None\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.winSize, 
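The fixed `time.sleep(2)` pauses in open_alfaview.py are fragile: the page may be slower or faster than the hard-coded delay. Selenium's explicit waits block only until the element actually appears. A hedged sketch of that alternative (generic URL and selector; assumes chromedriver on PATH):

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://example.org/")

# Wait up to 10 s for the element instead of a fixed time.sleep():
wait = WebDriverWait(driver, 10)
heading = wait.until(EC.presence_of_element_located((By.TAG_NAME, "h1")))
print(heading.text)
driver.quit()
```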
pygame.HWSURFACE)\n self._running = True\n self._image_surf = pygame.image.load(self.filename_img).convert()\n\n def on_event(self, event):\n if event.type == QUIT:\n self._running = False\n\n def on_loop(self):\n pass\n\n def on_render(self):\n self._display_surf.blit(self._image_surf, self.posXY)\n pygame.display.flip()\n\n def on_cleanup(self):\n pygame.quit()\n\n def on_execute(self):\n if self.on_init() == False:\n self._running = False\n\n while(self._running):\n for event in pygame.event.get():\n self.on_event(event)\n self.on_loop()\n self.on_render()\n self.on_cleanup()\n\n\n\n\nif __name__ == \"__main__\" :\n ma = MyApp()\n ma.on_execute()\n","repo_name":"onitonitonito/k_mooc_reboot","sub_path":"module_pygame/simple_dev_with_pg.py","file_name":"simple_dev_with_pg.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1913164728","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.add_course,name='add_course'),\n path('add_student',views.add_student,name='add_student'),\n path('add_tutor',views.add_tutor,name='add_tutor'),\n path('sign_up',views.sign_up,name='sign_up'),\n path('log_in',views.log_in,name='log_in'),\n path('log_out',views.log_out,name='log_out'),\n path('home',views.home,name='home'),\n path('show_student',views.show_student,name='show_student'),\n path('show_course',views.show_course,name='show_course'),\n path('show_tutor',views.show_tutor,name='show_tutor'),\n path('student_details/',views.student_details,name='student_details'),\n path('edit_student/',views.edit_student,name='edit_student'),\n path('delete_student/',views.delete_student,name='delete_student'),\n path('edit_tutor/',views.edit_tutor,name='edit_tutor'),\n path('delete_tutor/',views.delete_tutor,name='delete_tutor'),\n path('edit_course/',views.edit_course,name='edit_course'),\n path('delete_course/',views.delete_course,name='delete_course'),\n \n]","repo_name":"rahulpml/may2020","sub_path":"collegeApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30349456009","text":"n = int(input())\nlst = list(map(int, input().split(' ')))\nlst = sorted(lst)\nm = int(input())\nprob = list(map(int, input().split(' ')))\n\n\ndic = {}\nfor i in range(n):\n if lst[i] in dic.keys():\n dic[lst[i]] += 1\n else:\n dic[lst[i]] = 1\n\n\nanswer = []\n\nfor i in range(m):\n if prob[i] in dic.keys():\n answer.append(dic[prob[i]])\n else:\n answer.append(0)\n\n\nprint(\" \".join(map(str, answer)))\n","repo_name":"Kim-Young-Hoo/boj_algorithms","sub_path":"백준/Silver/10816. 
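The card-counting solution above builds its frequency dict by hand; `collections.Counter` provides exactly that, including an implicit 0 for values that never appear. An equivalent sketch with the problem's sample data hard-coded instead of read from stdin:

```python
from collections import Counter

cards = [6, 3, 2, 10, 10, 10, -10, -10, 7, 3]   # sample deck
queries = [10, 9, -5, 2, 3, 4, 5, -10]

counts = Counter(cards)                 # value -> multiplicity
answer = [counts[q] for q in queries]   # Counter returns 0 for absent keys
print(" ".join(map(str, answer)))       # 3 0 0 1 2 0 0 2
```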
숫자 카드 2/숫자 카드 2.py","file_name":"숫자 카드 2.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71497581974","text":"import os\nimport sys\nimport pathlib\n# test_path = os.environ[\"TEST_PATH\"]\n\n# if test_path is not None:\n\n    # parent = pathlib.Path(test_path).parent.absolute()\n    # child = pathlib.PurePath(test_path).name\n    # sys.path.append(str(parent))\n\n    # exec('from {}.models import resnet as resnet'.format(child))\n    # exec('from {}.models import res16unet as res16unet'.format(child))\n    # exec('from {}.models import mink_transformer as mink_transformer'.format(child))\n    # exec('from {}.models import mink_transformer_voxel as mink_transformer_voxel'.format(child))\n    # exec('from {}.models import point_transformer as point_transformer'.format(child))\n    # exec('from {}.models import mixed_transformer as mixed_transformer'.format(child))\n\n# else:\n    # import models.resnet as resnet\n    # import models.res16unet as res16unet\n    # import models.mink_transformer as mink_transformer\n    # import models.mink_transformer_voxel as mink_transformer_voxel\n    # import models.point_transformer as point_transformer\n    # import models.mixed_transformer as mixed_transformer\n\n# import models.resnet as resnet\nimport models.res16unet as res16unet\n# import models.mink_transformer as mink_transformer\n# import models.mink_transformer_voxel as mink_transformer_voxel\n# import models.point_transformer as point_transformer\n# import models.mixed_transformer as mixed_transformer\n\n\nMODELS = []\n\n\ndef add_models(module):\n    MODELS.extend([getattr(module, a) for a in dir(module) if ('Net' in a or 'Transformer' in a)])\n\n\n# add_models(resnet)\nadd_models(res16unet)\n# add_models(mink_transformer)\n# add_models(point_transformer)\n# add_models(mink_transformer_voxel)\n# add_models(mixed_transformer)\n\ndef get_models():\n    '''Returns a tuple of sample models.'''\n    return MODELS\n\ndef load_model(name):\n    '''Returns the model class given its class name.\n    '''\n    # Find the model class from its name\n    all_models = get_models()\n    mdict = {model.__name__: model for model in all_models}\n    if name not in mdict:\n        print('Invalid model name. Options are:')\n        # Display a list of valid model names\n        for model in all_models:\n            print('\\t* {}'.format(model.__name__))\n        return None\n    NetClass = mdict[name]\n\n    return NetClass\n","repo_name":"A-suozhang/CodedVTR","sub_path":"models_/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"}
{"seq_id":"2028331558","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseBadRequest, JsonResponse\nimport datetime\n\n\n# Render the page that asks the user for a date\ndef main(request):\n    return render(request, 'counter/main.html')\n\n\ndef week_number(request):\n    # Try to parse the submitted date into a datetime\n    try:\n        a_date = datetime.datetime.strptime(request.POST.get('date'), '%d.%m.%Y')\n    except ValueError:\n        # If parsing fails, send back an HTML snippet that tells the user about the error\n        html = \"

Error: Invalid date format! The date must have the form dd.mm.yyyy and must be a real date

\"\n        response = JsonResponse({\"answer\": html})\n        response.status_code = 400\n        return response\n\n    # Get the name of the day\n    a_date_name = a_date.strftime(\"%A\")\n\n    # Get the ISO week number\n    a_week_number = a_date.isocalendar()[1]\n    print(a_week_number)\n    # Get the week number of the following day, used for Sundays (so that Sunday counts as the first day of the week)\n    next_day_week_number = a_date + datetime.timedelta(1)\n    next_day_week_number = next_day_week_number.isocalendar()[1]\n\n\n    '''\n    The issue is that there is such a thing as a production (business) calendar. According to it, the first week of a new year\n    can count as the last week of the previous year if the year starts later than Wednesday. For example, 1 January 2021 counts\n    as week 53 (that is, the end of the last week of the previous year). To work around this, uncomment the code below;\n    it will count \"by the human-friendly calendar\", but then all subsequent weeks shift by +1 week.\n    '''\n    # # Check the week number of the first day of the selected year\n    # first_day_week = datetime.datetime(a_date.year,1,1).isocalendar()[1]\n    month_num = a_date.month\n\n    # # Check for a day from the first \"wrong\" week\n    # if first_day_week > 1 and 50 < a_week_number < 53 and month_num == 1:\n    #     a_week_number = 1\n    # # Shift the number of all subsequent weeks by one\n    # elif first_day_week > 1:\n    #     a_week_number += 1\n    #     next_day_week_number += 1\n\n    if a_date_name == 'Sunday':\n        # If the chosen date is a Sunday, send the following week as the week number\n        return JsonResponse({\"answer\": next_day_week_number}, status=200)\n    else:\n        # For any day other than Sunday, just send the week number\n        return JsonResponse({\"answer\": a_week_number}, status=200)\n","repo_name":"no0neCare/fromdayweekcounter","sub_path":"counter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
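The ISO-week quirk that the view above works around is easy to see directly: `datetime.isocalendar()` assigns the first days of some calendar years to week 52 or 53 of the previous ISO year. A quick demonstration:

```python
import datetime

for day in [datetime.date(2021, 1, 1),   # Friday  -> ISO week 53 of 2020
            datetime.date(2021, 1, 4),   # Monday  -> ISO week 1 of 2021
            datetime.date(2023, 1, 1)]:  # Sunday  -> ISO week 52 of 2022
    year, week, weekday = day.isocalendar()
    print(day, "-> ISO year", year, "week", week, "weekday", weekday)
```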
{2})\".format(target_cmssw,refer_cmssw,step,data,sys.argv[4])\n\ntarget_df.columns=[\"name\",\"target_cumulative\",\"target_pct\",\"targe_spontaneous\"]\nrefer_df.columns=[\"name\",\"refer_cumulative\",\"refer_pct\",\"refer_spontaneous\"]\n\ntarget_df = target_df.rename_axis('target_rank').reset_index()\nrefer_df = refer_df.rename_axis('refer_rank').reset_index()\n\nmerged_df = pd.merge(target_df,refer_df,left_on='name',right_on='name',how='inner')\nmerged_df[\"Delta\"] = (merged_df.apply(lambda x: (x.target_cumulative-x.refer_cumulative)/x.refer_spontaneous, axis='columns')*100).round(2).astype(str)+'%'\nmerged_df['name'] = merged_df['name'].str.split('>,',n=1,expand=True)[0]+ \" ...\"\n\n\nprint(merged_df[\"target_cumulative\"].sum(),merged_df[\"refer_cumulative\"].sum())\n\nfig = make_subplots(\n\trows=2, cols=1,\n\trow_heights=[0.5,0.5],\n\tvertical_spacing=0.05,\n\tspecs=[[{\"type\":\"table\"}],\n\t [{\"type\":\"xy\"}]]\n)\n\nfig.add_trace(go.Table(\n columnorder = [1,2,3,4,5,6,7,8,9],\n columnwidth = [30,30,30,30,30,30,30,30,200],\n header=dict(values=list([\"index\",\"Rank(New)\",\"Rank(Old)\",\"Percent(New)\",\"Percent(Old)\",\"Cumulative(New)\",\"Cumulative(Old)\",\"Delta\",\"name\"]),\n fill_color=['black','#4b778d','#28b5b5','#4b778d','#28b5b5','#4b778d','#28b5b5','#194350','#194350'],\n\t\theight = 40,\n\t\tfont=dict(color='white',size=12),\n align='center'),\n cells=dict(values=[merged_df.index,merged_df.target_rank,merged_df.refer_rank,merged_df.target_pct,merged_df.refer_pct,merged_df.target_cumulative,merged_df.refer_cumulative,merged_df.Delta,merged_df.name],\n fill_color='white',\n\t\tfont=dict(color='black',size=12),\n\t\theight = 20,\n align=['center','center','center','right','right','right','right','right','left'])),\n\t\trow=1,col=1\n)\n\nfig.add_trace(go.Bar(name=\"old\",x=merged_df.index[:50],y=merged_df.target_cumulative[:50]),\n\t\trow=2,col=1\n)\n\nfig.add_trace(go.Bar(name=\"new\",x=merged_df.index[:50],y=merged_df.refer_cumulative[:50]),\n\t\trow=2,col=1\n)\n\nfig.update_layout(\n\ttitle = {\n\t'text':main_title,\n\t'x':0.5,\n\t'y':0.98,\n 'xanchor':'center',\n 'yanchor':'top',\n 'font':dict(size=20)},\n\tlegend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=0.5,\n xanchor=\"right\",\n x=1\n))\nfig.update_xaxes(title_text=\"index\",row=2,col=1)\nfig.update_yaxes(title_text=\"cumulative\",row=2,col=1)\n\nio.write_html(fig,html_name)\n","repo_name":"xoqhdgh1002/cms-reco-profiling-web","sub_path":"doEvent_table.py","file_name":"doEvent_table.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71136896855","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.secret_key = 'your_secret_key' # Замените на секретный ключ для безопасности\n\n# Настройки базы данных SQLite\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tasks.db'\ndb = SQLAlchemy(app)\n\nclass Task(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200), nullable=False)\n description = db.Column(db.String(500), nullable=True)\n priority = db.Column(db.String(10), nullable=False, default='Low')\n due_date = db.Column(db.DateTime, nullable=True)\n completed = db.Column(db.Boolean, default=False)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n title = request.form['title']\n 
description = request.form['description']\n        priority = request.form['priority']\n        due_date = datetime.strptime(request.form['due_date'], '%Y-%m-%d')\n        task = Task(title=title, description=description, priority=priority, due_date=due_date)\n        db.session.add(task)\n        db.session.commit()\n        flash('Task added successfully!', 'success')\n        return redirect('/')\n    tasks = Task.query.all()\n    return render_template('index.html', tasks=tasks)\n\n@app.route('/complete/<int:id>')\ndef complete_task(id):\n    task = Task.query.get_or_404(id)\n    task.completed = True\n    db.session.commit()\n    flash('Task completed!', 'success')\n    return redirect('/')\n\nif __name__ == '__main__':\n    db.create_all()\n    app.run(debug=True)\n","repo_name":"Sokolkk/InnoTask","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26438529573","text":"# TO DELETE FOR NOW\n\nimport requests\nimport json\n\nurl = \"https://flashy-old-star.discover.quiknode.pro/b64d2659a0871f264e2cddcfdbd2ba054cc77498/\"\n\npayload = json.dumps({\n  \"id\": 67,\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"qn_fetchNFTsByCollection\",\n  \"params\": [{\n    \"collection\": \"0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D\",\n    \"omitFields\": [\n      \"traits\"\n    ],\n    \"page\": 1,\n    \"perPage\": 10\n  }]\n})\nheaders = {\n  'Content-Type': 'application/json'\n}\n\nresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\nprint(response.text)\n","repo_name":"andrewliu08/probo","sub_path":"backend/collection_fetch.py","file_name":"collection_fetch.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"8393100205","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab as pl\nimport seaborn as sns\n\n\n# # we can choose some representative persons to see the events distribution\n# df_plot=pd.read_csv('./contributor_events.csv')\n# names=[\"*yong*\",\"*mi*\",\"*joker*\"]\n# print(df_plot.head(5))\n# data=[df_plot.iloc[1],df_plot.iloc[6],df_plot.iloc[25]]\n# df_plt=pd.DataFrame(data,index=names).drop(['Unnamed: 0'],axis=1)\n# print(df_plt)\n# df_plt.plot(kind=\"bar\",stacked=True,figsize=(10,8))\n# pl.xticks(rotation=360)\n# plt.legend()\n# plt.show()\n\n\ndf_rader=pd.read_csv('./contributor_events_radar.csv')\ndf_data=df_rader.iloc[50]\nlabels = np.array(['Coding','Comment','Issue'])\nstats_1 = [df_data['target_code'],df_data['target_comment'],df_data['target_issue']]\nprint(stats_1)\nangles = np.linspace(0, 2*np.pi, len(labels), endpoint=False)\nprint(angles)\nstats = np.concatenate((stats_1, [stats_1[0]]))\nangles = np.concatenate((angles, [angles[0]]))\n\nfig = plt.figure()\nax = fig.add_subplot(111, polar=True)\nax.plot(angles, stats, 'o-', linewidth=2)\nax.fill(angles, stats, alpha=0.25)\n\n# Set the Chinese font\n# font = FontProperties(fname=r\"C:\\Windows\\Fonts\\simhei.ttf\", size=14)\nax.set_thetagrids(angles * 180/np.pi, labels)\nplt.show()","repo_name":"ZhennanWu/ECS260-21","sub_path":"plot_event.py","file_name":"plot_event.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23780465249","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom .routes import router as TranscriptionRouter\n\napp = FastAPI()\n\norigins = [\n    \"http://localhost:3000\",\n    
\"http://0.0.0.0:3000\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(TranscriptionRouter, prefix=\"/backend/transliteration\")\n","repo_name":"project-abgal/hatrami","sub_path":"api/server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36000949495","text":"import numpy\nimport pygad\nimport pygad.nn\nimport pygad.gann\nimport pandas as pd\nimport pickle\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('symptoms.csv')\n\n# creating instance of labelencoder\nlabelencoder = LabelEncoder()\n\n# Assigning numerical values and storing in another column\ndf['type'] = labelencoder.fit_transform(df['TYPE'])\n \n#Droping Type column\ndf.drop('TYPE', axis='columns', inplace=True)\n\n#Splitting the data set\ndata_input = df.loc[:,df.columns!='type']\ndata_output = df.iloc[:,-1]\nX_train, X_test, y_train, y_test = train_test_split(data_input ,data_output,test_size=.42)\n\n#converting to numpy array\ndata_inputs = X_train.to_numpy()\ndata_outputs = y_train.to_numpy()\n\nclass ModelAnn:\n\n def fitness_func(solution, sol_idx):\n global GANN_instance, data_inputs, data_outputs\n \n predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_idx],\n data_inputs=data_inputs)\n \n correct_predictions = numpy.where(predictions == data_outputs)[0].size\n solution_fitness = (correct_predictions/data_outputs.size)*100\n \n return solution_fitness\n \n def callback_generation(ga_instance):\n global GANN_instance\n \n population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_instance.population_networks, \n population_vectors=ga_instance.population)\n \n GANN_instance.update_population_trained_weights(population_trained_weights=population_matrices)\n \n print(\"Generation = {generation}\".format(generation=ga_instance.generations_completed))\n print(\"Accuracy = {fitness}\".format(fitness=ga_instance.best_solution()[1]))\n \n def predict(inputs, solution_idx):\n prediction = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx],\n data_inputs=inputs)\n return prediction\n\nGANN_instance = pygad.gann.GANN(num_solutions=10,\n num_neurons_input=20,\n num_neurons_hidden_layers=[2],\n num_neurons_output=4,\n hidden_activations=[\"relu\"],\n output_activation=\"softmax\")\n\npopulation_vectors = pygad.gann.population_as_vectors(population_networks=GANN_instance.population_networks)\n\nga_instance = pygad.GA(num_generations=50, \n num_parents_mating=3, \n initial_population=population_vectors.copy(),\n fitness_func=ModelAnn.fitness_func,\n mutation_percent_genes=5,\n callback_generation=ModelAnn.callback_generation)\n\nga_instance.run()\n\n\nga_instance.plot_result()\n\nglobal solution_idx \nsolution, solution_fitness, solution_idx = ga_instance.best_solution()\n\nelast_layer = GANN_instance.population_networks[solution_idx]\n\nprint(solution)\nprint(solution_fitness)\nprint(solution_idx)\n\npickle.dump(elast_layer, open('GA.pkl', 'wb'))\n\n\n\n\n","repo_name":"VonLagare/cfac19","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29861777702","text":"import sys\n\nn = 
int(input())\nif n == 1:\n print(int(input()))\n exit(0)\nif n == 2:\n print(int(input())+int(input()))\n exit(0)\n\nstairs = [0]\nmemo = [0 for _ in range(n+1)]\nfor _ in range(n):\n stairs.append(int(sys.stdin.readline()))\nmemo[0] = 0\nmemo[1] = stairs[1]\nmemo[2] = stairs[1]+stairs[2]\n\nfor i in range(3, n+1):\n memo[i] = max(memo[i-2]+stairs[i], memo[i-3]+stairs[i-1]+stairs[i])\nprint(memo[n])\n","repo_name":"Sora-CodingTestStudy/our-code","sub_path":"dp/jungrye/082_계단오르기.py","file_name":"082_계단오르기.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26630969482","text":"import numpy as np\nimport scipy as sp\n\n## All the constants are in SI units\n\n# Speed of light\nc = 299792458 # m/s\n\n# Earth's gravitational constant \nmu_earth = 3.986004418e14 # m^3/s^2\n\n# Earth's semi-major axis (WGS84)\na_earth = 6378137 # m\n\n# Earth's semi-minor axis (WGS84)\nb_earth = 6356752.3142 # m\n\n# Earth's reverse flattening factor (WGS84)\nf_earth = 1/298.257223563 # unitless\n\n# Earth's eccentricity\ne_earth = 0.081819221456 # unitless\n\n# Earth's equatorial radius\nR_earth = 6378137 # m\n\n# J2 correction term\nJ2 = 1.08262668e-3 # unitless\n\n# Perturbation constant for Earth's gravity\nmu_J2_r_earth_sq = mu_earth * J2 * R_earth**2 # m^5/s^2 \n\n# Earth's rotation rate\nomega_earth = 7.292115e-5 # rad/s\n\n# Seconds in a day\nsec_in_day = 24 * 60 * 60 # s \n\n# Seconds in a minute\nsec_in_min = 60 # s\n\n# Radians per revolution\nrad_per_rev = 2 * np.pi # rad/rev","repo_name":"BaldPolnareff/Keplerian_orbit_python_simulator","sub_path":"src/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43612686547","text":"from pylmkit.app import RolePlay\nfrom pylmkit.core.base import BaseWebUI\n\n\nclass RAGWebUI(BaseWebUI):\n def __init__(self,\n title=None,\n page_icon=None,\n layout=\"centered\",\n language='en',\n sidebar_title=None,\n sidebar_describe=None,\n footer_describe=None,\n logo1=None,\n logo2=None,\n greetings=None,\n placeholder=None,\n refer_name=None,\n ):\n super().__init__(\n title=title,\n page_icon=page_icon,\n layout=layout,\n language=language,\n sidebar_title=sidebar_title,\n sidebar_describe=sidebar_describe,\n footer_describe=footer_describe,\n logo1=logo1,\n logo2=logo2,\n greetings=greetings,\n placeholder=placeholder,\n refer_name=refer_name\n )\n\n\n","repo_name":"52phm/pylmkit","sub_path":"pylmkit/web/webui.py","file_name":"webui.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72019892694","text":"import requests\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\n \ndef base(request):\n content = {\n 'amount': 0,\n }\n if request.method == 'GET':\n return render(request, 'base.html', content)\n else:\n try:\n amount = float(request.POST['amount'])\n except Exception:\n amount = 0\n content['amount'] = amount\n if request.POST['country'] == 'Ethereum':\n content['res'] = 355.23 * amount\n else:\n content['res'] = 53.76 * amount\n return render(request, 'base.html', content)\n","repo_name":"BekzatBekturgan/Blockchain-Technology-KBTU","sub_path":"Converter 
Ethereum/converter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24851534411","text":"\"\"\"\n给定一棵二叉树,其中每个节点都含有一个整数数值(该值或正或负)。设计一个算法,打印节点数值总和等于某个给定值的所有路径的数量。注意,路径不一定非得从二叉树的根节点或叶节点开始或结束,但是其方向必须向下(只能从父节点指向子节点方向)。\n\n示例:\n给定如下二叉树,以及目标和 sum = 22,\n\n 5\n / \\\n 4 8\n / / \\\n 11 13 4\n / \\ / \\\n 7 2 5 1\n返回:\n\n3\n解释:和为 22 的路径有:[5,4,11,2], [5,8,4,5], [4,11,7]\n提示:\n\n节点总数 <= 10000\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/paths-with-sum-lcci\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom TreeHelper import TreeNode, create_tree\n\n\nclass Solution:\n \"\"\"利用树路径的前缀和\"\"\"\n\n def pathSum(self, root: TreeNode, sum: int) -> int:\n path = [0]\n ans = 0\n\n def dfs(node: TreeNode, node_path: list):\n if not node:\n return\n pre_sum = node_path[-1] + node.val\n ct = node_path.count(pre_sum - sum)\n nonlocal ans\n ans += ct\n node_path.append(pre_sum)\n dfs(node.left, node_path.copy())\n dfs(node.right, node_path.copy())\n\n dfs(root, path)\n return ans\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.pathSum(create_tree('[5,4,8,11,null,13,4,7,2,null,null,5,1]'), 22))\n","repo_name":"wanzhouyi/leetcode","sub_path":"程序员面试金典/4.树与图/04.12. 求和路径.py","file_name":"04.12. 求和路径.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34336617051","text":"\"\"\"\n Script to start rabbitmq consumers that\n 1. execute commands send to it\n 2. log their own life\n 3. log the execution of any command send to it\n\n Usage:\n python.exe -E worker.py queue_name\n\"\"\"\nimport sys\nimport os\nimport subprocess as sp\nimport threading\nfrom socket import gethostname\nfrom socket import gethostbyname\nfrom getpass import getuser\nimport json\n\nif __name__ == '__main__':\n if __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n from connect import gen_connection, gen_channel\n from logging_sql.logging_sql import Log\n from logging_sql.logging_sql_periodic import PeriodicLog\n\n\n##############################################################\n# Done with loading libraries\n##############################################################\n\ndef print_waiting_message():\n \"\"\" prints mesage whenever for whenever the worker is waiting for a message \"\"\"\n print(' [*] Waiting for messages. 
To exit press CTRL+C then Enter')\n\n\ndef log_detail_gen(rabbit_message, output, queue_name,):\n    \"\"\" generate a JSON string for the config column in the log table \"\"\"\n    log_detail = json.JSONEncoder().encode({\n        \"command\": \" \".join(rabbit_message[3:]).replace(\"'\", \"\"),\n        \"queue\": queue_name,\n        \"sender_machine\": rabbit_message[0],\n        \"sender_user\": rabbit_message[1],\n        \"sender_ip\": rabbit_message[2],\n        \"output\": output.replace(\"'\", \"\")\n    })\n\n    return log_detail\n\n\ndef scrape_cmd_output(sub_process):\n    \"\"\"\n    takes the subprocess screen output and returns it as a single string\n    \"\"\"\n    cmd_output = b\"\"\n    # Poll sub_process for new output until finished\n    while True:\n        nextline = sub_process.stdout.readline()\n\n        if nextline == b'' and sub_process.poll() is not None:\n            return cmd_output.decode()\n\n        cmd_output += nextline\n\n        sys.stdout.write(nextline.decode())\n        sys.stdout.flush()\n\n\ndef execute_cmd(ch, method, body, queue_name):\n    \"\"\"\n    main function that governs what happens to the rabbitmq message (body)\n\n    Keyword arguments:\n    ch -- channel\n    method --\n    body -- byte string, body of the rabbit message\n    queue_name -- string name of the queue this worker is listening to\n    \"\"\"\n\n    # split the message (body) string into words to parse\n    # the first three as machine, user and IP of the message sender and\n    # the rest as the actual message\n    rabbit_message = body.decode().split()\n\n    command = \" \".join(rabbit_message[3:])\n\n    try:\n        rabbit_work_log = Log(\n            app_name=\"rabbitmq\",\n            app_version=\"01.01.08052018\",\n            log_tb=\"log_devOps_rabbitmq\",\n            log_detail=log_detail_gen(rabbit_message, \" \", queue_name)\n        )\n\n        # actually execute the rabbit message in a shell\n        sub_process = sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT)\n        cmd_output = scrape_cmd_output(sub_process)\n        log_detail = log_detail_gen(rabbit_message, cmd_output, queue_name)\n        output = sub_process.communicate()[0]\n        exit_code = sub_process.returncode\n\n        if exit_code == 0:\n            # update the SQL log with success code\n            rabbit_work_log.update(100, log_detail)\n            print(\" [x] Process done.\")\n\n            print_waiting_message()\n            # send acknowledgement of message to rabbit server\n            ch.basic_ack(delivery_tag=method.delivery_tag)\n            return output\n\n        else:\n            raise sp.CalledProcessError(exit_code, command, output)\n\n    except sp.CalledProcessError as error:\n        # update the SQL log with failure code\n        rabbit_work_log.update(400, log_detail)\n        print(\" [x] Process done, but failed:\")\n        print(error.output)\n\n        print_waiting_message()\n        # send acknowledgement of message to rabbit server\n        ch.basic_ack(delivery_tag=method.delivery_tag)\n        return output\n\n\n# function that executes upon receipt of a new message\ndef outer_callback(queue_name):\n    \"\"\" function that gets called when a new message is received \"\"\"\n    def callback(ch, method, properties, body):\n        print(\" [x] Received %r\" % body)\n        thread = threading.Thread(target=execute_cmd, args=(ch, method, body, queue_name))\n        thread.start()\n    return callback\n\n\n##############################################\n#\n# rabbitmq main\n#\n##############################################\n\nif __name__ == '__main__':\n    with gen_connection() as connection:\n        queue = sys.argv[1]\n        channel = gen_channel(connection, queue)\n\n        channel.basic_consume(\n            outer_callback(queue),\n            queue=queue\n        )\n\n        # logging the life of a rabbit worker\n        log_worker_life = PeriodicLog(\n            app_name=\"rabbitmq\",\n            app_version=\"0.1.22022018\",\n            
log_tb=\"log_devOps_rabbitmq_life\",\n log_detail=queue,\n period=600\n )\n\n print_waiting_message()\n # start consuming messages!\n channel.start_consuming()\n","repo_name":"echna/rabbitmq","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27549908386","text":"from repository.repo import RepoError\r\nfrom validation.validators import ValidError\r\n\r\n\r\nclass Game:\r\n def __init__(self, service):\r\n self.service = service\r\n self.sentence = self.service.choose_a_sentence().strip()\r\n self.hangman_output = \"\"\r\n self.output_sentence = self.initial_output_sentence().strip()\r\n self.hangman = \"hangman\"\r\n\r\n def start_game(self):\r\n print(\"STARTING THE GAME: \")\r\n print(self.output_sentence)\r\n while True:\r\n _continue = self.game_round()\r\n if _continue == 1 or _continue == 0:\r\n return\r\n\r\n def initial_output_sentence(self):\r\n split = self.sentence.split(\" \")\r\n output_sentence = \"\"\r\n for word in split:\r\n word.strip()\r\n\r\n for i in range(0, len(word)):\r\n if i == 0 or i == len(word) - 1:\r\n output_sentence += word[i]\r\n else:\r\n output_sentence += \"_\"\r\n output_sentence += \" \"\r\n return output_sentence\r\n\r\n def choose_a_letter(self):\r\n while True:\r\n letter = input(\"pick a letter\")\r\n if len(letter) == 1 and letter.isalpha() is True:\r\n return letter\r\n print(\"choose a valid letter\")\r\n\r\n def output_with_letter(self):\r\n letter = self.choose_a_letter()\r\n if letter in self.sentence:\r\n self.reveal_letters_output(letter)\r\n else:\r\n index = len(self.hangman_output)\r\n print(index)\r\n print(self.hangman[index])\r\n self.hangman_output += self.hangman[index]\r\n\r\n def reveal_letters_output(self, letter):\r\n conversion = \"\"\r\n for i in range(0, len(self.sentence)):\r\n if self.sentence[i] == letter:\r\n conversion += letter\r\n else:\r\n conversion += self.output_sentence[i]\r\n print(self.output_sentence[i])\r\n self.output_sentence = conversion\r\n\r\n def game_round(self):\r\n self.output_with_letter()\r\n print(\"sentence:\")\r\n print(self.output_sentence)\r\n if self.output_sentence == self.sentence:\r\n print(\"YOU WON\")\r\n return 1\r\n elif self.hangman == self.hangman_output:\r\n print(\"YOU LOST\")\r\n return 0\r\n else:\r\n print(\"current hangman:\")\r\n print(self.hangman_output)\r\n\r\n\r\nclass UI:\r\n def __init__(self, service, repo):\r\n self.service = service\r\n self.repo = repo\r\n\r\n def boot_up(self):\r\n print(\"WELCOME TO HANGMAN\")\r\n print()\r\n while True:\r\n print(\"options: \")\r\n print(\"1. add a sentence\")\r\n print(\"2. play game\")\r\n print(\"3. 
exit\")\r\n user_choice = input(\"choose an option: \").strip()\r\n try:\r\n if user_choice == \"1\":\r\n self.add_a_sentence()\r\n elif user_choice == \"2\":\r\n self.start_game()\r\n elif user_choice == \"3\":\r\n return\r\n else:\r\n print(\"unavailable option\")\r\n except ValidError as ve:\r\n print(str(ve))\r\n except RepoError as re:\r\n print(str(re))\r\n\r\n def add_a_sentence(self):\r\n sentence = input(\"Add a sentence: \").strip()\r\n split_sentence = sentence.split(\" \")\r\n new_sentence = \"\"\r\n for word in split_sentence:\r\n word = word.strip()\r\n new_sentence = new_sentence + word + \" \"\r\n self.service.add_a_sentence(new_sentence)\r\n\r\n def start_game(self):\r\n game = Game(self.service)\r\n game.start_game()\r\n","repo_name":"biancaszekely32/Fundamental-of-Programming","sub_path":"hangman/hangman/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26351107120","text":"import numpy as np\n\nfrom rl.core.learning.action_target_calculator import \\\n SarsaActionTargetCalculator, \\\n ExpectedSarsaActionTargetCalculator, \\\n QLearningActionTargetCalculator\n\nfrom rl.core.state import IntExtState, StateList\n\n#\n# class ExperienceBasedTargetArrayCalculator(object):\n# \"\"\"\n# Class for calculating the training target arrays\n#\n# Each element of a target array should correspend to the target for that element's action.\n# \"\"\"\n#\n# def __init__(self, rl_system, action_target_calculator):\n# self.rl_system = rl_system\n# self.action_target_calculator = action_target_calculator\n#\n# def get_target_array(self, experience):\n# raise NotImplemented()\n#\n# def get_target(self, state, action, reward, next_state):\n# \"\"\"\n# Return the targets for the state\n# :param state:\n# :param action:\n# :param reward:\n# :return: np.ndarray(num_actions)\n# \"\"\"\n# # reward is a function of (state, action, next state)\n# #next_state = self.rl_system.model.apply_action(state, action)\n#\n# if next_state.is_terminal:\n# target = reward\n# else:\n# next_state_action_values = self.rl_system.action_value_function(next_state)\n# target = self.action_target_calculator.calculate(reward, next_state_action_values)\n#\n# return target\n#\n#\n# class ScalarTargetArrayCalculator(ExperienceBasedTargetArrayCalculator):\n#\n# def get_target_array(self, experience):\n# \"\"\"\n# Return a 1d array of targets for each action in experience\n# Args:\n# experience:\n#\n# Returns:\n# targets: 1-d array\n#\n# \"\"\"\n#\n# targets = np.zeros(experience.get_training_length())\n# actions = experience.get_training_actions()\n# rewards = experience.get_training_rewards()\n# states = experience.get_training_states()\n# for i, state in enumerate(states):\n# action = actions[i]\n# reward = rewards[i]\n# target_value = self.get_target(state, action, reward)\n# targets[i] = target_value\n#\n# return targets\n#\n#\n# class SemiVectorizedTargetArrayCalculator(ExperienceBasedTargetArrayCalculator):\n# \"\"\"\n# Only calculates the target for the sampled action\n# \"\"\"\n#\n# def get_target_array(self, experience):\n# \"\"\"\n# Get the training targets as an array\n# Args:\n# experience:\n#\n# Returns:\n# np.ndarray: (len(experience), num_actions)\n#\n# \"\"\"\n# targets = np.zeros((experience.get_training_length(), self.rl_system.num_actions))\n# actions = experience.get_training_actions()\n# rewards = experience.get_training_rewards()\n# states = 
experience.get_training_states()\n# for i, state in enumerate(states):\n# targets[i, :] = self.get_state_targets(state, actions[i], rewards[i])\n#\n# return targets\n#\n# def get_state_targets(self, state, action, reward):\n# \"\"\"\n# Return the targets for the state\n# :param state:\n# :param action:\n# :param reward:\n# :return: np.ndarray(num_actions)\n# \"\"\"\n# targets = self.rl_system.action_value_function(state).ravel()\n# targets[action] = self.get_target(state, action, reward)\n# return targets\n\n\nclass ModelBasedTargetArrayCalculator(object):\n \"\"\"\n Class for calculating the training target arrays\n\n Each element of a target array should correspond to the target for that element's action.\n \"\"\"\n\n def __init__(self, rl_system, action_target_calculator):\n self.rl_system = rl_system\n self.action_target_calculator = action_target_calculator\n\n def get_target_matrix(self, states):\n \"\"\"\n Get the training targets as an array\n Args:\n states:\n\n Returns:\n np.ndarray: (len(experience), num_actions)\n\n \"\"\"\n targets_list = [self.get_state_targets(state) for state in states]\n targets_array = np.stack(targets_list)\n\n return targets_array\n\n def get_state_targets(self, state):\n \"\"\"\n Return the targets for the state\n :param state:\n :param action:\n :param reward:\n :return: np.ndarray(num_actions)\n \"\"\"\n\n num_actions = self.rl_system.num_actions\n targets = np.empty(num_actions)\n\n for action in range(num_actions):\n targets[action] = self.get_state_action_target(state, action)\n\n return targets\n\n # TODO: Can we have a state action target calculator class?\n def get_state_action_target(self, state, action):\n \"\"\"\n Return the target for the state/action pair\n :param state:\n :param action:\n :param reward:\n :return: np.ndarray(num_actions)\n \"\"\"\n next_state = self.rl_system.model.apply_action(state, action)\n reward = self.rl_system.reward_function(state, action, next_state)\n\n if self.rl_system.model.is_terminal(next_state):\n target = reward\n else:\n next_state_action_values = self.rl_system.action_value_function(next_state)\n target = self.action_target_calculator.calculate(reward, next_state_action_values)\n\n return target\n\n\nclass ModelBasedStateMachineTargetArrayCalculator(ModelBasedTargetArrayCalculator):\n\n def get_target_matrix(self, external_states):\n\n targets = []\n for internal_state in range(self.rl_system.num_internal_states):\n int_ext_states = [IntExtState(internal_state, external_state) for external_state in external_states]\n i_targets = [self.get_state_targets(x) for x in int_ext_states]\n i_targets = np.r_[i_targets]\n targets.append(i_targets)\n\n return targets\n\n def get_state_targets(self, state):\n\n internal_state = state.internal_state\n num_actions = self.rl_system.num_actions[internal_state]\n targets = np.empty(num_actions)\n\n for action in range(num_actions):\n targets[action] = self.get_state_action_target(state, action)\n\n return targets\n\n\ndef build_target_array_calculator(\n rl_system,\n discount_factor=1.0,\n learning_algo='qlearning',\n calculator_type='modelbased'\n ):\n\n if learning_algo=='qlearning':\n action_target_calculator = QLearningActionTargetCalculator(rl_system, discount_factor=discount_factor)\n elif learning_algo=='sarsa':\n action_target_calculator = SarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n elif learning_algo=='expectedsarsa':\n action_target_calculator = ExpectedSarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n\n 
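# NOTE (added comment): an unrecognized learning_algo leaves action_target_calculator\n    # unbound and the branches below would raise NameError; the builder assumes one of\n    # the three algorithms above.\n    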
if calculator_type=='modelbased':\n return ModelBasedTargetArrayCalculator(rl_system, action_target_calculator)\n elif calculator_type=='modelbasedstatemachine':\n return ModelBasedStateMachineTargetArrayCalculator(rl_system, action_target_calculator)\n\n\n\n#\n# def build_sarsa_target_array_calculator(rl_system, discount_factor=1.0):\n# action_target_calculator = SarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# return ScalarTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# def build_q_learning_target_array_calculator(rl_system, discount_factor=1.0):\n# action_target_calculator = QLearningActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# return ScalarTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# def build_expected_sarsa_target_array_calculator(rl_system, discount_factor=1.0):\n# action_target_calculator = ExpectedSarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# return ScalarTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# #def build_vectorized_sarsa_target_array_calculator(rl_system, discount_factor=1.0):\n# # action_target_calculator = SarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# # return VectorizedTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# #def build_vectorized_q_learning_target_array_calculator(rl_system, discount_factor=1.0):\n# # action_target_calculator = QLearningActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# # return VectorizedTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# def build_vectorized_expected_sarsa_target_array_calculator(rl_system, discount_factor=1.0):\n# action_target_calculator = ExpectedSarsaActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# return SemiVectorizedTargetArrayCalculator(rl_system, action_target_calculator)\n#\n#\n# #def build_vectorized_state_machine_q_learning_target_array_calculator(rl_system, discount_factor=1.0):\n# # action_target_calculator = QLearningActionTargetCalculator(rl_system, discount_factor=discount_factor)\n# # return VectorizedStateMachineTargetArrayCalculator(rl_system, action_target_calculator)\n","repo_name":"jsphon/reinforcement_learning","sub_path":"rl/core/learning/target_array_calculator.py","file_name":"target_array_calculator.py","file_ext":"py","file_size_in_byte":9114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72988444372","text":"from main.models.direction import MAX_STR_LENGTH\nfrom main.tests.factories import DirectionFactory, RecipeFactory, UserFactory\n\n\nclass TestDunderStrMethod:\n def test_returns_correctly_formatted_string(self) -> None:\n user = UserFactory.build()\n recipe = RecipeFactory.build(user=user)\n direction = DirectionFactory.build(recipe=recipe)\n assert str(direction) == direction.description\n\n def test_returns_truncated_string_when_too_long(self) -> None:\n user = UserFactory.build()\n recipe = RecipeFactory.build(user=user)\n direction = DirectionFactory.build(\n recipe=recipe, description=\"x\" * (MAX_STR_LENGTH + 20)\n )\n ellipsis_length = 3\n assert len(str(direction)) == MAX_STR_LENGTH + ellipsis_length\n","repo_name":"ptrhvns/meals","sub_path":"api/main/tests/models/direction_test.py","file_name":"direction_test.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
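The RL record above delegates the actual target arithmetic to action-target calculator classes (Sarsa, expected Sarsa, Q-learning) imported from rl.core.learning, whose calculate bodies are not included in this record. A minimal, self-contained sketch of what the Q-learning variant of that step typically computes, with illustrative names that are not from the repo:

```python
import numpy as np

def q_learning_target(reward, next_action_values, discount_factor=1.0):
    # Bootstrapped one-step target: reward plus discounted best next-state value.
    return reward + discount_factor * float(np.max(next_action_values))

# reward 1.0, next-state action values [0.2, 0.5], gamma 0.9 -> 1.0 + 0.9*0.5 = 1.45
assert abs(q_learning_target(1.0, np.array([0.2, 0.5]), 0.9) - 1.45) < 1e-9
```

A terminal next state skips the bootstrap term entirely, which is exactly the is_terminal branch taken in get_state_action_target above.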
+{"seq_id":"33098582065","text":"from services.firebase_service import init_sdk_with_service_account, add_scope\nfrom typer import Typer, echo\n\ntyper_app = Typer()\n\n\n@typer_app.command()\ndef make_admin(uid: str) -> None:\n init_sdk_with_service_account()\n result = add_scope(uid, \"admin\")\n echo(f\"Admin scope added for {uid}: {result}\")\n\n\n@typer_app.command()\ndef make_editor(uid: str) -> None:\n init_sdk_with_service_account()\n result = add_scope(uid, \"editor\")\n echo(f\"Editor scope added for {uid}: {result}\")\n\n\n@typer_app.command()\ndef new_scope(uid: str, role: str) -> None:\n init_sdk_with_service_account()\n result = add_scope(uid, role)\n echo(f\"{role} scope added for {uid}: {result}\")\n\n\nif __name__ == \"__main__\":\n typer_app()\n","repo_name":"sjosegarcia/gojira_server","sub_path":"services/web/commands/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41457342951","text":"import os\nimport calendar \nimport time\nimport pdb\nimport sys\nimport glob\n\nfrom . import GIS_tools as utils\nfrom . import GIS_tools\n\n# date format: YYYYMMDD (string)\n\ndef getgefs(dates,download=1,split=1,lowres=0,custom_ens=0,control=1,\n coord='latlon'):\n \"\"\"This script downloads all variables for GEFS R2 reforecasts.\n All runs are initialised at 0000 UTC.\n\n Inputs (all optional unless stated):\n dates : YYYYMMDD, list of strings (mandatory)\n download : whether to download the data\n split : whether to split up the data\n lowres : whether to download times after T+190\n custom_ens : a custom list of perturbation ensemble members\n control : whether to download the control member\n coord : latlon/gaussian grid\n \"\"\"\n\n # This selected all 10 perturbation ensemble members. 
Change ens for desired member (or mean/sprd)\n    if not custom_ens:\n        ens = ['p' + '%02u' %p for p in range(1,11)]\n    else:\n        ens = custom_ens\n    \n    if control:\n        ens.append('c00')\n    \n    # Root directory of FTP site\n    FTP = 'ftp://ftp.cdc.noaa.gov/Projects/Reforecast2/'\n    \n    # -nc does not download a renamed multiple copy of file\n    # --output-document=CATNAME concatenates all files together for the big grib file\n    # -nd makes sure hierarchy isn't downloaded too\n\n    if download: \n        for d in dates:\n            for e in ens:\n                url = os.path.join(FTP, d[0:4], d[0:6], d+'00', e, coord)\n                fname = '/*' + e + '.grib2'\n                CATNAME = d + '_' + e + '.grib2'\n                cmnd = \"wget -nc -nd --output-document=\" + CATNAME + ' ' + url + fname\n                os.system(cmnd)\n                print(d, e, \" Downloaded.\")\n    \n    # This section will split the data into forecast times for WRF to read\n    # Using WGRIB2\n    # fin : grib2 input file\n    # fout : smaller grib2 output file with just one forecast time\n    # timestr : search pattern to find the forecast time\n\n    if split: \n        for d in dates:\n            # Convert this date to python time for later conversion\n            pytime_anl = calendar.timegm((int(d[:4]),int(d[4:6]),int(d[6:8]),0,0,0))\n            for e in ens:\n                fin = ''.join((d,'_',e,'.grib2'))\n                fprefix = '_'.join((d,e,'f'))\n                for t in range(0,198,6):\n                    ts = \"%03d\" %t # Gets files into chronological order with padded zeroes\n                    if t==0:\n                        timestr = '\":anl:\"' \n                    else:\n                        timestr = ''.join(('\":(',str(t),' hour fcst):\"'))\n                    fout = fprefix + ts + '.grib2'\n                    str1 = ' '.join(('wgrib2',fin,'-match',timestr,'-grib',fout)) \n                    os.system(str1)\n\ndef getgfs(dates,hours):\n    \"\"\" Downloads GFS analysis data.\n\n    Inputs:\n    dates   :   List of strings, YYYYMMDD\n    hours   :   List of strings, HH \n    \"\"\"\n\n    # If date is before 2007, download grib1.\n\n    for d in dates:\n        yr_int = int(d[:4])\n        for h in hours:\n            if yr_int > 2006:\n                os.system('wget \"http://nomads.ncdc.noaa.gov/data/gfsanl/'+d[:6]+'/'+ d+'/gfsanl_4_'+d+'_'+h+'00_000.grb2\"')\n            else:\n                os.system('wget \"http://nomads.ncdc.noaa.gov/data/gfsanl/'+d[:6]+'/'+ d+'/gfsanl_3_'+d+'_'+h+'00_000.grb\"')\n\ndef getnam(dates,hours,datatype,**kwargs):\n    \"\"\" Downloads NAM analysis and forecast data.\n\n    Inputs:\n    dates   :   List of strings, YYYYMMDD\n    hours   :   List of strings, HH \n    datatype    :   analysis or forecast.\n\n    Optional arguments for forecasts via kwargs:\n    tmax    :   maximum forecast time to download (inclusive)\n    tint    :   interval (hr) between fetched forecasts\n\n    \"\"\"\n\n    # If date is before ####, download grib1, use this:\n    age = 'old'\n\n    def get_anl(dates,hours,*args):\n        for d in dates:\n            for h in hours:\n                #if age=='new':\n                #    command = ('wget \"http://nomads.ncdc.noaa.gov/data/namanl/'+\n                #                d[:6]+'/'+ d+'/namanl_4_'+d+'_'+h+'00_000.grb2\"')\n                #elif age=='old':\n                command = ('wget \"http://nomads.ncdc.noaa.gov/data/namanl/'+\n                            d[:6]+'/'+ d+'/namanl_218_'+d+'_'+h+'00_000.grb\"')\n                os.system(command)\n    \n    # Where are these forecast archives? 
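\n    # Example call (added, illustrative), using the tmax/tint kwargs from the\n    # docstring: getnam(['20110427'], ['00','12'], 'forecast', tmax=36, tint=6)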
\n    def get_218fcst(dates,hours,Tmax,Tint):\n        for d in dates:\n            for h in hours:\n                fhs = list(range(0,Tmax+Tint,Tint))\n                for fh in fhs:\n                    fpad = \"%03d\" %fh\n                    if age == 'old':\n                        command = ('wget \"http://nomads.ncdc.noaa.gov/data/nam/'+\n                                    d[:6]+'/'+ d+'/nam_218_'+d+'_'+h+'00_'+fpad+'.grb\"')\n                    elif age == 'new': # doesn't seem to work\n                        command = ('wget \"http://nomads.ncdc.noaa.gov/data/nam/'+\n                                    d[:6]+'/'+ d+'/nam_4_'+d+'_'+h+'00_'+fpad+'.grb2\"')\n                    \n                    os.system(command)\n\n    # tmax/tint come from kwargs (see docstring); get_anl ignores them via *args\n    Tmax = kwargs.get('tmax', 0)\n    Tint = kwargs.get('tint', 6)\n    CMND = {'forecast':get_218fcst, 'analysis':get_anl}\n    CMND[datatype](dates,hours,Tmax,Tint)\n\ndef getruc(utc,ncpath='./',convert2nc=False,duplicate=False):\n\n    URL = RUC_URL(utc)\n    fname = RUC_fname(utc)\n    URLpath = os.path.join(URL,fname)\n    fpath = os.path.join(ncpath,fname)\n    if not duplicate:\n        fexist = glob.glob(fpath)\n    else:\n        fexist = []\n\n    # import pdb; pdb.set_trace()\n    if not len(fexist):\n        command = 'wget {0} -P {1}'.format(URLpath,ncpath)\n        os.system(command)\n        if convert2nc:\n            command2 = 'ncl_convert2nc {0} -o {1}'.format(fpath,ncpath)\n            os.system(command2)\n    return\n\ndef RUC_fname(utc,filetype='grib'):\n    \"\"\"\n    Returns RUC filename for date.\n    \"\"\"\n    version = RUC_version(utc)\n\n    t = GIS_tools.ensure_datenum(utc)\n    yr = time.gmtime(t).tm_year\n    mth = time.gmtime(t).tm_mon\n    day = time.gmtime(t).tm_mday\n    hr = time.gmtime(t).tm_hour\n\n    if version == 3:\n        prefix = 'rap_130'\n        suffix = 'grb2'\n    elif version == 2:\n        prefix = 'ruc2anl_130'\n        suffix = 'grb2'\n    elif version == 1:\n        prefix = 'ruc2anl_252'\n        suffix = 'grb'\n    else:\n        prefix = 'ruc2_252'\n        suffix = 'grb'\n\n    if filetype=='netcdf':\n        suffix = 'nc'\n    fname = '{0}_{1:04d}{2:02d}{3:02d}_{4:02d}00_000.{5}'.format(prefix,yr,mth,day,hr,suffix)\n    return fname\n\ndef RUC_URL(utc):\n    \"\"\"\n    Returns URL to download RUC file from nomads.\n    \"\"\"\n    t = GIS_tools.ensure_datenum(utc)\n    yr = time.gmtime(t).tm_year\n    mth = time.gmtime(t).tm_mon\n    day = time.gmtime(t).tm_mday\n\n    URL_base = \"http://nomads.ncdc.noaa.gov/data/rucanl\"\n    URL = '{0}/{1:04d}{2:02d}/{1:02d}{2:02d}{3:02d}/'.format(URL_base,yr,mth,day)\n\n    return URL\n\ndef RUC_version(utc,fname=False,URL=False):\n    \"\"\"Returns the version/fname of RUC file\n    \"\"\"\n    t = GIS_tools.ensure_datenum(utc)\n    date0 = utils.ensure_datenum((2004,1,1,0,0,0))\n    date1 = utils.ensure_datenum((2007,1,1,0,0,0))\n    date2 = utils.ensure_datenum((2008,1,1,0,0,0))\n    date3 = utils.ensure_datenum((2009,1,1,0,0,0))\n    date4 = utils.ensure_datenum((2012,5,9,0,0,0))\n\n    if t >= date4:\n        version = 3\n    elif t >= date3:\n        version = 2\n    elif t >= date2:\n        version = 0\n    elif t >= date1:\n        version = 1\n    elif t >= date0:\n        version = 0\n    else:\n        print(\"No RUC data for this date exists.\")\n        raise Exception\n\n    # print(\"This RUC file is Version {0}.\".format(version))\n    return version\n\n","repo_name":"johnrobertlawson/WEM","sub_path":"utils/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"67"} +{"seq_id":"14735627936","text":"import threading\nimport time\n\ndef job1(num):\n    print('job1 thread%s %s\\n' %(num,time.ctime(time.time()))\n    )\n    time.sleep(1)\ndef job2(num):\n    print('job2 thread%s %s\\n' %(num,time.ctime(time.time()))\n    )\n    time.sleep(1)\ndef job3(num):\n    print('job3 thread%s %s\\n' %(num,time.ctime(time.time()))\n    )\n    time.sleep(1)\n\nthreads = 
[]\nthreads.append(threading.Thread(target=job1,args=(1,)))\nthreads.append(threading.Thread(target=job2,args=(2,)))\nthreads.append(threading.Thread(target=job3,args=(3,)))\nfor i in range(len(threads)):\n    threads[i].start()\n\nstr1 = \"main thread\"\nfor i in range(1,5):\n    str2 = str1 + str(i) + '\\n'\n    print(str2)\n    time.sleep(1)\n\n\nfor i in range(3):\n    threads[i].join()\n\nprint('finish')","repo_name":"Hakuonn/python3","sub_path":"0914_thread/exercise/ch6_3ex.py","file_name":"ch6_3ex.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3302339843","text":"import pandas as pd\nimport numpy as np\nnp.set_printoptions(precision=3, suppress=True)\n# take in clusters, distance matrix and determine score\n\ndef calculate(clust, distm):\n    NC = 0\n\n    # iterate through k times\n    # c is a cluster\n    for i, c in enumerate(clust):\n        # calculate win\n        win = 0\n        for cci, cc in enumerate(c):\n            # cc is index of value in cluster\n            # print('cc {} c {}'.format(type(cc), type(c)))\n            for cc2 in range(cci, len(c)):\n                win += distm[cc][c[cc2]]\n        win = win/2\n\n        # make array not in c\n        others = [dd for dd in range(len(distm)) if dd not in c]\n        wout = 0\n        for cc in c:\n            for o in others:\n                wout += distm[cc][o]\n        wout = wout/2\n\n        # add to sum for this iteration of k\n        NC += 1/(win/wout + 1)\n        # print('{}:: win: {} wout: {}'.format(i, win, wout))\n\n    print('NC = ', NC)\n    return NC\n\n\ndef read_in_files(cluster_csv, distm_csv):\n    clus = pd.read_csv(cluster_csv, header=None)\n    distm = pd.read_csv(distm_csv, header=None)\n    return clus, distm\n\n\ndef simple_test():\n    data = pd.read_csv('simple01.csv', sep=' ', header=None)\n    data = data.values\n\n    # print([str(m) + '\\n' for m in distm])\n    # calc dist matrix\n    distm = np.zeros((len(data), len(data)), dtype=float)  # zeros, not uninitialized memory\n    for i in range(len(data)-1):\n        for j in range(i, len(data)):\n            distm[i][j] = np.linalg.norm(data[i]-data[j])\n            # print('dist {} {} = {}'.format(i, j, distm[i][j]))\n            distm[j][i] = distm[i][j]\n\n    # print(distm)\n    # print(len(distm))\n    c = [[0,1,2,3,4], [5,6,7,8,9]]\n    print(c)\n    calculate(c, distm)\n    c = [[5, 6, 8], [0, 1, 2, 3, 4, 7, 9]]\n    print(c)\n    calculate(c, distm)\n\n\ndef find_column_variance():\n    data = pd.read_csv('full-monthly-avgs.csv').iloc[0:, 3:8]\n    print(data.columns.values)\n    data = data.values\n    # data = pd.read_csv('monthly_avg_zscore.csv').iloc[0:, 3:8].values\n    print('len data: ', len(data))\n    t = np.transpose(data)\n    print('transposed len: ', len(t))\n    for r in t:\n        print('std: ', np.std(r))\n\n\nif __name__ == '__main__':\n    # simple_test()\n    find_column_variance()\n","repo_name":"EarthSquirrel/csci-550-final-project","sub_path":"normalized_cut.py","file_name":"normalized_cut.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20103098061","text":"from google.cloud import storage\nfrom mangacover.model import load_learner_and_predict\n\nLEARNER_BUCKET = \"manga-classifier-model\"\nLEARNER_BLOB = \"export_multicat.pkl\"\nLEARNER_PATH = \"/tmp/model.pkl\"\n\n\ndef _download_learner():\n    client = storage.Client()\n    bucket = client.get_bucket(LEARNER_BUCKET)\n    blob = bucket.blob(LEARNER_BLOB)\n    blob.download_to_filename(LEARNER_PATH)\n\n\ndef predict(request):\n    if request.method == \"OPTIONS\":\n        headers = {\n            \"Access-Control-Allow-Origin\": \"*\",\n            \"Access-Control-Allow-Methods\": \"POST\",\n            
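# (added comment) The remaining headers allow JSON bodies and cache this CORS preflight reply for an hour.\n            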
\"Access-Control-Allow-Headers\": \"Content-Type\",\n \"Access-Control-Max-Age\": \"3600\",\n }\n\n return (\"\", 204, headers)\n\n headers = {\"Access-Control-Allow-Origin\": \"*\"}\n file = request.files[\"file\"]\n\n _download_learner()\n return (load_learner_and_predict(LEARNER_PATH, file, thresh=0.75), 200, headers)\n","repo_name":"taksqth/MangaCover","sub_path":"functions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25902544236","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport math\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.interpolate import interp2d\n\ndef CatmullRom(p0, p1, p2, p3, t):\n v0 = (p2 - p0) * 0.5\n v1 = (p3 - p1) * 0.5\n t2 = t * t\n t3 = t2 * t\n return ((p1 - p2)*2.0 + v0 + v1) * t3 + ((p2 - p1)*3.0 - 2.0 * v0 - v1) * t2 + v0 * t + p1\n\ndef CatmullRomSpline(points, interpolate_num):\n dst = []\n for i in range(1, len(points) - 2):\n div_kernel = 1.0 / interpolate_num\n for k in range(interpolate_num):\n t = k * div_kernel\n xcr = CatmullRom(points[i-1][0],points[i][0],points[i+1][0],points[i+2][0],t)\n ycr = CatmullRom(points[i-1][1],points[i][1],points[i+1][1],points[i+2][1],t)\n zcr = CatmullRom(points[i-1][2],points[i][2],points[i+1][2],points[i+2][2],t)\n \n dst.append([xcr, ycr, zcr])\n\n return dst\n\nif __name__=='__main__':\n pass\n","repo_name":"umeykato/omg_instance_segmentation","sub_path":"create_dataset/spline.py","file_name":"spline.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35863678794","text":"import cv2\nimport face_recognition\nimport numpy as np\n\nvideo = cv2.VideoCapture(\"./video.mp4\")\n# 获取opencv的分类器——人眼识别和人脸识别\nface_Cascade = cv2.CascadeClassifier(\"./haarcascade_frontalface_alt.xml\")\n\nwidth = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\nfps = video.get(cv2.CAP_PROP_FPS)\nprint(fps)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nfourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\noutVideo = cv2.VideoWriter('./new_video.mp4', fourcc, fps, (width, height))\n\nsuccess, frame = video.read()\nmatch_faces = {}\n\nwhile success:\n # 将每帧图片灰度化\n size = frame.shape[:2]\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 直方图均衡\n image = cv2.equalizeHist(image)\n im_h, im_w = size\n minSize_1 = (im_w // 10, im_h // 10)\n # 获得人脸识别结果\n face_rect_results = face_Cascade.detectMultiScale(image, 1.05, 2, cv2.CASCADE_SCALE_IMAGE, minSize_1)\n if len(face_rect_results) > 0:\n # 绘制当前帧结果\n for x, y, w, h in face_rect_results:\n rgb_frame = np.ascontiguousarray(frame[y:y + h, x:x + w, ::-1])\n cv2.imwrite(\"./data/\" + str(x) + \",\" + str(y) + \".jpg\", rgb_frame)\n\n face_locations = face_recognition.face_locations(rgb_frame)\n if len(face_locations) > 0:\n # 这里其实 face_encodings 的 len 为 1\n face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n if len(face_encodings) != 1:\n print(\"face encodings length=\" + str(len(face_encodings)))\n if len(face_encodings) > 0:\n if len(match_faces) == 0:\n user_idx = 1\n for face_encoding in face_encodings:\n match_faces[user_idx] = face_encoding\n user_idx = user_idx + 1\n else:\n for face_encoding in face_encodings:\n is_find = False\n for key, value in match_faces.items():\n matches = 
face_recognition.compare_faces(\n                                [value],\n                                face_encoding,  # The single unknown face encoding\n                                tolerance=0.4\n                            )\n                            if True in matches:\n                                cv2.putText(frame, 'User' + str(key), (x + 10, y + h), font, 5, (255, 255, 255), thickness=4)\n                                is_find = True\n                                break\n                        if not is_find:\n                            # Add to the dict as a new person\n                            user_idx = len(match_faces) + 1\n                            match_faces[user_idx] = face_encoding\n\n            # Draw the face bounding box\n            cv2.rectangle(frame, (x, y), (x + w, y + h), [255, 255, 0], 2)\n\n    # Write out each frame's result\n    outVideo.write(frame)\n    last_frame = frame\n    success, frame = video.read()\n\nvideo.release()\noutVideo.release()\n","repo_name":"Martin7-1/NJU-doc","sub_path":"计算机视觉/hw/hw4/user_mark.py","file_name":"user_mark.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"4476156380","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Extract Structured Data From Text: Expert Mode (Using Function Calling)\n# \n# We are going to explore [OpenAI's Function Calling](https://openai.com/blog/function-calling-and-other-api-updates) for extracting structured data from unstructured sources.\n# \n# **Why is this important?**\n# LLMs are great at text output, but they need extra help outputting information in a structure that we want. A common request from developers is to get JSON data back from our LLMs.\n# \n# Spoiler: Jump down to the bottom to see a bona fide business idea that you can start and manage today.\n\n# In[1]:\n\n\n# LangChain Models\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.llms import OpenAI\nfrom langchain.schema import HumanMessage, SystemMessage, AIMessage\n\n# Standard Helpers\nimport pandas as pd\nimport requests\nimport time\nimport json\nfrom datetime import datetime\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Text Helpers\nfrom bs4 import BeautifulSoup\nfrom markdownify import markdownify as md\n\n# For token counting\nfrom langchain.callbacks import get_openai_callback\n\ndef printOutput(output):\n    print(json.dumps(output,sort_keys=True, indent=3))\n\n\n# In[2]:\n\n\n# It's better to do this as an environment variable but putting it in plain text for clarity\nopenai_api_key = os.getenv(\"OPENAI_API_KEY\", 'YourAPIKey')\n\n\n# Let's start off by creating our LLM. 
We're using gpt4 to take advantage of its increased ability to follow instructions (the cell below starts with the cheaper gpt-3.5 model; we switch to gpt-4 further down)\n\n# In[3]:\n\n\nchat = ChatOpenAI(\n    model_name=\"gpt-3.5-turbo-0613\", # Cheaper but less reliable\n    temperature=0,\n    max_tokens=2000,\n    openai_api_key=openai_api_key\n)\n\n\n# ### Function Calling Hello World Example\n# \n# Create an object that holds information about the fields you'd like to extract\n\n# In[4]:\n\n\nfunctions = [\n    {\n    \"name\": \"get_food_mentioned\",\n    \"description\": \"Get the food that is mentioned in the review from the customer\",\n    \"parameters\": {\n        \"type\": \"object\",\n        \"properties\": {\n            \"food\": {\n                \"type\": \"string\",\n                \"description\": \"The type of food mentioned, ex: Ice cream\"\n            },\n            \"good_or_bad\": {\n                \"type\": \"string\",\n                \"description\": \"whether or not the user thought the food was good or bad\",\n                \"enum\": [\"good\", \"bad\"]\n            }\n        },\n        \"required\": [\"food\"]\n    }\n    }\n]\n\n\n# In[5]:\n\n\noutput = chat(messages=\n     [\n         SystemMessage(content=\"You are a helpful AI bot\"),\n         HumanMessage(content=\"I thought the burgers were awesome\")\n     ],\n     functions=functions\n)\n\n\n# In[6]:\n\n\nprint(json.dumps(output.additional_kwargs, indent=4))\n\n\n# ### Pydantic Model\n# \n# Now let's do the same thing but with a pydantic model rather than json schema\n\n# In[7]:\n\n\nfrom langchain.pydantic_v1 import BaseModel, Field\nimport enum\n\nclass GoodOrBad(str, enum.Enum):\n    GOOD = \"Good\"\n    BAD = \"Bad\"\n\nclass Food(BaseModel):\n    \"\"\"Identifying information about a person's food review.\"\"\"\n\n    name: str = Field(..., description=\"Name of the food mentioned\")\n    good_or_bad: GoodOrBad = Field(..., description=\"Whether or not the user thought the food was good or bad\")\n\n\n# In[8]:\n\n\noutput = chat(messages=\n     [\n         SystemMessage(content=\"You are a helpful AI bot\"),\n         HumanMessage(content=\"I thought the burgers were awesome\")\n     ],\n     functions=[{\n         \"name\": \"FoodExtractor\",\n         \"description\": (\n             \"Identifying information about a person's food review.\"\n         ),\n         \"parameters\": Food.schema(),\n     }\n     ]\n)\n\n\n# In[9]:\n\n\noutput\n\n\n# But LangChain has an abstraction for us that we can use\n\n# In[10]:\n\n\nfrom langchain.chains import create_extraction_chain_pydantic\n\n# Extraction\nchain = create_extraction_chain_pydantic(pydantic_schema=Food, llm=chat)\n\n# Run \ntext = \"\"\"I like burgers they are great\"\"\"\nchain.run(text)\n\n\n# ### Multiple Results\n# \n# Let's try to extract multiple objects from the same text. 
I'll create a person object now\n\n# In[11]:\n\n\nfrom typing import Sequence\n\nchat = ChatOpenAI(\n    model_name=\"gpt-4-0613\", # More capable, though pricier than gpt-3.5\n    temperature=0,\n    max_tokens=2000,\n    openai_api_key=openai_api_key\n)\n\nclass Person(BaseModel):\n    \"\"\"Someone who gives their review on different foods\"\"\"\n\n    name: str = Field(..., description=\"Name of the person\")\n    foods: Sequence[Food] = Field(..., description=\"A food that a person mentioned\")\n\n\n# In[12]:\n\n\n# Extraction\nchain = create_extraction_chain_pydantic(pydantic_schema=Person, llm=chat)\n\n# Run \ntext = \"\"\"amy likes burgers and fries but doesn't like salads\"\"\"\noutput = chain.run(text)\n\n\n# In[13]:\n\n\noutput[0]\n\n\n# **User Query Extraction**\n# \n# Let's do another fun example where we want to extract/convert a query from a user\n\n# In[14]:\n\n\nclass Query(BaseModel):\n    \"\"\"Extract the change a user would like to make to a financial forecast\"\"\"\n\n    entity: str = Field(..., description=\"Name of the category or account a person would like to change\")\n    amount: int = Field(..., description=\"Amount they would like to change it by\")\n    year: int = Field(..., description=\"The year they would like the change applied to\")\n\n\n# In[15]:\n\n\nchain = create_extraction_chain_pydantic(pydantic_schema=Query, llm=chat)\n\n\n# In[16]:\n\n\nchain.run(\"Can you please add 10 more units to inventory in 2022?\")\n\n\n# In[17]:\n\n\nchain.run(\"Remove 3 million from revenue in 2021\")\n\n\n# ## Opening Attributes - Real World Example\n# \n# [Opening Attributes](https://twitter.com/GregKamradt/status/1643027796850253824) (my sample project for this application)\n# \n# If anyone wants to strategize on this project DM me on twitter\n\n# We are going to be pulling jobs from Greenhouse. No API key is needed.\n\n# In[18]:\n\n\ndef pull_from_greenhouse(board_token):\n    # If doing this in production, make sure you do retries and backoffs\n    \n    # Get your URL ready to accept a parameter\n    url = f'https://boards-api.greenhouse.io/v1/boards/{board_token}/jobs?content=true'\n    \n    try:\n        response = requests.get(url)\n    except:\n        # In case it doesn't work\n        print (\"Whoops, error\")\n        return\n    \n    status_code = response.status_code\n    \n    jobs = response.json()['jobs']\n    \n    print (f\"{board_token}: {status_code}, Found {len(jobs)} jobs\")\n    \n    return jobs\n\n\n# Let's try it out for [Okta](https://www.okta.com/)\n\n# In[19]:\n\n\njobs = pull_from_greenhouse(\"okta\")\n\n\n# Let's look at a sample job with its raw dictionary\n\n# In[20]:\n\n\n# Keep in mind that my job_ids will likely change when you run this depending on the postings of the company\njob_index = 0\n\n\n# In[21]:\n\n\nprint (\"Preview:\\n\")\nprint (json.dumps(jobs[job_index])[:400])\n\n\n# Let's clean this up a bit\n\n# In[22]:\n\n\n# I parsed through an output to create the function below\ndef describeJob(job_description):\n    print(f\"Job ID: {job_description['id']}\")\n    print(f\"Link: {job_description['absolute_url']}\")\n    print(f\"Updated At: {datetime.fromisoformat(job_description['updated_at']).strftime('%B %-d, %Y')}\")\n    print(f\"Title: {job_description['title']}\\n\")\n    print(f\"Content:\\n{job_description['content'][:550]}\")\n\n\n# We'll look at another job. This job_id may or may not work for you depending on if the position is still active.\n\n# In[23]:\n\n\n# Note: I'm using a hard coded job id below. 
You'll need to switch this if this job ever changes\n# and it most definitely will!\njob_id = 5299914\n\njob_description = [item for item in jobs if item['id'] == job_id][0]\n \ndescribeJob(job_description)\n\n\n# I want to convert the html to text, we'll use BeautifulSoup to do this. There are multiple methods you could choose from. Pick what's best for you.\n\n# In[24]:\n\n\nsoup = BeautifulSoup(job_description['content'], 'html.parser')\n\n\n# In[25]:\n\n\ntext = soup.get_text()\n\n# Convert your html to markdown. This reduces tokens and noise\ntext = md(text)\n\nprint (text[:600])\n\n\n# Let's create a Kor object that will look for tools. This is the meat and potatoes of the application\n\n# In[26]:\n\n\nclass Tool(BaseModel):\n \"\"\"The name of a tool or company\"\"\"\n\n name: str = Field(..., description=\"Name of the food mentioned\")\n \nclass Tools(BaseModel):\n \"\"\"A tool, application, or other company that is listed in a job description.\"\"\"\n\n tools: Sequence[Tool] = Field(..., description=\"\"\" A tool or technology listed\n Examples:\n * \"Experience in working with Netsuite, or Looker a plus.\" > NetSuite, Looker\n * \"Experience with Microsoft Excel\" > Microsoft Excel\n \"\"\")\n\n\n# In[27]:\n\n\nchain = create_extraction_chain_pydantic(pydantic_schema=Tools, llm=chat)\n\n\n# In[28]:\n\n\noutput = chain(text)\n\n\n# In[29]:\n\n\noutput['text']\n\n\n# [OpenAI GPT4 Pricing](https://help.openai.com/en/articles/7127956-how-much-does-gpt-4-cost)\n\n# In[30]:\n\n\nwith get_openai_callback() as cb:\n result = chain(text)\n print(f\"Total Tokens: {cb.total_tokens}\")\n print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n print(f\"Completion Tokens: {cb.completion_tokens}\")\n print(f\"Successful Requests: {cb.successful_requests}\")\n print(f\"Total Cost (USD): ${cb.total_cost}\")\n\n\n# Suggested To Do if you want to build this out:\n# \n# * Reduce amount of HTML and low-signal text that gets put into the prompt\n# * Gather list of 1000s of companies\n# * Run through most jobs (You'll likely start to see duplicate information after the first 10-15 jobs per department)\n# * Store results\n# * Snapshot daily as you look for new jobs\n# * Follow [Greg](https://twitter.com/GregKamradt) on Twitter for more tools or if you want to chat about this project\n# * Read the user feedback below for what else to build out with this project (I reached out to everyone who signed up on twitter)\n# \n# \n# ### Business idea: Job Data As A Service\n# \n# Start a data service that collects information about company's jobs. This can be sold to investors looking for an edge.\n# \n# After posting [this tweet](https://twitter.com/GregKamradt/status/1643027796850253824) there were 80 people that signed up for the trial. I emailed all of them and most were job seekers looking for companies that used the tech they specialized in.\n# \n# The more interesting use case were sales teams + investors.\n# \n# #### Interesting User Feedback (Persona: Investor):\n# \n# > Hey Gregory, thanks for reaching out.
\n# I always thought that job posts were a gold mine of information, and often suggest identifying targets based on these (go look at relevant job posts for companies that might want to work with you). Secondly, I also automatically ping BuiltWith from our CRM and send that to OpenAI and have a summarized tech stack created - so I see the benefit of having this as an investor.
\n# For me personally, I like to get as much data as possible about a company. Would love to see job post cadence, type of jobs they post and when, notable keywords/phrases used, tech stack (which you have), and any other information we can glean from the job posts (sometimes they have the title of who you'll report to, etc.).
\n# For sales people, I think finer searches, maybe even in natural language if possible - such as \"search for companies who posted a data science related job for the first time\" - would be powerful.\n# \n# If you do this, let me know! I'd love to hear how it goes.\n","repo_name":"ssbuild/gpt_agent","sub_path":"data_generation/Expert Structured Output (Using Function Calling).py","file_name":"Expert Structured Output (Using Function Calling).py","file_ext":"py","file_size_in_byte":11370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4587207509","text":"# -*- coding: latin-1 -*-\r\n\r\n\"\"\"\r\n@author: Ivano Lauriola\r\n@email: ivano.lauriola@phd.unipd.it, ivanolauriola@gmail.com\r\n\r\nThis file is part of MKLpy: a scikit-compliant framework for Multiple Kernel Learning\r\nThis file is distributed with the GNU General Public License v3 . \r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom ..metrics import trace\r\nfrom ..utils.validation import check_K\r\n\r\n\r\n\r\n\r\ndef kernel_normalization(K):\r\n \"\"\"normalize a squared kernel matrix\r\n\r\n Parameters\r\n ----------\r\n K : (n,n) ndarray,\r\n the squared kernel matrix.\r\n\r\n Returns\r\n -------\r\n Kn : ndarray,\r\n the normalized version of *K*.\r\n\r\n Notes\r\n -----\r\n Given a kernel K, the normalized version is defines as:\r\n \r\n .. math:: \\hat{k}(x,z) = \\frac{k(x,z)}{\\sqrt{k(x,x)\\cdot k(z,z)}}\r\n \"\"\"\r\n\r\n K = check_K(K)\r\n n = K.size()[0]\r\n d = K.diag().view(n,1)\r\n K /= (d @ d.T)**0.5\r\n K[K!=K] = 0\r\n return K\r\n \r\n\r\n\r\n\r\n\r\n\r\ndef tracenorm(K):\r\n \"\"\"normalize the trace of a squared kernel matrix\r\n\r\n Parameters\r\n ----------\r\n K : (n,n) ndarray,\r\n the squared kernel matrix.\r\n\r\n Returns\r\n -------\r\n Kt : ndarray,\r\n the trace-normalized version of *K*.\r\n\r\n Notes\r\n -----\r\n In trace-normalization, the kernel is divided by the average of the diagonal.\r\n \"\"\"\r\n K = check_K(K)\r\n trn = trace(K) / K.size()[0]\r\n return K / trn\r\n\r\n\r\n\r\ndef kernel_centering(K):\r\n \"\"\"move a squared kernel at the center of axis\r\n\r\n Parameters\r\n ----------\r\n K : (n,n) ndarray,\r\n the squared kernel matrix.\r\n \r\n Returns\r\n -------\r\n Kc : ndarray,\r\n the centered version of *K*.\r\n \"\"\"\r\n K = check_K(K)\r\n N = K.size()[0]\r\n I = torch.ones(K.size()).double()\r\n C = torch.ones(N).diag() - (1.0/N * I)\r\n return C @ K @ C\r\n","repo_name":"IvanoLauriola/MKLpy","sub_path":"MKLpy/preprocessing/kernel_preprocessing.py","file_name":"kernel_preprocessing.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"67"} +{"seq_id":"27002937302","text":"import pygame\r\nimport os\r\nimport time\r\nimport random\r\nimport linecache\r\nimport re\r\n# variables\r\n\r\n# screen\r\nheight = 400\r\nwidth = 800\r\nsize = [width, height]\r\nscreen = pygame.display.set_mode(size)\r\n\r\nbgColour = [0, 0, 0]\r\nbgColour2 = [0, 255, 0]\r\n\r\nfps = 60\r\n# ----------------------------------------------------------------------------\r\n\r\n# Headings\r\npygame.font.init()\r\nmyfont = pygame.font.SysFont(\"monospace\", 40)\r\n\r\nlabel = myfont.render(\"Rock, Paper, Scissors!\", True, (255, 255, 255))\r\nlabelRect = label.get_rect()\r\nlabelRect.center = (width // 2, height // 9)\r\n\r\nlabelUser = myfont.render(\"Rock, Paper, Scissors!\", True, (255, 255, 255))\r\nlabelRect = 
label.get_rect()\r\nlabelRect.center = (width // 2, height // 9)\r\n# ----------------------------------------------------------------------------\r\n\r\n# images\r\nimage_pos_height = 270\r\nimage_height = 100\r\nimage_width = 100\r\nROCK_IMAGE = pygame.image.load(os.path.join('Assets', 'rock.png'))\r\nROCK = pygame.transform.scale(ROCK_IMAGE, (image_width, image_height))\r\nPAPER_IMAGE = pygame.image.load(os.path.join('Assets', 'paper.png'))\r\nPAPER = pygame.transform.scale(PAPER_IMAGE, (image_width, image_height))\r\nSCISSOR_IMAGE = pygame.image.load(os.path.join('Assets', 'scissor.png'))\r\nSCISSOR = pygame.transform.scale(SCISSOR_IMAGE, (image_width, image_height))\r\n# ----------------------------------------------------------------------------\r\n\r\n# Button displays\r\nbutton_width = 75\r\nbutton_height = 25\r\nbutton_pos_width = 25\r\ncolour = (128, 128, 128)\r\nmyfontFour = pygame.font.SysFont(\"Monospace\", 16)\r\nsave_heading = myfontFour.render(\"Save\", True, (255, 255, 255))\r\nsave_heading_rect = save_heading.get_rect()\r\nsave_heading_rect = (button_pos_width + 15, 322)\r\nload_heading = myfontFour.render(\"Load\", True, (255, 255, 255))\r\nload_heading_rect = load_heading.get_rect()\r\nload_heading_rect = (button_pos_width + 15, 362)\r\n# -----------------------------------------------------------------------------\r\n\r\n# display result\r\nuser = None\r\nlist = [\"Rock\", \"Paper\", \"Scissor\"]\r\nmyfontTwo = pygame.font.SysFont(\"monospace\", 35)\r\nresultLabel = myfontTwo.render(\"\", True, (255, 255, 255))\r\nresultLabelRect = resultLabel.get_rect()\r\n\r\n\r\ndef result(run, user, resultLabel, resultLabelRect, userScore, computerScore):\r\n\r\n computer = list[random.randint(0, 2)]\r\n if computer == user:\r\n resultVar = \"Draw: pick again\"\r\n draw_result(resultVar, resultLabel, resultLabelRect,\r\n userScore, computerScore)\r\n return(resultLabel, resultLabelRect)\r\n else:\r\n resultVar = checkWinner(user, computer)\r\n draw_result(resultVar, resultLabel, resultLabelRect,\r\n userScore, computerScore)\r\n return(resultVar)\r\n\r\n\r\ndef checkWinner(user, computer):\r\n winner = \"\"\r\n if user == \"Rock\" and computer == \"Scissor\":\r\n winner = \"user\"\r\n return(\"User Wins\")\r\n elif user == \"Scissor\" and computer == \"Paper\":\r\n winner = \"user\"\r\n return(\"User Wins\")\r\n elif user == \"Paper\" and computer == \"Rock\":\r\n winner = \"user\"\r\n return(\"User Wins\")\r\n else:\r\n winner = \"computer\"\r\n return(\"Computer Wins\")\r\n\r\n\r\ndef draw_result(resultVar, resultLabel, resultLabelRect, userScore, computerScore):\r\n resultLabel = myfontTwo.render(resultVar, True, (255, 255, 255))\r\n resultLabelRect = resultLabel.get_rect()\r\n resultLabelRect.center = (width // 2, height // 2)\r\n draw_window(resultLabel, resultLabelRect, userScore, computerScore)\r\n# ----------------------------------------------------------------------------\r\n\r\n\r\n# Scores\r\nmyfontThree = pygame.font.SysFont(\"Monospace\", 20)\r\n\r\nuser_heading = myfontThree.render(\"User\", True, (255, 255, 255))\r\nuser_heading_rect = user_heading.get_rect()\r\nuser_heading_rect = (40, 50)\r\n\r\ncomputer_heading = myfontThree.render(\"Computer\", True, (255, 255, 255))\r\ncomputer_heading_rect = computer_heading.get_rect()\r\ncomputer_heading_rect = (680, 50)\r\n\r\n\r\ndef print_score(userScore, computerScore):\r\n user_score_heading = myfontThree.render(\r\n str(userScore), True, (255, 255, 255))\r\n user_score_heading_rect = user_score_heading.get_rect()\r\n 
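# note: the Rect from get_rect() above is immediately overwritten with a plain\r\n    # (x, y) tuple; screen.blit() accepts either a Rect or an (x, y) pair, so only\r\n    # the hard-coded position is actually used\r\n    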
user_score_heading_rect = (57, 75)\r\n    screen.blit(user_score_heading, user_score_heading_rect)\r\n    computer_score_heading = myfontThree.render(\r\n        str(computerScore), True, (255, 255, 255))\r\n    computer_score_heading_rect = computer_score_heading.get_rect()\r\n    computer_score_heading_rect = (720, 75)\r\n    screen.blit(computer_score_heading, computer_score_heading_rect)\r\n# ----------------------------------------------------------------------------\r\n\r\n# Load/save\r\n\r\n\r\ndef save_score(userScore, computerScore):\r\n    new_file = \"newSave.txt\"\r\n    with open(new_file, 'w') as f:\r\n        # new line\r\n        f.write(str(userScore) + \"\\n\" + str(computerScore))\r\n\r\n\r\ndef load_user_score():\r\n    file = \"newSave.txt\"\r\n    temp = linecache.getline(file, 1)\r\n    # removes the save formatting\r\n    temp = re.sub('[\\W_]+', '', temp)\r\n    print(temp)\r\n    return int(temp)\r\n\r\n\r\ndef load_computer_score():\r\n    file = \"newSave.txt\"\r\n    temp = linecache.getline(file, 2)\r\n    temp = re.sub('[\\W_]+', '', temp)\r\n    print(temp)\r\n    return int(temp)\r\n# ----------------------------------------------------------------------------\r\n\r\n# Window\r\n\r\n\r\ndef draw_window(resultLabel, resultLabelRect, userScore, computerScore):\r\n    screen.fill(bgColour)\r\n    screen.blit(user_heading, user_heading_rect)\r\n    screen.blit(resultLabel, resultLabelRect)\r\n    screen.blit(computer_heading, computer_heading_rect)\r\n    pygame.draw.rect(screen, colour, pygame.Rect(\r\n        button_pos_width, 360, button_width, button_height))\r\n    pygame.draw.rect(screen, colour, pygame.Rect(\r\n        button_pos_width, 320, button_width, button_height))\r\n    screen.blit(save_heading, save_heading_rect)\r\n    screen.blit(load_heading, load_heading_rect)\r\n    print_score(userScore, computerScore)\r\n    screen.blit(label, labelRect)\r\n    screen.blit(ROCK, (150, image_pos_height))\r\n    screen.blit(PAPER, (360, image_pos_height))\r\n    screen.blit(SCISSOR, (600, image_pos_height))\r\n    pygame.display.update()\r\n# ----------------------------------------------------------------------------\r\n\r\n# Collisions\r\n\r\n\r\ndef image_collison_rock(ROCK):\r\n    ROCK = pygame.Rect(image_width, image_width,\r\n                       image_height, image_height)\r\n    ROCK.x = 150\r\n    ROCK.y = image_pos_height\r\n    return(ROCK)\r\n\r\n\r\ndef image_collison_paper(PAPER):\r\n    PAPER = pygame.Rect(image_width, image_width,\r\n                        image_height, image_height)\r\n    PAPER.x = 360\r\n    PAPER.y = image_pos_height\r\n    return(PAPER)\r\n\r\n\r\ndef image_collison_scissor(SCISSOR):\r\n    SCISSOR = pygame.Rect(image_width, image_width,\r\n                          image_height, image_height)\r\n    SCISSOR.x = 600\r\n    SCISSOR.y = image_pos_height\r\n    return(SCISSOR)\r\n\r\n\r\ndef save_collision(save_heading):\r\n    save_heading = pygame.Rect(\r\n        button_pos_width, 360, button_width, button_height)\r\n    save_heading.x = button_pos_width\r\n    save_heading.y = 322\r\n    return(save_heading)\r\n\r\n\r\ndef load_collision(load_heading):\r\n    load_heading = pygame.Rect(\r\n        button_pos_width, 320, button_width, button_height)\r\n    load_heading.x = button_pos_width\r\n    load_heading.y = 362\r\n    return(load_heading)\r\n# ----------------------------------------------------------------------------\r\n\r\n# Main\r\n\r\n\r\ndef main():\r\n    clock = pygame.time.Clock()\r\n    run = True\r\n    userScore = 0\r\n    computerScore = 0\r\n    while run:\r\n        clock.tick(fps)\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                run = False\r\n        draw_window(resultLabel, resultLabelRect, userScore, computerScore)\r\n\r\n            if event.type == 
pygame.MOUSEBUTTONDOWN:\r\n mouse_pos = event.pos\r\n if image_collison_rock(ROCK).collidepoint(mouse_pos):\r\n user = \"Rock\"\r\n print('rock was pressed')\r\n temp = result(run, user, resultLabel, resultLabelRect,\r\n userScore, computerScore)\r\n if temp == \"User Wins\":\r\n userScore += 1\r\n elif temp == \"Computer Wins\":\r\n computerScore += 1\r\n # ensures mouse only clicks once\r\n time.sleep(1)\r\n if image_collison_paper(PAPER).collidepoint(mouse_pos):\r\n user = \"Paper\"\r\n print('paper was pressed')\r\n temp = result(run, user, resultLabel, resultLabelRect,\r\n userScore, computerScore)\r\n if temp == \"User Wins\":\r\n userScore += 1\r\n elif temp == \"Computer Wins\":\r\n computerScore += 1\r\n time.sleep(1)\r\n if image_collison_scissor(SCISSOR).collidepoint(mouse_pos):\r\n user = \"Scissor\"\r\n print('scissor was pressed')\r\n temp = result(run, user, resultLabel, resultLabelRect,\r\n userScore, computerScore)\r\n if temp == \"User Wins\":\r\n userScore += 1\r\n elif temp == \"Computer Wins\":\r\n computerScore += 1\r\n time.sleep(1)\r\n if save_collision(save_heading).collidepoint(mouse_pos):\r\n print(\"save\")\r\n save_score(userScore, computerScore)\r\n time.sleep(1)\r\n if load_collision(load_heading).collidepoint(mouse_pos):\r\n print(\"load\")\r\n userScore = load_user_score()\r\n computerScore = load_computer_score()\r\n time.sleep(1)\r\n pygame.quit()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"GBN-sb/RockPaperScissors","sub_path":"RockPaperScissors/guiBase.py","file_name":"guiBase.py","file_ext":"py","file_size_in_byte":9800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4845450340","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport multiprocessing as mp\r\nfrom google.cloud import datastore\r\nimport yagmail\r\nfrom datetime import datetime\r\nimport config as cf\r\nfrom twilio.rest import Client\r\nfrom dotenv import load_dotenv\r\nimport os\r\n\r\n\r\n# GLOBAL VARIABLES\r\nclass_dict = {}\r\n\r\nurl_prefix = 'https://owaprod-pub.wesleyan.edu/reg/'\r\n\r\nmessages_to_send_dict = {}\r\n\r\n# Loads the twilio account credentials stored in a .env file\r\nload_dotenv()\r\n\r\n\r\n# print(src)\r\ndef ScrapeMainPage():\r\n print('\\n \\n got into scrape main page \\n \\n')\r\n result = requests.get(\r\n 'https://owaprod-pub.wesleyan.edu/reg/!wesmaps_page.html')\r\n src = result.content\r\n soup = BeautifulSoup(src, 'lxml')\r\n other_header = soup.find('b', text='OTHER')\r\n links = other_header.find_all_previous('a', href=re.compile('subj_page'))\r\n # links = links[:6]\r\n print('above multiprocessing')\r\n #num_p = mp.cpu_count()\r\n #print('num processors: ', num_p)\r\n print('got into if name is main in multiprocessing')\r\n print('len links in main page is: ', len(links))\r\n for i, link in enumerate(links):\r\n print('now scraping link number: ', str(i),\r\n 'out of total num links: ', len(links))\r\n ScrapeSubjectPage(url_prefix + link.attrs['href'])\r\n # with mp.Pool(num_p) as pool:\r\n # results = [pool.apply_async(ScrapeSubjectPage, args = (url_prefix + link.attrs['href'],)) for link in links]\r\n # pool.close()\r\n # pool.join()\r\n # if __name__ == '__main__':\r\n # print('got into if name is main in multiprocessing')\r\n # with mp.Pool(num_p) as pool:\r\n # results = [pool.apply_async(ScrapeSubjectPage, args = (url_prefix + link.attrs['href'],)) for link in links]\r\n # pool.close()\r\n # pool.join()\r\n print('below 
multiprocessing')\r\n\r\n\r\ndef ScrapeSubjectPage(link):\r\n print('\\n got into scrape subject page at link: ', link, '\\n')\r\n subject_content = requests.get(link).content\r\n subj_soup = BeautifulSoup(subject_content, 'lxml')\r\n \"\"\"\r\n Another place with fall/spring specific content\r\n \"\"\"\r\n courses_offered_link = subj_soup.find(href=re.compile(\r\n 'offered=Y#' + cf.dateObj.semester)) # CHANGE THIS TO offered=Y#FALL IF NECESSARY\r\n # print(courses_offered_link.text)\r\n try:\r\n ScrapeCoursesOfferedPage(\r\n url_prefix + courses_offered_link.attrs['href'])\r\n except:\r\n pass\r\n\r\n\r\ndef ScrapeCoursesOfferedPage(link):\r\n offered_content = requests.get(link).content\r\n offered_soup = BeautifulSoup(offered_content, 'lxml')\r\n\r\n # fall courses are all before the \"spring\" header, spring classes all after\r\n spring = offered_soup.find('a', attrs={'name': 'spring'})\r\n\r\n if cf.dateObj.semester == 'fall':\r\n links = spring.find_all_previous(href=re.compile('crse'))\r\n elif cf.dateObj.semester == 'spring':\r\n links = spring.find_all_next(href=re.compile('crse'))\r\n #links = offered_soup.find_all(href=re.compile('crse'))\r\n global linksScraped\r\n for link in links:\r\n link_href = link.attrs['href']\r\n # if not WasScrapedAlready(link):\r\n if link_href not in linksScraped:\r\n linksScraped[link_href] = 'Scraped'\r\n print('Link not in dict in scrape courses offered is: ', link_href)\r\n print(' linksScraped dict is: ', linksScraped)\r\n ScrapeIndividualPage(url_prefix + link_href)\r\n\r\n\r\ndef ScrapeIndividualPage(link):\r\n content = requests.get(link).content\r\n soup = BeautifulSoup(content, 'lxml')\r\n course_name = soup.find('span', class_='title').text\r\n depts = soup.find_all('a', text=re.compile('^[A-Z&]{3,4}$'))\r\n print('depts for: ', course_name, ' are: ', depts)\r\n seat_entries = soup.find_all('td', text=re.compile('Seats Available: '))\r\n total_num_seats = 0\r\n for entry in seat_entries:\r\n seats_avail = int(\r\n re.search('(?<=Seats Available: )-?\\d+', entry.text).group(0))\r\n if seats_avail > 0:\r\n total_num_seats += seats_avail\r\n UpdateEntries(course_name, total_num_seats, depts, link)\r\n #print(course_name, total_num_seats)\r\n\r\n\r\ndef UpdateEntries(course, num_seats, depts, link):\r\n masterEntity = RetrieveMasterEntity(client)\r\n # UPDATE THE MASTER ENTITY LIST TO HAVE DEPTS\r\n # RERUN EVERY FEW MONTHS AT MOST\r\n # for dept in depts:\r\n # if dept.text not in masterEntity[cf.dateObj.courseList]:\r\n # masterEntity[cf.dateObj.courseList].append(dept.text)\r\n # client.put(masterEntity)\r\n # MAKE THIS A DICT FOR EASY LOOKUP\r\n if course not in masterEntity[cf.dateObj.courseList]:\r\n masterEntity[cf.dateObj.courseList].append(course)\r\n client.put(masterEntity)\r\n\r\n print('made it into update entries for course: ', course)\r\n query = client.query(kind='course')\r\n print('made it past query')\r\n query.key_filter((client.key('course', course)), '=')\r\n print('made it past query key filter')\r\n results = list(query.fetch())\r\n print('made it past fetching query. Len query is: ', len(results))\r\n if len(results) > 1:\r\n print(\"GOT MORE THAN ONE RESULT IN QUERY\")\r\n elif len(results) == 1:\r\n print('got into len results = 1. 
Found a matching course')\r\n # RUN EVERY FEW MONTHS AT MOST\r\n #\"\"\"UPDATE DEPTS\"\"\"\r\n # for i, dept in enumerate(depts):\r\n # print('dept.text for course: ', course, ' is: ', dept.text)\r\n # results[0]['dept' + str(i)] = dept.text\r\n\r\n if results[0]['seats_avail'] == 0 and num_seats > 0:\r\n AddAggregatedMessages(results[0]['emails'], course, link)\r\n results[0]['seats_avail'] = num_seats\r\n elif results[0]['seats_avail'] != num_seats:\r\n results[0]['seats_avail'] = num_seats\r\n\r\n results[0]['link'] = link\r\n results[0]['date_scraped'] = datetime.utcnow()\r\n client.put(results[0])\r\n # A course not in course list has been found\r\n elif len(results) == 0:\r\n print('got into len results is 0')\r\n new_entity = datastore.Entity(key=client.key('course', course))\r\n new_entity.update({\r\n 'seats_avail': num_seats,\r\n 'emails': []\r\n })\r\n for i, dept in enumerate(depts):\r\n new_entity['dept' + str(i)] = dept.text\r\n new_entity['link'] = link\r\n new_entity['date_scraped'] = datetime.utcnow()\r\n\r\n client.put(new_entity)\r\n print('put new entity in: ', new_entity.key.name)\r\n\r\n\r\ndef AddAggregatedMessages(user_list, course, link):\r\n \"\"\"\r\n Creating a list of strings and then using ''.join(str_list)\r\n is much faster than concatenating many strings together\r\n \"\"\"\r\n global messages_to_send_dict\r\n\r\n for user in user_list:\r\n if re.search(\"\\d{9,10}\", user): # Text\r\n segment = '-' + course + '\\n'\r\n elif re.search(\"@\", user):\r\n segment = '-' + course + '\\n'\r\n try:\r\n if messages_to_send_dict[user] != []:\r\n messages_to_send_dict[user].append(segment)\r\n except:\r\n messages_to_send_dict[user] = [\r\n 'Congrats, a spot has opened up in: \\n' + segment]\r\n\r\n\r\ndef SendMessages():\r\n global messages_to_send_dict\r\n masterEntity = RetrieveMasterEntity(client)\r\n\r\n # oauth2 file is uploaded to gcloud not github\r\n yag = yagmail.SMTP('spotcheckwes@gmail.com',\r\n oauth2_file=\"oauth2_creds.json\")\r\n twilio_client = Client(os.getenv(\"TWILIO_ACCOUNT_SID\"),\r\n os.getenv(\"TWILIO_ACCOUNT_SECRET\"))\r\n\r\n for user in messages_to_send_dict:\r\n if re.search(\"\\d{9,10}\", user): # Text\r\n messages_to_send_dict[user].append(\r\n '\\nAct fast, others were notified too! \\n\\nVisit https://www.spotcheck.space/login to see your subscribed courses/unsubscribe')\r\n contents = ''.join(messages_to_send_dict[user])\r\n message = twilio_client.messages \\\r\n .create(\r\n body=contents,\r\n messaging_service_sid='MGce3b91ee5ecc126b6e230f1afb8c2c5b',\r\n to=\"+1\" + user\r\n )\r\n masterEntity[cf.dateObj.textsSent] += 1\r\n masterEntity[cf.totalTextsSent] += 1\r\n elif re.search(\"@\", user): # Email\r\n messages_to_send_dict[user].append(\r\n '\\nMake sure to act fast, other people have been notified and are trying to get this seat too! \\n\\nClick here to see your subscribed courses/unsubscribe
: https://www.spotcheck.space/login
')\r\n contents = '
' + ''.join(messages_to_send_dict[user])\r\n yag.send(user, 'A Spot is Open in a Class You Want!', contents)\r\n masterEntity[cf.dateObj.emailsSent] += 1\r\n masterEntity[cf.totalEmailsSent] += 1\r\n messages_to_send_dict = {}\r\n client.put(masterEntity)\r\n\r\n\r\ndef WasScrapedAlready(link):\r\n now = datetime.utcnow()\r\n if now.minute < 10:\r\n if now.hour == 0:\r\n prev_time = now.replace(\r\n day=now.day-1, hour=23, minute=now.minute+50)\r\n else:\r\n prev_time = now.replace(hour=now.hour-1, minute=now.minute+50)\r\n else:\r\n prev_time = now.replace(minute=now.minute-10)\r\n query = client.query(kind='course')\r\n query.add_filter('date_scraped', '<', prev_time)\r\n results = list(query.fetch())\r\n if len(results) == 0:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef RetrieveMasterEntity(client):\r\n \"\"\"\r\n Only works if the current masterEntity is the first one\r\n \"\"\"\r\n query = client.query(kind='masterEntity')\r\n print('made it past query')\r\n results = list(query.fetch())\r\n return results[0]\r\n\r\n\r\ndef CreateMasterEntity(client):\r\n masterEntity = datastore.Entity(key=client.key('masterEntity'))\r\n masterEntity.update({\r\n cf.dateObj.courseList: []\r\n })\r\n client.put(masterEntity)\r\n\r\n\r\ndef StartFunction(datastore_client):\r\n global client\r\n client = datastore_client\r\n global linksScraped\r\n cf.dateObj.GetCurrDate() # Update date\r\n linksScraped = {'Test': 'Scraped'}\r\n print('GOT INTO START FUNCTION AHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH')\r\n ScrapeMainPage()\r\n SendMessages()\r\n","repo_name":"dknopf/SpotCheck","sub_path":"WesmapsWebscraperBS.py","file_name":"WesmapsWebscraperBS.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33841541378","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: %(Mikel Val Calvo, Juan Antonio Barios Heredero, Arturo Bertomeu-Motos)\n@email: %(mikel1982mail@gmail.com, juan.barios@gmail.com, arturobm90@gmail.com)\n@institution: %(Dpto. 
de Inteligencia Artificial, Universidad Nacional de Educación a Distancia (UNED); Center for Biomedical Technology, Universidad Politécnica, Madrid, Spain; Neuroengineering medical group (UMH) ) \n@DOI: \n\"\"\"\n\n\nfrom pylsl import StreamInfo, StreamOutlet\nimport numpy as np\nimport time\nimport sys\n\ndef main(*args):\n # first create a new stream info (here we set the name to BioSemi,\n # the content-type to EEG, 8 channels, 100 Hz, and float-valued data) The\n # last value would be the serial number of the device or some other more or\n # less locally unique identifier for the stream as far as available (you\n # could also omit it but interrupted connections wouldn't auto-recover)\n\n info = StreamInfo(args[0][0], 'EEG', 8, 250, 'float32', 'myuid34234')\n # now attach some meta-data (in accordance with XDF format,\n # see also code.google.com/p/xdf)\n chns = info.desc().append_child(\"channels\")\n for label in [\"C3\", \"C4\", \"Cz\", \"FPz\", \"POz\", \"CPz\", \"O1\", \"O2\"]:\n ch = chns.append_child(\"channel\")\n ch.append_child_value(\"label\", label)\n ch.append_child_value(\"unit\", \"microvolts\")\n ch.append_child_value(\"type\", \"EEG\")\n info.desc().append_child_value(\"manufacturer\", \"SCCN\")\n cap = info.desc().append_child(\"cap\")\n cap.append_child_value(\"name\", \"EasyCap\")\n cap.append_child_value(\"size\", \"54\")\n cap.append_child_value(\"labelscheme\", \"10-20\")\n # next make an outlet\n outlet = StreamOutlet(info)\n\n while True:\n outlet.push_sample(np.random.rand(8)*100)\n time.sleep(1/250)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"mikelval82/MULTI_GEERT","sub_path":"EEG_generator.py","file_name":"EEG_generator.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24138878161","text":"import socketserver\n\n# 必须继承socketserver.BaseRequestHandler\nclass Myserver(socketserver.BaseRequestHandler):\n def handle(self): # 方法名必须要叫handle\n # 字节类型\n while 1:\n # 针对window系统\n try:\n print(\"等待信息\")\n data = self.request.recv(1024) # 阻塞\n # 针对linux\n if len(data) == 0:\n break\n if data == b'exit':\n break\n response = data + b'SB'\n self.request.send(response)\n except Exception as e:\n break\n\n self.request.close()\n\n\n'''\n1 创建socket对象\n2 self.socket.bind()\n3 self.socket.listen(5)\n'''\n# socketserver.ForkingUDPServer\nserver = socketserver.ThreadingTCPServer(('127.0.0.1', 8899), Myserver)\n\nserver.serve_forever() # 相当于 coon,addr = sock.accept()\n","repo_name":"dong-pro/fullStackPython","sub_path":"p1_basic/day27_31netpro/day29/socketserver模块/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"5938216970","text":"from math import factorial\n\nn = int(input(\"digite um número: \"))\nprint(f'a fatorial {n}! 
é: ')\nf = factorial(n)\nx = n\nwhile x > 0:\n if x != 1:\n print(f\"{x} x \", end=\"\")\n else:\n print(f'{x} = ', end=\"\")\n x = x - 1\n\nprint(f'{f}')\n","repo_name":"eulauragabriel/curso-em-video","sub_path":"python/mundo 2/exercicio_60.py","file_name":"exercicio_60.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18568743438","text":"# -*- coding:utf-8 -*\nimport xml.dom.minidom\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8') \nnpc = 'npcshop.xml'\nitem = 'item.xml'\n\ndomNpc = xml.dom.minidom.parse(npc)\ndomItem = xml.dom.minidom.parse(item)\n\nroot = domItem.documentElement\nitemObj = {}\nitemlist = root.getElementsByTagName('item')\nfor item in itemlist:\n fileName = item.getAttribute('id')\n itemObj[fileName] = item.getAttribute('item_name')\n\n\nroot = domNpc.documentElement\nitemlist = root.getElementsByTagName('mysteryshop')\nnames = []\nfor item in itemlist:\n fileName = item.getAttribute('id')\n\n names.append({'id': fileName, 'name': itemObj[\n fileName], 'level': item.getAttribute('lv_need')})\n\ndef getStr(obj):\n\ts = '';\n\tfor i in obj:\n\t\ts = s + i +\":\"+obj[i]\n\treturn s+'\\n'\n\nwith open(\"test.txt\", \"w\") as tofile:\n for i in names:\n tofile.write(getStr(i))\n tofile.close()\n","repo_name":"ye1maozi/python_back","sub_path":"name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39289705983","text":"\n#knn_score = knn_classifier.score(X_test, y_test)\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.datasets import load_iris\n\niris=load_iris()\n\nfeatures=iris.data\nlabels=iris.target\n\nfrom sklearn.cross_validation import train_test_split\n\n(xtrain,xtest,ytrain,ytest)=train_test_split(features,labels,test_size=.3)\n\nknn_classifier = KNeighborsClassifier()\nknn_classifier.fit(xtrain,ytrain)\nknn_score = knn_classifier.score(xtest, ytest)\n\n\nprint(knn_score)\n#print(iris)\n\n","repo_name":"naveentnair96/machine-learning","sub_path":"iristwo.py","file_name":"iristwo.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7874616304","text":"import numpy as np\nfrom tqdm import tqdm\nimport utils\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\nfrom model.tvts_bert import TVTSBERT\nfrom classification_model import TVTSBERTClassification\n\ndef avg_acc(matrix):\n correct = np.diag(matrix)\n all = matrix.sum(axis=0)\n acc = correct / all\n avg_acc = np.average(acc)\n return avg_acc\n\n\nclass TVTSBERTFineTuner:\n def __init__(self, tvtsbert: TVTSBERT, num_classes: int,\n train_dataloader: DataLoader, valid_dataloader: DataLoader,\n lr: float=1e-4, with_cuda: bool=True,\n cuda_devices=None, log_freq: int=10):\n\n cuda_condition = torch.cuda.is_available() and with_cuda\n self.device = torch.device(\"cuda\" if cuda_condition else \"cpu\")\n\n self.tvtsbert = tvtsbert\n self.model = TVTSBERTClassification(tvtsbert, num_classes).to(self.device)\n self.num_classes = num_classes\n\n # 多gpu并行操作\n # if with_cuda and torch.cuda.device_count() > 1:\n # print(\"Using %d GPUs for model pretraining\" % torch.cuda.device_count())\n # self.model = nn.DataParallel(self.model, device_ids=cuda_devices)\n\n 
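# The commented block above would wrap the model in nn.DataParallel when several\n        # GPUs are available; it is left disabled in this version.\n        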
self.train_dataloader = train_dataloader\n self.valid_dataloader = valid_dataloader\n\n self.optim = Adam(self.model.parameters(), lr=lr)\n self.criterion = nn.CrossEntropyLoss()\n\n # 每次finetune之前改一下writer的地址\n self.writer = SummaryWriter('../runs/2021.3.30-finetune72')\n self.log_freq = log_freq\n\n\n def train(self, epoch):\n\n # 进度条\n data_iter = tqdm(enumerate(self.train_dataloader),\n desc=\"EP_%s:%d\" % (\"train\", epoch),\n total=len(self.train_dataloader),\n bar_format=\"{l_bar}{r_bar}\")\n\n train_loss = 0.0\n counter = 0\n total_correct = 0\n total_element = 0\n matrix = np.zeros([self.num_classes, self.num_classes])\n\n # for name, param in self.model.named_parameters():\n # print(name)\n # print(param.size())\n\n for i, data in data_iter:\n data = {key: value.to(self.device) for key, value in data.items()}\n\n # 将数据输入分类模型得到分类结果\n classification = self.model(data['bert_input'].float(),\n data['bert_mask'].long())\n\n loss = self.criterion(classification, data['class_label'].squeeze().long())\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n train_loss += loss.item()\n\n # 打印输出\n post_fix = {\n \"epoch\": epoch,\n \"iter\": i,\n \"avg_loss\": train_loss / (i+1),\n \"loss\": loss.item()\n }\n if i % self.log_freq == 0:\n data_iter.write(str(post_fix))\n\n\n classification_result = classification.argmax(dim=-1)\n classification_target = data['class_label'].squeeze()\n correct = classification_result.eq(classification_target).sum().item()\n\n total_correct += correct\n total_element += data['class_label'].nelement()\n for row, col in zip(classification_result, classification_target):\n matrix[row, col] += 1\n\n counter += 1\n\n train_loss /= counter\n train_overall_acc = total_correct / total_element * 100\n self.writer.add_scalar('train_loss', train_loss, global_step=epoch)\n self.writer.add_scalar('train_overall_acc', train_overall_acc, global_step=epoch)\n\n valid_loss, valid_overall_acc = self._validate()\n self.writer.add_scalar('valid_loss', valid_loss, global_step=epoch)\n self.writer.add_scalar('valid_overall_acc', valid_overall_acc, global_step=epoch)\n\n print(\"EP%d, train_loss=%.2f, train_overall_acc=%.2f, valid_loss=%.2f, valid_overall_acc=%.2f\"\n % (epoch, train_loss, train_overall_acc, valid_loss, valid_overall_acc))\n\n return train_loss, train_overall_acc, valid_loss, valid_overall_acc\n\n\n def _validate(self):\n with torch.no_grad():\n self.model.eval()\n\n valid_loss = 0.0\n counter = 0\n total_correct = 0\n total_element = 0\n matrix = np.zeros([self.num_classes, self.num_classes])\n for data in self.valid_dataloader:\n data = {key :value.to(self.device) for key, value in data.items()}\n\n classification = self.model(data['bert_input'].float(),\n data['bert_mask'].long())\n\n loss = self.criterion(classification, data['class_label'].squeeze().long())\n valid_loss += loss.item()\n\n classification_result = classification.argmax(dim=-1)\n classification_target = data['class_label'].squeeze()\n\n correct = classification_result.eq(classification_target).sum().item()\n total_correct += correct\n total_element += data['class_label'].nelement()\n for r, c in zip(classification_result, classification_target):\n matrix[r, c] += 1\n\n counter += 1\n\n valid_loss /= counter\n valid_overall_acc = total_correct / total_element * 100\n\n self.model.train()\n\n return valid_loss, valid_overall_acc\n\n def test(self, data_loader):\n with torch.no_grad():\n\n self.model.eval()\n\n total_correct = 0\n total_element = 0\n matrix = np.zeros([self.num_classes, 
self.num_classes])\n for data in data_loader:\n data = {key :value.to(self.device) for key, value in data.items()}\n\n result = self.model(data['bert_input'].float(),\n data['bert_mask'].long())\n\n # loss = self.criterion(classification, data['class_label'].squeeze().long())\n # valid_loss += loss.item()\n\n classification_result = result.argmax(dim=-1)\n classification_target = data['class_label'].squeeze()\n\n correct = classification_result.eq(classification_target).sum().item()\n total_correct += correct\n total_element += data['class_label'].nelement()\n for r, c in zip(classification_result, classification_target):\n matrix[r, c] += 1\n\n test_overall_acc = total_correct / total_element * 100\n test_avg_acc = avg_acc(matrix)\n\n self.model.train()\n\n return test_overall_acc, test_avg_acc\n\n\n def save(self, epoch, file_path):\n output_path = file_path + \"classification_checkpoint.tar\"\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optim.state_dict()\n }, output_path)\n\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n return output_path\n\n def load(self, file_path):\n input_path = file_path + \"classification_checkpoint.tar\"\n\n checkpoint = torch.load(input_path)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.optim.load_state_dict(checkpoint['optimizer_state_dict'])\n self.model.train()\n epoch = checkpoint['epoch']\n\n print(\"EP:%d Model Loaded from:\" % epoch, input_path)\n return input_path\n\n\n","repo_name":"Xiao-Di/TVTS-BERT","sub_path":"finetune/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"11671072343","text":"\"\"\"\nMerge Two Sorted Lists\nMerge two sorted linked lists and return it as a new list.\nThe new list should be made by splicing together the nodes of the first two lists.\n\nExample:\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n#Actual Solution\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if not l2:\n return l1\n elif not l1:\n return l2\n elif l1.val >= l2.val:\n l = ListNode(l2.val)\n l.next = self.mergeTwoLists(l1,l2.next)\n return l\n else:\n l = ListNode(l1.val)\n l.next = self.mergeTwoLists(l1.next,l2)\n return l\n#Demo\ns = Solution()\n\n#Read in two listnodes\nl1 = ListNode(0)\ntemp = l1\nfor i in input().split(\" \"):\n temp.next = ListNode(int(i))\n temp = temp.next\nl1 = l1.next\n\nl2 = ListNode(0)\ntemp = l2\nfor i in input().split(\" \"):\n temp.next = ListNode(int(i))\n temp = temp.next\nl2 = l2.next\n\nl3 = s.mergeTwoLists(l1,l2)\nwhile l3.val is not None:\n print(l3.val)\n l3 = l3.next\n","repo_name":"adityaarakeri/Interview-solved","sub_path":"LeetCode/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"67"} +{"seq_id":"74248563734","text":"\"\"\"\ntest_etl_job.py\n~~~~~~~~~~~~~~~\n\nThis module contains unit tests for the transformation steps of the ETL\njob defined in etl_job.py. 
It makes use of a local version of PySpark\nthat is bundled with the PySpark package.\n\"\"\"\nimport json\nimport unittest\n\nimport pyspark.sql.functions as F\nimport quinn\nfrom dependencies.spark import start_spark\nfrom jobs.udffunctions import *\nfrom pyspark.sql.functions import mean\nfrom pyspark.sql.types import *\n\nfrom chispa import assert_column_equality, assert_approx_column_equality\nimport pyspark.sql.functions as F\nfrom pyspark.sql.types import *\nfrom chispa import assert_df_equality, assert_approx_df_equality\n\nfrom quinn.extensions import *\n\n\n\ndef title(x,y):\n if y:\n x = x.title()\n return x\n\n\n\nclass SparkETLTests(unittest.TestCase):\n \"\"\"Test suite for transformation in etl_job.py\n \"\"\"\n\n def setUp(self):\n \"\"\"Start Spark, define config and path to test data\n \"\"\"\n self.config = json.loads(\"\"\"{\"steps_per_floor\": 21}\"\"\")\n self.spark, *_ = start_spark()\n self.test_data_path = 'test_data/'\n\n def tearDown(self):\n \"\"\"Stop Spark\n \"\"\"\n self.spark.stop()\n\n def test_udf(self):\n df = self.spark.createDataFrame([\n [\"aaa\",\"1\"],\n [\"bbb\",\"2\"],\n [\"ccc\",\"5\"]\n ]).toDF(\"text\",\"id\")\n\n title_udf = F.udf(title, StringType())\n self.spark.udf.register('title_udf', title_udf)\n\n df.withColumn('text_title',title_udf('text',F.lit(True)))#.show()\n\n\n # def test_flatmap(self):\n # df = self.spark.read.text(\"resources/words.txt\")\n # words = df.rdd.flatMap(lambda row: row[0].split(\" \")).collect()\n # print(words)\n\n\n\n def test_pandas_approach(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = list(df.select('mvv').toPandas()['mvv'])\n assert mvv == [1, 2, 3, 4]\n\n\n def test_flatmap_collect(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = df.select('mvv').rdd.flatMap(lambda x: x).collect()\n assert mvv == [1, 2, 3, 4]\n\n\n def test_flatmap_toLocalIterator(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = list(df.select('mvv').rdd.flatMap(lambda x: x).toLocalIterator())\n assert mvv == [1, 2, 3, 4]\n\n\n def test_rdd_map(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = df.select('mvv').rdd.map(lambda row : row[0]).collect()\n\n assert mvv == [1, 2, 3, 4]\n\n\n def test_list_comprehension_map(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = [row[0] for row in df.select('mvv').collect()]\n assert mvv == [1, 2, 3, 4]\n\n\n def test_list_comprehension_toLocalIterator(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n mvv = [r[0] for r in df.select('mvv').toLocalIterator()]\n assert mvv == [1, 2, 3, 4]\n\n\n def test_pandas_to_two_lists(self):\n df = self.spark.createDataFrame([(1, 5), (2, 9), (3, 3), (4, 1)], [\"mvv\", \"count\"])\n collected = df.select('mvv', 'count').toPandas()\n mvv = list(collected['mvv'])\n count = list(collected['count'])\n assert mvv == [1, 2, 3, 4]\n assert count == [5, 9, 3, 1]\n\n def test_cast_arraytype(self):\n data = [\n (['200', '300'], [200, 300]),\n (['400'], [400]),\n (None, None)\n ]\n df = self.spark.createDataFrame(data, [\"nums\", \"expected\"])\\\n .withColumn(\"actual\", F.col(\"nums\").cast(ArrayType(IntegerType(), True)))\n assert_column_equality(df, \"actual\", \"expected\")\n\n def test_approx_df_equality_same(self):\n data1 = [\n (1.1, \"a\"),\n (2.2, 
\"b\"),\n (3.3, \"c\"),\n (None, None)\n ]\n df1 = self.spark.createDataFrame(data1, [\"num\", \"letter\"])\n\n data2 = [\n (1.05, \"a\"),\n (2.13, \"b\"),\n (3.3, \"c\"),\n (None, None)\n ]\n df2 = self.spark.createDataFrame(data2, [\"num\", \"letter\"])\n\n assert_approx_df_equality(df1, df2, 0.1)\n\n\n def test_approx_df_equality_different(self):\n data1 = [\n (1.1, \"a\"),\n (2.2, \"b\"),\n (3.3, \"c\"),\n (None, None)\n ]\n df1 = self.spark.createDataFrame(data1, [\"num\", \"letter\"])\n\n data2 = [\n (1.1, \"a\"),\n (5.0, \"b\"),\n (3.3, \"z\"),\n (None, None)\n ]\n df2 = self.spark.createDataFrame(data2, [\"num\", \"letter\"])\n\n assert_approx_df_equality(df1, df2, 0.1)\n\n\n def test_schema_mismatch_error(self):\n data1 = [\n (1, \"a\"),\n (2, \"b\"),\n (3, \"c\"),\n (None, None)\n ]\n df1 = self.spark.createDataFrame(data1, [\"num\", \"letter\"])\n\n data2 = [\n (1, 6),\n (2, 7),\n (3, 8),\n (None, None)\n ]\n df2 = self.spark.createDataFrame(data2, [\"num\", \"num2\"])\n\n assert_df_equality(df1, df2)\n\n\n\n def test_example_error(self):\n # schema = StructType([StructField(\"country.name\", StringType(), True)])\n df = self.spark.createDataFrame(\n [(\"china\", \"asia\"), (\"colombia\", \"south america\")],\n [\"country.name\", \"continent\"]\n )\n df.select(\"`country.name`\").show()\n\n def test_random_value_from_array(self):\n df = self.spark.createDataFrame(\n [\n (['a', 'b', 'c'],),\n (['a', 'b', 'c', 'd'],),\n (['x'],),\n ([None],)\n ],\n [\n \"letters\"\n ]\n )\n # df.show()\n actual_df = df.withColumn(\n \"random_letter\",\n quinn.array_choice(F.col(\"letters\"))\n )\n # actual_df.show()\n\n\n def test_random_value_from_columns(self):\n df = self.spark.createDataFrame(\n [\n (1, 2, 3),\n (4, 5, 6),\n (7, 8, 9),\n (10, None, None),\n (None, None, None)\n ],\n [\"num1\", \"num2\", \"num3\"]\n )\n # df.show()\n actual_df = df.withColumn(\n \"random_number\",\n quinn.array_choice(F.array(F.col(\"num1\"), F.col(\"num2\"), F.col(\"num3\")))\n )\n # actual_df.show()\n\n\n def test_random_animal(self):\n df = self.spark.createDataFrame([('jose',), ('maria',), (None,)], ['first_name'])\n cols = list(map(lambda col_name: F.lit(col_name), ['cat', 'dog', 'mouse']))\n actual_df = df.withColumn(\n \"random_animal\",\n quinn.array_choice(F.array(*cols))\n )\n # actual_df.show()\n\n\n# question motivation: https://stackoverflow.com/questions/63103302/creating-dictionary-from-pyspark-dataframe-showing-outofmemoryerror-java-heap-s/63103739#63103739\n def test_to_dictionary(self):\n data = [\n (\"BOND-9129450\", \"90cb\"),\n (\"BOND-1742850\", \"d5c3\"),\n (\"BOND-3211356\", \"811f\"),\n (\"BOND-7630290\", \"d5c3\"),\n (\"BOND-7175508\", \"90cb\"),\n ]\n df = self.spark.createDataFrame(data, [\"id\", \"hash_of_cc_pn_li\"])\n agg_df = df.groupBy(\"hash_of_cc_pn_li\").agg(F.max(\"hash_of_cc_pn_li\").alias(\"hash\"), F.collect_list(\"id\").alias(\"id\"))\n res = quinn.two_columns_to_dictionary(agg_df, \"hash\", \"id\")\n print(res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zhangabner/pyspark-debug-test","sub_path":"passedtestfor_etl_job/test_etl_job_load.py","file_name":"test_etl_job_load.py","file_ext":"py","file_size_in_byte":7683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1930248783","text":"from math import pi\nfrom polygons import make_polygon as mp\n\n\"\"\"Let's make some polygons!\"\"\"\n\np0 = mp.Polygon()\np0.set_name = 'p0'\np0.set_n_sides(4)\np0.set_side_length(10)\np0.info()\n\np1 = 
mp.Polygon()\np1.set_name = 'p1'\np1.set_n_sides(17)\np1.set_radius(10)\np1.info()\n\np2 = mp.Polygon()\np2.set_name = 'p2'\np2.set_n_sides(10000)\np2.set_radius(10)\np2.info()\n\n# How does the area of Polygon p2 compare to that of a circle with the same radius?\n\narea_circle = pi * (10 ** 2)\nprint('area of circle, radius 10 = ', area_circle)\nprint(' ')\n\np3 = mp.Polygon()\np3.set_name = 'p3'\np3.set_n_sides(4)\np3.set_side_length(10)\np3.info()\np3.set_n_sides(5)\np3.info()","repo_name":"jclaytonbell/Geometry","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24900834045","text":"import os\nimport cv2\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\n\nclass ImageUtils:\n def __init__(self):\n pass\n\n def display(self, im, window=\"Preview\"):\n cv2.imshow(window, im)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n pass\n\n def extractAnnotations_68FPs(self, annot_file):\n # return coords of annotation in [x1, y1, x2, y2, ..., x68, y68] format\n with open(annot_file, \"r\") as f:\n coords = []\n start = False\n for line in f:\n line = line.strip()\n if \"}\" in line:\n break # to stop the file\n\n if start:\n # print(line.split())\n coords.extend(map(np.float64, line.split()))\n if \"{\" in line:\n start = True\n\n np_coords = np.array(coords)\n return np_coords\n pass\n\n def drawAnnotationsOnImg(self, img, coords_ls,\n window=\"Preview\", display=True):\n # image path or image array\n # coords as a 2-D list format\n if img is None:\n return None\n\n if isinstance(img, str): # if sent as a path\n im = cv2.imread(img)\n else: # if sent as numpy array\n im = img\n\n for points in coords_ls:\n # print(points)\n cv2.circle(im, # image\n (int(points[0]), int(points[1])), # center\n 1, # thickness\n (0, 255, 0), # color\n lineType=cv2.LINE_AA)\n\n if display:\n self.display(im, window)\n\n return im\n\n def drawContours(self, img, coords_ls, window=\"Preview\", display=True):\n if img is None:\n return None\n\n if isinstance(img, str): # if sent as a path\n im = cv2.imread(img)\n else: # if sent as numpy array\n im = img\n\n pts = np.array(coords_ls)\n cv2.drawContours(im, contours=np.int32([pts]),contourIdx=0,color=(0,255,0),\n thickness=1, lineType=cv2.LINE_AA)\n\n return im\n\n def showBboxOnImg(self, img, bbox_coords_tup, window=\"Preview\", display=True):\n if img is None:\n return None\n\n if isinstance(img, str): # if sent as a path\n im = cv2.imread(img)\n else: # if sent as numpy array\n im = img\n\n cv2.rectangle(im, bbox_coords_tup[0], bbox_coords_tup[1], (0,255,0), 2)\n\n if display:\n self.display(im, window)\n\n return im\n\n def cropImg(self, img, x1, y1, x2, y2):\n if img is None:\n return None\n\n if isinstance(img, str): # if sent as a path\n im = cv2.imread(img)\n else: # if sent as numpy array\n im = img\n\n return im[y1:y2, x1:x2]\n\n def writeTextOnImg(self, im, x, y, text, color=(0,0,255), thick=2, window=\"Preview\", display = True):\n # write text at x, y coords\n xy_tup = (x, y - 10 if y - 10 > 10 else y + 10)\n cv2.putText(im, text, xy_tup, cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, color=color, thickness=thick)\n if display:\n self.display(im, window)\n return im\n\n def makeSquareBbox(self, x1, y1, x2, y2):\n # print(x1, y1, x2, y2)\n bbox_w, bbox_h = x2 - x1, y2 - y1\n diff = bbox_h - bbox_w\n delta = int(np.abs(diff) / 2)\n\n if diff == 0:\n return x1, y1, x2, y2\n if diff > 0:\n x1 -= delta\n x2 += 
delta\n if diff % 2 == 1:\n x2 += 1\n else:\n y1 -= delta\n y2 += delta\n if diff % 2 == 1:\n y2 += 1\n\n assert((x2 - x1) == (y2 - y1)), \"Not Equal in length - check bbox.\"\n return x1, y1, x2, y2\n\n def moveBbox(self, x1, y1, x2, y2, by, dir = 'down'):\n if dir == 'up':\n x1, y1, x2, y2 = x1, y1 - by, x2, y2 - by\n elif dir == 'down':\n x1, y1, x2, y2 = x1, y1 + by, x2, y2 + by\n elif dir == 'right':\n x1, y1, x2, y2 = x1 + by, y1, x2 + by, y2\n elif dir == 'left':\n x1, y1, x2, y2 = x1 - by, y1, x2 - by, y2\n return x1, y1, x2, y2\n\n def getOptBbox(self, x1, y1, x2, y2, img_shape, fit=False):\n h, w = img_shape\n # print(f\"Original - x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}, h: {h}, w: {w}\")\n\n if y2 <= h and x2 <= w and x1 >= 0 and y1 >= 0:\n return (x1, y1, x2, y2)\n\n if fit:\n if y2 > h:\n while y2 > h and y1 >= 0:\n # print(\"moving up.....\")\n x1, y1, x2, y2 = self.moveBbox(x1, y1, x2, y2, by=1, dir='up')\n # print(f\"Up - x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}, h: {h}, w: {w}\")\n if x2 > w:\n while x2 > w and x1 >= 0:\n # print(\"moving left.....\")\n x1, y1, x2, y2 = self.moveBbox(x1, y1, x2, y2, by=1, dir='left')\n # print(f\"Left - x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}, h: {h}, w: {w}\")\n if y1 < 0:\n while y1 < 0 and y2 <= h:\n # print(\"moving down.....\")\n x1, y1, x2, y2 = self.moveBbox(x1, y1, x2, y2, by=1, dir='down')\n # print(f\"Down - x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}, h: {h}, w: {w}\")\n if x1 < 0:\n while x1 < 0 and x2 <= w:\n # print(\"moving right.....\")\n x1, y1, x2, y2 = self.moveBbox(x1, y1, x2, y2, by=1, dir='right')\n # print(f\"Right - x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}, h: {h}, w: {w}\")\n\n # makes sure that the bbox coords are not outside the image dims\n return (max(x1, 0), max(y1, 0), min(x2, w), min(y2, h))\n\n def getTransformCoords(self, x1, y1, x2, y2, coords_np):\n # make coord correction\n coords_np[:, 0] -= x1\n coords_np[:, 1] -= y1\n\n # normalize\n w = x2 - x1\n h = y2 - y1\n coords_np[:, 0] /= w\n coords_np[:, 1] /= h\n\n return coords_np\n\n def getInvTransformCoords(self, coords_np, w, h):\n # restore coords based on dims w,h\n coords_np[:, 0] *= w\n coords_np[:, 1] *= h\n return coords_np\n\n def saveImage(self, img, path, name, display=False):\n file = os.path.join(path, name)\n cv2.imwrite(file, img)\n if display:\n print(f\"Image saved successfully in path: {file}\")\n\n pass\n\n def display_multiple_img(self, images, rows=1, cols=1, window_size=(10,10),\n print_title=False, titles=[]):\n plt.rcParams['figure.figsize'] = window_size\n figure, ax = plt.subplots(nrows=rows, ncols=cols)\n if print_title:\n images = zip(images, titles)\n else:\n images = zip(images, [\"\"] * len(images))\n\n for ind, img_title in enumerate(images):\n img, title = img_title\n ax.ravel()[ind].imshow(img)\n if print_title:\n ax.ravel()[ind].set_title(title)\n ax.ravel()[ind].set_axis_off()\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n # check the util function to draw annotations in an image\n annot_file_path = os.path.join(\"..\", \"..\", \"data\", \"afw\", \"134212_1.pts\")\n image_file_path = os.path.join(\"..\", \"..\", \"data\", \"afw\", \"134212_1.jpg\")\n imu = ImageUtils()\n annot_coords = imu.extractAnnotations_68FPs(annot_file_path)\n annot_coords_ls = annot_coords.reshape(-1,2).tolist()\n\n imu.drawAnnotationsOnImg(image_file_path, 
annot_coords_ls)\n\n","repo_name":"annamalsCMU/iBlink-using-Facial-Landmark-Detection","sub_path":"blink-detection/code/utils/ImageUtils.py","file_name":"ImageUtils.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27283665698","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n#\n# Imports\n#\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import str\nfrom builtins import object\nimport sys\nimport xbmc\n\nfrom resources.lib.dumpert_const import LANGUAGE, log, convertToUnicodeString\n\n\n#\n# Main class\n#\nclass Main(object):\n #\n # Init\n #\n def __init__(self):\n # Get the command line arguments\n # Get the plugin url in plugin:// notation\n self.plugin_url = sys.argv[0]\n # Get the plugin handle as an integer number\n self.plugin_handle = int(sys.argv[1])\n\n log(\"ARGV\", repr(sys.argv))\n\n # Get search term from user\n keyboard = xbmc.Keyboard('', LANGUAGE(30508))\n keyboard.doModal()\n\n if keyboard.isConfirmed():\n search_term = keyboard.getText()\n # If the user has entered nothing, we stop\n if search_term == \"\":\n sys.exit(0)\n else:\n # If the user cancels the input box, we stop\n sys.exit(0)\n\n sys.argv[2] = convertToUnicodeString(sys.argv[2])\n\n # Converting URL argument to proper query string like 'https://api-live.dumpert.nl/mobile_api/json/search/fiets/0/'\n sys.argv[2] = sys.argv[2] + search_term + \"/0/\"\n\n log(\"sys.argv[2]\", sys.argv[2])\n\n import dumpert_json as plugin\n\n plugin.Main()\n","repo_name":"skipmodea1/plugin.video.dumpert.python3","sub_path":"resources/lib/dumpert_search.py","file_name":"dumpert_search.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"71603373972","text":"##\n# Leslie Dao\n# 10561234\n#\n# Neemt een link van geografisch geindexeerde data van wikipedia en maakt er een JSON van.\n# In dit specifieke geval:\n# https://en.wikipedia.org/wiki/List_of_countries_by_electricity_production_from_renewable_sources\n#\n# Dit is slechts een hulpscript dat een JSON object maakt van wikipedia informatie\n##\n\n# Libraries voor het dumpen van JSON en het scrapen\nimport json\nimport pattern\nfrom pattern.web import URL, DOM\n\n# Backup html file opslaan\ndef save_html(filename, html):\n with open(filename, 'wb') as f:\n f.write(html)\n \n# Functie om een dictionary te maken en dan te dumpen als JSON in een .txt file\ndef make_json(url):\n json_dict = {}\n # Geef de data een titel\n json_dict['data'] = 'percentage renewable energy'\n \n # Pak de DOM van de tabel van alle landen\n html = url.download()\n dom = DOM(DOM(html).by_class(\"wikitable\")[1].content)\n \n # Maak een list met info over de landen\n countrylist = dom.by_tag(\"tr\")[1:]\n \n # Lege list om de data aan te appenden\n pointslist = []\n for countryinfo in countrylist:\n \t# Lege list om land en percentage renewable energy aan te appenden\n infopair = []\n \n # Neem de naam van het land en append dat aan infopair\n infopair.append(DOM(countryinfo.content).by_tag(\"a\")[0].attrs.get(\"title\",\"\").encode(\"utf-8\"))\n # Neem het percentage renewable energy van het land en append dat aan infopair\n infopair.append(DOM(countryinfo.content).by_tag(\"td\")[8].content.encode(\"utf-8\"))\n \n # Append de list aan pointslist voor een nested list\n pointslist.append(infopair)\n \n\t# Geef de dictionary de key 'points' met value de 
nested list pointslist\n    json_dict['points'] = pointslist\n    \n    # Dump the dictionary as JSON to the text file json.txt\n    json.dump(json_dict, open('json.txt', 'wb'))\n    \n\n# MAIN function\n# Variables for the URL and the HTML\nwikiURL = URL('https://en.wikipedia.org/wiki/List_of_countries_by_electricity_production_from_renewable_sources')\nwikiHTML = wikiURL.download(cached=True)\n\n# Make a backup of the page\nsave_html('renewableenergy.html', wikiHTML)\n\n# Call make_json to dump the JSON\nmake_json(wikiURL)","repo_name":"Lesliedao/DataProcessing","sub_path":"Homework/week-4/SVG2/JSONconvert.py","file_name":"JSONconvert.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"16812278345","text":"def divisors(n):\n    \"\"\"\n    Calculate the proper divisors of n\n    Lazy implementation\n    \"\"\"\n    result = []\n    for i in range(1, n):\n        if n % i == 0:\n            result.append(i)\n    return result\n\ndef amicable(n):\n    \"\"\"\n    Checks if n is part of a pair of amicable numbers\n    Return type: (bool, int)\n    The int is the sum of the proper divisors of n, called m\n    The bool is True if and only if (n, m) form an amicable pair\n    Meaning: The sum of the proper divisors of m is n\n\n    Calculating m this way prevents a second loop over range(bound)! \n    \"\"\"\n    dn = divisors(n)\n    m = sum(dn)\n    dm = divisors(m)\n    if sum(dm) == n and m != n:\n        return True, m\n    return False, m\n\nlisted = []\ntried = set()\namicables = set()  # was missing: used below to collect all amicable numbers\nbound = 10000\n\nfor i in range(bound):\n    if i not in tried:\n        is_amicable, other = amicable(i)\n        tried.add(i)\n        tried.add(other)\n        if is_amicable:\n            listed.append((i, other))\n            amicables.add(i)\n            amicables.add(other)\n\nprint(listed)\nprint(sum(sum(x) for x in listed))\n","repo_name":"SCRK16/ProjectEuler","sub_path":"src/1-100/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"29070390533","text":"\nimport base64\nimport cv2\nfrom Transport.MqttPublisher import Publisher\nimport logging\nfrom datetime import datetime\n\ndef play():\n    cap = cv2.VideoCapture(0)\n    p = Publisher('topic/video')\n    counter = 0\n    skip_frame = True\n    while True:\n        ret, frame = cap.read()\n        if frame is None:\n            break\n        counter=counter+1\n        if counter%3==0:\n            frame = cv2.resize(frame, None, fx=0.5, fy=0.5)\n            flipped = cv2.flip(frame, -1)\n            now = datetime.now()\n            cv2.putText(flipped, str(now),(20, 20), 0, 0.7, (255, 255, 255), 1)\n            encoded, buffer = cv2.imencode('.jpg', flipped)\n            message = base64.b64encode(buffer)\n            p.send_message(message)\n        if counter >100:\n            counter = 0\n    cap.release()\n    p.send_message('eof')\n    logging.info('finished sending video')\n\n\nif __name__ == '__main__':\n    filename = '../Datasets/fourway.avi'\n    play()\n","repo_name":"advt3/TekioPlatform","sub_path":"Device/VideoCapture.py","file_name":"VideoCapture.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} {"seq_id":"25442161641","text":"import unittest2\nfrom HTMLTestRunner import HTMLTestRunner\nfrom testclasses.uiAlignmentPageTest import UIAlignmentPageTest\nfrom testclasses.addRemoveElementsPageTest import AddRemoveElementsPageTest\nfrom testclasses.homePageTest import HomePageTest\nimport os\nfrom utilityclasses.constants import Constants as const\n\nclass SuiteClass(unittest2.TestCase): \n    \n    def suiteone(self): \n        test_suite = 
unittest2.TestSuite()\n test_suite.addTest(HomePageTest('test_home_page'))\n test_suite.addTest(AddRemoveElementsPageTest('test_add_elements'))\n test_suite.addTest(AddRemoveElementsPageTest('test_remove_elements'))\n test_suite.addTest(UIAlignmentPageTest('test_go_to_ui_alignement_page'))\n test_suite.addTest(UIAlignmentPageTest('test_go_to_ui_random_url_page'))\n test_suite.addTest(UIAlignmentPageTest('test_go_to_ui_shift_url_page'))\n test_suite.addTest(UIAlignmentPageTest('test_go_to_ui_random_shift_url_page'))\n return test_suite\n \n def setUpModule(self):\n print(\"Setup\")\n HomePageTest.setUpModule()\n \n def tearDownModule(self):\n print(\"Teardown\")\n HomePageTest.tearDownModule()\n \nif __name__ == '__main__':\n path =os.getcwd()\n SuiteClass.setUpModule(object)\n test_suite = SuiteClass.suiteone(object)\n unittest2.TextTestRunner()\n runner =HTMLTestRunner(output=path + const.smoketest_reports, verbosity=2, descriptions=True, failfast=True, buffer=False, \n report_title= const.smoketest_reportstitle, template=None, resultclass=None)\n runner.run(test_suite)\n SuiteClass.tearDownModule(object)","repo_name":"akdeepa21/WhoTest_Deepa","sub_path":"WHOTest/testclasses/testSuite.py","file_name":"testSuite.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37996889144","text":"import os\nimport re\nimport json\nimport sys\nimport numpy as np\n\nPLUME_RE = \"Plumes_O\\d+\\-[B]\\d+\\-[SPWBR]\\d+\\.txt\"\n# PLUME_RE = \"Plumes_O*.txt\"\nFILL_VALUE = -999.99\n\nFILLS = [-9.999, -99, -99.9, -999.9]\n\ndef parsePlumeTxtFull(fpaths):\n outArr = []\n\n for f in fpaths:\n if os.path.isfile(f):\n\n case = {}\n \n with open(f, 'r') as fstream:\n \n #Header\n\n case[\"orbit\"] = (int)((fstream.readline().split())[3])\n case[\"path\"] = (int)((fstream.readline().split())[3])\n case[\"block\"] = (int)((fstream.readline().split())[3])\n\n case[\"date\"] = ((fstream.readline().split())[3])\n case[\"time_UTC\"] = ((fstream.readline().split())[3])\n # case[\"minx_version\"] = ((fstream.readline().split())[3])\n #MISR file info, blank lines, etc.\n for i in range(0, 4):\n fstream.readline()\n\n case[\"region\"] = (fstream.readline().split())[3]\n case[\"aerosol_type\"] = (fstream.readline().split())[4]\n case[\"geometry_type\"] = (fstream.readline().split())[4]\n\n #Region wind dir type, retrieved w/ band, retrieved w/ matcher, retrieved w/ cams, retrieval precision\n for i in range(0, 5):\n fstream.readline()\n \n case[\"min_height\"] = (float)((fstream.readline().split())[6])\n case[\"max_height\"] = (float)((fstream.readline().split())[6])\n case[\"sample_spacing\"] = (float)((fstream.readline().split())[4])\n \n\t\t\t\t#Registration corrected, Image color equalized, empty, first point lat/lon, geographic region\n for i in range(0, 6):\n \tfstream.readline()\n\n biome_line = fstream.readline().split()\t\n # case[\"biome_name\"] = biome_line[5]\n case[\"biome_name\"] = biome_line[5] if len(biome_line) == 7 else biome_line[5]+' '+biome_line[6]\n case[\"biome_id\"] = int(biome_line[len(biome_line)-1])\n\n #Red/Blue band better?\n fstream.readline()\n\n case[\"perimeter_km\"] = (int)((fstream.readline().split())[4])\n case[\"area_sq_km\"] = (int)((fstream.readline().split())[4])\n case[\"per_point_area_sq_km\"] = (float)((fstream.readline().split())[6])\n \n\n #Num. 
heights retrieved\n fstream.readline()\n\n #case[\"wind_corrected_count\"] = (int)((fstream.readline().split())[3])\n case[\"percent_area_covered\"] = (int)((fstream.readline().split())[4])\n case[\"fire_elevation\"] = (int)((fstream.readline().split())[6])\n case[\"best_median_height_m\"] = (int)((fstream.readline().split())[6])\n case[\"best_top_heigh_m\"] = (int)((fstream.readline().split())[6])\n case[\"height_st_dev\"] = (int)((fstream.readline().split())[5])\n case[\"height_local_var\"] = (int)((fstream.readline().split())[5])\n #case[\"corrht\"] = (int)((fstream.readline().split())[4])\n case[\"diff_dir_wind_at\"] = (int)((fstream.readline().split())[2])\n case[\"fire_power_mw\"] = (float)((fstream.readline().split())[5])\n case[\"quality\"] = (fstream.readline().split())[4]\n \n #pyro-cumulus, comments, empty\n for i in range(0, 3):\n fstream.readline()\n\n case[\"l1_rad_file\"] = (fstream.readline().split())[5]\n case[\"terrain_elevation_file\"] = (fstream.readline().split())[4]\n case[\"geometry_file\"] = (fstream.readline().split())[4]\n case[\"classifier_file\"] = (fstream.readline().split())[4]\n case[\"aerosol_file\"] = (fstream.readline().split())[4]\n case[\"biome_file\"] = (fstream.readline().split())[5]\t\n\n #empty\n fstream.readline()\n \n #Polygon\n poly_pts = (int)((fstream.readline().split())[1])\n\n #Polygon Header\n for i in range(0, 3):\n \tfstream.readline()\n\n case[\"polygon\"] = {\"lat\":[], \"lon\":[], \"line\":[], \"sample\":[], \"block\":[]}\n\n #Polygon Table\n for i in range(0, poly_pts):\n line = fstream.readline().split()\n case[\"polygon\"][\"lon\"].append((float)(line[1]))\n case[\"polygon\"][\"lat\"].append((float)(line[2]))\n case[\"polygon\"][\"block\"].append((int)(line[3]))\n case[\"polygon\"][\"sample\"].append((int)(line[4]))\n case[\"polygon\"][\"line\"].append((int)(line[5]))\t\n\n #empty\n fstream.readline() \n\n #Direction\n dir_pts = (int)((fstream.readline().split())[1])\n\n case[\"direction\"] = {\"lat\":[], \"lon\":[], \"line\":[], \"sample\":[], \"block\":[]}\n\n #Direction Header\n for i in range(0, 3):\n fstream.readline()\n\n #Direction Table\t\n for i in range(0, dir_pts):\n line = fstream.readline().split()\n case[\"direction\"][\"lon\"].append((float)(line[1]))\n case[\"direction\"][\"lat\"].append((float)(line[2]))\n case[\"direction\"][\"block\"].append((int)(line[3]))\n case[\"direction\"][\"sample\"].append((int)(line[4]))\n case[\"direction\"][\"line\"].append((int)(line[5]))\n\n\n #empty\n fstream.readline()\n\n #Results\n count = (int)((fstream.readline().split())[1])\n\n case[\"image_location\"] = {\"line\":[], \"sample\":[], \"block\":[]}\n case[\"position\"] = {\"lat\":[], \"lon\":[], \"clockwise_direction_from_north\":[], \"terrain_elevation\":[], \"distance_from_pt1_km\":[]}\n case[\"feature_height_m\"] = {\"wind_corrected\":[], \"zero_wind\":[], \"wind_filtered\":[], \"fill\":-9999}\n case[\"wind_speed\"] = {\"across_track\": [], \"along_track\": [], \"total\":[], \"fill\":-99.9}\n case[\"optical_depth\"] = {\"red\": [], \"green\": [], \"blue\": [], \"nir\": [], \"fill\":-9.999}\n case[\"single_scattering_albedo\"] = {\"red\": [], \"green\": [], \"blue\": [], \"nir\": [], \"fill\":-9.999}\t\n case[\"tau_frac\"] = {\"small_part\":[], \"med_part\":[], \"large_part\":[], \"sphere_part\":[], \"fill\":-9.999}\n case[\"power_mw\"] = {\"data\":[], \"fill\":-99.9}\n case[\"angstrom\"] = {\"data\":[], \"fill\":-9.999}\n \n\n #table header\n for i in range(0, 3):\n fstream.readline()\n\n #Results Table\n for i in range(0, 
count):\n line = fstream.readline().split()\n\n case[\"position\"][\"lon\"].append((float)(line[1]))\n case[\"position\"][\"lat\"].append((float)(line[2]))\n case[\"position\"][\"distance_from_pt1_km\"].append((float)(line[6]))\n case[\"position\"][\"clockwise_direction_from_north\"].append((int)(line[7]))\n case[\"position\"][\"terrain_elevation\"].append((int)(line[8]))\n\n case[\"image_location\"][\"block\"].append((int)(line[3]))\n case[\"image_location\"][\"sample\"].append((int)(line[4]))\n case[\"image_location\"][\"line\"].append((int)(line[5]))\n\n case[\"feature_height_m\"][\"zero_wind\"].append(checkFill((int)(line[9]), case[\"feature_height_m\"][\"fill\"]))\n case[\"feature_height_m\"][\"wind_corrected\"].append(checkFill((int)(line[10]), case[\"feature_height_m\"][\"fill\"]))\n case[\"feature_height_m\"][\"wind_filtered\"].append(checkFill((int)(line[11]), case[\"feature_height_m\"][\"fill\"]))\n\n case[\"wind_speed\"][\"across_track\"].append(checkFill((float)(line[12]), case[\"wind_speed\"][\"fill\"]))\n case[\"wind_speed\"][\"along_track\"].append(checkFill((float)(line[13]), case[\"wind_speed\"][\"fill\"]))\n case[\"wind_speed\"][\"total\"].append(checkFill((float)(line[14]), case[\"wind_speed\"][\"fill\"]))\n\n case[\"optical_depth\"][\"blue\"].append(checkFill((float)(line[15]), case[\"optical_depth\"][\"fill\"]))\n case[\"optical_depth\"][\"green\"].append(checkFill((float)(line[16]), case[\"optical_depth\"][\"fill\"]))\n case[\"optical_depth\"][\"red\"].append(checkFill((float)(line[17]), case[\"optical_depth\"][\"fill\"]))\n case[\"optical_depth\"][\"nir\"].append(checkFill((float)(line[18]), case[\"optical_depth\"][\"fill\"]))\n\n case[\"single_scattering_albedo\"][\"blue\"].append(checkFill((float)(line[19]), case[\"single_scattering_albedo\"][\"fill\"]))\n case[\"single_scattering_albedo\"][\"green\"].append(checkFill((float)(line[20]), case[\"single_scattering_albedo\"][\"fill\"]))\n case[\"single_scattering_albedo\"][\"red\"].append(checkFill((float)(line[21]), case[\"single_scattering_albedo\"][\"fill\"]))\n case[\"single_scattering_albedo\"][\"nir\"].append(checkFill((float)(line[22]), case[\"single_scattering_albedo\"][\"fill\"]))\n \n case[\"tau_frac\"][\"small_part\"].append(checkFill((float)(line[23]), case[\"tau_frac\"][\"fill\"]))\n case[\"tau_frac\"][\"med_part\"].append(checkFill((float)(line[24]), case[\"tau_frac\"][\"fill\"]))\n case[\"tau_frac\"][\"large_part\"].append(checkFill((float)(line[25]), case[\"tau_frac\"][\"fill\"]))\n case[\"tau_frac\"][\"sphere_part\"].append(checkFill((float)(line[26]), case[\"tau_frac\"][\"fill\"]))\n\n case[\"angstrom\"][\"data\"].append(checkFill((float)(line[27]), case[\"angstrom\"][\"fill\"]))\n\n case[\"power_mw\"][\"data\"].append(checkFill((float)(line[28]), case[\"power_mw\"][\"fill\"]))\n\n #case[\"reflectance\"][\"data\"].append(checkFill((float)(line[29]), case[\"reflectance\"][\"fill\"]))\n \n #case[\"brightness_temps_k\"][\"21\"].append(checkFill((float)(line[33])))\n #case[\"brightness_temps_k\"][\"31\"].append(checkFill((float)(line[34])))\n #case[\"brightness_temps_k\"][\"21BB\"].append(checkFill((float)(line[35])))\n #case[\"brightness_temps_k\"][\"31BB\"].append(checkFill((float)(line[36])))\n\n\n\n\n outArr.append(case)\n\n return outArr\n\ndef parsePlumeTextDemo(fpaths):\n\n outArr = []\n\n for f in fpaths:\n if os.path.isfile(f):\n\n case = {}\n uid = 0\n\n with open(f, 'r') as fstream:\n for i in range(0, 3):\n fstream.readline()\n\n case[\"id\"] = uid\n uid += 1\n\n case[\"date\"] = 
((fstream.readline().split())[3])\n case[\"time_UTC\"] = ((fstream.readline().split())[3])\n\n #MISR file info, blank lines, etc.\n for i in range(0, 17):\n fstream.readline()\n\n count = (int)((fstream.readline().split())[5])\n\n for i in range(0, 11):\n fstream.readline()\n\n\n case[\"lat\"] = []\n case[\"lon\"] = []\n case[\"terrain_elevation\"] = []\n case[\"plume_height\"] = []\n case[\"aerosol_optical_depth_green\"] = []\n case[\"power_mw\"] = []\n case[\"wind_azimuth\"] = []\n case[\"wind_speed_x\"] = []\n case[\"wind_speed_y\"] = []\n\n for i in range(0, count):\n line = fstream.readline().split()\n\n case[\"lon\"].append((float)(line[1]))\n case[\"lat\"].append((float)(line[2]))\n \n case[\"wind_azimuth\"].append((int)(line[7]))\n\n case[\"terrain_elevation\"].append((int)(line[8]))\n #TODO checkFIll\n if line[10] == \"-99\":\n if line[9] == \"-99\":\n case[\"plume_height\"].append(FILL_VALUE)\n else:\n case[\"plume_height\"].append((int)(line[9]))\n else:\n case[\"plume_height\"].append((int)(line[10]))\n\n if line[11] == \"-99.9\":\n case[\"wind_speed_x\"].append(FILL_VALUE)\n else:\n case[\"wind_speed_x\"].append(abs((float)(line[11])))\n\n if line[12] == \"-99.9\":\n case[\"wind_speed_y\"].append(FILL_VALUE)\n else:\n case[\"wind_speed_y\"].append((float)(line[12]))\n\n if line[19] == \"-9.999\":\n case[\"aerosol_optical_depth_green\"].append(FILL_VALUE)\n else:\n case[\"aerosol_optical_depth_green\"].append((float)(line[19]))\n\n if line[31] == \"-99.9\":\n case[\"power_mw\"].append(FILL_VALUE)\n else:\n case[\"power_mw\"].append((float)(line[31]))\n\n\n outArr.append(case)\n return outArr\n\n\n\ndef findFiles(dr):\n\n ret = []\n\n if os.path.isdir(dr):\n for root, dirs, files in os.walk(dr):\n for fle in files:\n \t# if re.match(PLUME_RE, fle):\n \tret.append(os.path.join(root, fle))\n \tprint(fle)\n return ret\n\n\n#TODO fix up\ndef formatForML(case):\n out = {\"lat\":[], \"lon\":[], \"features\":[]}\n out[\"lat\"] = np.array(case[\"position\"][\"lat\"])\n out[\"lon\"] = np.array(case[\"position\"][\"lon\"])\n\n outFeat = []\n for i in range(0, len(case[\"feature_height_m\"][\"wind_corrected\"])):\n dataPt = []\n if case[\"feature_height_m\"][\"wind_corrected\"][i] == FILL_VALUE:\n dataPt.append(case[\"feature_height_m\"][\"zero_wind\"][i])\n else:\n dataPt.append(case[\"feature_height_m\"][\"wind_corrected\"][i])\n\n #dataPt.append(case[\"position\"][\"distance_from_pt1_km\"][i])\n #dataPt.append(case[\"position\"][\"clockwise_direction_from_north\"][i])\n dataPt.append(case[\"position\"][\"terrain_elevation\"][i])\n \n #dataPt.append(case[\"wind_speed\"][\"across_track\"][i])\n #dataPt.append(case[\"wind_speed\"][\"along_track\"][i])\n #dataPt.append(case[\"albedo\"][\"red\"][i])\n #dataPt.append(case[\"albedo\"][\"green\"][i])\n #dataPt.append(case[\"albedo\"][\"blue\"][i])\n #dataPt.append(case[\"albedo\"][\"nir\"][i])\n #dataPt.append(case[\"albedo\"][\"top_of_atmosphere\"][i])\n #dataPt.append(case[\"single_scattering_albedo\"][\"red\"][i])\n #dataPt.append(case[\"single_scattering_albedo\"][\"green\"][i])\n #dataPt.append(case[\"single_scattering_albedo\"][\"blue\"][i])\n #dataPt.append(case[\"single_scattering_albedo\"][\"nir\"][i])\n #dataPt.append(case[\"optical_depth\"][\"red\"][i])\n dataPt.append(case[\"optical_depth\"][\"green\"][i])\n #dataPt.append(case[\"optical_depth\"][\"blue\"][i])\n #dataPt.append(case[\"optical_depth\"][\"nir\"][i])\n #dataPt.append(case[\"tau_frac\"][\"small_part\"][i])\n #dataPt.append(case[\"tau_frac\"][\"med_part\"][i])\n 
#dataPt.append(case[\"tau_frac\"][\"large_part\"][i])\n #dataPt.append(case[\"tau_frac\"][\"sphere_part\"][i])\n dataPt.append(case[\"power_mw\"][\"data\"][i])\n #dataPt.append(case[\"reflectance\"][\"data\"][i])\n #dataPt.append(case[\"angstrom\"][\"data\"][i])\n #dataPt.append(case[\"brightness_temps_k\"][\"21\"][i])\n #dataPt.append(case[\"brightness_temps_k\"][\"31\"][i])\n #dataPt.append(case[\"brightness_temps_k\"][\"21BB\"][i])\n #dataPt.append(case[\"brightness_temps_k\"][\"31BB\"][i])\n\n\n outFeat.append(dataPt)\n print(len(outFeat), len(outFeat[0]))\n out[\"features\"] = np.array(outFeat)\n print(out[\"features\"].shape, out[\"lat\"].shape, out[\"lon\"].shape)\n\n return out\n\n\ndef parsePlumeFiles(files, outFile, full = True):\n dct = {}\n if full:\n dct = parsePlumeTxtFull(files)\n else:\n dct = parsePlumeTextDemo(files)\n\n output = json.dumps(dct)\n\n with open(outFile, 'w') as jFile:\n jFile.write(output)\n\n return dct\n\n\ndef parsePlumeDir(dr, outFile, full = True):\n files = findFiles(dr)\n print(files)\n dct = parsePlumeFiles(files, outFile, full)\n return dct\n\ndef checkFill(value, fill):\n if value == fill and value in FILLS:\n return FILL_VALUE\n return value\n\n\nif __name__ == '__main__':\n\tPATH = \"../data/plume_dataset/\"\n\t\n\t# PATH = '../data/'\n\tplumeDir = PATH + sys.argv[1]\n\toutFile = PATH + sys.argv[2]\n\tparseFullStr = sys.argv[3]\n\tparseFull = False\n\n\tprint(plumeDir)\n\tprint(outFile)\n\tprint(parseFullStr)\n\n\tif parseFullStr.lower() == 'true':\n \t\tparseFull = True\n\tparsePlumeDir(plumeDir, outFile, parseFull)\n\n#parsePlumeDir(\"/Users/nlahaye/Desktop/VR/data/California2008/SoCalCase\", \"/Users/nlahaye/Desktop/VR/Plume_VR_Demo_Southern_Ca_2008.json\", False)\n\n\n# TO RUN: python read_minx.py test plume_test.json true\n\n\n","repo_name":"vsean103/wildfire","sub_path":"code/read_minx.py","file_name":"read_minx.py","file_ext":"py","file_size_in_byte":17270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31662945284","text":"import logging\nimport os\n\nfrom suomi_validator.protobuf import validator_pb2\nfrom suomi_validator.state.merkle import INIT_ROOT_KEY\n\nfrom suomi_validator.execution import tp_state_handlers\nfrom suomi_validator.execution import processor_handlers\n\nfrom suomi_validator.concurrent.threadpool import \\\n InstrumentedThreadPoolExecutor\nfrom suomi_validator.execution.context_manager import ContextManager\n\nfrom suomi_validator.database.dict_database import DictDatabase\nfrom suomi_validator.database.indexed_database import IndexedDatabase\nfrom suomi_validator.database.lmdb_nolock_database import LMDBNoLockDatabase\n\nfrom suomi_validator.journal.block_store import BlockStore\nfrom suomi_validator.networking.dispatch import Dispatcher\nfrom suomi_validator.execution.executor import TransactionExecutor\nfrom suomi_validator.state.settings_view import SettingsViewFactory\nfrom suomi_validator.state.state_view import StateViewFactory\nfrom suomi_validator.networking.interconnect import Interconnect\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass InvalidChainError(Exception):\n pass\n\n\nclass ExecutionError(Exception):\n pass\n\n\ndef verify_state(bind_network, bind_component, scheduler_type, data_dir=None):\n \"\"\"\n Verify the state root hash of all blocks is in state and if not,\n reconstruct the missing state. 
Assumes that there are no \"holes\" in\n state, ie starting from genesis, state is present for all blocks up to some\n point and then not at all. If persist is False, this recomputes state in\n memory for all blocks in the blockstore and verifies the state root\n hashes.\n\n Raises:\n InvalidChainError: The chain in the blockstore is not valid.\n ExecutionError: An unrecoverable error was encountered during batch\n execution.\n \"\"\"\n\n # Get the global state database to operate on\n if data_dir is not None:\n global_state_db_filename = os.path.join(\n data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))\n LOGGER.debug(\n 'verifying state in %s', global_state_db_filename)\n global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')\n else:\n global_state_db = DictDatabase()\n\n state_view_factory = StateViewFactory(global_state_db)\n\n # Get the blockstore\n block_db_filename = os.path.join(\n data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))\n LOGGER.debug('block store file is %s', block_db_filename)\n block_db = IndexedDatabase(\n block_db_filename,\n BlockStore.serialize_block,\n BlockStore.deserialize_block,\n flag='c',\n indexes=BlockStore.create_index_configuration())\n blockstore = BlockStore(block_db)\n\n # Check if we should do state verification\n start_block, prev_state_root = search_for_present_state_root(\n blockstore, state_view_factory)\n\n if start_block is None:\n LOGGER.info(\n \"Skipping state verification: chain head's state root is present\")\n return\n\n LOGGER.info(\n \"Recomputing missing state from block %s with %s scheduler\",\n start_block, scheduler_type)\n\n component_thread_pool = InstrumentedThreadPoolExecutor(\n max_workers=10,\n name='Component')\n\n component_dispatcher = Dispatcher()\n component_service = Interconnect(\n bind_component,\n component_dispatcher,\n secured=False,\n heartbeat=False,\n max_incoming_connections=20,\n monitor=True,\n max_future_callback_workers=10)\n\n context_manager = ContextManager(global_state_db)\n\n transaction_executor = TransactionExecutor(\n service=component_service,\n context_manager=context_manager,\n settings_view_factory=SettingsViewFactory(state_view_factory),\n scheduler_type=scheduler_type,\n invalid_observers=[])\n\n component_service.set_check_connections(\n transaction_executor.check_connections)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST,\n tp_state_handlers.TpReceiptAddDataHandler(context_manager),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_EVENT_ADD_REQUEST,\n tp_state_handlers.TpEventAddHandler(context_manager),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_STATE_DELETE_REQUEST,\n tp_state_handlers.TpStateDeleteHandler(context_manager),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_STATE_GET_REQUEST,\n tp_state_handlers.TpStateGetHandler(context_manager),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_STATE_SET_REQUEST,\n tp_state_handlers.TpStateSetHandler(context_manager),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_REGISTER_REQUEST,\n processor_handlers.ProcessorRegisterHandler(\n transaction_executor.processors),\n component_thread_pool)\n\n component_dispatcher.add_handler(\n validator_pb2.Message.TP_UNREGISTER_REQUEST,\n processor_handlers.ProcessorUnRegisterHandler(\n transaction_executor.processors),\n 
component_thread_pool)\n\n component_dispatcher.start()\n component_service.start()\n\n process_blocks(\n initial_state_root=prev_state_root,\n blocks=blockstore.get_block_iter(\n start_block=start_block, reverse=False),\n transaction_executor=transaction_executor,\n context_manager=context_manager,\n state_view_factory=state_view_factory)\n\n component_dispatcher.stop()\n component_service.stop()\n component_thread_pool.shutdown(wait=True)\n transaction_executor.stop()\n context_manager.stop()\n\n\ndef search_for_present_state_root(blockstore, state_view_factory):\n \"\"\"\n Search through the blockstore and return a tuple containing:\n - the first block with a missing state root\n - the state root of that blocks predecessor\n \"\"\"\n # If there is no chain to process, then we are done.\n block = blockstore.chain_head\n if block is None:\n return None, None\n\n # Check the head first\n if state_db_has_root(state_view_factory, block.state_root_hash):\n return None, None\n\n prev_state_root = INIT_ROOT_KEY\n for block in blockstore.get_block_iter(reverse=False):\n if not state_db_has_root(state_view_factory, block.state_root_hash):\n return block, prev_state_root\n prev_state_root = block.state_root_hash\n\n # This should never happen, since we already checked that the chain head\n # didn't have a state root\n raise ExecutionError(\n \"Chain head state both missing but all blocks had state root present\")\n\n\ndef state_db_has_root(state_view_factory, root):\n try:\n state_view_factory.create_view(root)\n except KeyError:\n return False\n return True\n\n\ndef process_blocks(\n initial_state_root,\n blocks,\n transaction_executor,\n context_manager,\n state_view_factory,\n):\n prev_state_root = initial_state_root\n for block in blocks:\n LOGGER.info(\"Verifying state for block %s\", block)\n try:\n # If we can create the view, all is good, move on to next block\n state_view_factory.create_view(block.state_root_hash)\n\n except KeyError:\n # If creating the view fails, the root is missing so we should\n # recompute it and verify it\n new_root = execute_batches(\n previous_state_root=prev_state_root,\n transaction_executor=transaction_executor,\n context_manager=context_manager,\n batches=block.batches)\n\n if new_root != block.state_root_hash:\n raise InvalidChainError(\n \"Computed state root {} does not match state root in block\"\n \" {}\".format(new_root, block.state_root_hash))\n\n prev_state_root = block.state_root_hash\n\n\ndef execute_batches(\n previous_state_root,\n transaction_executor,\n context_manager,\n batches\n):\n scheduler = transaction_executor.create_scheduler(\n context_manager.get_squash_handler(),\n previous_state_root,\n always_persist=True)\n\n transaction_executor.execute(scheduler)\n\n for batch in batches:\n scheduler.add_batch(batch)\n\n scheduler.finalize()\n scheduler.complete(block=True)\n\n state_root = None\n for batch in batches:\n batch_id = batch.header_signature\n result = scheduler.get_batch_execution_result(batch_id)\n if result is None:\n raise ExecutionError(\n \"Batch {} did not execute\".format(batch_id))\n\n if not result.is_valid:\n raise ExecutionError(\n \"Batch {} was invalid\".format(batch_id))\n\n if result.state_hash is not None:\n if state_root is not None:\n raise ExecutionError(\n \"More than one batch had state root; First state root was\"\n \" {}, second state root was from batch {} with state root\"\n \" {}\".format(state_root, batch_id, result.state_hash))\n\n state_root = result.state_hash\n\n if state_root is None:\n raise 
ExecutionError(\"No state root found in execution results\")\n\n return state_root\n","repo_name":"suomichain/suomi-core","sub_path":"validator/suomi_validator/server/state_verifier.py","file_name":"state_verifier.py","file_ext":"py","file_size_in_byte":9423,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"8275738201","text":"# import math\n# arr = [4,30,34,5,9]\n# result_arr = [arr[0]]\n# j = 0\n# i =0\n# # print(str(arr[1])[0] 0):\n refElement = nums[0]\n i = 1\n while(i=len(nums)):\n break\n\n if(i A single entry list with mouse coordinates [x,y]\n# 'Cats' -> An single entry list with cat coordinates [x,y]\n# 'Cheese' -> An single entry list with cheese locations [x,y]\n# 'msx','msy'-> Maximum size of the maze along x and y respectively,\n# this is set by QLearn_core_GL. DO NOT \n# CHANGE. You only need this for indexing into A[][]\n# 'A[][]' -> An adjacency list encoding the maze connectivity.\n# The size of A[][] is (msx*msy) x 4, that is, it\n# contains one row for each location in the maze,\n# and for each row, it specifies 4 possible edges\n# to the top, right, bottom, and left neighbours\n# respectively.\n# Example: Say your mouse is at location [2,3], and\n# you want to check where it can move to.\n# The data for this location is stored at\n# A[2+(3*QLearn_global_data.msx)][0] : Link to grid location [2,2]\n# A[2+(3*QLearn_global_data.msx)][1] : Link to grid location [3,3]\n# A[2+(3*QLearn_global_data.msx)][2] : Link to grid location [2,4]\n# A[2+(3*QLearn_global_data.msx)][3] : Link to grid location [1,3]\n# If the link is 1, the locations are connected,\n# if the link is 0 there is a wall in between.\n#\n# 'Qtable[][]'\tAn array of size [(msx*msy)^3][4] with one row per state, and\n#\t\t4 columns corresponding to the 4 possible mouse moves.\n# 'Qweights[]' A list where you will store feature weights for feature-based Q Learning\n# 'alpha'\tThe Q-learning learning rate\n# 'lamb' \tThe Q-learning discount constant for future rewards\n# mapCr, mapCg, mapCb - Arrays of size [msx]x[msy] where you can store any data you want\n# displayed on the maze squares as a colour (r,g,b)!. Data is \n# autoscaled, just make sure you understand what you are doing.\n#\t\t\t * This only has an effect for feature based Q Learning\n#\n# YOU ARE NOT ALLOWED TO MODIFY ANY GLOBAL DATA EXCEPT FOR 'Qtable', 'Qweights',\n# 'alpha' (though I would leave it alone), and the colour arrays mapCx.\n#\n# This is the only place where you need to update anything at all.\n#\n\nfrom math import *\nfrom numpy import *\nimport random\n\n# Import global data\nimport QLearn_global_data as gdata\n\n#compute the manhattan distance\ndef mdist(a, b):\n return float(abs(b[0] - a[0]) + abs(b[1] - a[1]))\n\n# Hack to keep around some static data\n\nclass MyStatic(object):\n # You can add to this object any data you need to keep around for feature based Q Learning.\n # You can access and modify this data by, for example, doing\n # print MyStatic.init\n #\n # or\n #\n # MyStatic.init=10\n #\n # Do not add data just because you can! you will have to justify what you are using here\n init=-1\n\n# Function definitions\ndef QLearn(s,a,r,s_new):\n ####################################################################\n # This function implements the Q-learning update rule. It updates an\n # entry in the Qtable given the experience tuple \n # as discussed in lecture. 
\n    #\n    # Return values: NONE\n    ####################################################################\n\n    gdata.Qtable[s][a] += gdata.alpha * (\n        r + (gdata.lamb * max(gdata.Qtable[s_new])) - gdata.Qtable[s][a])\n\ndef reward():\n    ####################################################################\n    # This function computes and returns a reward value for the\n    # CURRENT GAME CONFIGURATION (i.e. for the current Mouse, Cat,\n    # and Cheese location). It is called by the code in QLearn_core_GL\n    # during training to determine the reward associated with a given\n    # state.\n    #\n    # The function can be as simple or as complicated as you like,\n    # but it should give positive rewards for configurations that\n    # are favorable to the mouse, and negative rewards for configurations\n    # that favour the cat.\n    #\n    # Be careful! the reward function will have a STRONG impact on the\n    # ability of your mouse to learn!\n    ####################################################################\n\n    mouse = gdata.Mouse[0]\n    cheese = [mdist(mouse, current) for current in gdata.Cheese]\n    cat = [mdist(mouse, current) for current in gdata.Cats]\n    if mouse in gdata.Cats:\n        return -10\n    if mouse in gdata.Cheese:\n        return 10\n    if min(cat) <= 1:  # was 'cat <= 1', which compared the whole list to an int; use the nearest cat\n        return -7\n    if sum(gdata.A[mouse[0] + (mouse[1] * gdata.msx)]) == 1:  # dead end: only one way out\n        return -4\n    return 0\n\ndef decideAction(s):\n    ####################################################################\n    # This function is called by QLearn_core_GL once the training is\n    # complete. It is used to play the actual game against the\n    # cat. The function should choose for the given input state 's'\n    # the optimal action that is *ALLOWABLE*.\n    #\n    # This means, the optimal action that does not involve crossing a\n    # wall. You must somehow use the Qtable and the A[][] adjacency\n    # list to determine where the mouse should go.\n    ####################################################################\n    \n    mouse = gdata.Mouse[0]\n    A_index = mouse[0] + (mouse[1] * gdata.msx)\n    adjacency = gdata.A[A_index]\n    possible = gdata.Qtable[s]\n    val = None\n    for direction in range(4):\n        if adjacency[direction]:\n            if val == None:\n                val = [direction]\n            elif possible[direction] > possible[val[0]]:\n                val = [direction]\n            elif possible[direction] == possible[val[0]]:\n                val.append(direction)\n    return val[0]\n\n############################################################################\n#\n# Here begin the functions for feature-based Q Learning. Do not work on\n# these until you have completed standard Q Learning above!\n#\n############################################################################\n\ndef evaluateFeatures(mousep, catp, cheesep):\n    ####################################################################\n    # This function is called by the QLearning core to evaluate features\n    # for a given game configuration. \n    #\n    # The function *MUST* return a list with a fixed number of features\n    # which will be used to learn the weights of the overall reward \n    # function as discussed in lecture. During play (once training is\n    # complete) these features will determine how the mouse plays.\n    #\n    # This function will receive a list with the mouse position,\n    # a list with cat position(s),\n    # a list with cheese position(s)\n    #\n    # Global variables Ncheese and Ncats tell you how many of each.\n    #\n    # You are free to add as many features as you want. But:\n    #\n    # - You *must* document here with comments, and also in your report,\n    #   what each feature does and how it is computed\n    #\n    # - Consider runtime. 
If a feature takes long to compute but\n    #   doesn't help the mouse get smarter, don't use it!\n    #\n    # - We will evaluate mouse smartness competitively (again!) so be\n    #   sure to spend some time coming up with clever features!\n    #\n    # - You're free to use search and path finding to help with your\n    #   feature selection. But everything you use must eventually\n    #   be turned into a single value within your features list.\n    #\n    # - For this function, you can use the maze adjacency matrix A. But you\n    #   are not allowed to change it. You are also not allowed to \n    #   change the positions of the agents.\n    #\n    # - The weights to be trained are stored in the global variable\n    #   QLearn_global_data.Qweights; which is a list of the same\n    #   length as your feature list.\n    #\n    # - Your features should be in the range [-1,1] to avoid issues\n    #   with weights becoming too large. Also, make sure your reward\n    #   function returns small numbers. If your weights are growing\n    #   like mad, try reducing the reward range, feature range, or\n    #   setting a smaller learning factor alpha.\n    #\n    ###################################################################\n\n\n    feature_list = []  # Add as many features as you need!\n\n    mouse = mousep[0]\n    # count the number of open directions out of this square\n    # (the adjacency row sums passable edges, so 1 means a dead end)\n    walls = sum(gdata.A[mouse[0] + (mouse[1] * gdata.msx)])\n    # Manhattan distance to each cheese\n    cheese = [mdist(mouse, current) for current in cheesep]\n    # Manhattan distance to each cat\n    cat = [mdist(mouse, current) for current in catp]\n    # check if the mouse is dead\n    if mouse in catp:\n        death = -1\n    else:\n        death = 1\n    # check if the mouse has managed to eat\n    if mouse in cheesep:\n        win = 1\n    else:\n        win = -1\n    # avoid dead ends and tight spots (few open directions)\n    if walls == 1:\n        corner = -1\n    elif walls == 2:\n        corner = 0\n    else:\n        corner = 1\n    max_dist = gdata.msx + gdata.msy\n\n    # Try to minimize the number of cheese left\n    feature_list.append(len(cheesep)/gdata.Ncheese)\n    cheese.sort()\n    # Try to get closer to the nearest cheese\n    feature_list.append((max_dist - cheese[0])/max_dist)\n\n    # Try to get further from cats\n    cat.sort()\n    feature_list.append(-1.0 * cat[0]/max_dist)\n    if len(cat) > 1:\n        feature_list.append(-1.0 * cat[1]/max_dist)\n    else:\n        feature_list.append(0)\n    feature_list.append(corner)\n    feature_list.append(death)\n    feature_list.append(win)\n    \n    return feature_list\n\ndef evaluateQsa(feature_list):\n    ####################################################################\n    # \n    # This function returns the current value for Q(s,a). The input,\n    # the feature_list, contains the features evaluated after the\n    # mouse has taken a specific action 'a' from state 's'. Note that\n    # this function doesn't care what 'a' or 's' are, it just returns\n    # the result of performing the linear combination of features\n    # given the current weights. \n    #\n    # Either the QLearning training code, or the code that decides on\n    # mouse actions must figure out how to compute the appropriate\n    # feature_list.\n    ####################################################################\n\n    total = 0\n    for i in range(len(feature_list)):\n        total += gdata.Qweights[i] * feature_list[i]\n\n    return total\n\ndef maxQsa_prime(mousep,catp,cheesep):\n    ####################################################################\n    #\n    # This function computes and returns the index and value of the\n    # best possible action starting at the configuration given by the\n    # input mousep, catp, and cheesep. \n    #\n    # In other words, it must try every possible action (that is allowed\n    # given the maze!) 
and return the action with highest value given\n    # the current weights for the QLearning expected reward function.\n    #\n    # The return value should be a 2-entry list with [idx,val]. idx\n    # must be in [0,3] where each value has the same meaning as in\n    # the 'decideAction' function above. 'val' is a real-valued \n    # expected reward given that the mouse takes action 'idx' from\n    # the current config.\n    ####################################################################\n\n    x, y = mousep[0]  # use the configuration passed in, not the global mouse position\n    A_index = x + (y * gdata.msx)\n    adjacency = gdata.A[A_index]\n    neighbors = [[x, y - 1], [x + 1, y], [x, y + 1], [x - 1, y]]\n    val = None\n    for direction in range(4):\n        if adjacency[direction]:\n            currentQ = evaluateQsa(evaluateFeatures([neighbors[direction]], catp, cheesep))\n            if val == None:\n                val = [direction, currentQ]\n            elif currentQ > val[1]:\n                val = [direction, currentQ]\n    return val\n\ndef QLearn_features(a,r):\n    ####################################################################\n    #\n    # This function carries out the QLearning update for the feature-\n    # based method. It evaluates the utility Q(s,a) given the specified\n    # action a (in [0,3]) and determines the weight updates for each\n    # of the weights associated to your features.\n    #\n    # The immediate reward obtained by the mouse is provided in 'r'\n    #\n    # Returns NONE\n    ####################################################################\n\n    # Make local copies of the current config. Do not mess with the globals!\n    mousep=list(gdata.Mouse)\n    catp=list(gdata.Cats)\n    cheesep=list(gdata.Cheese)\n\n    # First time through this code we need to initialize the weights\n    # Requires a working 'evaluateFeatures' function!\n    if (len(gdata.Qweights)==0):\n        dummy_features=evaluateFeatures(mousep,catp,cheesep)\n        for i in range(len(dummy_features)):\n            gdata.Qweights.append(0.0)\n\n    features = evaluateFeatures(mousep, catp, cheesep)\n    currentQ = evaluateQsa(features)\n    val = maxQsa_prime(mousep, catp, cheesep)\n    for i in range(len(features)):\n        gdata.Qweights[i] += gdata.alpha * (r + (gdata.lamb * val[1]) - currentQ) * features[i]\n\n    return\n\ndef decideAction_features(mousep, catp, cheesep):\n    ####################################################################\n    #\n    # This function is used to decide which action to take from the\n    # specified configuration given the current weights of the features\n    # in the QLearning method.\n    #\n    # Returns the index 'idx' of the optimal action. 
idx is in [0,3] as\n # described in 'decideAction' above\n ####################################################################\n\n return maxQsa_prime(mousep, catp, cheesep)[0]\n","repo_name":"wmak/Notes","sub_path":"CSC/D84/Assignments/a4/QLearn.py","file_name":"QLearn.py","file_ext":"py","file_size_in_byte":14523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40483011496","text":"\"\"\"\n\nGiven a sorted array and a target value, return the index if the target is found.\nIf not, return the index where it would be if it were inserted in order.\n\nYou may assume no duplicates in the array.\n\nExample 1:\nInput: [1,3,5,6], 5\nOutput: 2\n\nExample 2:\nInput: [1,3,5,6], 2\nOutput: 1\n\nExample 3:\nInput: [1,3,5,6], 7\nOutput: 4\n\nExample 4:\nInput: [1,3,5,6], 0\nOutput: 0\n\n\"\"\"\n\nnum = [] #Empty List\npos = 0\n\nnum = list(map(int,input().split()))\ntarget = int(input())\npos = 0\n\nlb = 0\nub = len(num)-1\nmid = (lb+ub)//2\n\nwhile(lb<=ub): #Binary Search\n if (num[mid] == target):\n pos = mid\n break;\n \n elif (target < num[mid]):\n ub = mid-1\n \n elif (target>num[mid]):\n lb = mid+1\n \n mid = (lb+ub)//2\n\n if(lb>ub): #If element not found\n pos = lb\n\nprint (pos)\n","repo_name":"mehaktawakley/Python-Competitive-Programming","sub_path":"SearchInsertPosition.py","file_name":"SearchInsertPosition.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"14739515060","text":"import time\n\nimport trimesh\nimport pyrr\nimport pyrender\nfrom pyrender import PerspectiveCamera, \\\n DirectionalLight, SpotLight, PointLight, \\\n MetallicRoughnessMaterial, \\\n Primitive, Mesh, Node, Scene, \\\n OffscreenRenderer, Viewer\n\nimport numpy as np\nimport cv2\n\nfrom visualization.parametric.shapes import parametric_surface\n\nif __name__ == '__main__':\n R = 10\n r = 3\n delta_r = 0.1\n off_screen_renderer = OffscreenRenderer(viewport_width=640 * 2, viewport_height=480 * 2)\n\n t = 0\n frame = 0\n start_time = time.time()\n direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)\n spot_l = SpotLight(color=np.ones(3), intensity=10.0,\n innerConeAngle=np.pi / 16, outerConeAngle=np.pi / 6)\n point_l = PointLight(color=np.ones(3), intensity=10.0)\n\n cam = PerspectiveCamera(yfov=(np.pi / 3.0))\n cam_pose = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 30.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n delta_r = 0.1\n while True:\n r += delta_r\n frame += 1\n t += 50\n if t % 1000 == 0:\n delta_r *= -1\n\n s = parametric_surface.doughnut(R, r, [50, 20])\n\n doughnut_trimesh = trimesh.Trimesh(vertices=s.flat_vertices, faces=s.flat_triangular_mesh_indices, )\n # for facet in doughnut_trimesh.facets:\n # doughnut_trimesh.visual.face_colors[facet] = trimesh.visual.random_color()\n mesh = pyrender.Mesh.from_trimesh(doughnut_trimesh, smooth=False)\n mesh_node = Node(mesh=mesh, translation=np.array([0.0, 0.0, 0.0]))\n\n scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]), bg_color=[0.0, 0.0, 0.0])\n cam_node = scene.add(cam, pose=cam_pose)\n scene.add_node(mesh_node)\n # v = Viewer(scene)\n color, depth = off_screen_renderer.render(scene)\n cv2.imshow('f', color)\n cv2.waitKey(1)\n end_time = time.time()\n print(frame / (end_time - start_time))\n\n\n 
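# As written, the render loop above never breaks, so this cleanup only runs if the\n    # loop is interrupted or modified; delete() releases the offscreen renderer's GPU context.\n    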
off_screen_renderer.delete()\n","repo_name":"pajouheshgar/Music-Visualization","sub_path":"visualization/differentiable/realtime.py","file_name":"realtime.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} {"seq_id":"19915900816","text":"import json\nimport os\nimport nltk\nimport math\n\"\"\"\nCreated an n-gram dictionary beforehand, this time sorted on ngram size, with their frequencies.\n\"\"\"\n\n\ndef c_value(ct_dict):\n    ngram_dict = dict(sorted(ct_dict.items(), key=lambda item: len(item[0].split()), reverse=True))\n\n    c_value_count_dict = dict()\n    for candidate in ngram_dict:\n        candidate_split = candidate.split()\n        length = len(candidate_split)\n        # consider all strict sub-ngrams (was range(1, length - 1), which skipped the longest children)\n        for i in reversed(range(1, length)):\n            children_ngrams = list(nltk.ngrams(candidate_split, i))\n            for child_tuple in children_ngrams:\n                # ngram_dict is keyed by strings, so join the tuple before the lookup\n                child_ngram = ' '.join(child_tuple)\n                if child_ngram in ngram_dict:\n                    if child_ngram in c_value_count_dict:\n                        c_value_count_dict[child_ngram]['nested_count'] += 1\n                        c_value_count_dict[child_ngram]['parent_count'] += ngram_dict[candidate]\n                    else:\n                        c_value_count_dict[child_ngram] = dict()\n                        c_value_count_dict[child_ngram]['nested_count'] = 1\n                        c_value_count_dict[child_ngram]['parent_count'] = ngram_dict[candidate]\n                        c_value_count_dict[child_ngram]['total_count'] = ngram_dict[child_ngram]\n\n    c_value_dict = {}\n\n    for candidate in ngram_dict:\n        # if nested: C-value = log2(0.1 + |a|) * (f(a) - (1/nested_count) * parent frequencies),\n        # with the term length |a| measured in words\n        if candidate in c_value_count_dict:\n            c_value_dict[candidate] = (math.log(0.1 + len(candidate.split()), 2)) * (c_value_count_dict[candidate]['total_count'] - ((1/c_value_count_dict[candidate]['nested_count']) * c_value_count_dict[candidate]['parent_count']))\n\n        else:\n            # if not nested\n            c_value_dict[candidate] = (math.log(0.1 + len(candidate.split()), 2)) * ngram_dict[candidate]\n\n    return c_value_dict\n\n\nct_file_path = \"/home/gillesfloreal/PycharmProjects/ASTRA/data/training.json\"\nwith open(ct_file_path, 'r', encoding='utf8') as source:\n    ct_list = json.load(source)\n\nc_value_list = []\nfor ct_dict in ct_list:\n    c_value_list.append(c_value(ct_dict))\n\ntarget_path = \"/home/gillesfloreal/PycharmProjects/ASTRA/data/statistical_scores/c_values/c_values_list.json\"\nwith open(target_path, 'w') as target:\n    json.dump(c_value_list, target)\n\n","repo_name":"GillesFloreal/ASTRA","sub_path":"c-value.py","file_name":"c-value.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"43974617980","text":"#Advent of Code\n#Day #1\n#Problem 1\n\nnum_file = open(\"numbers.txt\", \"r\")\n\nlines = num_file.readlines()\n\nfor i in range(0, len(lines)):\n    lines[i] = int(lines[i])  # int() tolerates the trailing newline, unlike the old [:-1] slice\n\nfor n1 in lines:\n    for n2 in lines:\n        for n3 in lines:\n            if (n1 != n2) and (n2 != n3) and (n1 != n3) and n1 + n2 + n3 == 2020:\n                print(n1*n2*n3)","repo_name":"SamuelJCopeland/Advent-of-Code","sub_path":"2020/D1P1.py","file_name":"D1P1.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"32503371292","text":"#!/usr/bin/env python\n\nimport rospy\n\nfrom dynamic_reconfigure.server import Server\nfrom arm_mobility.cfg import armGainConfig\nfrom std_msgs.msg import Float32MultiArray\n\noutMsg = Float32MultiArray()\ndef callback(config, level):\n    rospy.loginfo(\"\"\"Reconfigure Request: Kp {Kp} Ki {Ki} Kd {Kd} Kii {Kii} \"\"\".format(**config))\n    data = [config.Kp, config.Ki, config.Kd, config.Kii]\n    
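# The gains are published as a flat array; the subscriber on 'arm_conf_mssg' is\n    # assumed to unpack them in this same [Kp, Ki, Kd, Kii] order.\n    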
rospy.loginfo(\"Gains Changed\")\n    rospy.loginfo(data)\n    outMsg.data = data\n    pub.publish(outMsg)\n    return config\n\nif __name__ == \"__main__\":\n    rospy.init_node(\"arm_dyn_server\", anonymous = True)\n    pub = rospy.Publisher('arm_conf_mssg', Float32MultiArray, queue_size=10)\n    srv = Server(armGainConfig, callback)\n    rospy.spin()","repo_name":"amitbd1508/Arm-Subsystems","sub_path":"arm_mobility/nodes/arm_dynserver.py","file_name":"arm_dynserver.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"6105475786","text":"'''\nXiaokou has picked a breakfast stall at the autumn market. The 1-D integer array staple records the price of each staple food, and the 1-D integer array drinks records the price of each drink.\nXiaokou plans to choose one staple food and one drink, spending no more than x yuan. Return how many purchase plans Xiaokou has.\nNote: the answer must be taken modulo 1e9 + 7 (1000000007); e.g., if the raw result is 1000000008, return 1.\n'''\nfrom typing import List\nclass Solution:\n    def breakfastNumber(self, staple: List[int], drinks: List[int], x: int) -> int:\n        def ds(target, left, right) -> int:\n            # binary search: count of drinks priced <= target\n            while left < right:\n                mid = left + (right-left)//2\n                if drinks[mid] <= target:\n                    left = mid + 1\n                else:\n                    right = mid\n\n            return left\n\n        if not staple or not drinks: return 0\n        staple.sort()\n        drinks.sort()\n        # [1,5,8,9] 2\n\n        ret = 0\n        left = 0\n        right = len(drinks)\n        for i in range(len(staple)):\n            if staple[i] < x:\n                tmp = ds(x - staple[i], left, right)\n                ret += tmp\n                # staple is sorted, so once no drink fits, later staples cannot fit either\n                if not tmp:\n                    break\n        \n        return ret % (1000000007)","repo_name":"xiaoqi25478/Job","sub_path":"算法与数据结构/LeetCode/二分查找(BinarySearch)/LCP 18. 早餐组合.py","file_name":"LCP 18. 早餐组合.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} {"seq_id":"3734770794","text":"import tensorflow as tf\nfrom position import positional_encoding_1d, positional_encoding_2d, point_wise_feed_forward_network\n\nMAX_LENGTH = 50\nclass EncoderBlock(tf.keras.layers.Layer):\n    def __init__(self, emb_dim, num_heads, fc_dim,\n                 dropout_rate=0.1, layernorm_eps=1e-6):\n        super(EncoderBlock, self).__init__()\n\n        self.mha = tf.keras.layers.MultiHeadAttention(num_heads=num_heads,\n                                                      key_dim=emb_dim,\n                                                      dropout=dropout_rate)\n\n        self.ffn = point_wise_feed_forward_network(emb_dim=emb_dim,\n                                                   fc_dim=fc_dim)\n\n        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layernorm_eps)\n        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layernorm_eps)\n\n        self.dropout1 = tf.keras.layers.Dropout(dropout_rate)\n\n    def call(self, x, training, mask):\n        \"\"\"\n        `mask` is the padding mask\n        \"\"\"\n        attn_output = self.mha(query=x, key=x, value=x,\n                               training=training, attention_mask=mask)\n\n        # (batch_size, input_seq_len, fully_connected_dim)\n        out1 = self.layernorm1(attn_output + x, training=training)\n\n        # (batch_size, input_seq_len, fully_connected_dim)\n        ffn_output = self.ffn(out1, training=training)\n\n        ffn_output = self.dropout1(ffn_output, training=training)\n\n        # (batch_size, input_seq_len, fully_connected_dim)\n        encoder_layer_out = self.layernorm2(ffn_output + out1, training=training)\n\n        return encoder_layer_out\n    \nclass Encoder(tf.keras.layers.Layer):\n    def __init__(self,\n                 *,\n                 num_layers,\n                 emb_dim, # Input/output dimensionality (or Embedding dim).\n                 num_heads,\n                 fc_dim, # Inner-layer dimensionality (or FC dim).\n                 row_size, col_size, # Shape of grid features\n                 dropout_rate=0.1,\n                 layernorm_eps=1e-6):\n        super().__init__()\n\n        self.emb_dim = emb_dim\n        self.num_layers = num_layers\n\n        # Embeddings (it's just a Dense layer)\n        self.embedding = tf.keras.layers.Dense(emb_dim, activation='relu')\n        # Positional encoding 2D\n        
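# (assumed to return shape (1, row_size*col_size, emb_dim), so it can be sliced\n        # and added to the embedded, flattened grid features in call() below)\n        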
self.pos_encoding = positional_encoding_2d(row_size, col_size, emb_dim)\n\n # Encoder layers.\n self.enc_layers = [EncoderBlock(emb_dim=emb_dim,\n num_heads=num_heads,\n fc_dim=fc_dim,\n dropout_rate=dropout_rate,\n layernorm_eps=layernorm_eps)\n for _ in range(num_layers)]\n\n # Dropout.\n self.dropout = tf.keras.layers.Dropout(dropout_rate)\n\n def call(self, x, training, mask=None):\n seq_len = tf.shape(x)[1]\n\n # Sum up embeddings and positional encoding.\n x = self.embedding(x)\n x += self.pos_encoding[:, :seq_len, :]\n\n # Add dropout.\n x = self.dropout(x, training=training)\n\n # N encoder blocks.\n for i in range(self.num_layers):\n x = self.enc_layers[i](x, training, mask)\n\n return x # Shape `(batch_size, input_seq_len, emb_dim)\n\nclass DecoderBlock(tf.keras.layers.Layer):\n def __init__(self,\n *,\n emb_dim, # Input/output dimensionality (or Embedding dim).\n num_heads,\n fc_dim, # Inner-layer dimensionality (or FC dim).\n dropout_rate=0.1,\n layernorm_eps=1e-6):\n super().__init__()\n\n # Masked multi-head self-attention.\n self.mha_masked = tf.keras.layers.MultiHeadAttention(\n num_heads=num_heads,\n # Size of each attention head for query Q and key K.\n key_dim=emb_dim,\n dropout=dropout_rate\n )\n # Multi-head cross-attention.\n self.mha_cross = tf.keras.layers.MultiHeadAttention(\n num_heads=num_heads,\n # Size of each attention head for query Q and key K.\n key_dim=emb_dim,\n dropout=dropout_rate\n )\n\n # Point-wise feed-forward network.\n self.ffn = point_wise_feed_forward_network(emb_dim, fc_dim)\n\n # Layer normalization.\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layernorm_eps)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layernorm_eps)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=layernorm_eps)\n\n # Dropout for the point-wise feed-forward network.\n self.dropout1 = tf.keras.layers.Dropout(dropout_rate)\n\n def call(self, x, enc_output, training, look_ahead_mask=None, padding_mask=None):\n # The encoder output shape is `(batch_size, input_seq_len, emb_dim)`.\n\n attn_masked, attn_weights_masked = self.mha_masked(\n query=x,\n value=x,\n key=x,\n # A boolean mask that prevents attention to certain positions.\n attention_mask=look_ahead_mask,\n # Shape `(batch_size, target_seq_len, emb_dim)`.\n return_attention_scores=True,\n training=training\n )\n\n out1 = self.layernorm1(attn_masked + x)\n\n attn_cross, attn_weights_cross = self.mha_cross(\n query=out1,\n value=enc_output,\n key=enc_output,\n # A boolean mask that prevents attention to certain positions.\n attention_mask=padding_mask,\n # Shape `(batch_size, target_seq_len, emb_dim)`.\n return_attention_scores=True,\n training=training\n )\n\n out2 = self.layernorm2(attn_cross + out1)\n\n # Shape `(batch_size, target_seq_len, emb_dim)`.\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout1(ffn_output, training=training)\n \n # Shape `(batch_size, target_seq_len, emb_dim)`.\n out3 = self.layernorm3(ffn_output + out2)\n\n return out3, attn_weights_masked, attn_weights_cross\n\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self,\n *,\n num_layers,\n emb_dim, # Input/output dimensionality.\n num_heads,\n fc_dim, # Inner-layer dimensionality.\n target_vocab_size,\n dropout_rate=0.1,\n layernorm_eps=1e-6):\n super(Decoder, self).__init__()\n\n self.emb_dim = emb_dim\n self.num_layers = num_layers\n\n self.embedding = tf.keras.layers.Embedding(\n target_vocab_size,\n emb_dim,\n mask_zero=True\n )\n self.pos_encoding = positional_encoding_1d(MAX_LENGTH, 
emb_dim)\n\n        self.dec_layers = [\n            DecoderBlock(\n                emb_dim=emb_dim,\n                num_heads=num_heads,\n                fc_dim=fc_dim,\n                dropout_rate=dropout_rate,\n                layernorm_eps=layernorm_eps)\n            for _ in range(num_layers)\n        ]\n        self.dropout = tf.keras.layers.Dropout(dropout_rate)\n\n    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n        seq_len = tf.shape(x)[1]\n        attention_weights = {}\n\n        # Sum up embeddings and positional encoding.\n        # Shape: `(batch_size, target_seq_len, emb_dim)`.\n        x = self.embedding(x)\n        x *= tf.math.sqrt(tf.cast(self.emb_dim, tf.float32))\n        x += self.pos_encoding[:, :seq_len, :]\n\n        x = self.dropout(x, training=training)\n\n        for i in range(self.num_layers):\n            x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask)\n\n            attention_weights[f'decoder_layer{i+1}_block1'] = block1\n            attention_weights[f'decoder_layer{i+1}_block2'] = block2\n\n        # The shape of x is `(batch_size, target_seq_len, emb_dim)`.\n        return x, attention_weights\n\n","repo_name":"Sagar2k1/midterm_DeepLearning_C1900015_51900723_52100873","sub_path":"block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"29082288327","text":"money, round = map(int, input().split(\" \"))\n# money, round = map(int, \"100 4\".split(\" \"))\nfor i in range(round):\n    n1, b, t, n2 = map(int, input().split(\" \"))\n    # n1, b, t, n2 = map(int, \"8 0 100 2\".split(\" \"))\n    if t > money:\n        print(\"Not enough tokens. Total = \"+str(money)+\".\")\n        continue\n    if (n1 > n2 and b == 0) or (n1 < n2 and b == 1):\n        # won the round\n        money = money + t\n        print(\"Win \"+str(t)+\"! Total = \"+str(money)+\".\")\n    else:\n        # lost the round\n        money = money - t\n        print(\"Lose \"+str(t)+\". 
Total = \"+str(money)+\".\")\n if money == 0:\n print(\"Game Over.\")\n break\n\n","repo_name":"freesan44/LeetCode","sub_path":"PTA_BL_1071.py","file_name":"PTA_BL_1071.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27954559704","text":"import requests\nfrom bs4 import BeautifulSoup\n\nclass CarEdge:\n\n def __init__(self):\n page = requests.get(\"https://caredge.com/ranks/depreciation/popular/5-year/best\")\n self.soup = BeautifulSoup(page.content, \"html.parser\")\n rows = self.soup.find(\"table\").find_all(\"tr\")\n self.depValues = {}\n for row in rows:\n columns = row.find_all(\"td\")\n if len(columns) == 3:\n self.depValues[columns[1].find(\"a\").contents[0].lower()] = columns[2].contents[0]\n\n def findDep(self, make):\n if make in self.depValues:\n return self.depValues[make]\n else:\n return \"N/A\"\n","repo_name":"awimmel/car-comp","sub_path":"scraper/carEdge.py","file_name":"carEdge.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30835815943","text":"###################################\n# Julian Cabezas Pena\n# Introduction to Statistical Machine Learning\n# University of Adelaide\n# Assignment 2\n# Performance metrics: Accuracy\n####################################\n\n# Import standard libraries\nimport numpy as np # Numerical calculations\nimport pandas as pd # read csv dataset\n\n# Calculate the accuracy given a true and predicted y value\ndef accuracy(ytrue,ypred):\n\n # calculate the number of samples\n n_samples = len(ytrue)\n\n # Sum the number of correct labels\n ones = np.ones(n_samples)\n sum_correct = np.sum(ones[ytrue == ypred])\n \n # Calculate the accuracy\n acc = (sum_correct / n_samples) *1.0\n\n return acc\n\n","repo_name":"juliancabezas/AdaBoost-FromScratch","sub_path":"PerformanceMetrics.py","file_name":"PerformanceMetrics.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"40170503299","text":"\"\"\" Download and cash the Fremont data\n\nparameters\n-----------------\nfilename = string (optional)\n location to save the Data\nurl = string (optional)\n location of the data\nforced_download = Boolean (optional)\n if true, force redownload data\n\nReturns\n-------------\ndata: Pandas DataFrame\n The Fremont Bridge Dataset\n\"\"\"\n# Dataset: Seatle Fremont Bridge bike crossings from 2013 to 2019\nimport pandas as pd\nfrom urllib.request import urlretrieve\nimport os\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nFremont_url= 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'\n\n\ndef get_fremont_data(filename='Fremont.csv', url=Fremont_url,\n forced_download = False):\n\n if forced_download or not os.path.exists(filename):\n urlretrieve(url, 'Fremont.csv')\n data = pd.read_csv('Fremont.csv', index_col = 'Date')\n try:\n data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')\n except TypeError:\n data.index= pd.to_datetime(data.index)\n\n data.columns = ['West', 'East']\n data['Total'] = data['West'] + data['East']\n return data\n","repo_name":"ADAM-OB/Unsupervised_ML","sub_path":"workflow/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3221895317","text":"# ----- 
Algorithms and data structures -----\n\nimport graphics as gr\n\n# Linear search\ndef array_search(A:list, N:int, x:int):\n \"\"\" Searches for the number x in array A\n over indices 0 to N-1 inclusive\n Returns the index of element x in array A\n or -1 if there is no such element \"\"\"\n\n for k in range(N):\n if A[k] == x:\n return k\n return -1\n\ndef test_array_search():\n A1 = [1, 2, 3, 4, 5]\n m = array_search(A1, 5, 8)\n if m == -1:\n print(\"#test 1 - ok\")\n else:\n print(\"#test 1 - fail\")\n\n A2 = [-1, -2, -3, -4, -5]\n m = array_search(A2, 5, -3)\n if m == 2:\n print(\"#test 2 - ok\")\n else:\n print(\"#test 2 - fail\")\n\n A3 = [10, 20, 30, 10, 10]\n m = array_search(A3, 5, 10)\n if m == 0:\n print(\"#test 3 - ok\")\n else:\n print(\"#test 3 - fail\")\n\n# Array reversal\ndef invert_array(A:list, N:int):\n \"\"\" Reverses the array (back to front)\n within indices 0 to N-1 \"\"\"\n\n for k in range(N // 2):\n A[k], A[N - 1 - k] = A[N - 1 - k], A[k]\n\n\ndef test_invert_array():\n A1 = [1, 2, 3, 4, 5]\n invert_array(A1, 5)\n if A1 == [5, 4, 3, 2, 1]:\n print(\"#test 1 - ok\")\n else:\n print(\"#test 1 - fail\")\n\n A2 = [0, 0, 0, 0, 0, 0, 0, 10]\n invert_array(A2, 8)\n if A2 == [10, 0, 0, 0, 0, 0, 0, 0]:\n print(\"#test 2 - ok\")\n else:\n print(\"#test 2 - fail\")\n\n# Cyclic shifts (snippets: assume a list A of length N is already defined;\n# a self-contained function version is sketched after the sorting tests below)\n\n# to the left\ntmp = A[0]\nfor k in range(N - 1):\n A[k] = A[k + 1]\nA[N-1] = tmp\n\n# to the right\ntmp = A[N - 1]\nfor j in range(N - 2, -1, -1):\n A[j + 1] = A[j]\nA[0] = tmp\n\n# Ternary operator\np = 1 # there should be a loop over p here\nprint(p, '-', \"prime\" if A[p] else \"composite\")\n\n# Sorting\ndef insert_sort(A):\n \"\"\" insertion sort of list A \"\"\"\n N = len(A)\n for top in range(1, N):\n k = top\n while k > 0 and A[k-1] > A[k]:\n A[k], A[k-1] = A[k-1], A[k]\n k -= 1\n\n\ndef choise_sort(A):\n \"\"\" selection sort of list A \"\"\"\n N = len(A)\n for pos in range(0, N-1):\n for k in range(pos+1, N):\n if A[k] < A[pos]:\n A[k], A[pos] = A[pos], A[k]\n\n\ndef bubble_sort(A):\n \"\"\" bubble sort of list A \"\"\"\n N = len(A)\n for bypass in range(1, N):\n for k in range(0, N-bypass):\n if A[k] > A[k+1]:\n A[k], A[k+1] = A[k+1], A[k]\n\n\ndef test_sort(sort_algorithm):\n print(\"Testing: \", sort_algorithm.__doc__)\n print(\"testcase #1: \", end=\"\")\n A = [4, 2, 5, 1, 3]\n A_sorted = [1, 2, 3, 4, 5]\n sort_algorithm(A)\n print(\"Ok\" if A == A_sorted else \"Fail\")\n\n print(\"testcase #2: \", end=\"\")\n A = list(range(10, 20)) + list(range(0, 10))\n A_sorted = list(range(20))\n sort_algorithm(A)\n print(\"Ok\" if A == A_sorted else \"Fail\")\n\n print(\"testcase #3: \", end=\"\")\n A = [4, 2, 4, 2, 1]\n A_sorted = [1, 2, 2, 4, 4]\n sort_algorithm(A)\n print(\"Ok\" if A == A_sorted else \"Fail\")\n\n\n# if __name__ == \"__main__\":\n# test_sort(insert_sort)\n# test_sort(choise_sort)\n# test_sort(bubble_sort)\n
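\n# Editor's note: the cyclic-shift snippets earlier in these notes operate on a\n# pre-existing list A of length N. A self-contained sketch of the same idea is\n# added below; the names cyclic_shift_left/cyclic_shift_right are illustrative\n# and are not part of the original lesson notes.\ndef cyclic_shift_left(A):\n \"\"\" cyclic shift of list A by one position to the left \"\"\"\n tmp = A[0]\n for k in range(len(A) - 1):\n A[k] = A[k + 1]\n A[len(A) - 1] = tmp\n\n\ndef cyclic_shift_right(A):\n \"\"\" cyclic shift of list A by one position to the right \"\"\"\n tmp = A[len(A) - 1]\n for j in range(len(A) - 2, -1, -1):\n A[j + 1] = A[j]\n A[0] = tmp\n\n# Example: cyclic_shift_left turns [1, 2, 3] into [2, 3, 1];\n# cyclic_shift_right turns [1, 2, 3] into [3, 1, 2].\n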
\n# Counting sort\nN = None # placeholder from the original notes: N is the number of inputs\nF = [0]*10\nfor i in range(N):\n x = int(input())\n F[x] += 1\n# output the numbers in non-decreasing order (completes the algorithm)\nfor x in range(10):\n for _ in range(F[x]):\n print(x)\n\n# Recursion\nwindow = gr.GraphWin(\"Russian game\", 600, 600)\nalpha = 0.2\n\ndef fractal_rectangle(A, B, C, D, deep=10):\n if deep < 1:\n return\n for M, N in (A, B), (B, C), (C, D), (D, A):\n gr.Line(gr.Point(*M), gr.Point(*N)).draw(window)\n A1 = (A[0]*(1-alpha) + B[0]*alpha, A[1]*(1-alpha) + B[1]*alpha)\n B1 = (B[0]*(1-alpha) + C[0]*alpha, B[1]*(1-alpha) + C[1]*alpha)\n C1 = (C[0]*(1-alpha) + D[0]*alpha, C[1]*(1-alpha) + D[1]*alpha)\n D1 = (D[0]*(1-alpha) + A[0]*alpha, D[1]*(1-alpha) + A[1]*alpha)\n fractal_rectangle(A1, B1, C1, D1, deep-1)\n\n\nfractal_rectangle((100, 100),(500, 100), (500, 500), (100, 500))\n\n# Factorial\ndef f(n:int):\n assert n >= 0, \"Factorial is not defined for negative numbers\"\n if n == 0:\n return 1\n return f(n-1) * n\n\n\n# Euclid's algorithm\n# -1-\ndef gcd1(a, b):\n if a == b:\n return a\n elif a > b:\n return gcd1(a-b, b)\n else: # a < b\n return gcd1(a, b-a)\n\n# -2-\ndef gcd2(a, b):\n if b == 0:\n return a\n else:\n return gcd2(b, a%b)\n\n# -3-\ndef gcd3(a, b):\n return a if b == 0 else gcd3(b, a%b)\n\n# Fast exponentiation\ndef pow(a:float, n:int):\n if n == 0:\n return 1\n elif n % 2 == 1:\n return pow(a, n-1) * a\n else:\n return pow(a**2, n//2)\n\n# Towers of Hanoi\n","repo_name":"RyuZacki/PythonStudent","sub_path":"Kodik/y4eb/AlgoVidos.py","file_name":"AlgoVidos.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"28380160813","text":"# |-|-| lytic bot |-|-|\n# -*- coding: utf-8 -*-\n\n# BlackSmith plugin\n# fomenko_plugin.py\n\n# Coded by: Evgеn [email: meb81@mail.ru]\n# http://witcher-team.ucoz.ru/\n\ndef handler_fomenko(type, source, body):\n\ttry:\n\t\tradky = read_link('http://www.fomenko.ru/foma/lenta/text.html').splitlines()\n\t\tif len(radky) >= 16:\n\t\t\tradky = radky[15].decode('windows-1251')\n\t\t\tif radky.count('') >= 1:\n\t\t\t\tradky = radky.split('')[1]\n\t\t\telse:\n\t\t\t\tradky = u'something odd with the page markup'\n\t\telse:\n\t\t\tradky = u'something odd with the page markup'\n\texcept:\n\t\tradky = u'unable to parse the site'\n\treply(type, source, radky)\n\nregister_command_handler(handler_fomenko, 'фоменко', ['фан','все'], 10, 'An analogue of Fomenko gags on Endless', 'фоменко', ['фоменко'])\n","repo_name":"superpsp/blacksmith","sub_path":"plugins/fomenko_plugin.py","file_name":"fomenko_plugin.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"35800299785","text":"import nltk.sentiment\nimport random\nimport emoji\nanalyzer = nltk.sentiment.SentimentIntensityAnalyzer()\n\ndef main():\n user_text = input('Do you want to say anything ? ')\n score = get_sentiment(user_text)\n reaction = get_reaction(score)\n print(\"You seem to be \"+reaction)\n predict_song(score)\n\ndef predict_song(score):\n if score>0.5:\n file=open('VeryHappy.txt')\n l = []\n for line in file:\n l.append(line)\n ans=random.choice(l)\n print(\"You should try this Song! \"+ans)\n elif score>0:\n file=open('Happy.txt')\n la = []\n for line in file:\n la.append(line)\n ans1=random.choice(la)\n print(\"You should try this Song! \"+ans1)\n elif score==0:\n file=open('Neutral.txt')\n lal = []\n for line in file:\n lal.append(line)\n ans2=random.choice(lal)\n print(\"You should try this Song! \"+ans2)\n elif -0.5<=score<0:\n # check the milder range first; otherwise the VerySad branch below is unreachable\n file=open('Sad.txt')\n lala = []\n for line in file:\n lala.append(line)\n ans3=random.choice(lala)\n print(\"You should try this Song! \"+ans3)\n elif score<-0.5:\n file=open('VerySad.txt')\n lu = []\n for line in file:\n lu.append(line)\n ans4=random.choice(lu)\n print(\"You should try this Song! 
\"+ans4)\ndef get_reaction(score):\n \"\"\"\n Parameter score: a float between -1 and +1\n Return: An emoji as a string!\n \"\"\"\n if score > 0.5:\n return \"😍(Very Happy)\"\n if score > 0:\n return \"🙂(Happy)\"\n if score == 0:\n return \"😶(Normal)\"\n if score < -0.5:\n return \"😢(Very Sad)\"\n if score < 0:\n return \"😟(Sad)\"\n\ndef get_sentiment(user_text):\n \"\"\"\n Parameter user_text: any text (string)\n Return: a sentiment score between -1 and +1 (float)\n \"\"\"\n # 1. pass the text into the analyzer.polarity_scores function, part of the nltk package\n scores = analyzer.polarity_scores(user_text)\n # 2. extract the sentiment score. Scores is a \"dictionary\" (covered on May 17th)\n sentiment_score = scores['compound']\n\n return sentiment_score\n\nif __name__ == '__main__':\n main()","repo_name":"krutik2377/Sentiment-Based-Song-Recommendation-System","sub_path":"FinalProject/SentimentBasedSongSuggestionSystem.py","file_name":"SentimentBasedSongSuggestionSystem.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11170899121","text":"#this is a game about a space-ship that fires bullets at aliens\n#let's import the required modules for the game\nimport sys\nfrom time import sleep\nimport json\n\nimport pygame\n\nfrom settings import Settings\nfrom ship import Ship\nfrom game_stats import GameStats\nfrom bullet import Bullet\nfrom alien import Alien\nfrom button import Button\nfrom button1 import Button1\nfrom button2 import Button2\nfrom button3 import Button3\nfrom scoreboard import Scoreboard\n\n#let's make a class of the game\nclass AlienInvasion:\n \"\"\"Overall class to manage game assets and behavior.\"\"\"\n def __init__(self):\n \"\"\"initialize the game's attributes\"\"\"\n pygame.init()\n #make an instance of settings so that we can use it elsewhere\n self.settings=Settings()\n #set the width and height of the window\n self.screen=pygame.display.set_mode(\n (self.settings.screen_width,self.settings.screen_height),\n #pygame.RESIZABLE\n #(0,0),pygame.FULLSCREEN\n )\n #self.settings.screen_width = self.screen.get_rect().width\n #self.settings.screen_height = self.screen.get_rect().height\n \n #set the caption of the window\n pygame.display.set_caption(\"Alien Invasion\")\n # Create an instance to store game statistics.\n self.stats=GameStats(self)\n self.ship=Ship(self)\n self.bullets=pygame.sprite.Group()\n self.aliens=pygame.sprite.Group()\n self._create_fleet()\n #make the play button.\n self.play_button=Button(self,\"Play\")\n self.level1=Button1(self,\"Easy\")\n self.level2=Button2(self,\"Medium\")\n self.level3=Button3(self,\"Hard\")\n self.scoreboard=Scoreboard(self)\n pygame.mixer.music.load(\"sounds/music.ogg\")\n self.fire=pygame.mixer.Sound(\"sounds/fire.wav\")\n self.explosion=pygame.mixer.Sound(\"sounds/explosion.ogg\")\n self.new_level=pygame.mixer.Sound(\"sounds/new_level.ogg\")\n self.game_over=pygame.mixer.Sound(\"sounds/game_over.wav\")\n \n\n def run_game(self):\n \"\"\"the main loop of the game\"\"\"\n \n while True:\n self._check_events()\n if self.stats.game_active:\n self.ship.update()\n self._update_bullets()\n self._update_aliens()\n else:\n pygame.mixer.music.stop()\n self._update_screen()\n\n\n def _check_events(self):\n \"\"\"loop through the events that happen by the user in the game\"\"\"\n for event in pygame.event.get():\n #when the user clicks the close button exit the game.\n if event.type == pygame.QUIT:\n self._close_game()\n #detect 
the event when the user presses a key.\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n #detect the event when the user releases a key.\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)\n #detect the event when the user presses the mouse button.\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos=pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n self._check_level1_button(mouse_pos)\n self._check_level2_button(mouse_pos)\n self._check_level3_button(mouse_pos)\n\n\n def _check_level3_button(self,mouse_pos):\n \"\"\"respond when the user clicks level 3 button\"\"\"\n button_clicked=self.level3.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_game()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.ship_speed-=1.2\n self.stats.level+=6\n self.scoreboard.prep_level()\n\n\n def _check_level2_button(self,mouse_pos):\n \"\"\"respond when the user clicks level 2 button\"\"\"\n button_clicked=self.level2.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_game()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.stats.level+=4\n self.scoreboard.prep_level()\n \n\n\n def _check_level1_button(self,mouse_pos):\n \"\"\"respond when the user clicks level 1 button\"\"\"\n button_clicked=self.level1.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_game()\n self.settings.increase_speed()\n self.settings.increase_speed()\n self.stats.level+=2\n self.scoreboard.prep_level()\n\n\n def _check_play_button(self,mouse_pos):\n \"\"\"Start a new game when the player clicks Play.\"\"\"\n button_clicked=self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_game()\n\n def _start_game(self):\n \"\"\"start the game\"\"\"\n #reset the game statistics and activate the game.\n self.stats.reset_stats()\n \n self.stats.game_active=True\n #clear any existing aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n #create a new fleet of aliens and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n #hide the mouse's cursor.\n pygame.mouse.set_visible(False)\n #reset the speed of the game.\n self.settings.initialize_dynamic_settings()\n self.scoreboard.prep_images()\n pygame.mixer.music.play(-1)\n\n def _close_game(self):\n \"\"\"save the high score and exit the game\"\"\"\n file_name='high_score.json'\n saved_high_score=self.stats.get_saved_high_score()\n if self.stats.high_score > saved_high_score:\n with open(file_name,'w') as file_object:\n json.dump(self.stats.high_score,file_object)\n sys.exit()\n
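\n # Editor's note (assumption, not from this file): _close_game stores the\n # high score as a bare JSON integer, so GameStats.get_saved_high_score()\n # -- defined in game_stats.py, which is not shown here -- presumably reads\n # it back along these lines:\n #\n # def get_saved_high_score(self):\n #     try:\n #         with open('high_score.json') as file_object:\n #             return json.load(file_object)\n #     except (FileNotFoundError, ValueError):\n #         return 0\n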
\n def _check_keydown_events(self,event):\n \"\"\"respond to the keys pressed by the user\"\"\"\n #when the user presses the right arrow key \n #set the moving_right flag to True\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right=True\n #when the user presses the left arrow key\n #set the moving_left flag to True\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left=True\n #exit the game when the user presses 'q'\n elif event.key == pygame.K_q:\n self._close_game()\n elif event.key == pygame.K_p:\n self._start_game()\n \n\n def _check_keyup_events(self,event):\n \"\"\"respond to the keys released by the user\"\"\"\n #when the user releases the right arrow key\n #set the moving_right flag to False.\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right=False\n #when the user releases the left arrow key\n #set the moving_left flag to False.\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left=False\n #fire a bullet when the user presses the spacebar.\n elif event.key == pygame.K_SPACE:\n self._fire_bullet()\n\n def _update_bullets(self):\n \"\"\"update the positions of bullets \n and remove the disappeared bullets\"\"\"\n self.bullets.update()\n #get rid of the bullets that have disappeared from the screen.\n for bullet in self.bullets.copy():\n if bullet.rect.bottom <= 0:\n self.bullets.remove(bullet)\n self._check_bullet_alien_collisions()\n \n\n def _check_bullet_alien_collisions(self):\n \"\"\"Respond to bullet-alien collisions.\"\"\"\n #Remove any bullets and aliens that have collided.\n collisions=pygame.sprite.groupcollide(\n self.bullets,self.aliens,True,True)\n for aliens in collisions.values():\n self.stats.score+=self.settings.alien_points * len(aliens)\n self.explosion.play()\n self.scoreboard.prep_score()\n self.scoreboard.check_high_score()\n if not self.aliens.sprites():\n #when the user destroys all the aliens.\n self._start_new_level()\n \n def _start_new_level(self):\n \"\"\"start a new level after the fleet of aliens has been destroyed\"\"\"\n #delete the existing bullets and make a new fleet of aliens.\n self.bullets.empty()\n self._create_fleet()\n self.settings.increase_speed()\n #increase level.\n self.stats.level+=1\n self.scoreboard.prep_level()\n self.new_level.play()\n\n def _fire_bullet(self):\n \"\"\"add a new bullet to the group of fired bullets\"\"\"\n if len(self.bullets) < self.settings.bullet_limit:\n new_bullet=Bullet(self)\n #add a new instance to the group of bullets \n #unless the group already reached the limit of allowed bullets.\n self.bullets.add(new_bullet)\n self.fire.play()\n\n def _create_fleet(self):\n \"\"\"create a fleet of aliens\"\"\"\n #create an instance of alien to get its width.\n alien=Alien(self)\n #calculate its width and height to see how many aliens \n #can fit the screen horizontally and vertically.\n alien_width,alien_height=alien.rect.size\n ship_height=self.ship.rect.height\n #calculating the available space by subtracting \n #two alien's width from the width of the screen.\n available_space_x=self.settings.screen_width - (2 * alien_width)\n #same way when calculating the vertical space.\n available_space_y = (self.settings.screen_height - \n (3 * alien_height) - ship_height)\n #then using floor division to divide the available space\n #so we can have a space between the aliens equal to one alien's width.\n number_aliens_x = available_space_x // (2 * alien_width)\n #same way to calculate the number of rows.\n number_rows = available_space_y // (2 * alien_height)\n #now we can make the fleet of aliens.\n for number_row in range(number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number,number_row)\n\n\n def _create_alien(self,alien_number,number_row):\n \"\"\"create a new alien and add it to the fleet\"\"\"\n #make a new alien.\n alien=Alien(self)\n #get its width.\n alien_width,alien_height=alien.rect.size\n #calculate its decimal rect.\n alien.x=alien_width + 2 * alien_width * alien_number\n #return the value of the rect.\n alien.rect.x = alien.x\n alien.rect.y=alien_height + 2*alien_height*number_row\n #add the alien to the fleet.\n self.aliens.add(alien)\n\n
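\n # Editor's note: a worked example of the fleet-layout arithmetic in\n # _create_fleet above, with illustrative numbers that are not taken from\n # settings.py: for a 1200 px wide screen and 60 px wide aliens,\n #     available_space_x = 1200 - (2 * 60) = 1080\n #     number_aliens_x = 1080 // (2 * 60) = 9\n # i.e. nine aliens per row, separated by one alien width of empty space.\n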
\n def _update_aliens(self):\n \"\"\"update the positions of the fleet of aliens \n after checking if the fleet hit an edge\"\"\"\n self._check_fleet_edges()\n self.aliens.update()\n #check for alien-ship collisions.\n if pygame.sprite.spritecollideany(self.ship,self.aliens):\n self._ship_hit()\n #check if an alien reaches the bottom of the screen.\n self._check_aliens_bottom()\n\n def _check_fleet_edges(self):\n \"\"\"check to see if the fleet hits the edge of the screen\"\"\"\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break\n\n def _change_fleet_direction(self):\n \"\"\"drop down the fleet and then change the direction of the fleet\"\"\"\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1\n\n def _ship_hit(self):\n \"\"\"respond when the ship gets hit by an alien\"\"\"\n if self.stats.ships_left > 0:\n #decrement the number of ships left \n #and update the ships on the scoreboard.\n self.stats.ships_left-=1\n self.scoreboard.prep_ships()\n #delete the current aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n #center the ship.\n self.ship.center_ship()\n #create a new fleet of aliens.\n self._create_fleet()\n #pause.\n sleep(0.5)\n else:\n self.stats.game_active=False\n self.game_over.play()\n #show the mouse cursor.\n pygame.mouse.set_visible(True)\n\n def _check_aliens_bottom(self):\n \"\"\"respond if any alien has reached the bottom of the screen\"\"\"\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= self.screen.get_rect().bottom:\n #when an alien reaches the bottom of the screen\n #respond the same way as if the ship was hit by an alien.\n self._ship_hit()\n break\n\n def _update_screen(self):\n \"\"\"update the images on the screen and flip to the new screen\"\"\"\n #fill the surface of the screen with the background color.\n self.screen.fill(self.settings.bg_color)\n #draw the ship to the screen.\n self.ship.blitme()\n #draw the bullets to the screen.\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n #draw the aliens to the screen.\n self.aliens.draw(self.screen)\n #draw the play button when the game is inactive.\n if not self.stats.game_active:\n self.play_button.draw_button()\n self.level1.draw_button()\n self.level2.draw_button()\n self.level3.draw_button()\n self.scoreboard.draw_score()\n #make the most recently drawn screen visible.\n pygame.display.flip()\n\n\n#create an instance of the game and run 'run_game'\nif __name__=='__main__':\n game=AlienInvasion()\n game.run_game()","repo_name":"moamall11/Alien_invasion","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":14208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5235102151","text":"\"\"\"\nTasks for maintaining the project.\n\nExecute 'invoke --list' for guidance on using Invoke\n\"\"\"\nimport shutil\nfrom ruamel.yaml import YAML\nimport pprint\n\nfrom invoke import task\nimport webbrowser\nfrom pathlib import Path\n\nPath().expanduser()\nyaml = YAML()\n\nROOT_DIR = Path(__file__).parent\nSETUP_FILE = ROOT_DIR.joinpath(\"setup.py\")\nTEST_DIR = ROOT_DIR.joinpath(\"tests\")\nSOURCE_DIR = ROOT_DIR.joinpath(\"deeplearning2020\")\nTOX_DIR = ROOT_DIR.joinpath(\".tox\")\nTRAVIS_CONFIG_FILE = ROOT_DIR.joinpath(\".travis.yml\")\nCOVERAGE_FILE = ROOT_DIR.joinpath(\".coverage\")\nCOVERAGE_DIR = 
ROOT_DIR.joinpath(\"htmlcov\")\nCOVERAGE_REPORT = COVERAGE_DIR.joinpath(\"index.html\")\nPYTHON_DIRS = [str(d) for d in [SOURCE_DIR, TEST_DIR]]\n\n\ndef _delete_file(file):\n try:\n file.unlink(missing_ok=True)\n except TypeError:\n # missing_ok argument added in 3.8\n try:\n file.unlink()\n except FileNotFoundError:\n pass\n\n\n@task(help={\"check\": \"Checks if source is formatted without applying changes\"})\ndef format(c, check=False):\n \"\"\"Format code\n \"\"\"\n python_dirs_string = \" \".join(PYTHON_DIRS)\n black_options = \"--diff\" if check else \"\"\n c.run(\"pipenv run black {} {}\".format(black_options, python_dirs_string))\n isort_options = \"--recursive {}\".format(\"--check-only\" if check else \"\")\n c.run(\"pipenv run isort {} {}\".format(isort_options, python_dirs_string))\n\n\n@task\ndef lint(c):\n \"\"\"Lint code\n \"\"\"\n c.run(\"pipenv run flake8 {}\".format(SOURCE_DIR))\n\n\n@task\ndef test(c, min_coverage=None):\n \"\"\"Run tests\n \"\"\"\n pytest_options = \"--cov-fail-under={}\".format(min_coverage) if min_coverage else \"\"\n c.run(\"pipenv run pytest --cov={} {}\".format(SOURCE_DIR, pytest_options))\n\n\n@task\ndef type_check(c):\n \"\"\"Check types\n \"\"\"\n c.run(\"pipenv run mypy\")\n\n\ndef _create(d, *keys):\n current = d\n for key in keys:\n try:\n current = current[key]\n except (TypeError, KeyError):\n current[key] = dict()\n current = current[key]\n\n\ndef _fix_token(config_file=None, force=False, verify=True):\n config_file = config_file or TRAVIS_CONFIG_FILE\n with open(config_file, \"r\") as _file:\n try:\n travis_config = yaml.load(_file)\n except Exception:\n raise ValueError(\n \"Failed to parse the travis configuration. \"\n \"Make sure the config only contains valid YAML and keys as specified by travis.\"\n )\n\n # Get the generated token from the top level deploy config added by the travis cli\n try:\n real_token = travis_config[\"deploy\"][\"password\"][\"secure\"]\n except (TypeError, KeyError):\n raise AssertionError(\"Can't find any top level deployment tokens\")\n\n try:\n # Find the build stage that deploys to PyPI\n pypy_stages = [\n stage\n for stage in travis_config[\"jobs\"][\"include\"]\n if stage.get(\"deploy\", dict()).get(\"provider\") == \"pypi\"\n ]\n assert (\n len(pypy_stages) > 0\n ), \"Can't set the new token because there are no stages deploying to PyPI\"\n assert (\n len(pypy_stages) < 2\n ), \"Can't set the new token because there are multiple stages deploying to PyPI\"\n except (TypeError, KeyError):\n raise AssertionError(\"Can't set the new token because there no build stages\")\n\n try:\n is_mock_token = pypy_stages[0][\"deploy\"][\"password\"][\"secure\"] == \"REPLACE_ME\"\n is_same_token = pypy_stages[0][\"deploy\"][\"password\"][\"secure\"] == real_token\n\n unmodified = is_mock_token or is_same_token\n except (TypeError, KeyError):\n unmodified = False\n\n # Set the new generated token as the stages deploy token\n _create(pypy_stages[0], \"deploy\", \"password\", \"secure\")\n pypy_stages[0][\"deploy\"][\"password\"][\"secure\"] = real_token\n\n # Make sure it is fine to overwrite the config file\n assert unmodified or force, (\n 'The secure token in the \"{}\" stage has already been changed. 
'\n \"Retry with --force if you are sure about replacing it.\".format(\n pypy_stages[0].get(\"stage\", \"PyPI deployment\")\n )\n )\n\n # Remove the top level deploy config added by the travis cli\n travis_config.pop(\"deploy\")\n\n if not unmodified and verify:\n pprint.pprint(travis_config)\n if (\n not input(\"Do you want to save this configuration? (y/n) \")\n .strip()\n .lower()\n == \"y\"\n ):\n return\n\n # Save the new travis config\n assert travis_config\n with open(config_file, \"w\") as _file:\n yaml.dump(travis_config, _file)\n print(\"Fixed!\")\n\n\n@task(help=dict(\n force=\"Force overriding the current travis configuration\",\n verify=\"Verify config changes by asking for the user's approval\"\n))\ndef fix_token(c, force=False, verify=True):\n \"\"\"\n Add the token generated by the travis cli script to the correct entry\n \"\"\"\n _fix_token(force=force, verify=verify)\n\n\n@task\ndef install_hooks(c):\n \"\"\"Install pre-commit hooks\n \"\"\"\n c.run(\"pipenv run pre-commit install -t pre-commit\")\n c.run(\"pipenv run pre-commit install -t pre-push\")\n\n\n@task\ndef pre_commit(c):\n \"\"\"Run all pre-commit checks\n \"\"\"\n c.run(\"pipenv run pre-commit run --all-files\")\n\n\n@task(\n pre=[test],\n help=dict(\n publish=\"Publish the result (default False)\",\n provider=\"The provider to publish (default codecov)\",\n ),\n)\ndef coverage(c, publish=False, provider=\"codecov\"):\n \"\"\"Create coverage report\n \"\"\"\n if publish:\n # Publish the results via provider (e.g. codecov or coveralls)\n c.run(\"pipenv run {}\".format(provider))\n else:\n # Build a local report\n c.run(\"pipenv run coverage html -d {}\".format(COVERAGE_DIR))\n webbrowser.open(COVERAGE_REPORT.as_uri())\n\n\n@task\ndef clean_build(c):\n \"\"\"Clean up files from package building\n \"\"\"\n c.run(\"rm -fr build/\")\n c.run(\"rm -fr dist/\")\n c.run(\"rm -fr .eggs/\")\n c.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n c.run(\"find . -name '*.egg' -exec rm -f {} +\")\n\n\n@task\ndef clean_python(c):\n \"\"\"Clean up python file artifacts\n \"\"\"\n c.run(\"find . -name '*.pyc' -exec rm -f {} +\")\n c.run(\"find . -name '*.pyo' -exec rm -f {} +\")\n c.run(\"find . -name '*~' -exec rm -f {} +\")\n c.run(\"find . 
-name '__pycache__' -exec rm -fr {} +\")\n\n\n@task\ndef clean_tests(c):\n \"\"\"Clean up files from testing\n \"\"\"\n _delete_file(COVERAGE_FILE)\n shutil.rmtree(TOX_DIR, ignore_errors=True)\n shutil.rmtree(COVERAGE_DIR, ignore_errors=True)\n\n\n@task(pre=[clean_build, clean_python, clean_tests])\ndef clean(c):\n \"\"\"Runs all clean sub-tasks\n \"\"\"\n pass\n\n\n@task(clean)\ndef dist(c):\n \"\"\"Build source and wheel packages\n \"\"\"\n c.run(\"python setup.py sdist\")\n c.run(\"python setup.py bdist_wheel\")\n\n\n@task(pre=[clean, dist])\ndef release(c):\n \"\"\"Make a release of the python package to pypi\n \"\"\"\n c.run(\"twine upload dist/*\")\n","repo_name":"into-ai/deeplearning2020","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"67"} +{"seq_id":"23444422384","text":"import json\r\nimport numpy as np\r\nimport os\r\n\r\nfrom classes import UAV\r\nfrom utils import euclidian_distance, get_normalized_vector, plot_history\r\nfrom math import *\r\nfrom simulations import simulate\r\n\r\n\r\nK_DIR = 8\r\nTIMESTEP = 2\r\nTIMERANGE = 10\r\nSPEED = 1.5\r\nSPEEDRATE = 20\r\nRADIO = 1\r\n\r\nMAXAMPLITUDE=radians(89.99)\r\n\r\ndef test_experiment():\r\n\r\n u1 = UAV((-20, 0), SPEED, RADIO, (1, 0), (0, 0),max_amp=MAXAMPLITUDE)\r\n u2 = UAV((20, 0), SPEED, RADIO, (-1, 0), (0, 0),max_amp=MAXAMPLITUDE)\r\n u3 = UAV((0, 20), SPEED, RADIO, (0, -1), (0, 0),max_amp=MAXAMPLITUDE)\r\n u4 = UAV((0, -20), SPEED, RADIO, (0, 1), (0, 0),max_amp=MAXAMPLITUDE)\r\n\r\n measures = simulate([u1, u2, u3, u4],K_DIR,TIMERANGE,TIMESTEP)\r\n print(measures)\r\n\r\n plot_history([u1, u2, u3, u4])\r\n\r\n\r\ndef experiment1(k=K_DIR, speed = SPEED, radio = RADIO, timestep=TIMESTEP, ca_timerange=TIMERANGE):\r\n ''' 6 drones antipodales'''\r\n\r\n drone_positions = []\r\n for i in range(6):\r\n x = 10*cos(i*2*pi/6) \r\n y = 10*sin(i*2*pi/6) \r\n drone_positions.append((x,y))\r\n\r\n goals = [(-x, -y) for x,y in drone_positions]\r\n\r\n # SPEEDRATE = defining_speedrate(drone_positions, goals)\r\n # print(\"speedrate:\", SPEEDRATE)\r\n\r\n uavs = []\r\n for position, goal in zip(drone_positions, goals):\r\n # For different speeds\r\n # speed = euclidian_distance(position, goal)/SPEEDRATE\r\n\r\n direction = get_normalized_vector(np.array(goal)-np.array(position))\r\n uav = UAV(position, speed, radio, direction, goal, goal_distance=0.5, max_amp=MAXAMPLITUDE)\r\n uavs.append(uav)\r\n \r\n # timestep = defining_timestep(uavs)\r\n # print(timestep)\r\n\r\n measures, mean_time = simulate(uavs, k, ca_timerange, timestep)\r\n with open(\"results/experiments1.json\", \"w\") as f:\r\n f.write(json.dumps(measures))\r\n\r\n plot_history(uavs, name=\"results/experiments1\")\r\n\r\n return mean_time\r\n\r\ndef experiment2(k=K_DIR, speed = SPEED, radio = RADIO, timestep=TIMESTEP, ca_timerange=TIMERANGE):\r\n ''' 5 drones down to up'''\r\n\r\n drone_positions = [(-9.5, -8), (-5, -8), (0, -8), (5, -8), (9.5, -8)] \r\n goals = [(9.5, 9), (5, 9), (-5.05, 8.9), (-9.5, 9), (0, 9)]\r\n\r\n # SPEEDRATE = defining_speedrate(drone_positions, goals)\r\n # print(\"speedrate:\", SPEEDRATE)\r\n\r\n uavs = []\r\n for position, goal in zip(drone_positions, goals):\r\n # For different speeds\r\n # speed = euclidian_distance(position, goal)/SPEEDRATE\r\n\r\n direction = get_normalized_vector(np.array(goal)-np.array(position))\r\n uav = UAV(position, speed, radio, direction, goal, goal_distance=0.5, 
max_amp=MAXAMPLITUDE)\r\n uavs.append(uav)\r\n \r\n \r\n # timestep = defining_timestep(uavs)\r\n # print(timestep)\r\n\r\n measures, mean_time = simulate(uavs, k, ca_timerange, timestep)\r\n # print(measures)\r\n with open(\"results/experiments2.json\", \"w\") as f:\r\n f.write(json.dumps(measures))\r\n\r\n plot_history(uavs, name=\"results/experiments2\")\r\n\r\n return mean_time\r\n\r\ndef experiment3(k=K_DIR, speed = SPEED, radio = RADIO, timestep=TIMESTEP, ca_timerange=TIMERANGE):\r\n '6agentes_esc4'\r\n\r\n drone_positions = [(0, 0), (10, 0), (15, 2), (15, -2), (20, 4), (20, -4)] \r\n goals = [(20, 0), (0, 0.05), (0, -4.05), (0.05, 4.05), (-0.05, -8.05), (0, 8.05)]\r\n\r\n # SPEEDRATE = defining_speedrate(drone_positions, goals)\r\n # print(\"speedrate:\", SPEEDRATE)\r\n\r\n uavs = []\r\n for position, goal in zip(drone_positions, goals):\r\n # For different speeds\r\n # speed = euclidian_distance(position, goal)/SPEEDRATE\r\n\r\n direction = get_normalized_vector(np.array(goal)-np.array(position))\r\n uav = UAV(position, speed, radio, direction, goal, goal_distance=0.5, max_amp=MAXAMPLITUDE)\r\n uavs.append(uav)\r\n \r\n \r\n # timestep = defining_timestep(uavs)\r\n # print(timestep)\r\n\r\n measures, mean_time = simulate(uavs, k, ca_timerange, timestep)\r\n with open(\"results/experiments3.json\", \"w\") as f:\r\n f.write(json.dumps(measures))\r\n\r\n plot_history(uavs, name=\"results/experiments3\")\r\n\r\n return mean_time\r\n\r\ndef experiment4(k=K_DIR, speed = SPEED, radio = RADIO, timestep=TIMESTEP, ca_timerange=TIMERANGE):\r\n ''' 5 drones antipodal alternando '''\r\n \r\n drone_positions = [] \r\n goals = []\r\n \r\n for i in range(10):\r\n if i == 0 or i%2 ==0:\r\n position = ( 10*cos(i*2*pi/10), 10*sin(i*2*pi/10) )\r\n drone_positions.append(position)\r\n \r\n goals.append((-position[0], -position[1]))\r\n \r\n # SPEEDRATE = defining_speedrate(drone_positions, goals)\r\n # print(\"speedrate:\", SPEEDRATE)\r\n\r\n uavs = []\r\n for position, goal in zip(drone_positions, goals):\r\n # For different speeds\r\n # speed = euclidian_distance(position, goal)/SPEEDRATE\r\n\r\n direction = get_normalized_vector(np.array(goal)-np.array(position))\r\n uav = UAV(position, speed, radio, direction, goal, goal_distance=0.5, max_amp=MAXAMPLITUDE)\r\n uavs.append(uav)\r\n \r\n # timestep = defining_timestep(uavs)\r\n # print(timestep)\r\n\r\n measures, mean_time = simulate(uavs, k, ca_timerange, timestep)\r\n # print(measures)\r\n with open(\"results/experiments4.json\", \"w\") as f:\r\n f.write(json.dumps(measures))\r\n\r\n plot_history(uavs, name=\"results/experiments4\")\r\n\r\n return mean_time\r\n\r\n\r\ndef random_experiments(k=K_DIR, speed = SPEED, radio = RADIO, timestep=TIMESTEP, ca_timerange=TIMERANGE, max_amp=MAXAMPLITUDE):\r\n \r\n cwd = os.getcwd()\r\n\r\n f = open(\"results/random_results.json\", \"w\")\r\n f.close()\r\n\r\n f = open(\"results/random_results.txt\", \"w\")\r\n f.close()\r\n\r\n\r\n allfiles = [f for f in os.listdir(f'{cwd}/data') if os.path.isfile(os.path.join(f'{cwd}/data', f)) if f.endswith(\".txt\")]\r\n results = {}\r\n for file in allfiles:\r\n with open(f'{cwd}/data/{file}', 'r') as f:\r\n print(f\"- Experiment File {file}\")\r\n f.readline()\r\n\r\n drone_positions = []\r\n for _ in range(6):\r\n line = f.readline().split()\r\n x = float(line[0]) \r\n y = float(line[1])\r\n\r\n drone_positions.append((x,y))\r\n\r\n f.readline()\r\n f.readline()\r\n \r\n goal_positions = []\r\n for _ in range(6):\r\n line = f.readline().split()\r\n x = float(line[0]) 
\r\n y = float(line[1])\r\n\r\n goal_positions.append((x,y))\r\n\r\n drone_positions.pop()\r\n goal_positions.pop()\r\n\r\n # SPEEDRATE = defining_speedrate(drone_positions, goal_positions)\r\n # print(\"speedrate:\", SPEEDRATE)\r\n \r\n uavs = []\r\n for position, goal in zip(drone_positions, goal_positions):\r\n # For different speeds\r\n # speed = euclidian_distance(position, goal)/SPEEDRATE\r\n\r\n direction = get_normalized_vector(np.array(goal)-np.array(position))\r\n uav = UAV(position, speed, radio, direction, goal, goal_distance=1, max_amp=max_amp)\r\n uavs.append(uav)\r\n \r\n \r\n # timestep = defining_timestep(uavs)\r\n # print(timestep)\r\n\r\n measures, _ = simulate(uavs, k, ca_timerange, timestep)\r\n\r\n if not measures:\r\n print(\"COLLISION\")\r\n print(\"Flying uavs\", [(uav.position, uav.direction) for uav in uavs])\r\n print(\"Directions\", [uav.generate_directions(k) for uav in uavs])\r\n print()\r\n\r\n with open(\"results/random_results.txt\", \"a\") as f:\r\n f.write(str(measures))\r\n f.write(\"\\n\")\r\n\r\n results[file] = measures\r\n\r\n plot_history(uavs, f'data/{file[:-4]}')\r\n \r\n with open(\"results/random_results.json\", \"w\") as f:\r\n f.write(json.dumps(results))\r\n\r\n\r\ndef defining_timestep(uavs):\r\n \r\n dists = [euclidian_distance(uav.goal_point, uav.position) for uav in uavs]\r\n # dists = [euclidian_distance(uav.goal_point, uav.position)/uav.speed for uav in uavs]\r\n # return min(dists)/max(dists) ***\r\n # return 2*min(dists)/max(dists) ***\r\n # return 3*min(dists)/max(dists)\r\n \r\n if min(dists) == max(dists):\r\n return 1\r\n else:\r\n if 3*min(dists)/max(dists) < 1:\r\n return 5*min(dists)/max(dists)\r\n else:\r\n return 3*min(dists)/max(dists)/2\r\n \r\n # return 3*min(dists)/max(dists)/2 # ***** \r\n \r\n\r\ndef defining_speedrate(position, goals):\r\n \r\n dists = [euclidian_distance(p, g) for p, g in zip(position, goals)]\r\n res = max((max(dists)/min(dists))**2, 20)\r\n # return min(res, 40) \r\n # return 15\r\n if (max(dists)/min(dists))>3:\r\n return (max(dists)/min(dists))**2\r\n else:\r\n return 2*(max(dists)/min(dists))*10\r\n\r\n# ## for exp 2, 3: \r\n# speedrate = 15\r\n# timestep = 3*min(dists)/max(dists)/2\r\n\r\n# speedrate = 15\r\n# timestep = min(dists)/max(dists)/2\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # test_experiment()\r\n\r\n t = experiment1()\r\n\r\n print(\"****Experimento 1 terminado****\")\r\n print(f\"Iteration Time: Max = {max(t)}, Mean = {sum(t)/len(t)}\")\r\n print()\r\n\r\n t = experiment2()\r\n\r\n print(\"****Experimento 2 terminado****\")\r\n print(f\"Iteration Time: Max = {max(t)}, Mean = {sum(t)/len(t)}\")\r\n print()\r\n \r\n t = experiment3()\r\n\r\n print(\"****Experimento 3 terminado****\")\r\n print(f\"Iteration Time: Max = {max(t)}, Mean = {sum(t)/len(t)}\")\r\n print()\r\n \r\n t = experiment4()\r\n\r\n print(\"****Experimento 4 terminado****\")\r\n print(f\"Iteration Time: Max = {max(t)}, Mean = {sum(t)/len(t)}\")\r\n print()\r\n\r\n random_experiments()\r\n \r\n print(\"****Random experiments terminados****\")\r\n print()\r\n ","repo_name":"fabio-rodriguez/CollisionAvoidance","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72761894294","text":"class Node:\n def __init__(self, data, next = None):\n self.data = data\n self.next = next\n\n def __repr__(self):\n mystr = \"\"\n while self:\n mystr = str(self.data) + mystr\n self 
= self.next\n        return mystr\n    \n    def linkedSum(self, first, second):\n        carry = 0\n        prev = None\n        while first or second:\n            firstVal = first.data if first else 0\n            secondVal = second.data if second else 0\n            sumVal = firstVal + secondVal + carry\n            self.data = sumVal % 10\n            carry = sumVal // 10\n            if first: first = first.next\n            if second: second = second.next\n            self.next = Node(0)\n            prev = self\n            self = self.next\n        # keep the trailing node for a final carry; otherwise drop it\n        if carry:\n            self.data = carry\n        else:\n            prev.next = None\n\nif __name__ == \"__main__\":\n    a = Node(3, Node(1, Node(5))) # 513\n    b = Node(4, Node(9, Node(1, Node(4)))) # 4194\n    c = Node(0)\n    c.linkedSum(a, b)\n    print(repr(c)) # 4707\n    a0 = Node(0)\n    d = Node(0)\n    d.linkedSum(a0, a0)\n    print(repr(d)) # 0","repo_name":"kimjiwook0129/Coding-Interivew-Cheatsheet","sub_path":"Miscellaneous/linkedSum.py","file_name":"linkedSum.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"2316118597","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n# from matplotlib.lines import Line2D\r\nimport seaborn as sns\r\n\r\n# Read the data into two DataFrames and then join them into a single DataFrame\r\ntry:\r\n    et_df = pd.read_excel('raw_data/ucrc_riparian_means.xlsx')\r\n    pr_df = pd.read_excel('raw_data/ucrc_cda_pr_means.xlsx')\r\nexcept:\r\n    print(\"ERROR WHEN READING IN DATA\")\r\n    exit(1)\r\n\r\njoined_df = new_df = pd.merge(pr_df, et_df, how='left', left_on=['station_id', 'site_name', 'year'],\r\n                              right_on=['station_id', 'site_name', 'year'])\r\n\r\n# site filter\r\n# joined_df = joined_df[joined_df.station_id == 9379500]\r\n\r\nvar_list = ['gs_pr', 'ann_pr', 'wy_pr', 'gs_et',\r\n            'gs_etof', 'gs_eto', 'ann_et', 'ann_etof', 'ann_eto']\r\n\r\n#%%\r\ncorrMatrix = joined_df[var_list].corr().round(2)\r\nax1 = plt.axes()\r\nmask = np.triu(np.ones_like(corrMatrix, dtype=bool))\r\nsns.heatmap(corrMatrix, annot=True, vmax=1, vmin=-1, center=0, cmap='viridis', mask=mask)\r\n\r\nax1.set_title('Pearson R - All Sites')\r\nplt.show()\r\n\r\n#%%\r\nplt.figure()\r\ncorrMatrix_tau = joined_df[var_list].corr(method='kendall').round(2)\r\nax2 = plt.axes()\r\nmask = np.triu(np.ones_like(corrMatrix_tau, dtype=bool))\r\nsns.heatmap(corrMatrix_tau, annot=True, vmax=1, vmin=-1, center=0, cmap='viridis', mask=mask)\r\n\r\nax2.set_title('Kendall Tau- All Sites')\r\nplt.show()\r\n\r\n#%%\r\n# plt.figure()\r\nax1 = joined_df.plot.scatter(x='gs_etof',\r\n                             y='gs_et', c='DarkBlue')\r\n\r\n#%%\r\n# plt.figure()\r\nax1 = joined_df.plot.scatter(x='gs_eto',\r\n                             y='gs_et', c='DarkBlue')\r\n\r\n#%%\r\n# plt.figure()\r\nax1 = joined_df.plot.scatter(x='ann_pr',\r\n                             y='gs_etof', c='DarkBlue')\r\n#%%\r\n# plt.figure()\r\nax1 = joined_df.plot.scatter(x='wy_pr',\r\n                             y='gs_et', c='DarkBlue')\r\n\r\n","repo_name":"chunkyshrapnel/dri_july_tasks","sub_path":"task_7_Correlation_Matrix_Plots/corr_mat_gs.py","file_name":"corr_mat_gs.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"74582237333","text":"import sys\nimport os\nfrom github import Github\n\nimport templates\n\ndef main():\n\n    # Builds the new folder path and gets the Git token\n    foldername = str(sys.argv[1])\n    path = os.environ.get('proj_path') # add projects directory to the env vars\n    token = os.environ.get('git_token') # add github token to the env vars\n    _dir = path + '\\\\' + foldername\n\n    # Creates the new Git Repo\n    g = Github(token)\n    user = g.get_user()\n    login 
= user.login\n repo = user.create_repo(foldername, private=True)\n\n # Create the new folder for the project\n os.mkdir(_dir)\n os.chdir(_dir)\n\n # Create a main.py file and add the main function\n file_path = _dir + '\\main.py'\n file_content = templates.template_py_file(foldername)\n f = open(file_path, \"w\", encoding='utf-8')\n f.write(file_content)\n f.close()\n\n # Create a README file\n file_path = _dir + '\\README.md'\n file_content = templates.template_readme_file(foldername)\n f = open(file_path, \"w\", encoding='utf-8')\n f.write(file_content)\n f.close()\n\n # Creates a list with the git commands to execute\n commands = ['git init',\n f'git remote add origin https://github.com/{login}/{foldername}.git',\n 'git add .',\n 'git commit -m \"Initial commit\"',\n 'git push -u origin master']\n\n # Executes the git commands\n for c in commands:\n os.system(c)\n\n # Prints a sucess message\n print(f'{foldername} created locally')\n\n # Opens the new project folder\n # os.startfile(_dir)\n\n # Open VSCode\n code_command = f'''code \"{_dir}\"'''\n os.system(code_command)\n\nif __name__ == '__main__':\n main() ","repo_name":"andrefcluz/AutomateProjectCreation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21358902900","text":"LIST_CATEGORIES = [\n \"Snacks\",\n \"Viandes\",\n \"Fromages\",\n \"Charcuteries\",\n \"Fruits\",\n \"Poissons\",\n \"Légumes\",\n \"Biscuits\",\n \"Boissons aux fruits\",\n \"Chocolats\",\n \"Pains\",\n \"Yaourts\",\n \"Fruits à coques et dérives\",\n \"Jus de fruits\",\n \"Huiles\",\n \"Vins\",\n \"Légumineuses\",\n \"Fruits\",\n \"Pâtisseries\",\n \"Viennoiseries\",\n \"Cereales pour petit-dejeuner\"\n \"Glaces et sorbets\",\n \"Bieres\",\n \"Cafés\",\n \"Laits\",\n \"Sodas\",\n \"Pâtes à tartiner\",\n \"Sauces\"\n]\n","repo_name":"Theodrem/Pure_beurre_OP","sub_path":"product/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40675962366","text":"import os\r\nfrom numba import jit\r\nimport numpy as np\r\n\r\n\r\nos.environ['NUMBA_CUDA_DEVICE'] = '0'\r\n\r\n\r\n@jit(nopython=True)\r\ndef sandpile_simulation(sandpile):\r\n while True:\r\n new_grid = np.zeros_like(sandpile, dtype='int')\r\n\r\n for i in range(sandpile.shape[0]):\r\n for j in range(sandpile.shape[1]):\r\n if sandpile[i, j] < 4:\r\n new_grid[i, j] = sandpile[i, j]\r\n\r\n row_ind, col_ind = np.where(sandpile >= 4)\r\n for c in range(len(row_ind)):\r\n i, j = row_ind[c], col_ind[c]\r\n new_grid[i, j] = sandpile[i, j] - 4\r\n if i < 639:\r\n new_grid[i + 1, j] += 1\r\n if i > 0:\r\n new_grid[i - 1, j] += 1\r\n if j < 639:\r\n new_grid[i, j + 1] += 1\r\n if j > 0:\r\n new_grid[i, j - 1] += 1\r\n\r\n sandpile = new_grid\r\n if len(row_ind) == 0:\r\n break\r\n\r\n return sandpile\r\n\r\n\r\ngrid = np.zeros([640, 640], dtype='int')\r\ngrid[320][320] = 5000\r\n\r\n\r\nresult = sandpile_simulation(grid)\r\n\r\nnp.savetxt('sandpile_result.txt', result, fmt='%d')\r\n","repo_name":"HanTheDestroyer/Sandpile_Simulation","sub_path":"numba_sandpile.py","file_name":"numba_sandpile.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74294512532","text":"def is_invalid_position(row, col, size):\r\n if row < 0 or col < 0 or row >= size 
or col >= size:\r\n return True\r\n return False\r\n\r\n\r\ndef get_next_position(direction, r, c, steps):\r\n if direction == 'up':\r\n return r - steps, c\r\n if direction == 'down':\r\n return r + steps, c\r\n if direction == 'left':\r\n return r, c - steps\r\n return r, c + steps\r\n\r\n\r\nsize = 5\r\nmatrix = []\r\nplayer_row, player_col = 0, 0\r\ntargets_count = 0\r\nfor row in range(size):\r\n matrix.append(input().split())\r\n for col in range(size):\r\n if matrix[row][col] == 'A':\r\n player_row, player_col = row, col\r\n elif matrix[row][col] == 'x':\r\n targets_count += 1\r\n# print(player_row, player_col)\r\nargs = int(input())\r\nhit_targets = []\r\nfor _ in range(args):\r\n line = input().split()\r\n cmd = line[0]\r\n direction = line[1]\r\n if cmd == 'move':\r\n steps = int(line[2])\r\n next_player_row, next_player_col = get_next_position(direction, player_row, player_col, steps)\r\n if is_invalid_position(next_player_row, next_player_col, size):\r\n continue\r\n if matrix[next_player_row][next_player_col] != '.':\r\n continue\r\n matrix[player_row][player_col] = '.'\r\n matrix[next_player_row][next_player_col] = 'A'\r\n player_row, player_col = next_player_row, next_player_col\r\n else:\r\n bullet_row, bullet_col = get_next_position(direction, player_row, player_col, 1)\r\n while True:\r\n if is_invalid_position(bullet_row, bullet_col, size):\r\n break\r\n if matrix[bullet_row][bullet_col] == 'x':\r\n hit_targets.append([bullet_row, bullet_col])\r\n matrix[bullet_row][bullet_col] = '.'\r\n break\r\n\r\n bullet_row, bullet_col = get_next_position(direction, bullet_row, bullet_col, 1)\r\n if len(hit_targets) == targets_count:\r\n break\r\nif len(hit_targets) == targets_count:\r\n print(f'Training completed! All {targets_count} targets hit.')\r\nelse:\r\n print(f'Training not completed! {targets_count - len(hit_targets)} targets left.')\r\nfor target in hit_targets:\r\n print(target)","repo_name":"albenayordanova/Python-Advanced","sub_path":"06. Range Day.py","file_name":"06. Range Day.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74104246614","text":"import pandas as pd\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import GridSearchCV\n\n#-----------------------------------------------------------------\n# Class PolynomialRegression\n#-----------------------------------------------------------------\n\nclass PolynomialRegression:\n\n def __init__(self, degree = 1, regLambda = 1E-8\n , tuneLambda = False, regLambdaValues = []):\n '''\n Constructor\n '''\n self.degree = degree\n self.regLambda = regLambda\n self.tuneLambda = tuneLambda\n self.regLambdaValues = regLambdaValues\n self.JHist = None\n self.theta = np.random.randn(degree+1).reshape(-1,1)\n self.alpha = 0.25\n self.thresh = 1E-4\n self.mean = np.zeros((degree,1))\n self.std = np.zeros((degree,1))\n\n\n def polyfeatures(self, X, degree):\n '''\n Expands the given X into an n * d array of polynomial features of\n degree d.\n\n Returns:\n A n-by-d data frame, with each column comprising of\n X, X * X, X ** 3, ... 
up to the dth power of X.\n Note that the returned matrix will not include the zero-th power.\n\n Arguments:\n X is an n-by-1 data frame\n degree is a positive integer\n '''\n base = X.to_numpy()\n for i in range(degree):\n if i == 0:\n poly_feat = base\n else:\n poly_feat = np.c_[poly_feat,base**(i+1)]\n return pd.DataFrame(poly_feat)\n \n\n def fit(self, X, y):\n '''\n Trains the model\n Arguments:\n X is a n-by-1 data frame\n y is an n-by-1 data frame\n Returns:\n No return value\n Note:\n You need to apply polynomial expansion and scaling first\n '''\n X = self.polyfeatures(X,self.degree)\n X = self.standardize_train(X)\n X = X.to_numpy()\n y = y.to_numpy()\n n = len(y)\n X = np.c_[np.ones((n,1)), X] # Add a row of ones for the bias term\n \n if self.tuneLambda and self.regLambdaValues != []:\n model = Ridge()\n grid = GridSearchCV(estimator = model, cv = 2,\n param_grid = dict(alpha = self.regLambdaValues))\n grid.fit(X,y.reshape(-1,1))\n self.regLambda = grid.best_params_.get('alpha')\n print(f'best lambda: {self.regLambda}')\n\n self.theta = self.gradientDescent(X,y,self.theta)\n \n \n def predict(self, X):\n '''\n Use the trained model to predict values for each instance in X\n Arguments:\n X is a n-by-1 data frame\n Returns:\n an n-by-1 data frame of the predictions\n '''\n X = self.polyfeatures(X,self.degree)\n X = self.standardize_test(X)\n X = X.to_numpy()\n n = X.shape[0]\n X = np.c_[np.ones((n,1)), X] # Add a row of ones for the bias term\n \n return pd.DataFrame(np.dot(X,self.theta))\n \n def standardize_train(self, X):\n '''\n standardize the training data before training or predicting\n Arguments:\n X is a n-by-d data frame\n Returns:\n an n-by-d data frame of the predictions\n '''\n X = X.to_numpy()\n self.mean = np.mean(X, axis=0)\n self.std = np.std(X, axis=0)\n standard = (X - self.mean) / self.std\n return pd.DataFrame(standard)\n \n def standardize_test(self, X):\n '''\n standardize the test data before training or predicting\n Arguments:\n X is a n-by-d data frame\n Returns:\n an n-by-d data frame of the predictions\n '''\n X = X.to_numpy()\n standard = (X - self.mean) / self.std\n return pd.DataFrame(standard)\n \n def computeCost(self, X, y, theta):\n '''\n Computes the objective function\n Arguments:\n X is a n-by-d numpy matrix\n y is an n-dimensional numpy vector\n theta is a d-dimensional numpy vector\n Returns:\n a scalar value of the cost \n ** Not returning a matrix with just one value! 
**\n        '''\n        n,d = X.shape\n        yhat = np.dot(X,theta)\n        y = y.reshape(-1,1)\n        J = np.dot((yhat-y).T,(yhat-y))/n\\\n            + self.regLambda*np.sum(theta[1:]**2)\n        J_scalar = J.tolist()[0][0] # convert matrix to scalar\n        return J_scalar\n    \n    def gradientDescent(self, X, y, theta):\n        '''\n        Fits the model via gradient descent\n        Arguments:\n            X is a n-by-d numpy array\n            y is an n-dimensional numpy vector\n            theta is a d-dimensional numpy vector\n        Returns:\n            the final theta found by gradient descent\n        '''\n        n,d = X.shape\n        y = y.reshape(-1,1)\n        self.JHist = []\n        iter_num = 0\n        last_cost = 1E8\n        while True:\n            cost = self.computeCost(X, y, theta)\n            self.JHist.append( (cost, theta) )\n            if iter_num > 0 and\\\n                abs(cost - last_cost) < self.thresh:\n                break\n#            if iter_num > 0 and\\\n#                norm(theta - self.JHist[-2][-1]) < self.thresh:\n#                print(\"Iteration: \", iter_num+1, \n#                      \" \\nCost: \", self.JHist[iter_num][0],\n#                      \" \\nTheta:\\n \", theta)\n#                break\n            yhat = np.dot(X,theta)\n            theta[1:] = theta[1:] * (1 - self.alpha * self.regLambda)\n            theta = theta - np.dot(X.T, (yhat-y)) * (self.alpha / n)\n            iter_num += 1\n            last_cost = cost\n        return theta","repo_name":"JiatongSun/CIS-519-Applied-Machine-Learning","sub_path":"HW2/HW2_code/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"38220124256","text":"\"\"\"\nSockets\n\"\"\"\n\nimport socket\n\n\"\"\"\nNetwork types: 1. IPv4 --- PC  2. IPv6 --- mobile\nSocket types: 1. UDP  2. TCP\n\"\"\"\n\n\ndef mains():\n    socketObject = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n    socketObject.sendto(b'abc',('192.168.20.2',8080))\n    # release the port\n    socketObject.close()\n\n\nif __name__ == \"__main__\":\n    mains()","repo_name":"lansebuding/myLearn","sub_path":"网络UDP/socket学习.py","file_name":"socket学习.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"24632156911","text":"# apply_landmarks.py\n#\n# Program to apply mediapipe hands landmarks to dataset \n# images. Training and testing sets imported from \n# split_sets.py. 
Running this file also runs split_sets.py.\n#\n# Author: Ciara Sookarry\n# Date: 20th November 2021\n\nimport csv\nimport cv2\nimport math\nimport mediapipe as mp\nimport numpy as np\nimport re\n\nfrom split_sets import X_train, X_test, y_train, y_test\n\nmp_hands = mp.solutions.hands\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\n#############################\n# Declare functions\n#############################\n \n# Split large sets of images into smaller batches for processing\n# Ensures we don't run out of memory\ndef create_batches(list_name, batch_size):\n for i in range(0, len(list_name), batch_size):\n yield list_name[i:i + batch_size]\n\n# Write landmark values to CSV file\ndef write_csv(data):\n header = ['WristX', 'WristY', 'ThumbCMCX', 'ThumbCMCY', 'ThumbMCPX', 'ThumbMCPY', 'ThumbIPX', 'ThumbIPY', 'ThumbTIPX', 'ThumbTIPY', 'IndexMCPX', 'IndexMCPY', 'IndexPIPX', 'IndexPIPY', 'IndexDIPX', 'IndexDIPY', 'IndexTIPX', 'IndexTIPY', 'MiddleMCPX', 'MiddleMCPY', 'MiddlePIPX', 'MiddlePIPY', 'MiddleDIPX', 'MiddleDIPY', 'MiddleTIPX', 'MiddleTIPY', 'RingMCPX', 'RingMCPY', 'RingPIPX', 'RingPIPY', 'RingDIPX', 'RingDIPY', 'RingTIPX', 'RingTIPY', 'PinkyMCPX', 'PinkyMCPY', 'PinkyPIPX', 'PinkyPIPY', 'PinkyDIPX', 'PinkyDIPY', 'PinkyTIPX', 'PinkyTIPY', 'label']\n \n with open('testing_landmarks.csv', 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n writer.writerows(data)\n\n############################\n# Main code\n############################\n\n# Read images with OpenCV.\nbatches = create_batches(X_test, 3500)\ncsv_data = list()\ntotal_marks = 0\n\n# for each batch of pre-determined size\n# read image and put in images var\nfor sets in batches:\n images = {name: cv2.imread(name) for name in sets}\n \n # Run MediaPipe Hands.\n no_marks = 0\n marks = 0\n \n with mp_hands.Hands(\n static_image_mode=True,\n max_num_hands=1,\n model_complexity=0,\n min_detection_confidence=0.05) as hands:\n # for each image in images var\n for name, image in images.items():\n # Convert the BGR image to RGB, flip the image around y-axis for correct \n # handedness output and process it with MediaPipe Hands.\n results = hands.process(cv2.flip(cv2.cvtColor(image, cv2.COLOR_BGR2RGB), 1))\n landmarks = list()\n\n if not results.multi_hand_landmarks:\n no_marks += 1\n continue\n \n # Draw hand landmarks of each hand.\n marks += 1\n total_marks += 1\n print(f'Hand landmarks of {name}:')\n image_hight, image_width, _ = image.shape\n annotated_image = cv2.flip(image.copy(), 1)\n \n # for each set of all landmarks on one hand\n for hand_landmarks in results.multi_hand_landmarks:\n for i in range(21):\n # Append x and y finger tip landmarks \n landmarks.append(hand_landmarks.landmark[i].x)\n landmarks.append(hand_landmarks.landmark[i].y)\n \n # Extract label from filename and append to landmarks\n m = re.search('Frames_(.+?)/', name)\n if m:\n found = m.group(1) \n landmarks.append(found)\n\n csv_data.append(landmarks) \n \n \n # print(\"Landmarks Applied\")\n \n # Print number of images to which landmarks \n # could/couldn't be applied\n print(no_marks)\n print(marks)\n\nwrite_csv(csv_data)\nprint(total_marks)\n","repo_name":"CiaraSookarry/ISL_Translation","sub_path":"apply_landmarks.py","file_name":"apply_landmarks.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"74715141972","text":"import os, cv2\n\npath = \"images\"\nimages = []\n \nfor i in 
os.listdir(path):\n name, ext = os.path.splitext(i)\n\n if ext in ['.gif', '.png', '.jpg', '.jpeg','.jfif']:\n file_name = path+\"/\"+i\n \n images.append(file_name)\n \ncount = len(images)\nframe = cv2.imread(images[0])\nheight, width, channel = frame.shape\nsize = (width, height)\n\nout = cv2.VideoWriter(\"project.avi\", cv2.VideoWriter_fourcc(*'DIVX'), 0.8, size)\n\n# step through the images in reverse order, down to and including index 0\nfor j in range(count-1, -1, -1):\n frame = cv2.imread(images[j])\n out.write(frame) # write the image frame, not the filename string\nout.release()","repo_name":"RK2310/Project-105","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73284888853","text":"from sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\ndocuments = [\r\n \"Natural language processing (NLP) is a field of study in artificial intelligence.\",\r\n \"NLP techniques are used in various applications like machine translation and sentiment analysis.\",\r\n \"The development of NLP tools and libraries has made text analysis easier.\",\r\n]\r\nquery = \"What is natural language processing?\"\r\nvectorizer = TfidfVectorizer()\r\ntfidf_matrix = vectorizer.fit_transform(documents)\r\nquery_vector = vectorizer.transform([query])\r\ncosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()\r\nmost_similar_index = cosine_similarities.argmax()\r\nprint(\"Query:\", query)\r\nprint(\"Most Similar Document:\")\r\nprint(documents[most_similar_index])\r\nprint(\"Cosine Similarity:\", cosine_similarities[most_similar_index])\r\n","repo_name":"NithinSai0110/Natural-Language-Processing","sub_path":"TF-IDFScore.py","file_name":"TF-IDFScore.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"29267174790","text":"\nimport cv2, imutils, socket\nimport numpy as np\nimport time, os\nimport base64\nimport threading, wave, pyaudio,pickle,struct\nBUFF_SIZE = 65536\n\nBREAK = False\nclient_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nclient_socket.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,BUFF_SIZE)\nhost_name = socket.gethostname()\nhost_ip = '196.109.118.204'\n#host_ip socket.gethostbyname(host_name)\nport = 9688\nmessage = b'Hello'\nprint(f\"{host_ip} on {port}\")\nclient_socket.sendto(message,(host_ip,port))\n\ndef video_stream():\n\t\n\tcv2.namedWindow('RECEIVING VIDEO') \n\tcv2.moveWindow('RECEIVING VIDEO', 10,360) \n\tfps,st,frames_to_count,cnt = (0,0,20,0)\n\twhile True:\n\t\tpacket,_ = client_socket.recvfrom(BUFF_SIZE)\n\t\tdata = base64.b64decode(packet,' /')\n\t\tnpdata = np.frombuffer(data,dtype=np.uint8)\n\t\t# npdata = np.fromstring(data,dtype=np.uint8)\n\t\n\t\tframe = cv2.imdecode(npdata,1)\n\t\tcv2.imshow(\"RECEIVING VIDEO\",frame)\n\t\tkey = cv2.waitKey(1) & 0xFF\n\t\n\t\tif key == ord('q'):\n\t\t\tclient_socket.close()\n\t\t\tos._exit(1)\n\t\t\tbreak\n\n\t\tif cnt == frames_to_count:\n\t\t\ttry:\n\t\t\t\tfps = round(frames_to_count/(time.time()-st))\n\t\t\t\tst=time.time()\n\t\t\t\tcnt=0\n\t\t\texcept:\n\t\t\t\tpass\n\t\tcnt+=1\n\t\n\tclient_socket.close()\n\tcv2.destroyAllWindows() \n\n\ndef audio_stream():\n\t\n\tp = pyaudio.PyAudio()\n\tCHUNK = 1024\n\tstream = p.open(format=p.get_format_from_width(2),\n\t\t\t\t\tchannels=2,\n\t\t\t\t\trate=44100,\n\t\t\t\t\toutput=True,\n\t\t\t\t\tframes_per_buffer=CHUNK)\n\t\t\t\t\t\n\t# create socket\n\tclient_socket = 
socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\tsocket_address = (host_ip,port-1)\n\tprint('server listening at',socket_address)\n\tclient_socket.connect(socket_address) \n\tprint(\"CLIENT CONNECTED TO\",socket_address)\n\tdata = b\"\"\n\tpayload_size = struct.calcsize(\"Q\")\n\twhile True:\n\t\ttry:\n\t\t\twhile len(data) < payload_size:\n\t\t\t\tpacket = client_socket.recv(4*1024) # 4K\n\t\t\t\tif not packet: break\n\t\t\t\tdata+=packet\n\t\t\tpacked_msg_size = data[:payload_size]\n\t\t\tdata = data[payload_size:]\n\t\t\tmsg_size = struct.unpack(\"Q\",packed_msg_size)[0]\n\t\t\twhile len(data) < msg_size:\n\t\t\t\tdata += client_socket.recv(4*1024)\n\t\t\tframe_data = data[:msg_size]\n\t\t\tdata = data[msg_size:]\n\t\t\tframe = pickle.loads(frame_data)\n\t\t\tstream.write(frame)\n\n\t\texcept:\n\t\t\t\n\t\t\tbreak\n\n\tclient_socket.close()\n\tprint('Audio closed',BREAK)\n\tos._exit(1)\n\t\n\n\nfrom concurrent.futures import ThreadPoolExecutor\nwith ThreadPoolExecutor(max_workers=2) as executor:\n\t# executor.submit(audio_stream) #to hear audio uncomment\n\texecutor.submit(video_stream)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# import socket\n# import io\n# import time\n\n# # create socket object\n# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# # Get the message size\n# message_size = s.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)\n# print(message_size.decode())\n# # get local machine name\n# host =socket.gethostbyname(socket.gethostname())\n\n# port = 10021\n\n# # connection to hostname on the port\n# s.connect((host, port))\n\n# # receive no more than 1024 bytes\n# while True:\n# msg = s.recv(21383097)\n\n# buffer_reader = io.BufferedReader(io.BytesIO(msg))\n# print(type(buffer_reader))\n# with open('file.png','wb') as file:\n# file.write(buffer_reader.read())\n# time.sleep(100)\n# # s.close()\n","repo_name":"Qarani-m/pyhton-socket-video-stream","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16066922393","text":"import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import GPT2Config, default_data_collator\nfrom datasets import load_from_disk\nimport os\nimport itertools\nfrom typing import Iterable, Iterator\n\n# -- batch training utils: auto-reload dataloader.\nclass InfiniteYield(Iterator):\n def __init__(self, iterable: Iterable):\n self.iterable = iterable\n self.iterator = iter(itertools.cycle(self.iterable))\n\n def __next__(self):\n return next(self.iterator)\n\n def pop(self):\n return next(self.iterator)\n \n# -- (bucketing for seq2seq) stratify input sequences into groups that have roughly the same size and pad them accordingly for minimal padding.\nclass BucketingDataset(Dataset):\n def __init__(self, data:list, split:str, context_length:int, model_config:GPT2Config, pad_token_id:int):\n self.split = split\n self.total_tokens = 1024\n self.token_limit = model_config.n_positions\n self.pad_token_id = pad_token_id\n self.context_length = context_length\n \n original_seq_cnt = len(data)\n # NOTE: discard sequences shorter than the context size plus 1.\n data = filter(lambda x: len(x) > context_length, data)\n \n # NOTE: discard sequences greater than the maximum sequence length of the model.\n data = list(filter(lambda x: len(x) <= self.token_limit, data))\n\n self.batches = self._make_batches(data)\n print(f\"{'='*10} {split} size: {len(data)} ({original_seq_cnt - 
len(data)} discarded) (max len %d) (%d batches) {'='*10}\" % (\n max(len(d) for d in data),\n len(self.batches)\n ))\n\n def _make_batches(self, data):\n \"\"\"Group by similar lengths, then create padded batches that meet the token limit.\"\"\"\n sorted_data = sorted(data, key=lambda x: -len(x))\n batches = []\n\n i = 0\n while i < len(sorted_data):\n example = sorted_data[i]\n\n # The first element will be the longest, which will determine the padded size.\n element_size = len(example)\n batch_size = max(1, self.total_tokens // element_size)\n\n batch = sorted_data[i:i+batch_size]\n batch = self._pad_batch(batch, element_size)\n\n batches.append(batch)\n i = i + batch_size\n\n return batches\n\n def _pad_batch(self, batch, element_size):\n batch_ = []\n for element in batch:\n element_ = element + [self.pad_token_id]*(element_size - len(element))\n assert len(element_) == element_size\n batch_.append(element_)\n return batch_\n\n def __len__(self):\n return len(self.batches)\n\n def __getitem__(self, index):\n return torch.tensor(self.batches[index], dtype=torch.long)\n\n\ndef load_dataloaders(args, model_config, pad_token_id:int, eval_mode:bool = False):\n # -- load dataset\n if args.sentencized:\n # the dataset preprocessed into sentences.\n dataset_file_name = f\"{args.dataset}-sentencized.hf\"\n elif args.bucketing:\n print(f\"{'='*10} Using bucketing batching technique for minimal padding. {'='*10}\")\n # for using the padding setting same as in STRLM 2020 GPT-2 expr.\n import warnings\n warnings.warn(\"This setting constructs batches with sequences with nearly equal lengths for minimal padding. To do so, it will not use the batch size specified in the expr args.\")\n dataset_file_name = f\"{args.dataset}-no-pad.hf\"\n else:\n # the dataset preprocessed with padding for typical batch training.\n dataset_file_name = f\"{args.dataset}.hf\"\n \n if args.bucketing:\n path_to_bucket = os.path.join(args.dataset_load_dir, f\"{args.dataset}-bucketing.pth\")\n if os.path.exists(path_to_bucket):\n datasets = torch.load(path_to_bucket)\n else:\n datasets = load_from_disk(os.path.join(args.dataset_load_dir, dataset_file_name))\n datasets = load_bucketing_dataset(datasets, model_config, pad_token_id, args)\n torch.save(datasets, path_to_bucket)\n else:\n datasets = load_from_disk(os.path.join(args.dataset_load_dir, dataset_file_name))\n \n eval_dataloader = DataLoader(\n dataset = datasets[args.eval_split], \n collate_fn = None if args.bucketing else default_data_collator,\n shuffle = False,\n batch_size = 1 if args.bucketing else args.eval_batch_size,\n )\n \n decode_dataloader = DataLoader(\n dataset = datasets[args.decode_split], \n collate_fn = None if args.bucketing else default_data_collator,\n shuffle = False if eval_mode else True,\n batch_size = 1 if args.bucketing else args.eval_batch_size,\n )\n \n if eval_mode:\n return eval_dataloader, decode_dataloader\n \n else:\n train_dataloader = DataLoader(\n dataset = datasets[\"train\"], \n collate_fn = None if args.bucketing else default_data_collator, \n shuffle = True, \n batch_size = 1 if args.bucketing else args.train_batch_size,\n )\n return train_dataloader, eval_dataloader, decode_dataloader\n\n\ndef load_bucketing_dataset(dataset_dict, model_config, pad_token_id, args):\n datasets = {}\n for split_name, data in dataset_dict.items():\n datasets[split_name] = BucketingDataset(\n data[\"input_ids\"], split_name, args.context_length, model_config, pad_token_id\n )\n return 
datasets","repo_name":"nyu-dl/non-monotonic-self-terminating-lm","sub_path":"src/gpt2/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"31925252489","text":"import unittest\nfrom unittest.mock import patch\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nfrom core.scrapers.rewe_scraper import ReweScraper\n\n\nclass TestReweScraper(unittest.TestCase):\n def setUp(self):\n self.scraper = ReweScraper()\n\n def test_get_path(self):\n relative_path = \"./config.ini\"\n result = self.scraper.get_path(relative_path)\n self.assertIsInstance(result, str)\n self.assertTrue(result.endswith(relative_path))\n\n def test_read_config(self):\n config = self.scraper.read_config()\n self.assertIsNotNone(config)\n self.assertIn(\"paths\", config.sections())\n\n @patch(\"core.scrapers.rewe_scraper.requests.get\")\n def test_scrape(self, mock_get):\n # Mock the response from requests.get\n mock_response = MockResponse()\n mock_get.return_value = mock_response\n\n soup_bowl, url_bowl = self.scraper.scrape()\n self.assertIsNotNone(soup_bowl)\n self.assertIsNotNone(url_bowl)\n self.assertIsInstance(soup_bowl, dict)\n self.assertIsInstance(url_bowl, dict)\n\n def test_etl(self):\n with patch.object(self.scraper, \"scrape\") as mock_scrape:\n # Mock the return value of the scrape method with appropriate values\n mock_soup = BeautifulSoup(\"\", \"html.parser\")\n mock_scrape.return_value = (\n {\"some_address\": [mock_soup]},\n {\"some_address\": [\"https://www.example.com\"]},\n )\n df = self.scraper.etl()\n self.assertIsInstance(df, pd.DataFrame)\n\n def test_export_csv(self):\n with patch(\"core.scrapers.rewe_scraper.pd.read_csv\") as mock_read_csv, patch(\n \"core.scrapers.rewe_scraper.pd.DataFrame.to_csv\"\n ) as mock_to_csv:\n # Mock the return value of pd.read_csv\n mock_read_csv.return_value = pd.DataFrame()\n test_df = pd.DataFrame(\n {\n \"title\": [\"title1\", \"title2\"],\n \"img_url\": [\"url1\", \"url2\"],\n \"article_link\": [\"link1\", \"link2\"],\n \"A\": [1, 2],\n \"B\": [3, 4],\n }\n )\n self.scraper.export_csv(test_df)\n mock_to_csv.assert_called_once()\n\n def test_run(self):\n with patch.object(self.scraper, \"etl\") as mock_etl, patch.object(\n self.scraper, \"export_csv\"\n ) as mock_export_csv:\n # Mock the return value of the etl method\n mock_etl.return_value = pd.DataFrame()\n self.scraper.run()\n mock_etl.assert_called_once()\n mock_export_csv.assert_called_once()\n\n\nclass MockResponse:\n @property\n def status_code(self):\n return 200\n\n def json(self):\n # Add mock data here\n return [\n {\n \"companyName\": \"Company\",\n \"contactStreet\": \"Street\",\n \"contactCity\": \"City\",\n \"contactZipCode\": \"12345\",\n \"wwIdent\": \"1234\",\n }\n ]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"CsSCE2023/CapstoneScraper","sub_path":"tests/test_rewe_scraper.py","file_name":"test_rewe_scraper.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74099262933","text":"from django.shortcuts import render,redirect, get_object_or_404\nfrom extra_func import get_weekly_calendar,get_fav_status\nfrom .models import *\nfrom django.http import HttpResponse,Http404,HttpResponseRedirect\nfrom datetime import datetime\nfrom django.contrib.auth import authenticate, login , logout\nimport time,string,random\nfrom 
django.contrib.auth.decorators import login_required\nfrom scripts import gmail\nfrom django.contrib.auth.decorators import login_required\nimport re\nimport textwrap\nfrom django.utils import timezone\nfrom django.db.models import Q\nimport json\nimport requests\n\ndef auth_logout(request):\n logout(request)\n return redirect('home')\n\n\ndef auth_register(request):\n if request.method == \"POST\":\n method = request.POST['signup_method']\n if method == 'email':\n try:\n username = request.POST['username']\n first_name = request.POST['nickname']\n if not re.match(r'[fh]201[0-9]{4}',username):\n raise Http404(\"Not Allowed!\")\n except:\n raise Http404(\"Please provide all necessary details to register.\")\n else:\n if username and first_name:\n try:\n user = User.objects.get(username=username)\n return render(request, 'connect/info.html', { 'msg' : \"\"\"Your account is\n already registered. Please login using the link below\"\"\"})\n except Exception as e:\n email = username+'@pilani.bits-pilani.ac.in'\n password = \"\".join(random.choice(string.letters+string.digits) for i in range(8))\n user = User(username=username, first_name=first_name, email=email)\n user.set_password(password)\n user.save()\n email_subject = \"Bits Connect Password\"\n email_body = (\"Dear %s,\\nThank you for registering with BITS Connect.Your password is\\n%s\\n\\n\"+\n \"If you are a SU representative please contact the administrators for permission to \"+\n \"solve problems on P.R.S. Also if you represent a Department, Club or Regional Association \"+\n \"that is yet to receive login credentials please request for one at the earliest. \"+\n \"\\n\\n-regards\\nSidhartha Namburi\")%(first_name, password)\n gmail.send(email, email_subject, email_body)\n return render(request, 'connect/email_sent.html')\n\n else:\n raise Http404(\"Please provide the necessary details to register.\")\n elif method == 'facebook':\n access_token = request.POST['access_token']\n file_ = requests.get(\n \"https://graph.facebook.com/me?access_token=\" + access_token + '&fields=email,first_name,last_name')\n ret = file_.json()\n uid_ = ret['id']\n try:\n email = ret['email']\n except KeyError:\n raise Http404(\"Unknown Facebook error\")\n try:\n user = UserFacebookData.objects.get(uid=uid_).user_profile\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponse(1,status=200)\n except UserFacebookData.DoesNotExist:\n email = ret['email'].encode('utf-8')\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n first_name = ret['first_name'].encode('utf-8')\n last_name = ret['last_name'].encode('utf-8')\n # HANDLE PHONE HERE\n username = first_name.lower().replace(\" \", \"\") + str(random.randint(1, 999999))\n user = User(username=username, first_name=first_name, email=email)\n user.save()\n except:\n return HttpResponse(0, status=400)\n UserFacebookData.objects.create(token=access_token, extra_data=json.dumps(ret),\n user_profile=user, uid=uid_)\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponse(1, status=200)\n else:\n return HttpResponse(-1, status=400)\n\n\n\ndef auth_forgot(request):\n if request.method == \"POST\":\n try:\n username = request.POST['username']\n except:\n raise Http404(\"500.Empty Username not allowed!!\")\n\n else:\n try:\n user = User.objects.get(username=username)\n password = \"\".join(random.choice(string.letters+string.digits) for i in range(8))\n user.set_password(password)\n user.save()\n 
email_subject = \"BITS Connect Password RESET\"\n email_body = \"Dear %s, \\nYour password is %s \\n\\n -regards\\n Sidhartha Namburi\"%(user.first_name, password)\n gmail.send(user.email, email_subject, email_body)\n return render(request, 'connect/email_sent.html')\n except User.DoesNotExist:\n return render(request, 'connect/info.html', { 'msg' : \"\"\"You don't have an account.\n\t\t\t\t\tPlease register\"\"\"})\n except Exception as e:\n #print e\n raise Http404('Error Occured')\n else:\n return redirect('home')\n\n\n\n\ndef home(request):\n if not request.user.is_authenticated():\n\n return render(request,'connect/index.html')\n else:\n context = {'events':GlobalEvent.objects.all()}\n return render(request,'connect/dashboard.html',context)\n\ndef about(request):\n return render(request, 'connect/about.html')\n\n\n\ndef auth_login(request):\n error = 0\n if request.method == 'POST':\n try:\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponse(1, status=200)\n\n\n else:\n error = 3\n else:\n\n user = User.objects.get(username=username)\n error=1\n\n except User.DoesNotExist:\n error=2\n except Exception as e:\n redirect('home')\n return HttpResponse(error, status=400)\n else:\n return redirect('home')\n\n\n\n\n\n@login_required\ndef get_user_name(request):\n if request.method == 'POST':\n username = request.POST['username']\n try:\n user = User.objects.get(username=username)\n return HttpResponse(user.first_name)\n except:\n return HttpResponse('')\n else:\n return redirect('home')\n\n\n@login_required\ndef auth_reset_pass(request):\n if request.method == 'POST':\n try:\n password = request.POST['newpassword']\n oldpassword = request.POST['oldpassword']\n user = authenticate(username=request.user, password=oldpassword)\n except:\n return redirect('home')\n try:\n if user:\n user.set_password(password)\n user.save()\n return redirect('logout')\n else:\n return redirect('home')\n except:\n raise Http404('Error occured.')\n else:\n redirect('home')\n\n\n@login_required\ndef missed_call(request):\n if request.method == 'POST':\n try:\n user = User.objects.get(username=request.POST['username'])\n MissedCall.objects.create(user=user,actor=request.user)\n return HttpResponse(1)\n except:\n return HttpResponse(1)\n else:\n return redirect('home')\n@login_required\ndef bits_line(request):\n partner_id = request.GET.get('partner_id',\"\")\n call_obj = MissedCall.objects.filter(user=request.user).select_related('actor__username').order_by('-pk')\n calls = [x.actor.username for x in call_obj]\n call_obj.delete()\n\n\n return render(request,'connect/bits_line.html',{'my_id':request.user, 'partner_id':partner_id, 'calls':calls})\n\n@login_required\ndef services(request):\n context={'services':Service.objects.all().order_by(\"-created\")}\n return render(request, 'connect/services.html',context,)\n\n@login_required\ndef del_service(request,service_id):\n try:\n service = get_object_or_404(Service,pk=service_id, user=request.user)\n service.delete()\n except:\n raise Http404(\"Cannot delete post.\")\n return redirect('myservices')\n\n\n\n@login_required\ndef del_classified(request,ad_id):\n try:\n classified = get_object_or_404(Classified,pk=ad_id, user=request.user)\n classified.delete()\n except:\n raise Http404(\"Cannot delete post.\")\n return redirect('myads')\n\n\n@login_required\ndef del_travel(request,t_id):\n try:\n classified = 
get_object_or_404(Travel,pk=t_id, user=request.user)\n classified.delete()\n except:\n raise Http404(\"Cannot delete post.\")\n return redirect('mytravel')\n\n\n@login_required\ndef my_travel(request):\n context={'travel' : Travel.objects.filter(user=request.user).order_by(\"-pk\")}\n return render(request, 'connect/my_travel.html',context)\n\n\n\n\n@login_required\ndef del_problem(request,p_id):\n try:\n problem = get_object_or_404(Problem,pk=p_id, user=request.user)\n bhavan = problem.bhavan.name\n problem.delete()\n except:\n raise Http404(\"Cannot delete post.\")\n\n return redirect('myproblems',bhavan=bhavan )\n\n\n@login_required\ndef my_services(request):\n context={'post':False,'error':False}\n if request.method == 'POST':\n context['post'] = True\n try:\n title = request.POST['title']\n content = request.POST['content']\n if title.isspace() or content.isspace():\n raise ValueError('Only spaces')\n if not Service.objects.filter(title=title, content=content, user=request.user):\n Service.objects.create(title=title, content=content, user=request.user)\n except ValueError as e:\n context['error'] = True\n\n context['services'] = Service.objects.filter(user=request.user).order_by(\"-created\")\n return render(request, 'connect/my_services.html',context)\n\n\n\n@login_required\ndef classifieds(request):\n context={'ads':Classified.objects.all().order_by(\"-created\")}\n return render(request, 'connect/classifieds.html',context,)\n\n\n\n@login_required\ndef my_classifieds(request):\n context={'post':False,'error':False}\n if request.method == 'POST':\n context['post'] = True\n try:\n title = request.POST['title']\n content = request.POST['content']\n if title.isspace() or content.isspace():\n raise ValueError('Only spaces')\n if not Classified.objects.filter(title=title, content=content, user=request.user):\n Classified.objects.create(title=title, content=content, user=request.user)\n except ValueError as e:\n context['error'] = True\n\n context['ads']=Classified.objects.filter(user=request.user).order_by(\"-created\")\n return render(request, 'connect/my_classifieds.html',context)\n\n\n\n@login_required\ndef travel(request):\n context={'travel':None, 'post':False, 'error':False, 'date':'', 'places':Place.objects.all()}\n if request.POST:\n\n try:\n if request.POST[\"type\"] == 'search':\n context['post'] = True\n context['date'] = request.POST['date']\n from_place = Place.objects.get(pk=request.POST['my-location'])\n to_place = Place.objects.get(pk=request.POST['destination'])\n start_date = datetime.strptime(request.POST['date'], \"%d/%m/%Y\")\n date_range = (\n timezone.make_aware(datetime.combine(start_date, datetime.min.time()), timezone.get_current_timezone()),\n timezone.make_aware(datetime.combine(start_date, datetime.max.time()), timezone.get_current_timezone())\n )\n context['travel'] = Travel.objects.filter(date__range=date_range,from_place=from_place, to_place=to_place)\n elif request.POST[\"type\"] == \"create\":\n content = request.POST['description']\n if content.isspace():\n raise ValueError(\"only Spaces\")\n from_place = Place.objects.get(pk=request.POST['my-location'])\n to_place = Place.objects.get(pk=request.POST['destination'])\n start_date = timezone.make_aware(datetime.strptime(request.POST['date'], \"%d/%m/%Y %I:%M %p\"), timezone.get_current_timezone())\n Travel.objects.create(date=start_date, from_place=from_place, to_place=to_place, content=content, user=request.user)\n return redirect('mytravel')\n\n else:\n return Http404(\"Your form is not authorised to 
make a request.\")\n\n except ValueError as e:\n #print e\n context['error'] = True\n except:\n redirect(\"travel\")\n\n\n return render(request, 'connect/travel.html',context)\n\n\n\n\n@login_required\ndef calendar(request):\n context={}\n context['post']=False\n context['error']=False\n if request.method == 'POST':\n if not request.user.has_perm(\"connect.event_add\"):\n raise Http404('You do not have permission')\n\n context['post'] = True\n try:\n title = request.POST['title']\n date_time = request.POST['date_time']\n if title.isspace():\n raise ValueError('Only spaces')\n date_time = timezone.make_aware(datetime.strptime(date_time, \"%d/%m/%Y %I:%M %p\"), timezone.get_current_timezone())\n date_time = date_time\n if not Event.objects.filter(user=request.user, title=title, time=date_time):\n Event.objects.create(user=request.user, title=title, time=date_time)\n else:\n pass\n except ValueError as e:\n context['error'] = True\n except:\n Http404(\"Some error occured\")\n\n week = request.GET.get('week',0)\n week = int(week)\n context.update(get_weekly_calendar(week))\n return render(request, 'connect/calendar.html',context)\n\n\n\n\n@login_required\ndef vote_problem(request):\n if request.method == 'POST':\n p_id = request.POST['p_id']\n problem = get_object_or_404(Problem, pk=p_id)\n try:\n vote = ProblemVote.objects.get(problem=problem, user=request.user)\n vote.delete()\n except ProblemVote.DoesNotExist:\n ProblemVote.objects.create(problem=problem, user=request.user)\n return HttpResponse(1)\n else:\n raise \tHttp404(\"Cannot Vote\")\n\n\n\n\n@login_required\ndef problems(request, bhavan):\n bhavan = get_object_or_404(Bhavan,name=bhavan)\n problems = Problem.objects.all().filter(bhavan=bhavan).order_by('-votes')\n status = get_fav_status(problems,request.user)\n context = {'problems': problems, 'status_dict':status, 'bhavan':bhavan}\n return render(request,'connect/problems_unsolved.html',context)\n\n\n\n\n@login_required\ndef problems_solved(request, bhavan):\n bhavan = get_object_or_404(Bhavan,name=bhavan)\n problems = ProblemSolved.objects.all().filter(bhavan=bhavan).order_by('-pk')[0:30]\n context = {'problems': problems, 'bhavan':bhavan}\n return render(request,'connect/problems_solved.html',context)\n\n\n\n@login_required\ndef my_problems(request, bhavan):\n bhavan = get_object_or_404(Bhavan,name=bhavan)\n context={'post':False,'error':False}\n if request.method == 'POST':\n context['post'] = True\n try:\n title = request.POST['title']\n content = request.POST['content']\n if title.isspace() or content.isspace():\n raise ValueError('Only spaces')\n if not Problem.objects.filter(title=title, content=content, user=request.user, bhavan=bhavan):\n Problem.objects.create(title=title, content=content, user=request.user, bhavan=bhavan)\n except ValueError as e:\n context['error'] = True\n problems = Problem.objects.all().filter(bhavan=bhavan, user=request.user).order_by('-pk')\n context.update({'problems': problems, 'bhavan':bhavan})\n return render(request,'connect/my_problems.html',context)\n\n\n\n\n@login_required\ndef solve_problem(request, p_id):\n if not request.user.has_perm(\"connect.problem_solve\"):\n raise Http404('You don\\'t have permission to solve this')\n context={}\n context['post']=False\n context['error']=False\n context['problem'] = get_object_or_404(Problem,pk=p_id)\n problem = context['problem']\n if request.method == 'POST':\n\n context['post'] = True\n try:\n reply = request.POST['reply']\n if reply.isspace():\n raise ValueError('Only spaces')\n 
ProblemSolved.objects.create(user=problem.user, title=problem.title, reply=reply,\n solved_by=request.user, bhavan = problem.bhavan, posted_on = problem.created )\n if request.user != problem.user:\n gmail.send(problem.user.email, 'Your Problem has been solved by %s!'%request.user.first_name,\n 'Dear %s,\\nRejoice! Your problem has been addressed.\\n %s \\n\\n %s \\n\\n\\n-regards\\nSidhartha Namburi'%(problem.user.first_name,problem.title,reply))\n\n\n problem.delete()\n return redirect(\"problems_solved\", bhavan=problem.bhavan.name)\n\n except ValueError as e:\n context['error'] = True\n\n\n else:\n return render(request, 'connect/solve_problem.html',context)\n\n\n@login_required\ndef phone_db(request):\n if request.method == 'POST':\n nos = PhoneNumberDB.objects.filter(Q(name__istartswith=request.POST['q'])| Q(designation__icontains=request.POST['q']))\n else:\n nos = PhoneNumberDB.objects.all()\n\n\n return render(request, 'connect/misc_no.html', {'nos':nos})\n\n\n@login_required\ndef book_search(request):\n if request.method == 'POST':\n books = Book.objects.filter(title__icontains=request.POST['q'])\n else:\n books = Book.objects.all()\n return render(request, 'connect/books.html', {'books':books})\n\n@login_required\ndef my_book_orders(request):\n if request.method == 'POST':\n phone = request.POST['phone']\n address = request.POST['address']\n book = Book.objects.get(id=request.POST['book_id'])\n nos = request.POST['nos']\n BookOrder.objects.create(user=request.user, nos=nos, phone=phone, address=address, book=book)\n return redirect('my_book_orders')\n else:\n return render(request, 'connect/books_order.html', {'orders':BookOrder.objects.filter(user=request.user)})\n\ndef del_book_orders(request, bo_id):\n order = BookOrder.objects.get(pk=bo_id)\n if order.user == request.user:\n order.delete()\n return redirect('my_book_orders')\n\n\n@login_required\ndef view_store(request):\n return render(request, 'connect/store_in_town.html')\n\n@login_required\ndef book_request_view(request):\n title = request.POST['title']\n title = str(title)\n title.replace(\"'\",'\"')\n BookRequest.objects.create(user=request.user, title=title)\n return HttpResponse(1, status=200)","repo_name":"aukris/bitsconnect","sub_path":"connect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71228109013","text":"import json\nimport sqlite3\n\nimport xlsxwriter\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\n\nfrom tba_py import TBA\n\n\nclass SpreadsheetGenerator:\n def __init__(self, db_path, tba):\n self.db_path = db_path\n self.tba = tba\n\n self.workbook = None\n self.formats = None\n self.headers = None\n self.event = None\n self.raw_entries = None\n self.teams = None\n self.matches = None\n\n self.page_names = {\n 'raw': 'raw_data',\n 'raw_calculated': 'raw_calculated',\n 'raw_analysis': 'raw_analysis',\n 'raw_team_list': 'raw_team_list',\n 'raw_matches': 'raw_matches',\n 'raw_team_schedule': 'raw_team_schedule',\n 'pretty_raw': 'Raw Data',\n 'pretty_team_list': 'Team List',\n 'pretty_analysis': 'Analysis',\n 'pretty_matches': 'Schedule',\n 'pretty_team_schedule': 'Team Schedule',\n 'team_stats': 'Team Stats',\n 'match_rundown': 'Match Rundown'\n }\n\n self.raw_formats = json.load(open('formats.json'))\n\n self.range_formats = {\n 'max_green': {\n 'type': '2_color_scale',\n 'min_color': \"#FFFFFF\",\n 'max_color': \"#66BB6A\"\n },\n 'max_red': 
{\n 'type': '2_color_scale',\n 'min_color': \"#FFFFFF\",\n 'max_color': \"#EF5350\"\n }\n }\n\n def create_spreadsheet_for_event(self, event_id, filename='Clooney.xlsx'):\n self.workbook = xlsxwriter.Workbook(filename)\n self.formats = dict([(k, self.workbook.add_format(v)) for k, v in self.raw_formats.items()])\n\n db = sqlite3.connect(self.db_path)\n self.headers = json.load(open('headers.json'))\n self.event = db.execute('SELECT * FROM events WHERE id = \"{}\"'.format(event_id)).fetchone()\n self.raw_entries = [json.loads(e[-2]) for e in\n db.execute('SELECT * FROM scouting_entries WHERE event = \"{}\"'.format(event_id)).fetchall()]\n self.teams = sorted(json.loads(self.event[2]), key=lambda x: int(x['team_number']))\n self.matches = sorted([e for e in self.tba.get_event_matches(event_id) if e['comp_level'] == 'qm'],\n key=lambda x: x['match_number'])\n for match in self.matches:\n for alli in ['red', 'blue']:\n for i in range(3):\n match[alli + '_' + str(i + 1)] = int(match['alliances'][alli]['team_keys'][i][3:])\n\n self.draw_pretty_analysis()\n self.draw_pretty_match_rundown()\n self.draw_pretty_team_stats()\n self.draw_pretty_team_schedule()\n self.draw_pretty_schedule()\n self.draw_pretty_team_list()\n self.draw_pretty_raw_data()\n\n self.draw_raw_data()\n self.draw_raw_calculated()\n self.draw_raw_analysis()\n self.draw_raw_team_list()\n self.draw_raw_schedule()\n self.draw_raw_team_matches()\n\n self.workbook.close()\n self.workbook = None\n\n @staticmethod\n def next_col(col, i=1):\n col = list(col)\n while i > 0:\n if col[-1] == 'Z':\n col[-1] = 'A'\n col.append('A')\n else:\n col[-1] = chr(ord(col[-1]) + 1)\n i -= 1\n return \"\".join(col)\n\n def name_col(self, name, page, col, num_rows=999, start_row=1):\n self.workbook.define_name(name, \"='{0}'!{1}{3}:{1}{2}\".format(page, col, num_rows + start_row, start_row))\n\n def name_range(self, name, page, start_row=None, start_col='A', end_col='Z', end_row=None):\n range_str = \"='{0}'!{1}{3}:{2}{4}\".format(page, start_col, end_col,\n start_row if start_row is not None else \"\",\n (end_row if end_row is not None else start_row)\n if start_row is not None else \"\")\n self.workbook.define_name(name, range_str)\n\n def draw_raw_data(self):\n page_name = self.page_names['raw']\n headers = self.headers['raw']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n col = 'A'\n row = 1\n header_cols = {}\n data_len = len(self.raw_entries)\n for header in headers:\n sheet.write(self.get_cell(col, row), header['title'])\n self.name_col('raw_{}'.format(header['key']), page_name, col, data_len + 1)\n header_cols[header['key']] = col\n col = self.next_col(col)\n\n for i in range(data_len):\n for header in headers:\n col = header_cols[header['key']]\n val = self.raw_entries[i][header['key']]\n sheet.write(self.get_cell(col, i + 2), val, self.formats['raw_data_cell'])\n\n def draw_raw_calculated(self):\n page_name = self.page_names['raw_calculated']\n headers = self.headers['raw_calculated']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n data_len = len(self.raw_entries)\n col = 'A'\n for header in headers:\n sheet.write(self.get_cell(col, 1), header['title'])\n self.name_col('raw_calculated_{}'.format(header['key']), page_name, col, data_len + 1)\n for i in range(data_len):\n sheet.write(self.get_cell(col, i + 2), header['value'], self.formats['raw_data_cell'])\n col = self.next_col(col)\n\n def draw_raw_analysis(self):\n page_name = 
self.page_names['raw_analysis']\n headers = self.headers['analysis']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n num_teams = len(self.teams)\n\n functions = {\n 'avg': '=IF(ISBLANK(analysis_team_number), \"\", SUMIF(raw_team_number, \"=\"&analysis_team_number, {}) / analysis_match)',\n 'sum': '=IF(ISBLANK(analysis_team_number), \"\", SUMIF(raw_team_number, \"=\"&analysis_team_number, {}))'\n }\n\n col = 'A'\n for header in headers:\n sheet.write(self.get_cell(col, 1), header['title'])\n self.name_col('analysis_{}'.format(header['key']), page_name, col, num_teams + 1)\n for i in range(num_teams):\n if header['key'] == 'team_number':\n sheet.write(self.get_cell(col, i + 2), self.teams[i]['team_number'], self.formats['raw_data_cell'])\n elif 'func' in header.keys():\n value = functions[header['func']].format('raw_{}'.format(header['key']))\n sheet.write(self.get_cell(col, i + 2), value, self.formats['raw_data_cell'])\n else:\n sheet.write(self.get_cell(col, i + 2), header['value'], self.formats['raw_data_cell'])\n col = self.next_col(col)\n\n def draw_raw_team_list(self):\n page_name = self.page_names['raw_team_list']\n headers = self.headers['team_list']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n data_len = len(self.teams)\n col = 'A'\n for header in headers:\n sheet.write(self.get_cell(col, 1), header['title'])\n self.name_col('team_list_{}'.format(header['key']), page_name, col, data_len + 1)\n if header['key'] == 'team_number':\n self.name_col('team_number_list', page_name, col, data_len, 2)\n for i in range(data_len):\n sheet.write(self.get_cell(col, i + 2), self.teams[i][header['key']], self.formats['raw_data_cell'])\n col = self.next_col(col)\n\n def draw_raw_team_matches(self):\n page_name = self.page_names['raw_team_schedule']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n data_len = len(self.teams)\n col = 'A'\n sheet.write(self.get_cell(col, 1), 'Team Number')\n self.name_col('team_schedule_team_number', page_name, col)\n for i in range(data_len):\n sheet.write(self.get_cell(col, i + 2), self.teams[i]['team_number'], self.formats['raw_data_cell'])\n col = self.next_col(col)\n\n sheet.write(self.get_cell(col, 1), 'Matches')\n self.name_range('team_schedule_matches', page_name,\n start_col=col, end_col=self.next_col(col, 20))\n for i in range(data_len):\n for j in range(20):\n sheet.write_array_formula(\n \"{0}:{0}\".format(self.get_cell(self.next_col(col, j), i + 2)),\n \"=ArrayFormula(IFERROR(SMALL(IF(schedule_match_teams=$A{0},ROW(schedule_red_1)-1), ROW({1}:{1}))))\".format(\n i + 2, j + 1),\n self.formats['raw_data_cell']\n )\n\n def draw_raw_schedule(self):\n page_name = self.page_names['raw_matches']\n headers = self.headers['matches']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('red')\n sheet.hide()\n data_len = len(self.matches)\n col = 'A'\n red_1_col = col\n blue_3_col = col\n for header in headers:\n sheet.write(self.get_cell(col, 1), header['title'])\n self.name_col('schedule_{}'.format(header['key']), page_name, col, data_len + 1)\n if header['key'] == 'red_1':\n red_1_col = col\n elif header['key'] == 'blue_3':\n blue_3_col = col\n for i in range(data_len):\n sheet.write(self.get_cell(col, i + 2), self._get_data(self.matches[i], header['key']),\n self.formats['raw_data_cell'])\n col = self.next_col(col)\n self.workbook.define_name(\n 'schedule_match_teams',\n 
\"='{0}'!{1}:{2}\".format(page_name, red_1_col, blue_3_col)\n )\n\n def draw_pretty_raw_data(self):\n page_name = self.page_names['pretty_raw']\n raw_headers = self.headers['raw']\n calc_headers = self.headers['raw_calculated']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('blue')\n col = 'A'\n row = 1\n data_len = len(self.raw_entries)\n for header in raw_headers:\n sheet.write(\n self.get_cell(col, row),\n header['title'],\n self.formats[header['header_format'] if 'header_format' in header.keys() else 'pretty_header']\n )\n for i in range(data_len):\n val = '=raw_{}'.format(header['key'])\n sheet.write(self.get_cell(col, i + 2), val, self.formats[header['format'] if 'format' in header.keys() else 'pretty_data_cell'])\n col = self.next_col(col)\n\n for header in calc_headers[3:]:\n sheet.write(\n self.get_cell(col, row),\n header['title'],\n self.formats[header['header_format'] if 'header_format' in header.keys() else 'pretty_header']\n )\n for i in range(data_len):\n val = '=raw_calculated_{}'.format(header['key'])\n sheet.write(self.get_cell(col, i + 2), val, self.formats[header['format'] if 'format' in header.keys() else 'pretty_data_cell'])\n col = self.next_col(col)\n\n def draw_pretty_team_list(self):\n page_name = self.page_names['pretty_team_list']\n headers = self.headers['team_list']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('blue')\n sheet.set_default_row(16, True)\n sheet.set_row(0, 35)\n data_len = len(self.teams)\n col = 'A'\n for header in headers:\n sheet.write(\n self.get_cell(col, 1),\n header['title'],\n self.formats[header['header_format']] if 'format' in header.keys()\n else self.formats['pretty_header']\n )\n options = {}\n if \"hidden\" in header.keys():\n options['hidden'] = header['hidden']\n sheet.set_column(self.get_col_range(col),\n width=header['width'] if \"width\" in header.keys() else 8,\n options=options)\n for i in range(data_len):\n sheet.write(\n self.get_cell(col, i + 2),\n self.teams[i][header['key']],\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n col = self.next_col(col)\n\n def draw_pretty_schedule(self):\n page_name = self.page_names['pretty_matches']\n headers = self.headers['matches']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('blue')\n sheet.set_default_row(16, True)\n sheet.set_row(0, 35)\n data_len = len(self.matches)\n col = 'A'\n for header in headers:\n sheet.write(\n self.get_cell(col, 1),\n header['title'],\n self.formats[header['header_format']] if 'format' in header.keys() else self.formats[\n 'pretty_header']\n )\n options = {}\n if \"hidden\" in header.keys():\n options['hidden'] = header['hidden']\n for i in range(data_len):\n sheet.write(\n self.get_cell(col, i + 2),\n self._get_data(self.matches[i], header['key']),\n self.formats[header['format']] if 'format' in header.keys() else self.formats[\n 'pretty_data_cell']\n )\n sheet.set_column(self.get_col_range(col),\n width=header['width'] if \"width\" in header.keys() else 8,\n options=options)\n # if header['title'] == 'Red Score':\n # sheet.conditional_format(self.get_col_range(col, 2, data_len), {\n # 'type': 'formula',\n # 'criteria': '{0}2>{1}2'.format(col, self.next_col(col)),\n # 'format': self.formats['bold']\n # })\n # sheet.conditional_format(self.get_col_range(self.next_col(col), 2, data_len), {\n # 'type': 'formula',\n # 'criteria': '{0}2>{1}2'.format(self.next_col(col), col),\n # 'format': self.formats['bold']\n # })\n col = 
self.next_col(col)\n\n def draw_pretty_analysis(self):\n page_name = self.page_names['pretty_analysis']\n headers = self.headers['analysis']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('green')\n sheet.set_default_row(16, True)\n sheet.set_row(0, 70)\n data_len = len(self.teams)\n col = 'A'\n team_num_col = col\n for header in headers:\n sheet.write(\n self.get_cell(col, 1),\n header['title'],\n self.formats[header['header_format']] if 'header_format' in header.keys()\n else self.formats['pretty_header']\n )\n options = {}\n if \"hidden\" in header.keys():\n options['hidden'] = header['hidden']\n sheet.set_column(self.get_col_range(col), header['width'] if \"width\" in header.keys() else 8,\n options=options)\n if header['key'] != 'team_number':\n if \"scale\" in header.keys():\n sheet.conditional_format(self.get_col_range(col, 2, data_len), {\n 'type': 'cell',\n 'criteria': '=',\n 'value': 0,\n 'format': self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n })\n sheet.conditional_format(self.get_col_range(col, 2, data_len), self.range_formats[header['scale']])\n for i in range(data_len):\n if header['key'] == 'team_number':\n sheet.write(\n self.get_cell(col, i + 2),\n self.teams[i]['team_number'],\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n team_num_col = col\n else:\n formula = '=LOOKUP({0}, analysis_team_number, {1})'.format(\n self.get_col_range(team_num_col),\n 'analysis_' + header['key']\n )\n sheet.write(\n self.get_cell(col, i + 2),\n formula,\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n col = self.next_col(col)\n\n def draw_pretty_team_schedule(self):\n page_name = self.page_names['pretty_team_schedule']\n headers = self.headers['matches']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('blue')\n sheet.write('B2', 'Team:', self.formats['team_input_label'])\n sheet.write('C2', int(self.teams[0][\"team_number\"]), self.formats['team_input'])\n sheet.data_validation('C2', {\n 'validate': 'list',\n 'source': '=team_number_list'\n })\n sheet.set_default_row(16, True)\n sheet.set_row(3, 35)\n data_len = 20\n col = 'B'\n match_num_col = col\n for header in headers:\n options = {}\n if \"hidden\" in header.keys():\n options['hidden'] = header['hidden']\n sheet.set_column(self.get_col_range(col),\n width=header['width'] if \"width\" in header.keys() else 8,\n options=options)\n sheet.write(\n self.get_cell(col, 4),\n header['title'],\n self.formats[header['header_format']] if 'format' in header.keys()\n else self.formats['pretty_header']\n )\n if header['title'] in ['Red 1', 'Red 2', 'Red 3', 'Blue 1', 'Blue 2', 'Blue 3']:\n sheet.conditional_format(self.get_col_range(col, 5, data_len), {\n 'type': 'formula',\n 'criteria': '{0}5=$C$2'.format(col),\n 'format': self.formats['bold']\n })\n for i in range(data_len):\n if header['title'] == 'Match':\n match_num_col = col\n sheet.write(\n self.get_cell(col, 5),\n \"=TRANSPOSE(FILTER(team_schedule_matches, team_schedule_team_number=$C$2))\",\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n for i in range(1, data_len):\n sheet.write_blank(\n self.get_cell(col, 5 + i),\n \"\",\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n else:\n for i in range(data_len):\n sheet.write(\n self.get_cell(col, i + 5),\n 
'=IFERROR(LOOKUP({0}, schedule_match_number, {1}))'.format(\n self.get_col_range(match_num_col),\n 'schedule_{}'.format(header['key'])\n ),\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n if header['title'] == 'Red Score':\n sheet.conditional_format(self.get_col_range(col, 5, data_len), {\n 'type': 'formula',\n 'criteria': '{0}5>{1}5'.format(col, self.next_col(col)),\n 'format': self.formats['bold']\n })\n sheet.conditional_format(self.get_col_range(self.next_col(col), 5, data_len), {\n 'type': 'formula',\n 'criteria': '{0}5>{1}5'.format(self.next_col(col), col),\n 'format': self.formats['bold']\n })\n col = self.next_col(col)\n\n def draw_pretty_team_stats(self):\n page_name = self.page_names['team_stats']\n header_dict = {\n 'raw': self.headers['raw'],\n 'raw_calculated': self.headers['raw_calculated'][3:]\n }\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('green')\n sheet.write('B1', 'Team:', self.formats['team_input_label'])\n sheet.write('C1', int(self.teams[0][\"team_number\"]), self.formats['team_input'])\n sheet.data_validation('C1', {\n 'validate': 'list',\n 'source': '=team_number_list'\n })\n sheet.set_default_row(16, True)\n sheet.set_row(3, 70)\n data_len = 20\n col = 'A'\n for key, headers in header_dict.items():\n for header in headers:\n sheet.write(\n self.get_cell(col, 4),\n header['title'],\n self.formats[header['header_format']] if 'header_format' in header.keys()\n else self.formats['pretty_header']\n )\n options = {}\n if \"hidden\" in header.keys():\n options['hidden'] = header['hidden']\n sheet.set_column(self.get_col_range(col), header['width'] if \"width\" in header.keys() else 8,\n options=options)\n\n sheet.write(\n self.get_cell(col, 5),\n \"=IFERROR(LOOKUP($C1, analysis_team_number, analysis_{}{}))\"\n .format('' if key == 'raw' else 'calculated_', header['key']),\n self.formats['pretty_avg_cell']\n )\n sheet.write(\n self.get_cell(col, 6),\n \"=FILTER({0}_{1}, {0}_team_number=$C$1)\".format(key, header['key']),\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n\n for i in range(1, data_len):\n sheet.write(\n self.get_cell(col, 6 + i),\n \"\",\n self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n )\n\n if \"scale\" in header.keys():\n sheet.conditional_format(self.get_col_range(col, 6, data_len), {\n 'type': 'cell',\n 'criteria': '=',\n 'value': 0,\n 'format': self.formats[header['format']] if 'format' in header.keys()\n else self.formats['pretty_data_cell']\n })\n sheet.conditional_format(self.get_col_range(col, 6, data_len), self.range_formats[header['scale']])\n\n col = self.next_col(col)\n\n def draw_pretty_match_rundown(self):\n page_name = self.page_names['match_rundown']\n raw_header_dict = {\n 'raw': self.headers['raw'],\n 'raw_calculated': self.headers['raw_calculated'][3:]\n }\n analysis_headers = self.headers['analysis']\n sheet = self.workbook.add_worksheet(page_name)\n sheet.set_tab_color('green')\n sheet.set_default_row(10, True)\n\n sheet.write('B2', 'Team:', self.formats['team_input_label'])\n sheet.write('C2', int(self.teams[0][\"team_number\"]), self.formats['team_input'])\n sheet.data_validation('C2', {\n 'validate': 'list',\n 'source': '=team_number_list'\n })\n\n sheet.write('A1', '=FILTER(team_schedule_matches, team_schedule_team_number=C2)')\n sheet.set_row(0, None, None, {'hidden': True})\n sheet.write('D2', 'Match:', 
self.formats['team_input_label'])\n sheet.write('E2', '=A1', self.formats['team_input'])\n sheet.data_validation('E2', {\n 'validate': 'list',\n 'source': 'A1:{}1'.format(self.next_col('A', 20))\n })\n\n col = 'A'\n row = 3\n team_num_col = col\n sheet.set_row(row - 1, 70)\n for header in analysis_headers:\n sheet.write(\n self.get_cell(col, row),\n header['title'],\n self.formats[header['header_format']] if 'header_format' in header.keys()\n else self.formats['pretty_header']\n )\n sheet.set_column(self.get_col_range(col), 8)\n if header['key'] == 'team_number':\n team_num_col = col\n for pos in range(6):\n sheet.set_row(row + 1 + pos, 16)\n sheet.write(\n self.get_cell(col, row + 1 + pos),\n \"=LOOKUP($E$2, schedule_match_number, schedule_{}_{})\".format('red' if pos < 3 else 'blue',\n (pos % 3) + 1),\n self.formats['red_alliance_data_cell' if pos < 3 else 'blue_alliance_data_cell']\n )\n sheet.conditional_format(\n self.get_range(start_col=col, end_col=self.next_col(col, len(analysis_headers)),\n start_row=row + 1 + pos), {\n 'type': 'formula',\n 'criteria': '${0}{1}=$C$2'.format(col, row + 1 + pos),\n 'format': self.formats['bold']\n })\n else:\n for pos in range(6):\n sheet.write(\n self.get_cell(col, row + 1 + pos),\n \"=LOOKUP({}{}, analysis_team_number, analysis_{})\".format(team_num_col, row + 1 + pos,\n header['key']),\n self.formats['red_alliance_data_cell' if pos < 3 else 'blue_alliance_data_cell']\n )\n\n col = self.next_col(col)\n\n col = 'A'\n row = 11\n team_num_col = col\n sheet.set_row(row - 1, 70)\n for key, raw_headers in raw_header_dict.items():\n for header in raw_headers:\n sheet.write(\n self.get_cell(col, row),\n header['title'],\n self.formats[header['header_format']] if 'header_format' in header.keys() else self.formats[\n 'pretty_header']\n )\n sheet.set_column(self.get_col_range(col), 8)\n if header['key'] == 'team_number':\n team_num_col = col\n for pos in range(6):\n sheet.set_row(row + 1 + pos, 16)\n sheet.write(\n self.get_cell(col, row + 1 + pos),\n \"=LOOKUP($E$2, schedule_match_number, schedule_{}_{})\".format('red' if pos < 3 else 'blue',\n (pos % 3) + 1),\n self.formats['red_alliance_data_cell' if pos < 3 else 'blue_alliance_data_cell']\n )\n sheet.conditional_format(\n self.get_range(start_col=col, end_col=self.next_col(col, len(raw_headers)),\n start_row=row + 1 + pos), {\n 'type': 'formula',\n 'criteria': '${0}{1}=$C$2'.format(col, row + 1 + pos),\n 'format': self.formats['bold']\n })\n else:\n for pos in range(6):\n sheet.write(\n self.get_cell(col, row + 1 + pos),\n \"=FILTER({}_{}, raw_match=$E$2, raw_team_number=${}{})\".format(key, header['key'], team_num_col, row + 1 + pos),\n self.formats['red_alliance_data_cell' if pos < 3 else 'blue_alliance_data_cell']\n )\n\n col = self.next_col(col)\n\n @staticmethod\n def col_to_num(col):\n return (26 * (len(col) - 1)) + (ord(col[-1]) - ord('A'))\n\n @staticmethod\n def get_range(start_col='A', end_col='Z', start_row=None, end_row=None):\n return \"{0}{2}:{1}{3}\".format(start_col, end_col, start_row if start_row is not None else \"\",\n (end_row if end_row is not None else start_row) if start_row is not None else \"\")\n\n @staticmethod\n def get_col_range(col, start=1, num=None):\n if not num:\n return '{0}:{0}'.format(col)\n return '{0}{1}:{0}{2}'.format(col, start, start + num)\n\n @staticmethod\n def get_cell(col, row):\n return '{0}{1}'.format(col, row)\n\n @staticmethod\n def _get_data(data, key):\n keys = key\n if type(key) is str:\n keys = []\n [[keys.append(e) for e in k.split(\".\")] for k in 
key.split(\",\")]\n val = data\n for k in keys:\n try:\n val = val[str(k).strip()]\n except Exception as ex:\n print(val.keys())\n raise ex\n return val\n\n @staticmethod\n def upload_to_google_drive(filename, upload_filename=\"Clooney.xlsx\"):\n gauth = GoogleAuth()\n # Try to load saved client credentials\n gauth.LoadCredentialsFile(\"credentials.json\")\n if gauth.credentials is None:\n # Authenticate if they're not there\n gauth.LocalWebserverAuth()\n elif gauth.access_token_expired:\n # Refresh them if expired\n gauth.Refresh()\n else:\n # Initialize the saved creds\n gauth.Authorize()\n # Save the current credentials to a file\n gauth.SaveCredentialsFile(\"credentials.json\")\n\n drive = GoogleDrive(gauth)\n\n for file in drive.ListFile({'q': \"'1Y20z_cAs780qNOm-hwXx0ork1dgIQJHb' in parents and trashed=false\"}).GetList():\n if file['title'] == upload_filename:\n clooney_file = file\n clooney_file.FetchMetadata()\n break\n else:\n clooney_file = drive.CreateFile({\n 'title': upload_filename, \"parents\": [\n {\"kind\": \"drive#fileLink\", \"id\": '1Y20z_cAs780qNOm-hwXx0ork1dgIQJHb'}]\n })\n\n clooney_file.SetContentFile(filename)\n clooney_file.Upload({'convert': True})\n\n\nif __name__ == \"__main__\":\n db = sqlite3.connect('/Users/kestin/db.sqlite')\n tba = TBA('GdZrQUIjmwMZ3XVS622b6aVCh8CLbowJkCs5BmjJl2vxNuWivLz3Sf3PaqULUiZW')\n filename = '/Users/kestin/Google Drive/Scouting/Clooney.xlsx'\n gen = SpreadsheetGenerator(db, tba)\n gen.create_spreadsheet_for_event('2018onham', filename=filename)\n gen.upload_to_google_drive(filename)\n","repo_name":"kForth/ClooneyScanner","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":32524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12723674723","text":"import numpy as np\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.models.estimator import (\n transform_state_to_data,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import (\n dictionarize_objective,\n INTERNAL_METRIC_NAME,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.utils.test_objects import (\n create_tuning_job_state,\n)\nfrom syne_tune.config_space import uniform, randint, choice, loguniform\nfrom syne_tune.optimizer.schedulers.searchers.utils.hp_ranges_factory import (\n make_hyperparameter_ranges,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping import (\n warpings_for_hyperparameters,\n)\n\n\ndef test_get_internal_candidate_evaluations():\n \"\"\"we do not test the case with no evaluations, as it is assumed\n that there will be always some evaluations generated in the beginning\n of the BO loop.\"\"\"\n\n hp_ranges = make_hyperparameter_ranges(\n {\"a\": randint(0, 10), \"b\": uniform(0.0, 10.0), \"c\": choice([\"X\", \"Y\"])}\n )\n cand_tuples = [(2, 3.3, \"X\"), (1, 9.9, \"Y\"), (7, 6.1, \"X\")]\n metrics = [dictionarize_objective(y) for y in (5.3, 10.9, 13.1)]\n\n state = create_tuning_job_state(\n hp_ranges=hp_ranges, cand_tuples=cand_tuples, metrics=metrics\n )\n state.failed_trials.append(\"0\") # First trial with observation also failed\n\n result = transform_state_to_data(\n state, INTERNAL_METRIC_NAME, normalize_targets=True, num_fantasy_samples=20\n )\n\n assert len(result.features.shape) == 2, \"Input should be a matrix\"\n assert len(result.targets.shape) == 2, \"Output should be a matrix\"\n\n assert result.features.shape[0] == len(cand_tuples)\n assert result.targets.shape[-1] == 1, 
\"Only single output value per row is suppored\"\n\n assert (\n np.abs(np.mean(result.targets)) < 1e-8\n ), \"Mean of the normalized outputs is not 0.0\"\n assert (\n np.abs(np.std(result.targets) - 1.0) < 1e-8\n ), \"Std. of the normalized outputs is not 1.0\"\n\n np.testing.assert_almost_equal(result.mean, 9.766666666666666)\n np.testing.assert_almost_equal(result.std, 3.283629428273267)\n\n\ndef test_warpings_for_hyperparameters():\n # Note: ``choice`` with binary value range is encoded as 1, not 2 dims\n hp_ranges = make_hyperparameter_ranges(\n {\n \"a\": choice([\"X\", \"Y\"]), # pos 0\n \"b\": loguniform(0.1, 10.0), # pos 1\n \"c\": choice([\"a\", \"b\", \"c\"]), # pos 2\n \"d\": uniform(0.0, 10.0), # pos 5\n \"e\": choice([\"X\", \"Y\"]), # pos 6\n }\n )\n\n warpings = warpings_for_hyperparameters(hp_ranges)\n assert hp_ranges.ndarray_size == 7\n assert len(warpings) == 2\n assert warpings[0].lower == 1 and warpings[0].upper == 2\n assert warpings[1].lower == 5 and warpings[1].upper == 6\n","repo_name":"awslabs/syne-tune","sub_path":"tst/schedulers/bayesopt/test_gp_components.py","file_name":"test_gp_components.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"16768003781","text":"import socket\nimport sys\nfrom termcolor import colored\nimport os\nimport threading\n\n#user switch check\nif len(sys.argv)<2:\n print(\"usage : python client.py portno\")\n sys.exit()\n\n#Primary definitaions\nFORMAT=\"utf-8\"\n#ip='192.168.43.164'\nip='192.168.1.104'\nport=int(sys.argv[1])\ndisconnect_msg=\"#!disconnect\"\nn='0123456789'\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nrunning=True\n\n\n# try to connect to server\ntry:\n s.connect((ip,port))\n print(colored(\"connected\",'green'))\n # s.send(player_name.encode(FORMAT))\nexcept:\n print(colored(\"Faild to connect\",'red'))\n s.close()\n sys.exit()\n\n\n#draw board\ndef board(n,ch):\n os.system(\"clear\")\n print(f\" | | \")\n print(f\" {n[1]} | {n[2]} | {n[3]} \")\n print(f\" _________________\")\n print(f\" | | \")\n print(f\" {n[4]} | {n[5]} | {n[6]} \")\n print(f\" _________________\")\n print(f\" {n[7]} | {n[8]} | {n[9]} \")\n print(f\" | | \")\n print(f\" You are {ch} \")\n print()\n\nboard(n,'-')\n\n\ndef reciver(ch):\n global running\n while running:\n try:\n res=s.recv(64).decode(FORMAT)\n if res==disconnect_msg:\n break\n elif res[:3]==\"Err\":\n print(res)\n elif res[0:12]=='Game is over':\n print(res)\n running=False\n break\n else:\n board(res,ch)\n except:\n pass\n\n\n\n\n# connected msg\nmsg=s.recv(64).decode(FORMAT)\nch=msg[::-1][0]\nprint(msg)\nif msg=='Waiting for 2nd player your number is 1 and you are #':\n #number 2 connected\n msg=s.recv(64).decode(FORMAT)\n print(msg)\n\n\n\nthread=threading.Thread(target=reciver,args=(ch))\nthread.start()\n#main loop\nwhile running:\n try:\n msg=input(\"\")\n if msg.isnumeric():\n s.send(msg.encode(FORMAT))\n if msg==disconnect_msg:\n break\n else:\n print(\"Not valid\")\n except:\n print(colored(\"\\nDisconnected\",'red'))\n running=False\n s.send(disconnect_msg.encode(FORMAT))\n s.close()\n break\n","repo_name":"mht130/python","sub_path":"tic_toc_toe(online)/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31557325049","text":"import os\nimport json\nimport logging\nimport http.client\nfrom enum import Enum\nfrom typing import 
Dict\nfrom datetime import datetime\nfrom dataclasses import dataclass\nfrom contextlib import contextmanager\n\n_logger = None\n\nTELEMETRY_SUBSCRIBER_PORT = 1060\nLUMIGO_EXTENSION_NAME = \"telemetry\"\nHEADERS_ID_KEY = \"Lambda-Extension-Identifier\"\nHEADERS_NAME_KEY = \"Lambda-Extension-Name\"\n\n\ndef get_logger():\n global _logger\n if not _logger:\n _logger = logging.getLogger(\"lambda-telemetry-handler\")\n handler = logging.StreamHandler()\n if os.environ.get(\"LOG_SHIPPER_DEBUG\", \"\").lower() == \"true\":\n _logger.setLevel(logging.DEBUG)\n handler.setLevel(logging.DEBUG)\n _logger.addHandler(handler)\n return _logger\n\n\ndef lambda_service():\n return http.client.HTTPConnection(os.environ[\"AWS_LAMBDA_RUNTIME_API\"])\n\n\n@contextmanager\ndef never_fail(part_name: str = \"\"):\n try:\n yield\n except Exception as e:\n get_logger().exception(\n f\"An exception occurred in a never-fail code '{part_name}'\", exc_info=e\n )\n\n\nclass LogType(Enum):\n START = \"START\"\n END = \"END\"\n REPORT = \"REPORT\"\n FUNCTION = \"FUNCTION\"\n RUNTIME_DONE = \"RUNTIME_DONE\"\n OTHER = \"OTHER\"\n\n @staticmethod\n def parse(record_type) -> \"LogType\":\n if record_type == \"platform.start\":\n return LogType.START\n elif record_type == \"platform.end\":\n return LogType.END\n elif record_type == \"platform.report\":\n return LogType.REPORT\n elif record_type == \"platform.runtimeDone\":\n return LogType.RUNTIME_DONE\n elif record_type == \"function\":\n return LogType.FUNCTION\n elif record_type in (\n \"platform.initStart\",\n \"platform.telemetrySubscription\",\n \"platform.initRuntimeDone\",\n \"platform.runtimeDone\",\n \"platform.initReport\",\n \"platform.logsSubscription\",\n \"platform.extension\",\n \"platform.fault\",\n ):\n return LogType.OTHER\n raise ValueError(f\"Unknown record type: {record_type}\")\n\n\n@dataclass(frozen=True)\nclass TelemetryRecord:\n record_type: LogType\n record_time: datetime\n record: str\n raw: dict\n\n @staticmethod\n def parse(record: Dict[str, str]) -> \"TelemetryRecord\":\n return TelemetryRecord(\n record_type=LogType.parse(record[\"type\"]),\n record_time=datetime.fromisoformat(record[\"time\"][:-1]),\n record=json.dumps(record[\"record\"]),\n raw=record,\n )\n","repo_name":"lumigo-io/lambda-telemetry-shipper","sub_path":"src/lambda_telemetry_shipper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"20882181174","text":"from django.test import TestCase\n\n# Create your tests here.\nfrom django.test import SimpleTestCase\nfrom django.urls import reverse\n\n\n\"\"\"\n- Test that home status codes\n- Test that about status codes\n- Test home url template use, including ancestor template.\n- Test about url template use, including ancestor template.\n\"\"\"\n\nclass SnacksTests(SimpleTestCase):\n \"\"\"Class contains methods to test Snacks templates.\"\"\"\n\n \n def test_about_status_codes(self):\n \"\"\"Method to test whether the about page is accessible.\"\"\"\n url = reverse('about')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n \n def test_home_status_codes(self):\n \"\"\" Method to check whether the home page is accessible.\"\"\"\n url = reverse('home')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n# -------------------------------------------------------------\n\n def test_about_url_template(self):\n \"\"\"Method to test the about template 
for the about page.\"\"\"\n url = reverse('about')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'about.html')\n self.assertTemplateUsed(response, 'base.html')\n \n def test_home_url_template(self):\n \"\"\"Method to test the home template for the home page.\"\"\"\n url = reverse('home')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'home.html')\n self.assertTemplateUsed(response, 'base.html')\n ","repo_name":"sarahhudaib/django-snacks","sub_path":"snacks_project/snacks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32392473210","text":"from selenium import webdriver \nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC \nfrom selenium.common.exceptions import TimeoutException\nimport time\nimport ujson\nimport re\nimport os\n\noption = webdriver.ChromeOptions()\noption.add_argument(\"--incognito\")\n\nbrowser = webdriver.Chrome(executable_path=\"/home/ironmantle/Documents/chromeDriver/chromedriver\", chrome_options=option)\n\nbrowser.get(\"https://registry.cno.org/\")\n\ntimeout = 10\n\ntry:\n # wait for the checkbox (DOM last to load) and click it\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.ID, \"chkAcceptTerms\")))\n findEULACheckBox = browser.find_element_by_id(\"chkAcceptTerms\")\n time.sleep(1)\n findEULACheckBox.click()\n\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.ID, \"submitButton\")))\n\n # look for the submit button and click it\n startSearchingBox = browser.find_element_by_id(\"submitButton\")\n startSearchingBox.click()\n\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.XPATH, \"//input[@value='NUMBER']\")))\n\n listBadTags = [\n \"\",\n \"\",\n ]\n \n dateCleaner = [\n \".+\"\n ]\n\n medicalStaff = {}\n\n staffNum = 0\n\n # test RN nums\n listKnownNums = [14056148, 14056158]\n\n with open(\"RegisteredNurses.json\", \"w\") as jsonDoc:\n\n jsonDoc.write(\"[\\n\")\n \n # for number in our list\n for regNum in range(14030032, 99999999):\n\n try:\n\n # find the registration number radio\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.XPATH, \"//*[@value='NUMBER']\")))\n registrationNumberRadio = browser.find_element_by_xpath(\"//input[@value='NUMBER']\")\n registrationNumberRadio.click()\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.ID, \"RegistrationNumberCNO\")))\n\n # find the text box to enter the current id\n searchBox = browser.find_element_by_id(\"RegistrationNumberCNO\")\n searchBox.click()\n searchBox.send_keys(str(regNum).zfill(8))\n\n # hit the search button\n submitSearchButton = browser.find_element_by_xpath(\"//input[@class='btn btn-primary'][@value='Search']\")\n submitSearchButton.click()\n\n try:\n browser.find_element_by_xpath(\"//*[contains(text(), 'Your search did not return any results')]\")\n # go back to the start and search again\n browser.get(\"https://registry.cno.org/Search/Search\")\n continue\n \n except: \n\n staffNum += 1\n\n medicalStaff[staffNum] = {}\n \n print(\"Found record matching CNO: {}!\".format(regNum))\n\n # wait for the req divs to load\n WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.XPATH, \"//div[@class='tab-content hidden-xs']\")))\n # rn name\n 
medicalStaffName = browser.find_elements_by_tag_name(\"h1\")[1].text\n\n medicalStaff[staffNum][\"Name\"] = medicalStaffName\n medicalStaff[staffNum][\"Registration Number\"] = regNum\n \n # G E N E R A L T A B #\n\n tempList = []\n jobDetails = []\n medicalStaff[staffNum][\"General Information\"] = {}\n counter = 1\n\n try:\n tabDivGeneral = browser.find_element_by_xpath(\"//div[@id='general']\")\n for div in tabDivGeneral.find_elements_by_xpath(\"//div[@id='general']/div[@class='well'][@style='']\"):\n divTitle = div.find_element_by_tag_name(\"h3\")\n if divTitle.text == \"\" or divTitle.text == \"Former Names\": break\n medicalStaff[staffNum][\"General Information\"][\"Employment Type {}\".format(counter)] = {}\n medicalStaff[staffNum][\"General Information\"][\"Employment Type {}\".format(counter)][\"Job Title\"] = divTitle.text\n for td in div.find_elements_by_tag_name(\"td\"):\n tempList.append(td.text)\n iterableList = iter(tempList)\n for item in iterableList:\n medicalStaff[staffNum][\"General Information\"][\"Employment Type {}\".format(counter)][item] = next(iterableList)\n\n counter += 1 \n except:\n pass\n\n # C O N T A C T I N F O R M A T I O N #\n\n medicalStaff[staffNum][\"Last Employment\"] = {}\n try:\n address = browser.find_element_by_xpath(\"//*[@id='contactInformation']/div[@class='well']/div[@class='row']/div[@class='col-md-6']\")\n startDate = browser.find_element_by_xpath(\"//*[@id='contactInformation']/div[@class='well']/div[@class='row']/div[@class='col-md-3'][1]\").get_attribute(\"innerText\")\n startDate = re.search(\"(\\\\d{4})\", startDate).group(1)\n endDate = browser.find_element_by_xpath(\"//*[@id='contactInformation']/div[@class='well']/div[@class='row']/div[@class='col-md-3'][2]\").get_attribute(\"innerText\")\n try:\n endDate = re.search(\"(\\\\d{4})\", endDate).group(1)\n except AttributeError:\n endDate = \"Currently Employed\"\n address = str(address.get_attribute(\"innerText\"))\n for badTag in listBadTags:\n address = address.replace(badTag, \"\")\n address = re.sub(r\"\\n\", \"\", address)\n address = re.sub(r\" +\", \" \", address)\n\n medicalStaff[staffNum][\"Last Employment\"][\"Address\"] = address\n medicalStaff[staffNum][\"Last Employment\"][\"Start Date\"] = startDate\n medicalStaff[staffNum][\"Last Employment\"][\"End Date\"] = endDate\n \n except:\n continue\n\n # W R I T E T O D I C T #\n \n print(medicalStaff[staffNum])\n ujson.dump(medicalStaff[staffNum], jsonDoc)\n jsonDoc.write(\",\")\n\n # R E S T A R T #\n\n browser.get(\"https://registry.cno.org/Search/Search\")\n except:\n browser.get(\"https://registry.cno.org/Search/Search\")\n continue\n browser.quit()\n\nexcept TimeoutException:\n print(\"Timed out waiting for page to load\")\n browser.quit()\n\nfinally:\n # strip the trailing comma written after the last record, then close the JSON array\n with open(\"RegisteredNurses.json\", \"r+\") as jsonDoc:\n jsonDoc.seek(0, os.SEEK_END)\n size = jsonDoc.tell()\n if size > 0:\n jsonDoc.seek(size - 1)\n if jsonDoc.read(1) == \",\":\n jsonDoc.seek(size - 1)\n jsonDoc.truncate()\n jsonDoc.write(\"\\n]\")\n\n\n browser.quit()\n","repo_name":"cplpearce/CONO-Nurse-Database-Scraper","sub_path":"statscanNurseScrape.py","file_name":"statscanNurseScrape.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37900622195","text":"from random import randint\nlista = list()\njogos = list()\nquant = int(input('How many games do you want me to make? 
'))\ntot = 1\nwhile tot <= quant:\n cont = 0\n while True:\n num = randint(1, 60)\n if num not in lista:\n lista.append(num)\n cont += 1\n if cont >= 6:\n break\n lista.sort()\n jogos.append(lista[:])\n lista.clear()\n tot += 1\nfor i, l in enumerate(jogos):\n print(f'Game {i+1}: {l}')\n","repo_name":"Rickliger/Python","sub_path":"henrique_projetos_python/curso_em_video/mundo_3/aula_17_Listas/aula_17_listas_parte2/mega_sena.py","file_name":"mega_sena.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"27867575789","text":"import sys, io, os, gzip, glob, copy, re, time, pickle, warnings, importlib\nimport numpy as np\nimport pandas as pd\nimport base64\n\nfrom sklearn.preprocessing import normalize\nimport scipy.cluster\nimport scipy.stats\nimport scipy.spatial\nimport scipy.linalg\n\nimport ete3\nfrom ete3 import Tree, TreeStyle, NodeStyle\nos.environ['QT_QPA_PLATFORM']='offscreen'\n\ndef logrank(x):\n v = scipy.stats.rankdata(x)\n return - np.log( 1-(v-.5)/len(v) )\n\ndef cor_logrank(orgmtx):\n K, M = orgmtx.shape\n rankmtx = np.zeros((K, M), dtype=int)\n for k in range(K):\n rankmtx[k, :] = scipy.stats.rankdata(orgmtx[k, :])\n corlogrank = np.corrcoef(-np.log( 1-(rankmtx-.5)/M ) )\n return corlogrank\n\ndef NJ_logrank(orgmtx):\n K, M = orgmtx.shape\n rankmtx = np.zeros((K, M), dtype=int)\n for k in range(K):\n rankmtx[k, :] = scipy.stats.rankdata(orgmtx[k, :])\n corlogrank = np.corrcoef(-np.log( 1-(rankmtx-.5)/M ) )\n node = {} # node id -> [leaves, profile]\n active_node = []\n for k in range(K):\n node[k] = [[k], orgmtx[k, :]]\n active_node.append(k)\n pairwise_dict = {} # (id1, id2) -> score\n for k in range(K-1):\n for l in range(k+1, K):\n pairwise_dict[(k, l)] = corlogrank[k, l]\n Z = []\n for s in range(K-1):\n candi = [] # [(id1, id2), score]\n for i,v1 in enumerate(active_node[:-1]):\n for j in range(i+1, len(active_node)):\n v2 = active_node[j]\n if (v1, v2) not in pairwise_dict:\n vec1 = logrank(node[v1][1])\n vec2 = logrank(node[v2][1])\n score = np.corrcoef(vec1, vec2)[0,1]\n candi.append([(v1, v2), score])\n pairwise_dict[ (v1, v2) ] = score\n else:\n score = pairwise_dict[(v1, v2)]\n candi.append([(v1, v2), score])\n if len(candi) == 0:\n break\n candi.sort(key = lambda x : x[1], reverse=True)\n v1, v2 = candi[0][0]\n lvs = node[v1][0]+node[v2][0]\n node[K+s] = [lvs, orgmtx[lvs, :].mean(axis = 0)]\n active_node.remove(v1)\n active_node.remove(v2)\n active_node.append(K+s)\n Z.append([ v1, v2, 1-candi[0][1], len(lvs) ])\n return Z\n\ndef visual_hc(model_prob, weight, top_gene, node_color=None, circle=False, vertical=False, output_f=None, cprob_cut=.99):\n\n K = model_prob.shape[0]\n assert len(weight) == K, \"model_prob.shape[0] != len(weight)\"\n assert len(top_gene) == K, \"len(top_gene) != K\"\n\n model_prob = normalize(np.array(model_prob), norm='l1', axis=1)\n weight = np.array(weight)\n weight /= weight.sum()\n weight_anno = [\"%.2e\" % x if x < 0.1 else \"%.3f\" % x for x in weight]\n v = np.argsort(weight)[::-1]\n w = np.cumsum(weight[v] )\n if sum(w > cprob_cut) == 0:\n k = K - 1\n else:\n k = np.arange(K)[w > cprob_cut][0]\n kept_factor = v[:(k+1)].astype(str)\n\n # # Hierarchical clustering\n # cd_dist = scipy.spatial.distance.pdist(model_prob, metric='cosine')\n # Z_hc = scipy.cluster.hierarchy.linkage(cd_dist, method=\"complete\")\n # Z_hc = NJ_logrank(model_prob)\n\n corlogrank = cor_logrank(model_prob)\n corlogrank = np.nan_to_num(corlogrank, copy=False)\n 
cd_dist = 1 - .5 * (corlogrank + corlogrank.T)\n np.fill_diagonal(cd_dist, 0)\n cd_dist = scipy.spatial.distance.squareform(cd_dist)\n Z_hc = scipy.cluster.hierarchy.linkage(cd_dist, method=\"complete\")\n\n # Construct tree object from the clustering\n R, T = scipy.cluster.hierarchy.to_tree(Z_hc, rd=True)\n tr = Tree()\n tr.dist=0\n tr.name='root'\n node_dict = {R.id:tr}\n stack = [R]\n while stack:\n node = stack.pop()\n c_dist = node.dist / 2\n for c in [node.left, node.right]:\n if c:\n ch = Tree()\n ch.dist = c_dist\n ch.name = str(c.id)\n _=node_dict[node.id].add_child(ch)\n node_dict[c.id] = ch\n stack.append(c)\n\n node_list = [x for x in tr.traverse() if x.is_leaf() and x.name in kept_factor]\n subtr = tr.copy()\n subtr.prune( [x.name for x in node_list] )\n\n if output_f is None:\n return subtr\n\n ### Visualize tree\n title=f\"Hierarchical clustering of {len(node_list)} factors\"\n\n # Node style\n istyle = NodeStyle()\n istyle[\"size\"] = 0\n for n in subtr.traverse():\n n.set_style(istyle)\n if node_color is not None:\n for n in subtr.traverse():\n if n.is_leaf():\n nstyle = NodeStyle()\n nstyle[\"fgcolor\"] = node_color[n.name]\n nstyle['size'] = 10\n n.set_style(nstyle)\n node_anno = {k: \" \" + str(k) + \" (\"+weight_anno[k] + \"): \" + v for k,v in enumerate(top_gene) }\n def layout(node):\n if node.is_leaf():\n ete3.faces.add_face_to_node(ete3.TextFace(node_anno[int(node.name)]), node, column=0)\n\n # Tree style\n ts = TreeStyle()\n ts.layout_fn=layout\n ts.show_leaf_name = False\n ts.show_branch_length = False\n ts.show_scale = False\n if circle:\n ts.mode = \"c\"\n ts.arc_start = 0\n ts.arc_span = 360\n else:\n if vertical:\n ts.rotation = 90\n ts.branch_vertical_margin = 20\n\n ts.title.add_face(ete3.TextFace(title, fsize=25),column=0)\n subtr.render(output_f, w=2560, units='mm', tree_style=ts)\n\n return subtr\n\ndef image_to_base64(image_path):\n with open(image_path, \"rb\") as image_file:\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n","repo_name":"seqscope/ficture","sub_path":"ficture/utils/visualize_factors.py","file_name":"visualize_factors.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11770831059","text":"def name_data():\n name = input('Enter your name: ')\n print('What a beautiful name! 
(by the way, my name is gb_bot, I was created by GeekBrains!')\n return name\n\n\ndef surname_data():\n surname = input('Enter your last name: ')\n return surname\n\n\ndef phone_data():\n # import re\n phone = input('Enter your phone number: ')\n return phone\n\n\ndef address_data():\n address = input('Enter your address: ')\n return address\n\ndef search_parameters():\n print('Which field do you want to search by?')\n search_field = input('1 - by last name\\n2 - by first name\\n3 - by phone number\\n')\n print()\n search_value = None\n if search_field == '1':\n search_value = input('Enter a last name to search for: ')\n print()\n elif search_field == '2':\n search_value = input('Enter a first name to search for: ')\n print()\n elif search_field == '3':\n search_value = input('Enter a phone number to search for: ')\n print()\n return search_field, search_value\n\n","repo_name":"NataliaZarubkina/HomeworkPyton","sub_path":"HomeworkPyton8/data_create.py","file_name":"data_create.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32609895144","text":"remind = input('Do you have any reminders to set: ')\r\n\r\nif (remind == 'yes'):\r\n response1 = input('Type a reminder here: ')\r\n remind = input('Do you have any more reminders: ')\r\n if(remind == 'yes'):\r\n response2 = input('Type another reminder here: ')\r\n print('Reminders:' + ' ' + response1 + ', and' + ' ' + response2)\r\n else:\r\n print('Reminders:' + ' ' + response1)\r\nelif (remind == 'no'):\r\n print('Ok! Come back and type a reminder another time')\r\nelse:\r\n print('Type Yes or No only')","repo_name":"chandhan-j/AutomationProject","sub_path":"automateTasks.py","file_name":"automateTasks.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24851095941","text":"\"\"\"\nGiven a linked list, swap every two adjacent nodes and return the swapped list.\nYou must not simply change the values inside the nodes; the nodes themselves need to be swapped.\n\nExample 1:\n\nInput: head = [1,2,3,4]\nOutput: [2,1,4,3]\n\nExample 2:\n\nInput: head = []\nOutput: []\n\nExample 3:\n\nInput: head = [1]\nOutput: [1]\n\n\nConstraints:\n\nThe number of nodes in the list is in the range [0, 100]\n0 <= Node.val <= 100\n\nSource: LeetCode (leetcode-cn)\nLink: https://leetcode-cn.com/problems/swap-nodes-in-pairs\nThe copyright belongs to LeetCode. For commercial reprints please contact LeetCode for authorization; for non-commercial reprints please cite the source.\n\"\"\"\nfrom .ListNodeHelper import ListNode\n\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n if not head:\n return None\n pre = ListNode(None)\n if head.next:\n pre.next = head.next\n else:\n return head\n pre1 = pre\n current = head\n while current and current.next:\n nxt = current.next\n third = nxt.next\n nxt.next = current\n current.next = third\n pre1.next = nxt\n pre1 = current\n current = third\n return pre.next\n","repo_name":"wanzhouyi/leetcode","sub_path":"2.链表/24. 两两交换链表中的节点.py","file_name":"24. 
两两交换链表中的节点.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27305965735","text":"\nimport numpy as np\n\ndef read_log_file(file, read_iterations=True):\n n_iterations = None\n rasscf_timing = None\n wall_timing = None\n\n with open(file, 'r') as f:\n while True:\n line = f.readline()\n\n if read_iterations:\n if \"Convergence after\" in line:\n for el in line.split():\n if el.isdigit():\n n_iterations = int(el)\n break\n\n if \"--- Module rasscf spent\" in line:\n for el in line.split():\n if el.isdigit():\n rasscf_timing = float(el)\n break\n\n if \"Timing: Wall\" in line:\n wall_timing = float(line.replace(\"=\", \" \").split()[2])\n break\n\n return rasscf_timing, wall_timing, n_iterations\n\nif __name__ == \"__main__\":\n output_path = '/home/ubuntu/fulvene/openmolcas_calculations/MD_prev_geometry/'\n # split_file = 'data/MD_trajectories_05_01_random.npz'\n\n n_iterations = []\n\n # for i in np.load(split_file)['val_idx']: \n for i in range(200):\n geometry_path = output_path + 'geometry_' + str(i) + '/'\n _, _, n = read_log_file(geometry_path + 'calc.log')\n n_iterations.append(n)\n\n n_iterations = np.array(n_iterations)\n print(n_iterations)\n # np.save('temp/gs200_ANOSVDZ_standard.npy', np.array(n_iterations))","repo_name":"rhjvanworkum/CasSchNet","sub_path":"openmolcas/calculation/extract_n_iterations.py","file_name":"extract_n_iterations.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"27337457490","text":"import os\nimport pywt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport librosa\nimport scipy.io as sio\nimport scipy.io.wavfile\n\ndef checkPath(target) :\n if not os.path.exists(target): os.makedirs(target)\n\n#### Check Dataset & Output Directory\nROOT_INPUT_PATH = os.path.join(os.path.abspath(__file__+ \"../../\"), '.dataset/')\nROOT_OUT_PATH = os.path.join(os.path.abspath(__file__+ \"../../\"), '.output/')\nROOT_FIGURE_PATH = ROOT_OUT_PATH+\".WaveLetDWT/\"\nfileName = \"Loop_0\"\nfileExt = \".wav\"\ninputFile = ROOT_INPUT_PATH+fileName+ fileExt\ntransFile = ROOT_INPUT_PATH+fileName+\"_32\" + fileExt\n\ncheckPath(ROOT_OUT_PATH)\ncheckPath(ROOT_INPUT_PATH)\ncheckPath(ROOT_FIGURE_PATH)\n\nif not os.path.exists(transFile): \n data, samplerate = librosa.load(inputFile, dtype='float32')\n librosa.output.write_wav(transFile, data, samplerate)\n \n#### Load\n# Return the sample rate (in samples/sec), data from a WAV file, Wave Format PCM\nfs, samples_murmur = sio.wavfile.read(transFile)\nprint(\"Wave Info\\n Sample Rate={0}, \".format(fs)) # 22.050 kHz, the number of samples extracted per second\nprint(\" Data Length={0}\\n Data={1}\".format(len(samples_murmur), samples_murmur))\n\n### Discrete Wavelet Info\n# pywt.Wavelet: Describes properties of a discrete wavelet identified by the specified wavelet name, must be a valid wavelet name from the pywt.wavelist() list.\n# wavelist: 'haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey', 'gaus', 'mexh', 'morl', 'cgau', 'shan', 'fbsp', 'cmor'\ndiscrete_wavelet = pywt.Wavelet('db2')\nprint(discrete_wavelet)\nmax_level = pywt.dwt_max_level(len(samples_murmur), discrete_wavelet)\nprint('MAXIMUM DECOMPOSITION LEVEL = ', max_level)\n\ntargetData = samples_murmur.copy() # NO read only\n\n#### Discrete Wavelet Transform\n# pywt.wavedec: Multilevel 1D Discrete Wavelet Transform of data. 
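For example, pywt.wavedec(x, 'db2', level=3) returns [cA3, cD3, cD2, cD1] with the deepest approximation first, and pywt.waverec on that list reconstructs x.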
\n# Parameters: data, wavelet, mode='symmetric', level=None, axis=-1\n# Returns: [cA_n, cD_n, cD_n-1, …, cD2, cD1] : list\noriginalMatrix = pywt.wavedec(data=targetData, wavelet='db2', level=3)\ncA3, cD3, cD2, cD1 = originalMatrix\nprint(\"< Discrete Wavelet Transform >\\n\" + \" cD1: {0}\\n cD2: {1}\\n cD3: {2}\\n cA3: {3}\\n\".format(cD1,cD2,cD3,cA3))\nprint(cA3.size, cD3.size, cD2.size, cD1.size);\n\n#### Reconstruct\nreconstructMatrix = [cA3, cD3, cD2, cD1];\nreconstruct_sample = pywt.waverec(reconstructMatrix, 'db2')\nprint(\"< Reconstruct >\\n\" + \" Length={0}\\n Data={1}\".format(len(reconstruct_sample), reconstruct_sample))\nsio.wavfile.write(ROOT_FIGURE_PATH+fileName+fileExt, fs, reconstruct_sample)\nrec_to_orig = pywt.idwt(None, cD1, 'db2', 'smooth')\nrec_to_level1 = pywt.idwt(None, cD2, 'db2', 'smooth')\nrec_to_level2_from_detail = pywt.idwt(None, cD3, 'db2', 'smooth')\nrec_to_level2_from_approx = pywt.idwt(cA3, None, 'db2', 'smooth')\n# print(rec_to_orig,rec_to_level1,rec_to_level2_from_detail,rec_to_level2_from_approx)\n\n#### visualize\n# plt.figure(figsize=(4,4))\n# (phi, psi, x) = discrete_wavelet.wavefun()\n# plt.plot(x, phi)\n# plt.savefig(ROOT_FIGURE_PATH+fileName+\"_Info_DWT.png\")\n# plt.show()\n\nplt.figure(figsize=(15,10))\nplt.subplot(6,1,1)\nplt.title('Sample')\nplt.plot(np.linspace(0.0, len(samples_murmur),len(samples_murmur)), samples_murmur)\nplt.grid()\n\nplt.subplot(6,1,2)\nplt.title('cD1')\nplt.plot(np.linspace(0.0, len(rec_to_orig),len(rec_to_orig)), rec_to_orig)\nplt.grid()\n\nplt.subplot(6,1,3)\nplt.title('cD2')\nplt.plot(np.linspace(0.0, len(rec_to_level1),len(rec_to_level1)), rec_to_level1)\nplt.grid()\n\nplt.subplot(6,1,4)\nplt.title('cD3')\nplt.plot(np.linspace(0.0, len(rec_to_level2_from_detail),len(rec_to_level2_from_detail)), rec_to_level2_from_detail)\nplt.grid()\n\nplt.subplot(6,1,5)\nplt.title('cA3')\nplt.plot(np.linspace(0.0, len(rec_to_level2_from_approx),len(rec_to_level2_from_approx)), rec_to_level2_from_approx)\nplt.grid()\n\nplt.subplot(6,1,6)\nplt.title('reconstruct_sample')\nplt.plot(np.linspace(0.0, len(reconstruct_sample),len(reconstruct_sample)), reconstruct_sample)\nplt.grid()\n\nplt.tight_layout()\nplt.savefig(ROOT_FIGURE_PATH+fileName+\"_Figure_DWT.png\")\nplt.show()","repo_name":"Ninei/GANs","sub_path":"Exercise/Sound/WaveLet_DWT.py","file_name":"WaveLet_DWT.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41746641551","text":"import numpy as np\nimport scipy.io\nfrom scipy import stats\nimport pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n# eigen face and face projection\n\nname = ['Anne', 'benjamin', 'clooney', 'keanu', 'Markle', 'ryan']\npicSize = (80, 60)\n\nface = []\nfor i in range(len(name)):\n face.append([])\n for j in range(5):\n facename = name[i] + str(j + 1)\n face_rgb = Image.open('./FilesHomework3/%s.jpg' % facename)\n faceResize = face_rgb.resize((60, 80))\n face_gray = faceResize.convert('L')\n face[i].append(np.array(face_gray, dtype=float))\n\n# show full pics\nplt.figure(figsize=(20, 20))\nfor i in range(6):\n for j in range(5):\n plt.subplot(6, 5, i * 5 + j + 1)\n plt.imshow(face[i][j], cmap=plt.get_cmap('gray'))\nplt.show()\n\n# average faces\nplt.figure(figsize=(24, 4))\nfor i in range(6):\n plt.subplot(1, 6, i + 1)\n faceAvg = sum(face[i]) / len(face[i])\n plt.imshow(faceAvg, cmap=plt.get_cmap('gray'))\nplt.show()\n\n# first 5 eigenfaces and 20 eigenvalues\n# 
construct data matrix\nimgData = []\nfor i in range(6):\n for j in range(5):\n faceRhpe = face[i][j].reshape((-1,))\n imgData.append(faceRhpe)\nimgData = np.asarray(imgData)\n\n# PCA\nCmat = np.dot(imgData.transpose(), imgData)\n# Cmat = np.cov(imgData.T)\nvalue, vector = np.linalg.eig(Cmat)\nvector = vector.real\nvalue = value.real\n\nplt.figure(figsize=(20, 4))\nfor i in range(5):\n plt.subplot(1, 5, i + 1)\n eigenFace = vector[:, i].reshape(picSize)\n plt.imshow(eigenFace, cmap=plt.get_cmap('gray'))\nplt.show()\n\nplt.plot(value[:21], marker='D', color='r')\nplt.title('First 20 Eigenvalues')\nplt.xlim((-1, 20))\nplt.show()\n\n# reconstructing Anne1.jpg\nplt.figure(figsize=(28, 4))\nK = [1, 5, 10, 15, 20, 50, 100]\nAnne1 = imgData[0]\nfor i in range(len(K)):\n faceRecnsct = 0.0\n for j in range(K[i]):\n plt.subplot(1, len(K), i + 1)\n weight = np.dot(vector[:, j], Anne1)\n faceRecnsct += weight * vector[:, j]\n faceRecnsct = faceRecnsct.reshape(picSize)\n plt.imshow(faceRecnsct, cmap=plt.get_cmap('gray'))\nplt.show()\n\n# projection of an arbitrary photo\nselfie = Image.open('1.jpg')\nselfie = selfie.resize((60, 80))\nselfie = selfie.convert('L')\nselfie = np.array(selfie, dtype=float)\nplt.imshow(selfie, cmap=plt.get_cmap('gray'))\nplt.show()\nselfieRshpe = selfie.reshape((-1,))\n\n# picking the first 20 eigenvectors as the vector space\nV = vector[:, :20]\nweights = np.dot(selfieRshpe, V)\nselfieRecnsct = np.dot(V, weights.transpose())\nselfieRecnsct = selfieRecnsct.reshape(picSize)\nplt.imshow(selfieRecnsct, cmap=plt.get_cmap('gray'))\nplt.show()\n\n# histogram\nerror = []\nfor i in range(len(name)):\n error.append([])\n for j in range(5):\n nu = np.linalg.norm(np.dot(imgData[i * 5 + j, :], V) - np.dot(selfieRshpe, V), ord=2)\n de = np.linalg.norm(np.dot(imgData[i * 5 + j, :], V), ord=2)\n error[i].append(nu / de)\n\navgeError = np.mean(np.array(error), axis=1)\nprint(name[np.argmin(avgeError)])\n\nplt.figure(figsize=(24, 4))\nfor i in range(6):\n plt.subplot(1, 6, i + 1)\n plt.title(name[i])\n plt.bar(range(1, 6, 1), error[i])\nplt.show()","repo_name":"fangli-DX3906/mini-ML-projects","sub_path":"eigen face and projection.py","file_name":"eigen face and projection.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74653148694","text":"import numpy as np\nimport h5py\nimport scipy.stats as stat\nimport os\n\ndef bin_volumes(radial_bins):\n \"\"\"Returns the volumes of the bins. \"\"\"\n\n single_vol = lambda x: (4.0 / 3.0) * np.pi * x ** 3\n outer = single_vol(radial_bins[1:])\n inner = single_vol(radial_bins[:-1])\n return outer - inner\n\n\ndef bin_centers(radial_bins):\n \"\"\"Returns the centers of the bins. 
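Each center is the midpoint of a pair of consecutive radial bin edges, i.e. 0.5 * (inner + outer). 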
\"\"\"\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)\n\n\ndef analyse_halo(mass, pos, radial_bins):\n\n # Radial coordinates [kpc units]\n r = np.sqrt(np.sum(pos ** 2, axis=1))\n\n SumMasses, _, _ = stat.binned_statistic(x=r, values=np.ones(len(r)) * mass[0], statistic=\"sum\", bins=radial_bins, )\n density = (SumMasses / bin_volumes(radial_bins)) # Msun/kpc^3\n return density\n\ndef read_data(which_halos,siminfo,mass_select):\n\n radial_bins = np.arange(-0.3, 3, 0.1)\n radial_bins = 10**radial_bins\n centers = bin_centers(radial_bins) #kpc\n\n with h5py.File(siminfo.snapshot,\"r\") as hf:\n a = hf[\"/Header\"].attrs[\"Scale-factor\"]\n mass = hf['PartType1/Masses'][:] * 1e10 #Msun\n pos = hf['PartType1/Coordinates'][:][:] * a\n\n snapshot_file = h5py.File(siminfo.snapshot,\"r\")\n group_file = h5py.File(siminfo.catalog_groups,\"r\")\n particles_file = h5py.File(siminfo.catalog_particles,\"r\")\n properties_file = h5py.File(siminfo.subhalo_properties,\"r\")\n\n m200c = properties_file[\"Mass_200crit\"][:] * 1e10\n m200c[m200c == 0] = 1\n m200c = np.log10(m200c)\n CoP = np.zeros((len(m200c), 3))\n CoP[:, 0] = properties_file[\"Xcminpot\"][:] * a\n CoP[:, 1] = properties_file[\"Ycminpot\"][:] * a\n CoP[:, 2] = properties_file[\"Zcminpot\"][:] * a\n subtype = properties_file[\"Structuretype\"][:]\n\n select_halos = np.where((m200c >= mass_select-0.2) & (m200c <= mass_select+0.2))[0] # >10 star parts\n\n # Checking sample\n if which_halos == 'subhalos':\n select = np.where(subtype[select_halos] > 10)[0]\n select_halos = select_halos[select]\n else:\n select = np.where(subtype[select_halos] == 10)[0]\n select_halos = select_halos[select]\n\n if len(select_halos) >= 20:\n #select_random = np.random.random_integers(len(select_halos) - 1, size=(20))\n #select_halos = select_halos[select_random]\n select_halos = select_halos[0:20]\n\n\n M200 = np.median(10 ** m200c[select_halos])\n M200 = np.log10(M200)\n num_halos = len(select_halos)\n\n density_all = np.zeros((len(centers), num_halos))\n\n for halo in range(0, num_halos):\n halo_j = select_halos[halo]\n\n # # Grab the start position in the particles file to read from\n # halo_start_position = group_file[\"Offset\"][halo_j]\n # halo_end_position = group_file[\"Offset\"][halo_j + 1]\n # particle_ids_in_halo = particles_file[\"Particle_IDs\"][halo_start_position:halo_end_position]\n # particle_ids_from_snapshot = snapshot_file[\"PartType1/ParticleIDs\"][...]\n #\n # _, indices_v, indices_p = np.intersect1d(particle_ids_in_halo,\n # particle_ids_from_snapshot,\n # assume_unique=True,\n # return_indices=True, )\n #\n # particles_mass = mass[indices_p].copy()\n # particles_pos = pos[indices_p, :].copy()\n particles_mass = mass.copy()\n particles_pos = pos.copy()\n particles_pos -= CoP[halo_j, :] # centering\n particles_pos *= 1e3 # kpc\n if len(particles_mass) == 0 :continue\n density_all[:, halo] = analyse_halo(particles_mass, particles_pos, radial_bins)\n\n density = analyse_halo(particles_mass, particles_pos, radial_bins)\n output = np.zeros((len(centers),2))\n output[:, 0] = centers\n output[:, 1] = density\n\n if which_halos == 'subhalos':\n np.savetxt(siminfo.output_path+\"Profile_subhalos_M%0.1f\"%mass_select+\"_\"+siminfo.name+\"_%i.txt\"%halo, output, fmt=\"%s\")\n else:\n np.savetxt(siminfo.output_path+\"Profile_halos_M%0.1f\"%mass_select+\"_\"+siminfo.name+\"_%i.txt\"%halo, output, fmt=\"%s\")\n\n\n densityM = np.median(density_all[:, :], axis=1)\n densityUp = np.percentile(density_all[:, :], 84, 
axis=1)\n densityLow = np.percentile(density_all[:, :], 16, axis=1)\n\n # Output final median profile:\n output = np.zeros((len(centers),4))\n output[:,0] = centers\n output[:,1] = densityM\n output[:,2] = densityLow\n output[:,3] = densityUp\n\n\n if which_halos == 'subhalos':\n np.savetxt(siminfo.output_path+\"Profile_subhalos_M%0.1f\"%mass_select+\"_\"+siminfo.name+\".txt\", output, fmt=\"%s\")\n else:\n np.savetxt(siminfo.output_path+\"Profile_halos_M%0.1f\"%mass_select+\"_\"+siminfo.name+\".txt\", output, fmt=\"%s\")\n\n\n\nclass SimInfo:\n def __init__(self, folder, snap, output_path, name):\n self.name = name\n self.output_path = output_path\n\n snapshot = os.path.join(folder,\"snapshot_%04i.hdf5\"%snap)\n if os.path.exists(snapshot):\n self.snapshot = os.path.join(folder,\"snapshot_%04i.hdf5\"%snap)\n\n properties = os.path.join(folder, \"halo_%04i.properties\" % snap)\n if os.path.exists(properties):\n self.subhalo_properties = os.path.join(folder, \"halo_%04i.properties\" % snap)\n else:\n self.subhalo_properties = os.path.join(folder, \"subhalo_%04i.properties\" % snap)\n\n catalog = os.path.join(folder,\"halo_%04i.catalog_groups\"%snap)\n if os.path.exists(catalog):\n self.catalog_groups = os.path.join(folder,\"halo_%04i.catalog_groups\"%snap)\n else:\n self.catalog_groups = os.path.join(folder,\"subhalo_%04i.catalog_groups\"%snap)\n\n catalog_particles = os.path.join(folder, \"halo_%04i.catalog_particles\" % snap)\n if os.path.exists(catalog_particles):\n self.catalog_particles = os.path.join(folder, \"halo_%04i.catalog_particles\" % snap)\n else:\n self.catalog_particles = os.path.join(folder, \"subhalo_%04i.catalog_particles\" % snap)\n\n snapshot_file = h5py.File(self.snapshot, \"r\")\n self.softening = float(snapshot_file[\"/Parameters\"].attrs[\"Gravity:comoving_DM_softening\"][:])\n self.softening *= 1e3 #kpc units\n\n\nif __name__ == '__main__':\n \n from utils import *\n\n output_path = args.output\n folder = args.input\n snapshot = int(args.snapshot)\n name = args.name\n\n siminfo = SimInfo(folder, snapshot, output_path, name)\n\n # mass = 9.0\n # read_data(\"halos\",siminfo,mass)\n # read_data(\"subhalos\",siminfo,mass)\n\n # mass = 9.5\n # read_data(\"halos\",siminfo,mass)\n # read_data(\"subhalos\",siminfo,mass)\n\n mass = 10.0\n read_data(\"halos\",siminfo,mass)\n # read_data(\"subhalos\",siminfo,mass)\n","repo_name":"correac/SIDM-fitting-profiles","sub_path":"extract_profile_cosmo_box.py","file_name":"extract_profile_cosmo_box.py","file_ext":"py","file_size_in_byte":6809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"62724353","text":"import gi\nimport smtplib\nimport threading\nimport time\n\nfrom datetime import datetime\nfrom email.mime.text import MIMEText\nfrom pulsectl import Pulse\n\nfrom galicaster.core import context\n\ngi.require_version('GUdev', '1.0')\nfrom gi.repository import GLib, GUdev # noqa\n\n\nconf = context.get_conf()\ndispatcher = context.get_dispatcher()\nlogger = context.get_logger()\nrecorder = context.get_recorder()\n\npulse = Pulse('galicaster-plugin-unplugged')\nudev = GUdev.Client(subsystems=['usb'])\n\n\ndef init():\n Unplugged()\n\n\nclass WatchedDevice(object):\n def __init__(self, device_name, device_info):\n self.name = device_name\n self.vendor_id = device_info.get('vendor_id')\n self.device_id = device_info.get('device_id')\n self.switch_on_connect = device_info.get('switch_on_connect')\n self.switch_on_disconnect = device_info.get('switch_on_disconnect')\n 
self._unplugged_since = False\n self.plugged_in\n\n @property\n def plugged_in(self):\n enum = GUdev.Enumerator.new(udev)\n enum.add_match_property('ID_VENDOR_ID', self.vendor_id)\n enum.add_match_property('ID_MODEL_ID', self.device_id)\n plugged_in = bool(enum.execute())\n if plugged_in:\n self._unplugged_since = False\n elif not self._unplugged_since:\n self._unplugged_since = datetime.now()\n return plugged_in\n\n @property\n def status(self):\n return {True: 'plugged in', False: 'unplugged'}.get(self.plugged_in)\n\n @property\n def unplugged_since(self):\n return self._unplugged_since\n\n @property\n def unplugged_for(self):\n return datetime.now() - self._unplugged_since\n\n def __repr__(self):\n return ('<WatchedDevice {0.name} ({0.vendor_id}:{0.device_id}) {0.status}>'.format(self))\n\n\nclass Unplugged(object):\n\n def __init__(self):\n # resend_every = how many minutes to resend email if still unplugged\n self.resend_every = conf.get_int('unplugged', 'resend_every', 60) * 60\n self.resend_every -= 3 # just to make sure ;)\n\n self.switch = {}\n\n self.last_check = time.time()\n\n # devices to watch, can be multiple, in the form:\n # {'display name': {'vendor_id': '0a1b', device_id: '3c4d'}}\n devices_conf = conf.get_json('unplugged', 'devices')\n self.devices = []\n for d in devices_conf:\n dev = WatchedDevice(d, devices_conf[d])\n switch_to = dev.switch_on_connect\n # send email immediately if unplugged on startup\n if not dev.plugged_in:\n self.send_email(dev)\n switch_to = dev.switch_on_disconnect\n\n # switch pulse input on startup as required\n if switch_to:\n self.switch[dev.name] = GLib.timeout_add_seconds(\n 1, self.switch_input, switch_to, dev)\n\n self.devices.append(dev)\n logger.debug(\"watching: {}\".format(dev))\n\n udev.connect('uevent', self._handle_event)\n dispatcher.connect('timer-long', self._handle_timer)\n\n def send_email(self, device):\n host = conf.get_hostname()\n threading.Thread(target=self._send_email,\n args=(host, device,)).start()\n\n def _send_email(self, host, device):\n logger.info('sending \"{0.status}\" email for \"{0.name}\"'.format(device))\n\n to = conf.get('unplugged', 'mailto')\n fr = conf.get('unplugged', 'mailfrom')\n smtpserver = conf.get('unplugged', 'smtpserver')\n\n txt = '{0.name} is {0.status}!\\n\\n'.format(device)\n if not device.plugged_in:\n # TODO: will still say \"just\" if starting up...\n if device.unplugged_for.total_seconds() < 10:\n txt += 'It has just been unplugged.'\n else:\n txt += ('It has been unplugged for {0.unplugged_for} '\n '(since {0.unplugged_since})'.format(device))\n msg = MIMEText(txt)\n msg['To'] = to\n msg['From'] = fr\n msg['Subject'] = '[{0}] {1.name} {1.status}'.format(host, device)\n\n s = None\n try:\n s = smtplib.SMTP(smtpserver)\n s.sendmail(fr, to.split(','), msg.as_string())\n except Exception:\n logger.error('problem sending email', exc_info=True)\n finally:\n if s:\n s.quit()\n\n logger.debug('sent \"{0.status}\" email for \"{0.name}\"'.format(device))\n\n def switch_input(self, switch_to, device=None):\n logger.info('switching pulse input to {}'.format(switch_to))\n for source in pulse.source_list():\n if source.name == switch_to:\n for recording in pulse.source_output_list():\n if (pulse.client_info(recording.client).name\n == 'run_galicaster.py'):\n pulse.source_output_move(recording.index, source.index)\n if device:\n self.switch[device.name] = None\n return False\n logger.warning('could not switch to {}'.format(switch_to))\n return True\n\n def translate_action(self, action):\n return {'add': 'plugged in', 'remove': 'unplugged'}.get(action)\n\n def 
_handle_timer(self, sender):\n # resend notification emails if still unplugged\n now = time.time()\n if self.last_check < now - self.resend_every:\n self.last_check = now\n for d in self.devices:\n if not d.plugged_in:\n self.send_email(d)\n\n def _handle_event(self, client, action, device):\n for d in self.devices:\n if (device.get_property('ID_VENDOR_ID') == d.vendor_id and\n device.get_property('ID_MODEL_ID') == d.device_id):\n logger.info(\"%s was %s\", d.name, self.translate_action(action))\n self.send_email(d)\n switch_to = {'add': d.switch_on_connect,\n 'remove': d.switch_on_disconnect}.get(action)\n\n if ((action == 'add' and d.switch_on_connect) or\n (action == 'remove' and d.switch_on_disconnect)):\n if self.switch.get(d.name):\n GLib.source_remove(self.switch.get(d.name))\n self.switch[d.name] = None\n self.switch[d.name] = GLib.timeout_add_seconds(\n 1, self.switch_input, switch_to, d)\n","repo_name":"ppettit/galicaster-plugin-unplugged","sub_path":"galicaster_plugin_unplugged/unplugged.py","file_name":"unplugged.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12956706728","text":"import os\nimport datasets\nimport numpy as np\n\nfrom fewshot_gym_dataset import FewshotGymDataset, FewshotGymTextToTextDataset\n\nclass HotpotQA(FewshotGymTextToTextDataset):\n\n def __init__(self):\n self.hf_identifier = \"hotpot_qa\"\n self.task_type = \"text to text\"\n self.license = \"unknown\"\n\n def get_context(self, dp):\n counter = 1\n context = \"\"\n titles = dp[\"supporting_facts\"][\"title\"]\n for sentences, title in zip(dp[\"context\"][\"sentences\"], dp[\"context\"][\"title\"]):\n if title in titles:\n context += \"\".join(sentences) + \" \"\n return context\n\n def map_hf_dataset_to_list(self, hf_dataset, split_name):\n lines = []\n for datapoint in hf_dataset[split_name]:\n context = self.get_context(datapoint)\n lines.append((\"question: \" + datapoint[\"question\"] + \" context: \" + context.strip(), datapoint[\"answer\"]))\n return lines\n\n def load_dataset(self):\n return datasets.load_dataset(\"hotpot_qa\", \"distractor\")\n\ndef main():\n dataset = HotpotQA()\n\n for seed in [100, 13, 21, 42, 87]:\n train, dev, test = dataset.generate_k_shot_data(k=32, seed=seed, path=\"../data/\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"facebookresearch/MetaICL","sub_path":"preprocess/hotpot_qa.py","file_name":"hotpot_qa.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"67"} +{"seq_id":"19738441515","text":"import pydicom\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport datetime\nfrom skimage import measure\n\n\n\nclass DICOMimage:\n # Sets the initial parameters for a standard DICOM Image (REF:1 in supporting document)\n def __init__(self, path):\n self.path = path\n self.data = pydicom.read_file(path)\n self.pixels = self.data.pixel_array\n self.type = self.data[0x18, 0x6031].value\n self.date = DICOMimage.reformat_date(self.data[0x8, 0x23].value)\n self.seriel = self.data[0x18, 0x1000].value\n self.manufacturer = self.data[0x8, 0x70].value\n self.scannermodel = self.data[0x8, 0x1090].value\n\n # Extracts image location data from DICOM header in form (x1, y1, x2, y2, x(centre of image))\n self.region = [self.data[0x18, 0x6011][0][0x18, 0x6018].value,\n self.data[0x18, 0x6011][0][0x18, 0x601A].value,\n self.data[0x18, 0x6011][0][0x18, 0x601C].value,\n 
self.data[0x18, 0x6011][0][0x18, 0x601E].value,\n self.data[0x18, 0x6011][0][0x18, 0x6020].value + self.data[0x18, 0x6011][0][0x18, 0x6018].value]\n self.grayscale()\n\n # Shows the image\n def showimage(self):\n if self.type == \"LINEAR\":\n plt.imshow(self.pixels)\n plt.show()\n else:\n plt.imshow(self.refactored)\n plt.show()\n\n # Prints pixel array in full\n def pixelarray(self):\n np.set_printoptions(threshold=sys.maxsize)\n return self.pixels\n\n def islinear(self):\n if self.type == \"LINEAR\":\n return True\n else:\n return False\n\n #If image is RGB, this method converts it to grayscale\n def grayscale(self):\n pixels = self.pixels\n if self.data[0x28, 0x2].value == 3:\n self.pixels = pixels[:, :, 2]\n\n # Used in the crop_bottom method to condense a list of pixel values down by taking the mean of sets of pixel values (e.g. set of 5 pixel values is averaged to 1 mean value)\n def condense(self, List, factor):\n new_length = int(len(List) / factor)\n standard_deviation = []\n mean_list = []\n old_list = List[:]\n for i in range(new_length):\n mean = []\n for element in range(factor):\n mean.append(List.pop(0))\n mean_list.append(np.mean(mean))\n standard_deviation.append(np.std(mean) / self.minmax(old_list))\n return standard_deviation, mean_list\n\n # Crops the bottom of the image to remove unwanted blank space / noise\n def crop_bottom(self):\n if self.type == \"LINEAR\":\n image = self.pixels\n else:\n image = self.refactored\n\n width = int(image.shape[1] / 10)\n mean_values = self.middle_values(image, width)\n factor_constant = 15\n factor = int(len(mean_values) / factor_constant)\n standard_dev_list, mean_list = self.condense(mean_values, factor)\n index_cut = self.cutoff_index(standard_dev_list, mean_list, factor)\n\n if self.type == \"LINEAR\":\n self.pixels = image[:index_cut, :]\n else:\n self.refactored = image[:index_cut, :]\n\n # Analyses the image, returns coefficient of variation, skew, and low values for the pixel distribution (REF:2)\n def analyse(self):\n if self.type == \"LINEAR\":\n pixels = self.pixels\n else:\n pixels = self.refactored\n\n columns = [sum(pixels[:,i]) for i in range(pixels.shape[1])]\n cov = self.cov(columns)\n skew = self.skew(columns)\n low_L = self.low(columns[:int(len(columns) / 10)], columns)\n low_CL = self.low(columns[int(len(columns) / 10):int(len(columns) * (3 / 10))], columns)\n low_C = self.low(columns[int(len(columns) * (3 / 10)):int(len(columns) * (7 / 10))], columns)\n low_CR = self.low(columns[int(len(columns) * (7 / 10)):int(len(columns) * (9 / 10))], columns)\n low_R = self.low(columns[int(len(columns) * (9 / 10)):], columns)\n return cov, skew, low_L, low_CL, low_C, low_CR, low_R\n\n #Reformats the date of the image to a suitable format\n @staticmethod\n def reformat_date(date):\n year = int(date[:4])\n if date[4] == '0':\n month = int(date[5])\n else:\n month = int(date[4:6])\n if date[6] == '0':\n day = int(date[7])\n else:\n day = int(date[6:8])\n return datetime.date(year, month, day).strftime('%d,%m,%Y')\n\n # Looks down a column of pixels and determines if more than a threshold amount of pixels are non-zero\n @staticmethod\n def nonzero_threshold(pixels, threshold):\n nonzeros = sum(1 for pixel in pixels if pixel.any() !=0)\n if nonzeros/len(pixels) >= threshold:\n return True\n else:\n return False\n\n # Determines the point at which the standard deviation is below a threshold value (used in crop_bottom method)\n @staticmethod\n def cutoff_index(standard_dev_list, mean_list, factor):\n for index, value in 
enumerate(standard_dev_list):\n if value < 0.015 and mean_list[index] < 230:\n new_index = index * factor\n return new_index\n elif index == len(standard_dev_list) - 1:\n return None\n\n # Finds range of a set of data\n @staticmethod\n def minmax(val_list):\n min_val = min(val_list)\n max_val = max(val_list)\n return max_val - min_val\n\n # Finds a set of pixels in the middle of the image (used in crop_bottom method)\n @staticmethod\n def middle_values(image, width):\n middle = int(image.shape[1] / 2)\n mean_values = []\n for i in range(-(int(width / 2)), (int(width / 2))):\n values = []\n for pixel in range(image.shape[0]):\n values.append(image[pixel, middle + i] / width)\n if mean_values == []:\n mean_values = values\n else:\n for pixel in range(len(mean_values)):\n mean_values[pixel] += values[pixel]\n return mean_values\n\n # Finds the coefficient of variation from a set of data\n @staticmethod\n def cov(columns):\n std = np.std(columns)\n mean = np.mean(columns)\n return (std / mean) * 100\n\n # Finds the skew of a set of data\n @staticmethod\n def skew(columns):\n n = len(columns)\n mean = np.mean(columns)\n m1 = 0\n m3 = 0\n for column in columns:\n m1 += (column - mean) ** 2\n m3 += (column - mean) ** 3\n\n m1 = m1 * (1/(n-1))\n m3 = m3 * (1/n)\n skew = m3 / (m1 ** (3/2))\n return skew\n\n # Finds the low of a set of data\n @staticmethod\n def low(segment, columns):\n median = np.median(columns)\n lowest = 0\n for element in segment:\n if lowest == 0:\n lowest = (element - median) / median\n elif ((element - median) / median) < lowest:\n lowest = (element - median) / median\n return abs(lowest) * 100\n\n# Class for linear DICOM images (REF:3)\nclass linearDICOMimage(DICOMimage):\n\n #crops the image to remove information from the outside FINISH!!!!!!!\n def main_crop(self):\n pixels = self.pixels\n region = self.region\n self.region[4] = region[4] - region[0]\n self.pixels = pixels[region[1]:region[3], region[0]:region[2]]\n self.data.PixelData = self.pixels.tobytes()\n\n # Crops the sides of the image\n def crop_sides(self):\n pixels = self.pixels\n centre = self.region[4]\n\n for h in range(pixels.shape[1] - centre):\n values = []\n for n in range(pixels.shape[0] - 1):\n values.append(pixels[n, centre + h])\n if self.nonzero_threshold(values, 0.05):\n continue\n else:\n self.pixels = pixels[:, centre - (h-2):centre + (h-2)]\n break\n\n # Crops the sides of the image if the image does not have the region DICOM tags\n def alt_crop_sides(self):\n pixels = self.pixels\n centre = int(pixels.shape[1] / 2)\n for h in range(pixels.shape[1] - centre):\n values = []\n for n in range(pixels.shape[0] - 1):\n values.append(pixels[n, centre + h])\n if self.nonzero_threshold(values, 0.05):\n continue\n else:\n pixels = pixels[:,:centre + (h)]\n break\n\n for h in range(pixels.shape[1] - centre):\n values = []\n for n in range(pixels.shape[0] - 1):\n values.append(pixels[n, centre - h])\n if self.nonzero_threshold(values, 0.05):\n continue\n else:\n pixels = pixels[:,centre - h:]\n break\n self.pixels = pixels\n\n # Crops the image if it does not have the region DICOM tags\n def alternative_crop(self):\n image = self.pixels\n\n # get the pixel information\n cutoff = int(0.08 * image.shape[0])\n image = image[cutoff:, :]\n\n # convert to a black and white image\n bw = (image > 0)\n\n # find connected white regions\n labels = measure.label(bw, connectivity=1)\n properties = measure.regionprops(labels)\n\n # empty area list to add to and then find the biggest area\n\n maxArea = 0\n maxIndex = 
0\n\n for prop in properties:\n # print('Label: {} >> Object size: {}'.format(prop.label, prop.area))\n if prop.area > maxArea:\n maxArea = prop.area\n maxIndex = prop.label\n\n bboxCoord = properties[maxIndex - 1].bbox\n minx = bboxCoord[1]\n miny = bboxCoord[0]\n maxx = bboxCoord[3]\n maxy = bboxCoord[2]\n\n if miny > int(bw.shape[0] / 6):\n bw = bw[:miny, :]\n labels = measure.label(bw, connectivity=1)\n properties = measure.regionprops(labels)\n maxArea = 0\n maxIndex = 0\n\n # loop over the connected white regions and select the largest region size\n for prop in properties:\n if prop.area > maxArea:\n maxArea = prop.area\n maxIndex = prop.label\n\n # crop the original image to the bounding box of the maximum white region\n\n bboxCoord = properties[maxIndex - 1].bbox\n\n minx_new = bboxCoord[1]\n miny_new = bboxCoord[0]\n maxx_new = bboxCoord[3]\n maxy_new = bboxCoord[2]\n\n if maxy_new - miny_new > 0.05 * image.shape[0]:\n croppedImage = image[miny_new:maxy_new, minx_new:maxx_new]\n else:\n croppedImage = image[miny:maxy, minx:maxx]\n else:\n croppedImage = image[miny:maxy, minx:maxx]\n\n self.pixels = croppedImage\n # save header as one channel\n self.data[0x28, 0x2].value = 1\n plt.imshow(croppedImage)\n plt.show()\n\n# Class for curvlinear images (REF:4)\nclass curvedDICOMimage(DICOMimage):\n\n # Has extra attributes that help with refactoring the image\n def __init__(self, path):\n super().__init__(path)\n # Finds the coordinates of the top two points of the curved image and the coordinates of the middle of the sector\n # REF 4.1.2\n self.sectorcoords = self.find_top_values(), self.find_middle_value()\n # REF 4.1.1\n self.centre = self.circle_centre()\n\n\n\n # finds the coordinates of the two points at the top of the curved image (labelled x1,y1 and x2,y2 in diagram) CHANGE FOR REFERENCE PIXELS\n def find_top_values(self):\n xmiddle = self.region[4]\n height = self.region[1] + 1\n for index, pixel in enumerate(self.pixels[height, xmiddle:]):\n if pixel.all() != 0:\n return [height,xmiddle - index], [height, xmiddle + index]\n\n # finds the x and y coordinates of the middle of the top arc of the image (labelled xm, ym in diagram)\n def find_middle_value(self):\n xmiddle = self.region[4]\n s_height = self.region[1]\n for index, pixel in enumerate(self.pixels[s_height:, xmiddle]):\n if pixel.all() != 0:\n return [s_height + index, xmiddle]\n\n # Finds the centre of the circle that the image arcs follow (i.e. 
the origin of the signal)\n def circle_centre(self):\n x1, x2 = self.sectorcoords[0]\n middle = self.sectorcoords[1]\n m = middle[0] - x1[0]\n l = middle[1] - x1[1]\n r1 = (l**2 + m**2) / (2*m)\n h1 = int(np.sqrt(r1**2 - l**2))\n return [middle[0] - m - h1 , middle[1]]\n\n # Refactors the image, making it linear\n def refactor(self):\n STRETCH = 2\n\n\n print('cartesian top left point --> ' + str(self.sectorcoords[0][0]))\n print('cartesian centre --> ' + str(self.centre))\n imageleft = self.zero_coords(self.sectorcoords[0][0])\n imageright = self.zero_coords(self.sectorcoords[0][1])\n print('cartesian top left point zeroed --> ' + str(imageleft))\n print('cartesian top right point zeroed --> ' + str(imageright))\n # REF:4.1.4\n phi_max = self.cart2pol(imageleft[1], imageleft[0])[1]\n phi_min = self.cart2pol(imageright[1], imageright[0])[1]\n print('polar max angle --> ' + str(abs(phi_max)))\n # REF:4.1.3\n rho_min = int(self.cart2pol(imageleft[1], imageleft[0])[0])\n rho_max = int(self.pixels.shape[0] - self.centre[0])\n print('polar min radius --> ' + str(rho_min))\n print('vertical pixels --> ' + str(self.pixels.shape[0]))\n print('polar max radius --> ' + str(rho_max))\n # REF:4.1.5\n arc_length = abs(int(2 * (phi_max - phi_min) * rho_max))\n # REF:4.1.6\n phi_increment = (phi_max - phi_min) / arc_length\n print('arc length --> ' + str(arc_length))\n print('angle increment --> ' + str(phi_increment))\n\n #Creates a blank linear numpy array to input refactored data into REF:4.1.7/4.1.8\n refactored = np.ndarray(shape=(STRETCH * (rho_max - rho_min), arc_length))\n print(refactored.shape)\n\n x = 0\n #REF:4.2.2\n for j in range(arc_length):\n phi = phi_max - (j * phi_increment)\n y = 0\n # REF:4.1.9\n for i in range(STRETCH * rho_min, STRETCH * (rho_max - 1)):\n i = i / STRETCH\n cartesian = self.pol2cart(i, phi)\n cart_reset = self.reset_coords(cartesian)\n # REF:4.2.0\n pixel_val = self.nearest_neighbour(cart_reset[0], cart_reset[1])\n # REF:4.2.1\n refactored[y,x] = pixel_val\n\n y += 1\n x += 1\n\n self.refactored = refactored\n plt.imshow(self.refactored)\n plt.show()\n\n\n # Zeros the cartesian coordinate system around the centre of the circle\n def zero_coords(self, point):\n return (point[0] - self.centre[0], point[1] - self.centre[1])\n\n # Resets the cartesian coordinates back to what they were originally\n def reset_coords(self, point):\n return (point[0] + self.centre[0], point[1] + self.centre[1])\n\n # Interpolates using the nearest neighbour function\n def nearest_neighbour(self, y, x):\n y_round = int(round(y))\n x_round = int(round(x))\n return self.pixels[y_round, x_round]\n\n # Changes the coordinates from cartesian to polar\n @staticmethod\n def cart2pol(x, y):\n rho = np.sqrt(x ** 2 + y ** 2)\n phi = np.arctan2(y, x)\n return (rho, phi)\n\n # Changes coordinates from polar to cartesian\n @staticmethod\n def pol2cart(rho, phi):\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return (y, x)\n\n","repo_name":"ethanhinton/ultrasound_qa","sub_path":"DICOMimages.py","file_name":"DICOMimages.py","file_ext":"py","file_size_in_byte":15860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6056068698","text":"#%% ##############################second part: load data, and train the model############################ \nimport pickle\nfrom flair.data import Corpus\nfrom flair.datasets import SentenceDataset\nfrom sklearn.model_selection import train_test_split\nfrom flair.embeddings import WordEmbeddings, 
CharacterEmbeddings,StackedEmbeddings\n#%%\nfrom KD_sequence_tagger_model import SequenceTagger\n# from flair.models import SequenceTagger\n\npath = '/Users/Wu/Google Drive/'\nwith open(path+'data/data_LogitsLabel_25k.pickle', 'rb') as handle:\n data = pickle.load(handle)\n\ndata_train, data_test = train_test_split(data, test_size=0.2, random_state=42)\ndata_dev,data_test = train_test_split(data_test, test_size=0.5, random_state=42)\ncorpus: Corpus = Corpus(SentenceDataset(data_train),SentenceDataset(data_test),SentenceDataset(data_dev))\ntag_type = 'ner'\ntag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)\ncorpus: Corpus = corpus.downsample(0.1)\n\nembedding_types = [\n WordEmbeddings('glove'),\n CharacterEmbeddings(),\n]\nembeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)\n\ntagger: SequenceTagger = SequenceTagger(hidden_size=256,\n embeddings=embeddings,\n tag_dictionary=tag_dictionary,\n tag_type=tag_type,\n use_crf=True,\n reproject_embeddings=50,\n use_soft_labels=True,\n # use_logits = True,\n )\n\n#%% train the model \nfrom flair.trainers import ModelTrainer\ntrainer: ModelTrainer = ModelTrainer(tagger, corpus)\n\ntrainer.train('resources/taggers/test2.5k_softlabels_crf',\n learning_rate=0.1,\n mini_batch_size=10,\n max_epochs=15,\n checkpoint=True,\n )\n#%% continue training the model\n# checkpoint = 'resources/taggers/test/checkpoint.pt'\n# trainer = ModelTrainer.load_checkpoint(checkpoint, corpus) \n# trainer.train(path+'resources/taggers/test60ep',\n# learning_rate=0.05,\n# mini_batch_size=10,\n# max_epochs=50,\n# checkpoint=True) \n\n","repo_name":"wuqi057/distill_ner","sub_path":"KD_NER.py","file_name":"KD_NER.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16300530134","text":"import imp\nfrom lib2to3.pgen2 import driver\nfrom operator import imod\nfrom pickle import TRUE\nfrom selenium import webdriver\nimport constants as const\nfrom selenium.webdriver.chrome.options import Options\nfrom prettytable import PrettyTable\nfrom time import sleep\nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nimport os\nfrom selenium.common.exceptions import NoSuchElementException\nimport pull_reviews_data as mr\nimport shutil\n\nclass ReviewsResult(webdriver.Chrome):\n def __init__(self,tearDown = False,driver_path = const.DRIVER_PATH,File_name=''):\n self.driver_path = driver_path\n self.file_name = File_name\n print(File_name)\n options = Options()\n options.binary_location = const.APPLICATION_PATH\n #options.add_experimental_option('excludeSwitches', ['enable-logging'])\n super(ReviewsResult,self).__init__(executable_path=driver_path,options=options)\n self.implicitly_wait(5)\n self.maximize_window()\n self.oldCompany = ''\n self.company = ''\n def land_on_page(self,url):\n self.get(url)\n\n def land_on_default_page(self):\n self.get(const.WEB_URL)\n sleep(5)\n\n def quit_browser(self):\n self.quit()\n \n def last_scraped_reviews_listing_id(self):\n if (os.path.isfile(f'reviews_{self.file_name}.csv')):\n reviews_data = pd.read_csv(f'reviews_{self.file_name}.csv')\n last_row = reviews_data.iloc[-1:]\n return int(last_row['user_id'].iat[0])\n else:\n return 0\n\n def wait_for_listing_to_be_loaded(self):\n # pulling company name\n while self.company == self.oldCompany:\n try:\n self.company = 
str(self.find_element_by_css_selector(\n                    'h2[data-attrid=\"title\"]'\n                ).find_element_by_tag_name('span').get_attribute('innerHTML')).strip()\n                self.company = self.company.replace('&amp;', '&')  # unescape HTML-encoded ampersands\n            except:\n                pass\n        \n        self.oldCompany = self.company\n    def check_if_reviews_exists(self):\n        flag = True\n        try:\n            google_reviews_div = self.find_element_by_css_selector(\n                'div[data-attrid=\"kc:/local:lu attribute list\"]'\n            )\n        except:\n            flag = False\n        return flag\n\n    def pull_reviews_results(self):\n        user_id = ''\n        filename = ''\n        os.chdir('/Users/zainali/scraping1/GMB listings 1/files')\n        listing_data = pd.read_csv(self.file_name)\n        listing_data = listing_data.reset_index()\n        counter = 1\n        total_listings = len(listing_data)\n        last_scraped_user_id = self.last_scraped_reviews_listing_id()\n        for index, listingURL in listing_data.iterrows():\n\n            #checking to skip if already scraped reviews\n            if(int(listingURL['user_id']) > last_scraped_user_id):\n                #Opening listing\n                print(\"\\n\\n\\nOpening ID: \", listingURL['user_id'],' name: ',listingURL['company'],' from ', self.file_name)\n                self.land_on_page(listingURL['google_url'])\n                #sleep(5000)\n                #if(counter % 10 == 0):\n                #    self.refresh()\n\n                self.wait_for_listing_to_be_loaded()\n                if(self.company == listingURL['company']):\n                    if(self.check_if_reviews_exists()):\n                        #getting reviews of listing and storing in another csv\n                        Reviews = mr.Reviews(self,listingURL['user_id'],listingURL['filename'])\n                        #getting reviews of listing and storing in another csv\n                        if(counter == 1):\n                            Reviews.pull_reviews().to_csv(f'reviews_{self.file_name}.csv',index=False)\n                            print(\"Saving reviews in csv file for \", listingURL['company'])\n                        else:\n                            Reviews.pull_reviews().to_csv(f'reviews_{self.file_name}.csv', mode='a', index=False, header=False)\n                            print(\"Appending reviews in csv file for \", listingURL['company'])\n                        counter += 1\n                    else:\n                        print(\"No reviews found for \", listingURL['user_id'],' name: ',listingURL['company'])\n                else:\n                    print(\"Company name did not match old = \", f\"{listingURL['company']} != {self.company}\")\n            else:\n                print(\"Already scraped reviews till user_id: \",last_scraped_user_id)\n                print(\"Reviews already scraped for \", f\"{listingURL['user_id']} :{listingURL['company']}\")\n                counter += 1\n\n\n            total_listings -= 1\n            print(\"Remaining listing reviews: \", total_listings)\n\n        #moving file to reviews scraped file folder\n        target_dir = const.target_dir\n        source_dir = const.source_dir\n        shutil.move(os.path.join(source_dir, self.file_name), target_dir)\n        shutil.move(os.path.join(source_dir, f'reviews_{self.file_name}.csv'), target_dir)\n    \n\n    ","repo_name":"zainali89/Data-Cleaning-and-Scraping-Google-Maps","sub_path":"listing_page.py","file_name":"listing_page.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"30157346906","text":"\"\"\"\n# blockChain API Documentation\n# https://www.blockchain.com/api/blockchain_api\n#\n# Single Block\n# --------------------------------------------\n# https://blockchain.info/rawblock/$block_hash\n# You can also request the block to return in binary form\n# (Hex encoded) using ?format=hex\n#\\n\\n\"\"\"\nprint(__doc__)\n\nimport time\nimport requests\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pprint import pprint\n\nprint(\"read today's blocks\")\n\nurl = \"https://blockchain.info/blocks?format=json\"\nres = requests.get(url=url)\ndata = res.json()\n\nblocks = data['blocks']\n\nheader = 
[]\nheader_brief = []\n\nfor n in range(len(blocks)):\n height = blocks[n]['height']\n b_time = blocks[n]['time']\n b_hash = blocks[n]['hash']\n\n header.append([height, b_time, b_hash])\n header_brief.append([height, b_time, b_hash[:35]+'....'])\n\n\n# read yesterday's blocks\nstime = b_time - 24 * 60 * 60\n\n\n# read blocks generated during last 10 days\nfor n_day in range(0, 10):\n pass\n\nprint(\"\\n[_Height_],[__Time__], [________Block-Hash 35-heading__________],\")\npprint(header_brief)\n\n\n\n\n\n\n\n\"\"\"\n# json file format\npprint(blocks)\n[\n {'hash': '000000000000000000054058cb421254c58d7c12c4e18970a07ef41d0cbc170e',\n 'height': 550928,\n 'main_chain': True,\n 'time': 1542785999},\n\n {'hash': '0000000000000000001647f96c634d9230f97c92779e5a502cabe07acd65b8c1',\n 'height': 550927,\n 'main_chain': True,\n 'time': 1542782621},\n]\n\"\"\"\n","repo_name":"onitonitonito/block_chain_study","sub_path":"_practice/1-02_bitcoin_block.py","file_name":"1-02_bitcoin_block.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"29874501403","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Interpreter for user-entered commands.\"\"\"\n\nimport cmd\nimport colorise\nimport importlib\nimport shlex\n\nimport pyrigate\nimport pyrigate.gpio as gpio\nimport pyrigate.mail\nfrom pyrigate.log import output, warn\nfrom pyrigate.user_settings import settings\nfrom pyrigate.utils.printing import print_dict, print_columns, print_list\n\n\nclass CommandInterpreter(cmd.Cmd):\n \"\"\"Interpreter for user-entered commands.\"\"\"\n\n def __init__(self, controller, prompt='> '):\n super(CommandInterpreter, self).__init__()\n self._controller = controller\n self.prompt = prompt\n\n def split_command(self, line):\n \"\"\"Split a user-entered line into a command and its arguments.\"\"\"\n tokens = shlex.split(line)\n\n return tokens[0], [] if len(tokens) == 1 else tokens[1:]\n\n def expect_args(self, command_name, command, count):\n \"\"\"Check that a command is given the number of arguments it expects.\"\"\"\n args = shlex.split(command)\n\n if len(args) != count:\n if count == 0:\n msg = \"no arguments\"\n elif count == 1:\n msg = '{0} argument'\n else:\n msg = '{0} arguments'\n\n output(\"Command '{0}' expected {1}\",\n command_name,\n msg.format(count))\n return None\n\n return args[0] if count == 1 else args\n\n def columnise(self, mapping):\n \"\"\"Print dictionary keys and values in two columns.\"\"\"\n max_width = max(10, len(max(mapping, key=len)))\n\n for key in mapping:\n colorise.fprint('{{fg=white,bold}}{0:<{1}} {{reset}} {2}'\n .format(key, max_width, mapping[key]))\n\n def do_version(self, line):\n \"\"\"Print pyrigate, python and raspberry pi versions.\"\"\"\n output(pyrigate.all_versions())\n\n def do_reload(self, line):\n \"\"\"Reload user settings.\"\"\"\n importlib.reload(pyrigate.user_settings)\n output('Reloaded settings')\n\n def do_test_mail(self, line):\n \"\"\"Test the mail system by sending a mail to the given address.\"\"\"\n if not settings['email']:\n output('Please set email settings to send test mail')\n elif not settings['email']['sender']:\n output('Please set email.sender to send test mail')\n elif not settings['email']['subscribers']:\n output('Please set email.subscribers to send test mail')\n else:\n output(\"Sending mail to 'localhost'\")\n output(\"From: {0}\", settings['email']['sender'])\n output(\"To : {0}\", \", \".join(settings['email']['subscribers']))\n output(\"Start debug 
server 'sudo python -m smtpd -c \"\n                   \"DebuggingServer -n localhost:25' to see result\")\n\n            try:\n                pyrigate.mail.send_mail(\n                    'Test',\n                    settings['email']['sender'],\n                    settings['email']['subscribers'],\n                    'Subject: Test\\nThis is a test mail sent from pyrigate',\n                    server='localhost',\n                    port=25\n                )\n            except TimeoutError:\n                output(\"Operation timed out...\")\n\n    def do_pump(self, line):\n        \"\"\"Pump a specific amount (dl, cm, ml etc.).\n\n        pump <name> <amount>\n\n        You can also use 'on' and 'off' to control the pump.\n\n        \"\"\"\n        args = self.expect_args('pump', line, 2)\n\n        if args:\n            pump = self._controller.get_pump(args[0])\n\n            if not pump:\n                warn('No pump with that name')\n                return\n\n            cmd = args[1].lower()\n\n            if cmd == 'on':\n                pump.activate()\n                output(\"Pump '{0}' activated\".format(pump.name))\n            elif cmd == 'off':\n                pump.deactivate()\n                output(\"Pump '{0}' deactivated\".format(pump.name))\n            else:\n                try:\n                    pump.pump(float(args[1]))\n                except ValueError:\n                    warn(\n                        \"Cannot convert '{0}' to a floating-point value\"\n                        .format(args[1])\n                    )\n\n    def do_pumps(self, line):\n        \"\"\"Show all loaded pumps.\"\"\"\n        pumps = self._controller.pumps\n\n        if pumps:\n            self.columnise(pumps)\n        else:\n            print('No pumps loaded')\n\n    def do_sensor(self, line):\n        \"\"\"Query the value of a sensor.\"\"\"\n        arg = self.expect_args('sensor', line, 1)\n        sensor = self._controller.get_sensor(arg)\n\n        if sensor:\n            output('Current value is {0} (analog: {1})'\n                   .format(sensor.read(), sensor.analog))\n        else:\n            output(\"No sensor called '{0}' registered\".format(arg))\n\n    def do_sensors(self, line):\n        \"\"\"Show all loaded sensors.\"\"\"\n        if self._controller.sensors:\n            self.columnise(self._controller.sensors)\n        else:\n            print('No sensors loaded')\n\n    def do_settings(self, line):\n        \"\"\"List current settings.\"\"\"\n        print_dict(settings)\n\n    def do_configs(self, line):\n        \"\"\"Print currently loaded plant configurations.\"\"\"\n        configs = self._controller.configs\n\n        if not configs:\n            output('No configurations loaded')\n        else:\n            print_columns(\n                [[name, config.path] for name, config in configs.items()],\n                headers=['Name', 'Path']\n            )\n\n    def do_config(self, line):\n        \"\"\"List a configuration.\"\"\"\n        arg = self.expect_args('config', line, 1)\n\n        if arg:\n            configs = self._controller.configs\n\n            if arg in configs:\n                config = configs[arg]\n\n                print_list([\n                    ('Name', config.name),\n                    ('Description', config.description),\n                    ('Path', config.path),\n                    ('Pump', config.scheme['pump']),\n                    ('Amount', config.scheme['amount']),\n                    ('Schedule', config.schedule_description),\n                    ('Running?', self._controller.is_job_running(arg)),\n                ])\n            else:\n                print(\"Unknown plant configuration '{0}'\".format(arg))\n\n    def do_select(self, line):\n        \"\"\"Select the plant configuration to use.\"\"\"\n        arg = self.expect_args('select', line, 1)\n\n        if arg:\n            self._controller.select_config(arg)\n\n            output(\"Selected plant configuration '{0}'\".format(arg))\n\n    def do_rpi_specs(self, line):\n        \"\"\"Print the Raspberry Pi's specifications.\"\"\"\n        if gpio.mocked():\n            output(\n                'Cannot get Raspberry Pi specs, gpio access is being mocked'\n            )\n        else:\n            for key, value in pyrigate.rpi_specs().items():\n                print(\"{0:<20} {1}\".format(key, value))\n\n    def do_read_pin(self, line):\n        \"\"\"Read a value from a gpio input pin.\"\"\"\n        arg = self.expect_args('read', line, 1)\n\n        if arg:\n            if gpio.mocked():\n                output('Cannot read pin, gpio access is being mocked')\n            else:\n                pin = int(arg)\n                output(\"Read value '{0}' from pin '{1}'\", gpio.input(pin), pin)\n\n    def do_write_pin(self, line):\n        \"\"\"Write a HIGH or LOW value 
to a gpio output pin.\"\"\"\n        args = self.expect_args('write', line, 2)\n\n        if args:\n            if gpio.mocked():\n                output('Cannot write pin, gpio access is being mocked')\n            else:\n                pin = int(args[0])\n                value = int(args[1])\n\n                if value in (gpio.LOW, gpio.HIGH):\n                    output(\"Wrote '{0}' on pin '{1}'\",\n                           gpio.output(pin, value), pin)\n                else:\n                    output(\"Output value must be either '{0}' or '{1}'\",\n                           gpio.LOW, gpio.HIGH)\n\n    def do_schedule(self, line):\n        \"\"\"Schedule a config or all jobs.\n\n        > schedule (start | stop) (<config name> | 'all')\n\n        \"\"\"\n        action, config_name = self.expect_args('schedule', line, 2)\n\n        if action == 'start':\n            if config_name == 'all':\n                self._controller.schedule_tasks()\n            else:\n                did_start = self._controller.start_job(config_name)\n\n                if did_start:\n                    output(f\"Started job '{config_name}'\")\n                else:\n                    output('Job already running')\n        elif action == 'stop':\n            if config_name == 'all':\n                self._controller.cancel_tasks()\n            else:\n                did_stop = self._controller.stop_job(config_name)\n\n                if did_stop:\n                    output(f\"Stopped job '{config_name}'\")\n                else:\n                    output('Job does not exist or is not running')\n        else:\n            output(f\"Unknown action '{action}'\")\n\n    def do_jobs(self, line):\n        \"\"\"List all running jobs.\"\"\"\n        jobs = self._controller.all_jobs\n\n        if not jobs:\n            output('No running jobs')\n        else:\n            print_columns(\n                [\n                    [name, job.tag, job.runs, job.running, job.description]\n                    for name, job in jobs.items()\n                ],\n                headers=['Name', 'Tag', 'Runs', 'Running?', 'Schedule'],\n                padding=6,\n            )\n\n    def do_quit(self, line):\n        \"\"\"Quit pyrigate.\"\"\"\n        raise KeyboardInterrupt\n\n    def emptyline(self):\n        \"\"\"Do not repeat the last command, just prompt the user again.\"\"\"\n        pass\n\n    def default(self, line):\n        \"\"\"Handle unknown commands.\"\"\"\n        command, args = self.split_command(line)\n\n        args = \"\" if not args\\\n            else \" with argument(s) {0}\".format(\", \".join(args))\n\n        output(\"Unrecognised command '{0}'{1}\".format(command, args))\n","repo_name":"pyrigate/pyrigate","sub_path":"pyrigate/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":9919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"39691548873","text":"\"\"\"filter.py\n\nTake a contexts file and a list of terms and create a new contexts file that has\nonly the terms in the list of terms.\n\nUsage:\n\n    python filter.py CONTEXTS_IN CONTEXTS_OUT TERM_LIST\n\nExample:\n\n    python filter.py annotate.terms.context.txt annotate.terms.filtered.context.txt good_terms.txt\n\n\"\"\"\n\n\nimport sys, codecs\nfrom utils import TermContexts\n\ninfile = sys.argv[1]\noutfile = sys.argv[2]\ntermfile = sys.argv[3]\n\ncontexts = TermContexts(infile, termfile)\n#contexts.pp()\n\nout = codecs.open(outfile, 'w', encoding='utf-8')\n\nout.write(contexts.info)\nout.write(\"# ## filtering notes\\n\")\nout.write(\"#\\n\")\nout.write(\"# Created with filter.py from %s\\n\" % infile)\nout.write(\"# Keeping only the terms in %s\\n\" % termfile)\nout.write(\"#\\n\")\nfor t in contexts.terms:\n    t.write_as_raw_data(out)\n","repo_name":"techknowledgist/techknowledgist","sub_path":"ontology/annotation/tool/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"8166344042","text":"import contextlib\nimport io\nimport tempfile\nfrom pathlib import Path\nfrom typing import Any, ContextManager, Dict, List, Optional, Tuple\n\nimport pyarrow as pa\nimport pyarrow.parquet  # pa.parquet is not loaded by \"import pyarrow\" alone\nfrom 
cjwmodule.http import httpfile\nfrom cjwmodule.testing.i18n import i18n_message\n\nfrom googlesheets import FetchResult, RenderError, render\n\nDEFAULT_FILE = {\n \"id\": \"aushwyhtbndh7365YHALsdfsdf987IBHJB98uc9uisdj\",\n \"name\": \"Police Data\",\n \"url\": \"http://example.org/police-data\",\n \"mimeType\": \"application/vnd.google-apps.spreadsheet\",\n}\n\n\ndef P(file=DEFAULT_FILE, has_header=True):\n return dict(file=file, has_header=has_header)\n\n\n@contextlib.contextmanager\ndef _temp_parquet_file(table: pa.Table) -> ContextManager[Path]:\n with tempfile.NamedTemporaryFile() as tf:\n path = Path(tf.name)\n pa.parquet.write_table(table, path, version=\"2.0\", compression=\"SNAPPY\")\n yield path\n\n\n@contextlib.contextmanager\ndef _temp_httpfile(\n url: str,\n status_line: str,\n body: bytes,\n headers: List[Tuple[str, str]] = [(\"Content-Type\", \"text/html; charset=utf-8\")],\n) -> ContextManager[Path]:\n with tempfile.NamedTemporaryFile() as tf:\n path = Path(tf.name)\n httpfile.write(path, {\"url\": url}, status_line, headers, io.BytesIO(body))\n yield path\n\n\ndef _assert_table_file(path: Path, expected: Optional[pa.Table]) -> None:\n if expected is None:\n assert path.stat().st_size == 0\n return\n else:\n assert path.stat().st_size > 0\n\n with pa.ipc.open_file(path) as f:\n actual = f.read_all()\n assert actual.column_names == expected.column_names\n for actual_column, expected_column in zip(\n actual.itercolumns(), expected.itercolumns()\n ):\n assert actual_column.type == expected_column.type\n assert actual_column.to_pylist() == expected_column.to_pylist()\n if pa.types.is_dictionary(actual_column.type):\n for output_chunk, expected_chunk in zip(\n actual_column.iterchunks(), expected_column.iterchunks()\n ):\n assert (\n output_chunk.dictionary.to_pylist()\n == expected_chunk.dictionary.to_pylist()\n )\n\n\n@contextlib.contextmanager\ndef _render(params: Dict[str, Any], fetch_result: Optional[FetchResult]):\n with tempfile.NamedTemporaryFile() as empty_file:\n output_path = Path(empty_file.name)\n errors = render((), params, output_path, fetch_result=fetch_result)\n yield output_path, errors\n\n\ndef test_render_no_file():\n with _render(P(), None) as (path, errors):\n assert errors == []\n _assert_table_file(path, None)\n\n\ndef test_render_fetch_error():\n with tempfile.NamedTemporaryFile() as empty_file:\n fetch_errors = [RenderError(i18n_message(\"x\", {\"y\": \"z\"}))]\n with _render(P(), FetchResult(Path(empty_file.name), fetch_errors)) as (\n path,\n errors,\n ):\n _assert_table_file(path, None)\n assert errors == [i18n_message(\"x\", {\"y\": \"z\"})]\n\n\ndef test_render_deprecated_parquet():\n with _temp_parquet_file(pa.table({\"A\": [1, 2], \"B\": [3, 4]})) as fetched_path:\n with _render(P(), FetchResult(fetched_path)) as (path, errors):\n _assert_table_file(path, pa.table({\"A\": [1, 2], \"B\": [3, 4]}))\n assert errors == []\n\n\ndef test_render_deprecated_parquet_warning():\n with _temp_parquet_file(pa.table({\"A\": [1, 2], \"B\": [3, 4]})) as fetched_path:\n fetch_errors = [RenderError(i18n_message(\"truncated table\"))]\n with _render(P(), FetchResult(fetched_path, fetch_errors)) as (path, errors):\n _assert_table_file(path, pa.table({\"A\": [1, 2], \"B\": [3, 4]}))\n assert errors == [i18n_message(\"truncated table\")]\n\n\ndef test_render_deprecated_parquet_has_header_false():\n # Back in the day, we parsed during fetch. But has_header can change\n # between fetch and render. 
We were lazy, so we made fetch() follow the\n    # most-common path: has_header=True. Then, in render(), we would \"undo\"\n    # the change if has_header=False. This mangles the input data, but we\n    # have no choice because we lost the input data. It was unwise. We have\n    # abandoned supporting these files.\n    with _temp_parquet_file(pa.table({\"A\": [1, 2], \"B\": [3, 4]})) as fetched_path:\n        with _render(P(has_header=False), FetchResult(fetched_path)) as (path, errors):\n            _assert_table_file(path, pa.table({\"A\": [1, 2], \"B\": [3, 4]}))\n            assert errors == [i18n_message(\"error.parquet.cannotRemoveHeader\")]\n\n\ndef test_render_has_header_true():\n    with _temp_httpfile(\n        \"https://blah\",\n        \"200 OK\",\n        b\"A,B\\na,b\",\n        headers=[(\"content-type\", \"text/csv\")],\n    ) as fetch_path:\n        with _render(P(has_header=True), FetchResult(fetch_path)) as (path, errors):\n            _assert_table_file(path, pa.table({\"A\": [\"a\"], \"B\": [\"b\"]}))\n            assert errors == []\n\n\ndef test_render_has_header_false():\n    with _temp_httpfile(\n        \"https://blah\",\n        \"200 OK\",\n        b\"1,2\\n3,4\",\n        headers=[(\"content-type\", \"text/csv\")],\n    ) as fetch_path:\n        with _render(P(has_header=False), FetchResult(fetch_path)) as (path, errors):\n            _assert_table_file(\n                path,\n                pa.table(\n                    {\n                        \"Column 1\": pa.array([1, 3], pa.int8()),\n                        \"Column 2\": pa.array([2, 4], pa.int8()),\n                    }\n                ),\n            )\n            assert errors == []\n","repo_name":"CJWorkbench/googlesheets","sub_path":"tests/test_render.py","file_name":"test_render.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"74305586452","text":"import trimesh\nimport numpy as np\n\n# Load the STL that contains several components\nmesh = trimesh.load('original_single_arm_v22_axis_2.STL')\n# mesh = trimesh.load('original_single_arm_axis_2.STL')\nmesh.show()\n# # Get the components that need scaling and the other components\n# axis2 has 10 components in total, 3 of which need scaling\n    # base_meshes 1 5 8 9 10\n    # other meshes 2 4 6 7\n\nprint(\"axis_2 mesh component count:\",len(mesh.split()))\nfor i in range(len(mesh.split())):\n    print(i)\n    scaled_mesh = mesh.split()[i]\n    scaled_mesh.show()\n\n# Load the STL that contains several components\nmesh = trimesh.load('original_single_arm_v22_axis_3.STL')\nmesh.show()\n# # Get the components that need scaling and the other components\n# axis3 has 8 components in total, 5 of which need scaling\n    # base_meshes 1 4 6 7 8\n    # other meshes 2 3\nprint(\"axis_3 mesh component count:\",len(mesh.split()))\nfor i in range(len(mesh.split())):\n    scaled_mesh = mesh.split()[i]\n    scaled_mesh.show()\n\n\n# # Split the mechanism into separate components\n# components = mesh.split()\n# print(len(components))\n# components = mesh.split()[1]\n# print(components)\n# # Get the vertex positions of the first component\n# index_component = 0 # index of the component whose position is needed\n# vertices = mesh.vertices[mesh.geometry[index_component].vertices]\n\n# # Compute the centre position of the component\n# center = vertices.mean(axis=0)\n\n# print(center) # print the component's centre position","repo_name":"SamKaiYang/Optimization-of-robotic-arm-design_dynamixel","sub_path":"dynamics/src/dynamics/meshes/stl_show.py","file_name":"stl_show.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"28907607874","text":"import math\nfrom typing import List\nclass Solution:\n    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:\n        max_x = int(math.log(bound, x))+1 if x != 1 else 1\n        max_y = int(math.log(bound, y))+1 if y != 1 else 1\n        ans = set()\n        for i in range(max_x):\n            for j in range(max_y):\n                a = x**i + y**j\n                if a > bound:\n                    break\n                ans.add(a)\n        return list(ans)\n\n    def fast(self, x, y, bound):\n        res_set = set()\n        for i in range(bound):\n            tmp1 = x ** i \n            if tmp1 > bound:\n                break\n            for j in 
range(bound):\n                tmp2 = y ** j\n                tmp = tmp1 + tmp2\n                if tmp > bound:\n                    break\n                res_set.add(tmp)\n        return res_set\n","repo_name":"longhao54/leetcode","sub_path":"easy/970.py","file_name":"970.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"30466992726","text":"#!/usr/bin/env python3\n\n\"\"\"Get Wikidata dump records as a JSON stream (one JSON object per line)\"\"\"\n# Modified script taken from this link: \"https://www.reddit.com/r/LanguageTechnology/comments/7wc2oi/does_anyone_know_a_good_python_library_code/dtzsh2j/\"\nimport gzip\nimport json\nfrom pyArango.connection import *\n\n\ndef wikidata(filename):\n    with gzip.open(filename, mode='rt') as f:\n        f.read(2)\n        for line in f:\n            try:\n                yield json.loads(line.rstrip(',\\n'))\n            except json.decoder.JSONDecodeError:\n                continue\n\n\nif __name__ == '__main__':\n    import argparse\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n        description=__doc__\n    )\n    parser.add_argument(\n        'dumpfile',\n        help=(\n            'a Wikidata dumpfile from: '\n            'https://dumps.wikimedia.org/wikidatawiki/entities/'\n            'latest-all.json.gz'\n        )\n    )\n    args = parser.parse_args()\n\n    conn = Connection(arangoURL=\"http://10.117.0.47:8529\",\n                      username=\"root\", password=\"root\")\n    arangodb = conn[\"kgms\"]\n    entity = arangodb[\"entity\"]\n    link = arangodb[\"link\"]\n\n    for entity_dict in wikidata(args.dumpfile):\n        try:\n            item = entity[entity_dict['id']]\n        except Exception:\n            item = entity.createDocument()\n            item['_key'] = entity_dict['id']\n        if 'labels' in entity_dict:\n            item['labels'] = entity_dict['labels']\n        if 'descriptions' in entity_dict:\n            item['descriptions'] = entity_dict['descriptions']\n        if 'aliases' in entity_dict:\n            item['aliases'] = entity_dict['aliases']\n        if 'sitelinks' in entity_dict:\n            item['sitelinks'] = entity_dict['sitelinks']\n\n        item.save()\n        \n        source = entity[entity_dict['id']]\n        \n        for key in entity_dict['claims']:\n            for statement in entity_dict['claims'][key]:\n                if 'mainsnak' in statement:\n                    if 'datavalue' in statement['mainsnak']:\n                        edge = link.createEdge(statement)\n                        edge['_key'] = statement['id']\n                        if statement['mainsnak']['datavalue']['type'] == 'wikibase-entityid':\n                            try:\n                                target = entity[statement['mainsnak']['datavalue']['value']['id']]\n                            except Exception:\n                                item = entity.createDocument()\n                                item['_key'] = statement['mainsnak']['datavalue']['value']['id']\n                                item.save()\n                                target = entity[statement['mainsnak']['datavalue']['value']['id']]\n                        else:\n                            target = entity[entity_dict['id']]\n                        edge.links(source, target) \n                        edge.save()","repo_name":"yulongbb/knowledge_graph","sub_path":"wikidata/wikidata.py","file_name":"wikidata.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39374486567","text":"from django.urls import include, path, re_path\n\nfrom catalog import views\n\napp_name = 'catalog'\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    re_path(r'^books/$', views.BookListView.as_view(), name='book_list'),\n    re_path(r'^authors/$', views.AuthorListView.as_view(), name='author_list'),\n    re_path(r'^authors/add/$', views.AuthorAddView.as_view(), name='author_add'),\n    re_path(r'^books/(?P<pk>\d+)/$', views.BookDetailView.as_view(), name='book_detail'),\n    re_path(r'^books/delete/(?P<pk>\d+)/$', views.delete_book, name='book_delete'),\n    re_path(r'^authors/(?P<pk>\d+)/$', views.AuthorDetailView.as_view(), 
name='author_detail'),\n    re_path(r'^authors/delete/(?P<pk>\d+)/$', views.delete_author, name='author_delete'),\n    re_path(r'^mybooks/(?P<pk>\d+)$', views.LoanedBooksByUserListView.as_view(), name='my_borrowed'),\n]\n","repo_name":"KenArsen/world_books","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"19010879907","text":"import json\nimport os\nimport time\nfrom time import sleep\nfrom Browser import ElementState, SelectAttribute\nfrom robot.api.deco import library\nfrom qbrix.core.qbrix_robot_base import QbrixRobotTask\n\n@library(scope='GLOBAL', auto_keywords=True, doc_format='reST')\nclass QbrixCMS(QbrixRobotTask):\n\n    \"\"\"Qbrix CMS Library\"\"\"\n\n    def go_to_digital_experiences(self):\n        \"\"\"Go to the Digital Experiences App\"\"\"\n        self.shared.go_to_app(\"Digital Experiences\")\n\n    def download_all_content(self):\n\n        # Get Workspace Names\n        results = self.salesforceapi.soql_query(\"SELECT Name FROM ManagedContentSpace WHERE IsDeleted=False\")\n        if results[\"totalSize\"] == 0:\n            return\n\n        # Download content from each workspace\n        for workspace in results[\"records\"]:\n            self.download_cms_content(workspace[\"Name\"])\n\n    def upload_cms_import_file(self, file_path, workspace):\n\n        \"\"\"\n        Uploads the Content from the CMS import .zip file\n        @param file_path: Relative path to the .zip file containing the export\n        @param workspace: Name of the workspace to upload the content to\n        @return:\n        \"\"\"\n\n        self.go_to_digital_experiences()\n        sleep(5)\n\n        # Go To Workspace Page\n        if workspace:\n\n            # Get the Application ID\n            results = self.salesforceapi.soql_query(\n                f\"SELECT Id FROM ManagedContentSpace where Name = '{workspace}' LIMIT 1\")\n\n            if results[\"totalSize\"] == 1:\n                app_id = results[\"records\"][0][\"Id\"]\n\n                # Go to the app\n                self.browser.go_to(f\"{self.cumulusci.org.instance_url}/lightning/cms/spaces/{app_id}\", timeout='30s')\n\n                # Open Import Menu\n                iframe_handler = self.shared.iframe_handler()\n                drop_down_menu_selector = f\"{iframe_handler} div.slds-page-header__row >> button.slds-button:has-text('Show menu')\"\n                import_button_selector = f\"{iframe_handler} div.slds-page-header__row >> lightning-menu-item.slds-dropdown__item:has-text('Import Content')\"\n\n                self.browser.click(drop_down_menu_selector)\n                sleep(1)\n\n                # Upload CMS File\n                upload_promise = self.browser.promise_to_upload_file(file_path)\n                self.browser.click(import_button_selector)\n                self.browser.wait_for_all_promises()\n\n                start_time = time.time()\n                timeout = 30\n\n                while True:\n\n                    error_message_selector = \"div.modal-body >> div.slds-p-around_medium:has-text('Error encountered during import')\"\n                    confirm_checkbox_selector = \"div.modal-body >> span.slds-checkbox >> span.slds-checkbox_faux\"\n                    import_button_selector = \"button.slds-button:has-text('Import')\"\n\n                    if self.browser.get_element_count(error_message_selector) > 0:\n                        print(\"Error Occurred During File Upload. 
CMS Import Failed\")\n return\n \n if self.browser.get_element_count(confirm_checkbox_selector) > 0 or self.browser.get_element_count(import_button_selector) > 0:\n print(\"File Imported OK!\")\n break\n\n # Check if the timeout has been reached\n elapsed_time = time.time() - start_time\n if elapsed_time >= timeout:\n break\n\n time.sleep(1)\n \n self.browser.click(\"div.modal-body >> span.slds-checkbox >> span.slds-checkbox_faux\")\n sleep(1)\n self.browser.click(\"button.slds-button:has-text('Import')\")\n sleep(5)\n self.browser.click(\"button.slds-button:text('ok')\")\n sleep(2)\n\n else:\n print(\"Workspace cannot be None. Skipping\")\n return\n\n def download_cms_content(self, workspace):\n\n \"\"\"\n Initiate the export of a workspace to a content .zip file (which is emailed to the admin)\n @param workspace: Name of workspace\n @return:\n \"\"\"\n\n self.go_to_digital_experiences()\n sleep(5)\n\n # Go To Workspace Page\n if workspace:\n\n # Get the Application ID\n results = self.salesforceapi.soql_query(f\"SELECT Id FROM ManagedContentSpace where Name = '{workspace}' LIMIT 1\")\n\n if results[\"totalSize\"] == 1:\n app_id = results[\"records\"][0][\"Id\"]\n\n # Go to the app\n self.browser.go_to(f\"{self.cumulusci.org.instance_url}/lightning/cms/spaces/{app_id}\", timeout='30s')\n iframe_handler = self.shared.iframe_handler()\n\n # Enhanced workspace handler\n if self.browser.get_element_count(f\"{iframe_handler} lightning-badge.slds-badge:has-text('Enhanced'):visible\") > 0:\n return\n\n # Select all checkboxes\n no_items = False \n while True:\n\n total_cms_elements = self.browser.get_element(f\"{iframe_handler} p.slds-page-header__meta-text\")\n\n if total_cms_elements:\n\n innertext_for_total = self.browser.get_property(f\"{iframe_handler} p.slds-page-header__meta-text\", \"innerText\")\n\n if innertext_for_total == \"0 item(s)\":\n no_items = True\n break\n\n if innertext_for_total and \"+\" not in str(innertext_for_total):\n break\n\n if innertext_for_total and \"+\" in str(innertext_for_total):\n elements = self.browser.get_elements(f\"{iframe_handler} table.slds-table >> sfdc_cms-content-check-box-button\")\n for elem in elements:\n self.browser.scroll_to_element(elem)\n\n else:\n break\n \n if no_items:\n return\n\n elements = self.browser.get_elements(f\"{iframe_handler} table.slds-table >> sfdc_cms-content-check-box-button\")\n for elem in elements:\n self.browser.scroll_to_element(elem)\n self.browser.click(elem)\n\n # Open Export Menu\n\n drop_down_menu_selector = f\"{iframe_handler} div.slds-page-header__row >> button.slds-button:has-text('Show menu')\"\n import_button_selector = f\"{iframe_handler} div.slds-page-header__row >> lightning-menu-item.slds-dropdown__item:has-text('Export Content')\"\n\n self.browser.click(drop_down_menu_selector)\n sleep(1)\n\n self.browser.click(import_button_selector)\n sleep(2)\n self.browser.click(f\"{iframe_handler} button.slds-button:has-text('Export')\")\n sleep(5)\n\n def create_workspace(self, workspace_name, channels=[], enhanced_workspace=True):\n\n \"\"\"\n Create a new workspace\n @param workspace_name: Name of the workspace. This must be unique from other workspaces\n @param channels: Optional channels you want to target. Defaults to all available channels\n @param enhanced_workspace: Set to True if you are creating an Enhanced workspace, otherwise set to False. 
Defaults to True.\n @return:\n \"\"\"\n\n # Check for existing workspace\n results = self.salesforceapi.soql_query(f\"SELECT Id FROM ManagedContentSpace where Name = '{workspace_name}' LIMIT 1\")\n\n if results[\"totalSize\"] == 1:\n print(\"Workspace exists already, skipping.\")\n return\n\n # Go to Digital Experience Home and initiate Workspace creation\n self.go_to_digital_experiences()\n sleep(3)\n self.browser.go_to(f\"{self.cumulusci.org.instance_url}/lightning/cms/home/\", timeout='30s')\n sleep(3)\n self.browser.click(f\"{self.shared.iframe_handler()} span.label:text-is('Create a CMS Workspace'):visible\")\n\n # Enter initial information\n sleep(2)\n self.browser.click(\"lightning-input:has-text('Name') >> input.slds-input\")\n self.browser.fill_text(\"lightning-input:has-text('Name') >> input.slds-input\", workspace_name)\n\n # Handle enhanced workspace option\n if enhanced_workspace:\n self.browser.click(\"span.slds-text-heading_medium:text-is('Enhanced CMS Workspace')\")\n\n # Handle Channel Selection\n self.browser.click(\"button.nextButton:visible\")\n sleep(2)\n if len(channels) > 0:\n for channel in channels:\n if self.browser.get_element_count(f\"tr.slds-hint-parent:has-text('{channel}')\"):\n self.browser.click(f\"tr.slds-hint-parent:has-text('{channel}') >> div.slds-checkbox_add-button\")\n else:\n for checkbox_add_button in self.browser.get_elements(\"div.slds-checkbox_add-button\"):\n self.browser.click(checkbox_add_button)\n\n # Handle Contributors\n self.browser.click(\"button.nextButton:visible\")\n sleep(2)\n for checkbox_add_button in self.browser.get_elements(\"div.forceSelectableListViewSelectionColumn\"):\n self.browser.click(checkbox_add_button)\n\n # Handle Contributor Access Levels\n self.browser.click(\"button.nextButton:visible\")\n sleep(2)\n for combo_box in self.browser.get_elements(\"lightning-picklist:visible\"):\n self.browser.click(combo_box)\n sleep(1)\n self.browser.click(\"span.slds-listbox__option-text:has-text('Content Admin'):visible\")\n\n # Handle Language\n self.browser.click(\"button.nextButton:visible\")\n sleep(2)\n\n if not enhanced_workspace:\n self.browser.click(\"button.slds-button:has-text('Move selection to Selected'):visible\")\n self.browser.click(\"lightning-combobox.slds-form-element:has-text('Default Language'):visible\")\n self.browser.click(\"lightning-base-combobox-item:has-text('English (United States)'):visible\")\n\n # Complete Screen\n self.browser.click(\"button.nextButton:visible\")\n sleep(1)\n self.browser.click(\"button.nextButton:visible\")\n\n def generate_product_media_file(self):\n\n \"\"\"\n Generates a Product Media Mapping File, which stores information about Product List Images, Product Detail Images and Attachments related to the products.\n @return: .json file is created within the project and stored at this path: cms_data/product_images.json\n \"\"\"\n\n # Get All Active Products which have attached ElectronicMedia\n results = self.salesforceapi.soql_query(f\"SELECT Id, External_ID__c, Name from Product2 WHERE Id IN (Select ProductId from ProductMedia)\")\n\n if results[\"totalSize\"] == 0:\n print(\"No Products found with attached media\")\n return\n\n result_dict = {}\n self.shared.go_to_app(\"Commerce - Admin\")\n\n for product in results[\"records\"]:\n\n product_dict = {}\n\n # Set External ID\n product_dict.update({\"External_ID__c\": product[\"External_ID__c\"]})\n\n self.browser.go_to(f\"{self.cumulusci.org.instance_url}/lightning/r/Product2/{product['Id']}/view\", timeout='30s')\n sleep(4)\n\n 
self.browser.click(f\"div.uiTabBar >> span.title:text-is('Media')\")\n sleep(10)\n\n # Get Product Detail Images (Max. 8)\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\") > 0:\n product_detail_image_list = []\n product_detail_images = self.browser.get_elements(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\")\n if product_detail_images:\n for prod in product_detail_images:\n prod_property = self.browser.get_property(prod, \"alt\")\n if prod_property:\n print(prod_property)\n product_detail_image_list.append(prod_property)\n\n if len(product_detail_image_list) > 0:\n product_dict.update({\"ProductDetailImages\": product_detail_image_list})\n\n # Get Product List Image (Max. 1)\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\") > 0:\n product_image_list = []\n product_images = self.browser.get_elements(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\")\n if product_images:\n for prod in product_images:\n prod_property = self.browser.get_property(prod, \"alt\")\n if prod_property:\n print(prod_property)\n product_image_list.append(prod_property)\n\n if len(product_image_list) > 0:\n product_dict.update({\"ProductImages\": product_image_list})\n\n # Get Attachments (Max. 5)\n if self.browser.get_element_count(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\") > 0:\n attachment_list = []\n attachment_images = self.browser.get_elements(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\")\n if attachment_images:\n for prod in attachment_images:\n prod_property = self.browser.get_property(prod, \"title\")\n if prod_property:\n print(prod_property)\n attachment_list.append(prod_property)\n\n if len(attachment_list) > 0:\n product_dict.update({\"Attachments\": attachment_list})\n\n self.browser.click(f\"li.oneConsoleTabItem:has-text('{product['Name']}'):visible >> div.close\")\n\n result_dict.update({f\"Product_{product['External_ID__c']}\": product_dict})\n\n # Save dict to file\n if not os.path.exists(\"cms_data\"):\n os.makedirs(\"cms_data\", exist_ok=True)\n\n with open(\"cms_data/product_images.json\", \"w\") as save_file:\n json.dump(result_dict, save_file, indent=2)\n\n def reassign_product_media_files(self):\n\n \"\"\"\n Assigns Media Files stored in Salesforce CMS to the relevant Products in the target org.\n \"\"\"\n\n # Check for default file\n if not os.path.exists(\"cms_data/product_images.json\"):\n print(\"Missing CMS Definition File. Location: cms_data/product_images.json\")\n raise Exception(\"Required file for robot is missing: cms_data/product_images.json. Please check the file and try again.\")\n\n # Process Mapping File\n with open(\"cms_data/product_images.json\", \"r\") as cms_file:\n product_dict = json.load(cms_file)\n\n if product_dict:\n\n # Go to Admin Console\n self.shared.go_to_app(\"Commerce - Admin\")\n\n # Setup Selectors\n media_tab_selector = \"div.uiTabBar >> span.title:text-is('Media')\"\n\n # Process Product Records\n for product in dict(product_dict).items():\n\n results = self.salesforceapi.soql_query(f\"SELECT Id, External_ID__c, Name from Product2 WHERE External_ID__c = '{product[1]['External_ID__c']}' LIMIT 1\")\n\n if results[\"totalSize\"] == 0:\n print(f\"No Products found for the External ID Provided {product[1]['External_ID__c']}. 
Skipping...\")\n continue\n\n try:\n # Go To Record Page for Product and select Media tab\n self.browser.go_to(f\"{self.cumulusci.org.instance_url}/lightning/r/Product2/{results['records'][0]['Id']}/view\", timeout='30s')\n self.browser.wait_for_elements_state(media_tab_selector, ElementState.visible, timeout=\"10s\")\n self.browser.click(media_tab_selector)\n sleep(8)\n except TimeoutError:\n print(f\"Unable to access the Media tab for the current Product record with Id ({results['records'][0]['Id']}). Skipping...\")\n continue\n except Exception as e:\n raise e\n\n # Process Product Detail Images\n if \"ProductDetailImages\" in dict(product[1]).keys() and self.browser.get_element_count(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\") < 8:\n\n for product_detail_image in list(product[1][\"ProductDetailImages\"]):\n\n # Check Max. Number of Product Detail Images has not been reached\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\") == 8:\n print(\"The maximum number of images have already been assigned to the Product. Skipping...\")\n continue\n\n # Check that CMS content has not already been assigned\n skip = False\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\") > 0:\n product_detail_images = self.browser.get_elements(f\"article.slds-card:has-text('Product Detail Images'):visible >> img.fileCardImage:visible\")\n if product_detail_images:\n for prod in product_detail_images:\n prod_property = self.browser.get_property(prod, \"alt\")\n print(f\"Found alt text: {prod_property}\")\n if prod_property:\n if prod_property in list(product[1][\"ProductDetailImages\"]):\n print(\"Skipping duplicate...\")\n skip = True\n if skip:\n continue\n\n # Assign New Image\n\n self.browser.click(\"article.slds-card:has-text('Product Detail Images'):visible >> :nth-match(button.slds-button:text-is('Add Image'), 1)\")\n self.browser.wait_for_elements_state(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", ElementState.visible, timeout=\"10s\")\n self.browser.fill_text(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", product_detail_image)\n\n # Handle Search Results\n try:\n sleep(2)\n search_results = self.browser.get_elements(f\"tr.slds-hint-parent:has-text('{product_detail_image}'):visible\")\n if len(search_results) == 0:\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n if len(search_results) > 0:\n self.browser.click(\"tr:has(span:text-matches('^{}$')) >> th >> span.slds-checkbox_faux\".format(product_detail_image))\n self.browser.click(f\"button.slds-button:text-is('Save')\")\n self.browser.wait_for_elements_state(media_tab_selector, ElementState.visible, timeout=\"15s\")\n self.browser.click(media_tab_selector)\n except TimeoutError:\n print(\"Unable to find any matches for search results. Skipping...\")\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n else:\n print(\"The maximum number of images have already been assigned to the Product or there are no Product Detail Images to process. Skipping...\")\n\n # Process Product List Image\n\n if \"ProductImages\" in dict(product[1]).keys() and self.browser.get_element_count(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\") < 1:\n\n for product_image in list(product[1][\"ProductImages\"]):\n\n # Check Max. 
Number of Product List Images has not been reached\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\") == 1:\n print(\"The maximum number of images have already been assigned to the Product. Skipping...\")\n continue\n\n # Check that CMS content has not already been assigned\n skip = False\n if self.browser.get_element_count(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\") > 0:\n product_images = self.browser.get_elements(f\"article.slds-card:has-text('Product List Image'):visible >> img.fileCardImage:visible\")\n if product_images:\n for prod in product_images:\n prod_property = self.browser.get_property(prod, \"alt\")\n print(f\"Found alt text: {prod_property}\")\n if prod_property:\n if prod_property in list(product[1][\"ProductImages\"]):\n print(\"Skipping duplicate...\")\n skip = True\n if skip:\n continue\n\n # Assign New Image\n\n self.browser.click(\"article.slds-card:has-text('Product List Image'):visible >> :nth-match(button.slds-button:text-is('Add Image'), 1)\")\n self.browser.wait_for_elements_state(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", ElementState.visible, timeout=\"10s\")\n self.browser.fill_text(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", product_image)\n\n # Handle Search Results\n try:\n sleep(2)\n search_results = self.browser.get_elements(f\"tr.slds-hint-parent:has-text('{product_image}'):visible\")\n if len(search_results) == 0:\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n if len(search_results) > 0:\n self.browser.click(\"tr:has(span:text-matches('^{}$')) >> td >> span.slds-radio\".format(product_image))\n self.browser.click(f\"button.slds-button:text-is('Save')\")\n self.browser.wait_for_elements_state(media_tab_selector, ElementState.visible, timeout=\"15s\")\n self.browser.click(media_tab_selector)\n except TimeoutError:\n print(\"Unable to find any matches for search results. Skipping...\")\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n else:\n print(\"The maximum number of images have already been assigned to the Product or there are no Product List Images to process. Skipping...\")\n\n # Process Attachments\n\n if \"Attachments\" in dict(product[1]).keys() and self.browser.get_element_count(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\") < 5:\n\n for product_attachment in list(product[1][\"Attachments\"]):\n\n # Check Max. Number of Attachments has not been reached\n if self.browser.get_element_count(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\") == 5:\n print(\"The maximum number of attachments have already been assigned to the Product. 
Skipping...\")\n continue\n\n # Check that CMS content has not already been assigned\n skip = False\n if self.browser.get_element_count(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\") > 0:\n product_attachments = self.browser.get_elements(f\"article.slds-card:has-text('Attachments'):visible >> span.slds-file__text\")\n if product_attachments:\n for prod in product_attachments:\n prod_property = self.browser.get_property(prod, \"title\")\n print(f\"Found title text: {prod_property}\")\n if prod_property:\n if prod_property in list(product[1][\"Attachments\"]):\n print(\"Skipping duplicate...\")\n skip = True\n if skip:\n continue\n\n # Assign New Attachment\n\n self.browser.click(\"article.slds-card:has-text('Attachments'):visible >> :nth-match(button.slds-button:text-is('Add Attachment'), 1)\")\n self.browser.wait_for_elements_state(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", ElementState.visible, timeout=\"10s\")\n self.browser.fill_text(\"sfdc_cms-content-uploader-header.slds-col:visible >> input.slds-input\", product_attachment)\n\n # Handle Search Results\n try:\n sleep(2)\n search_results = self.browser.get_elements(f\"tr.slds-hint-parent:has-text('{product_attachment}'):visible\")\n if len(search_results) == 0:\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n if len(search_results) > 0:\n self.browser.click(\"tr:has(span:text-matches('^{}$')) >> th >> span.slds-checkbox_faux\".format(product_attachment))\n self.browser.click(f\"button.slds-button:text-is('Save')\")\n self.browser.wait_for_elements_state(media_tab_selector, ElementState.visible, timeout=\"15s\")\n self.browser.click(media_tab_selector)\n except TimeoutError:\n print(\"Unable to find any matches for search results. Skipping...\")\n self.browser.click(f\"button.slds-button:text-is('Cancel')\")\n continue\n else:\n print(\"The maximum number of attachments have already been assigned to the Product or there are no Product Attachments to process. 
Skipping...\")\n\n # Close Tab\n try:\n self.browser.click(f\"li.oneConsoleTabItem:has-text('{results['records'][0]['Name']}'):visible >> div.close\")\n except:\n continue\n\n def open_experience_cloud_collections_page(self, experience_cloud_name):\n \"\"\"Browses to the Collections Page of an Experience Cloud Site\"\"\"\n self.shared.go_to_setup_admin_page('SetupNetworks/home', 2)\n self.browser.wait_for_elements_state(\"iframe >>> table.zen-data\", ElementState.visible, \"15s\")\n if self.browser.get_element_count(f\"{self.shared.iframe_handler()} div.pbBody >> table.zen-data >> tr.dataRow:has-text('{experience_cloud_name}')\") > 0:\n self.browser.click(f\"{self.shared.iframe_handler()} div.pbBody >> table.zen-data >> tr.dataRow:has-text('{experience_cloud_name}') >> a.networkManageLink\")\n sleep(2)\n self.browser.switch_page('NEW')\n self.browser.wait_for_elements_state(\"a.js-workspace-contentManager\", ElementState.visible, \"15s\")\n self.browser.click(\"a.js-workspace-contentManager\")\n self.browser.wait_for_elements_state(\"a[id=cmcNodeItem-managedContentCollections]\", ElementState.visible, \"15s\")\n self.browser.click(\"a[id=cmcNodeItem-managedContentCollections]\")\n sleep(1)\n\n def generate_managed_content_collection_file(self, experience_cloud_name):\n \"\"\"Generate json file with details of collections\"\"\"\n self.open_experience_cloud_collections_page(experience_cloud_name)\n self.browser.wait_for_elements_state(\"table.slds-table\", ElementState.visible, \"15s\")\n\n collection_data_dict = dict({})\n\n # Gather Current Details\n tr_elements = self.browser.get_elements(\"table.slds-table >> tr:has(a)\")\n\n for elem in tr_elements:\n self.browser.new_page(self.browser.get_property(f\"{elem} >> a\", \"href\"))\n self.browser.wait_for_elements_state(\"h1.slds-page-header__title\", ElementState.visible, \"15s\")\n\n # Scrape Details\n collection_name = self.browser.get_property(\"h1.slds-page-header__title\", \"innerText\")\n content_type = self.browser.get_property(\"li:has(p[title='Content Type']) >> :nth-match(p, 2)\", \"innerText\")\n\n listview_name = \"\"\n collection_type = \"\"\n collection_content_name_list = []\n\n if self.browser.get_element_count(\"li:has(p[title='Content Source']):visible\") > 0:\n collection_type = \"SALESFORCE\"\n listview_name = self.browser.get_property(\"li:has(p[title='List View']) >> :nth-match(p, 2)\", \"innerText\")\n else:\n collection_type = \"CMS\"\n for table_row in self.browser.get_elements(\"table.slds-table >> tr:has(a)\"):\n collection_content_name_list.append(self.browser.get_property(f\"{table_row} >> a\", \"innerText\"))\n \n # Add Details to Dict\n collection_data_dict.update({\n collection_name: {\n \"collection_type\": collection_type,\n \"content_type\": content_type,\n \"related_cms_content\": collection_content_name_list,\n \"object_name\": content_type,\n \"listview\": listview_name\n }\n }\n )\n\n self.browser.close_page()\n\n if collection_data_dict and len(collection_data_dict):\n save_location = os.path.join(\"datasets\", \"cms_collection_data\")\n os.makedirs(save_location, exist_ok=True)\n\n with open(os.path.join(save_location, \"cms_collection_dataset.json\"), \"w\", encoding=\"utf-8\") as save_file:\n save_file.write(json.dumps(collection_data_dict, indent=4))\n\n def upload_cms_collections(self, site_name, upload_file_location=os.path.join(\"datasets\", \"cms_collection_data\", \"cms_collection_dataset.json\"), ):\n\n if not os.path.exists(upload_file_location):\n raise Exception(\"No CMS Collection Data 
Found. Unable to upload.\")\n \n with open(upload_file_location, 'r', encoding=\"utf-8\") as dataset_file:\n file_data = json.load(dataset_file)\n\n if file_data:\n\n self.open_experience_cloud_collections_page(site_name)\n \n # Wait for Collections to Load\n no_collection_mode = False\n found_element = False\n counter = 1\n while counter < 10:\n sleep(1)\n create_button_count = self.browser.get_element_count(\"button.newcollection:has-text('Create Collection')\")\n table_count = self.browser.get_element_count(\"table.slds-table\")\n\n if create_button_count > 0:\n no_collection_mode = True\n found_element = True\n print(\"Found Create Collection button...\")\n break\n\n if table_count > 0:\n no_collection_mode = False\n found_element = True\n print(\"Found Table button...\")\n break \n\n counter += 1\n\n if not found_element:\n print(\"No Supported Elements Found\")\n return \n \n # Set Defaults for Robot\n\n modal_next_button = \"div.modal-footer >> button.nextButton\"\n collections_to_add = set()\n\n # Add Content\n\n if no_collection_mode:\n print(\">>> Adding All Collections\")\n\n for collection, collection_details in file_data.items():\n\n # Check if Collection Exists\n if no_collection_mode:\n collections_to_add.add(collection)\n else:\n print(f\">>> Checking Collection {collection}\")\n collection_found = False\n for table_row in self.browser.get_elements(\"table.slds-table >> tr:has(a)\"):\n selection_text = self.browser.get_property(f\"{table_row} >> a\", \"innerText\")\n if selection_text == collection:\n collection_found = True\n break\n \n if collection_found:\n print(f\">>> {collection} Found. Skipping\")\n else:\n print(f\">>> {collection} will be created\")\n collections_to_add.add(collection)\n\n if len(collections_to_add) > 0:\n\n # Check That Salesforce CRM Connections Have Approved Objects\n salesforce_objects = set()\n for key, collection_detail in file_data.items():\n if collection_detail.get(\"collection_type\") == \"SALESFORCE\":\n salesforce_objects.add(collection_detail.get(\"content_type\"))\n\n if len(salesforce_objects) > 0:\n # Check Object is Approved\n self.browser.click(\"a[id=cmcNodeItem-content]\")\n sleep(1)\n self.browser.click(\"a[id=cmcNodeItem-managedContentTypes]\")\n sleep(2)\n\n for obj in salesforce_objects:\n object_exists = False\n if self.browser.get_element_count(\"table.slds-table:visible\") > 0:\n if self.browser.get_element_count(f\"table.slds-table:visible >> tbody >> tr >> th:has-text('{obj}')\") > 0:\n object_exists = True\n \n if not object_exists:\n # Add Object\n self.browser.click(\"button:has-text('Add CRM Connections')\")\n self.browser.fill_text(\"div.communitySetupManagedContentMultiSelectTable >> input.slds-input\", obj)\n sleep(5)\n if self.browser.get_element_count(f\"div.listContainer >> table >> tbody >> tr:has-text('{obj}')\") < 1:\n print(f\"Unable to find Object called '{obj}'. 
Skipping\")\n continue\n self.browser.click(f\"div.listContainer >> table >> tbody >> :nth-match(tr:has-text('{obj}'), 1) >> th >> div.slds-truncate\")\n self.browser.click(\"button.saveButton:visible\")\n sleep(1)\n\n self.browser.click(\"a[id=cmcNodeItem-managedContentCollections]\")\n sleep(1)\n\n for collection_add in collections_to_add:\n\n # Create Collection\n if no_collection_mode:\n self.browser.click(\"button.slds-button:has-text('Create Collection')\")\n else:\n self.browser.click(\"button.slds-button:text('New')\")\n\n self.browser.wait_for_elements_state(\"div.stepContainer\", ElementState.visible, \"15s\")\n\n # Add Collection Details\n collection_data = file_data.get(collection_add)\n\n collection_type = collection_data.get(\"collection_type\")\n content_type = collection_data.get(\"content_type\")\n cms_collection_content = collection_data.get(\"related_cms_content\")\n sf_list_view = collection_data.get(\"listview\")\n\n # Set Name\n self.browser.fill_text(\"div.stepContainer >> div.slds-form-element__control >> input.slds-input:visible\", collection_add)\n self.browser.press_keys(\"div.stepContainer >> div.slds-form-element__control >> input.slds-input:visible\", \"Enter\")\n\n # Set Type\n if collection_type == \"SALESFORCE\":\n\n # Add Salesforce Details\n self.browser.click(\"div.stepContainer >> div.slds-visual-picker >> label.crm\")\n self.browser.click(modal_next_button)\n sleep(1)\n self.browser.click(\"div.stepContainer >> button.slds-combobox__input:visible\")\n sleep(1)\n self.browser.click(f\"div.activeStep >> div.slds-listbox >> lightning-base-combobox-item:has-text('{content_type}')\")\n sleep(5)\n if self.browser.get_element_count(f\"div.activeStep >> table >> tbody >> tr:has-text('{sf_list_view}')\") < 1:\n print(f\"Unable to Find List View '{sf_list_view}'\")\n continue\n self.browser.click(f\"div.activeStep >> table >> tbody >> tr:has-text('{sf_list_view}') >> span.slds-radio\")\n self.browser.click(modal_next_button)\n sleep(2)\n \n\n elif collection_type == \"CMS\":\n # Add CMS Content\n self.browser.click(\"div.stepContainer >> div.slds-visual-picker >> label.cms\")\n self.browser.click(modal_next_button)\n sleep(1)\n self.browser.select_options_by(\"div.slds-select_container >> select.slds-select\", SelectAttribute.label, content_type)\n self.browser.click(\"div.activeStep >> div.slds-visual-picker >> label.manual\")\n self.browser.click(modal_next_button)\n sleep(2)\n for cms_content in cms_collection_content:\n self.browser.fill_text(\"div.activeStep >> input.slds-input\", cms_content)\n sleep(3)\n if self.browser.get_element_count(f\"div.listContainer >> table >> tbody >> tr:has-text('{cms_content}')\") < 1:\n print(f\"Unable to find CMS Content called '{cms_content}'. 
Skipping\")\n continue\n self.browser.click(f\"div.listContainer >> table >> tbody >> tr:has-text('{cms_content}') >> th >> div.slds-truncate\")\n self.browser.click(modal_next_button)\n sleep(2)\n\n else:\n print(\"TYPE NOT FOUND\")\n\n self.browser.wait_for_elements_state(\"a[id=cmcNodeItem-managedContentCollections]\", ElementState.visible, \"15s\")\n self.browser.click(\"a[id=cmcNodeItem-managedContentCollections]\")\n sleep(2)\n\n","repo_name":"sfdc-qbranch/nextgen-cert-demoarchitecht-vamsi","sub_path":"qbrix/robot/QbrixCMS.py","file_name":"QbrixCMS.py","file_ext":"py","file_size_in_byte":40214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7833042347","text":"#导入银行模块\r\nimport bankaccount\r\n\r\n#定义主函数\r\ndef main():\r\n\r\n #用户输入初始额度\r\n start_bal = float(input('请输入银行初始存款:'))\r\n\r\n #将用户输入量存入账户中生成的对象\r\n\r\n savings = bankaccount.Bankaccount(start_bal)\r\n\r\n print(type(savings))\r\n\r\n print(f'当前账户中余额为{savings.get_balance()}')\r\n\r\n #用户输入存款\r\n save = float(input('存入:'))\r\n savings.deposit(save)\r\n print(f'存入{save},当前账户中余额为{savings.get_balance()}')\r\n\r\n\r\n #用户输入开销\r\n pay = float(input('开销是:'))\r\n #从银行中扣除开销\r\n savings.withdraw(pay)\r\n\r\n print(f'消费{pay}后,当前账户中余额为{savings}')\r\n\r\nmain()\r\n","repo_name":"Demon1630/python","sub_path":"银行模块调用.py","file_name":"银行模块调用.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22377358557","text":"import ds_queue\nimport ds_stack\nimport ds_linked_list\n\nmyQueue = ds_queue.Queue()\nmyStack = ds_stack.Stack()\nmyLinkedList = ds_linked_list.LinkedList()\nprint(\"Welcome\")\n\n\n# While to repeat the main menu\nwhile True:\n\t# While to repeat the menu of each option\n\twhile True:\n\t\ttry:\n\t\t\toption = int(input(\"Choose the option\\n1.- Queue\\n2.- Stack\\n3.- Linked list\\n4.- Exit\\nYour answer: \"))\n\t\t\tif(option!=1 and option!=2 and option!=3 and option!=4):\n\t\t\t\tprint(\"\\nOops! That was no valid option. Try again...\")\n\t\t\telse:\t\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Oops! That was no valid number. 
Try again...\")\n\n\t# Options for queue\n\tif option==1:\n\t\twhile True:\n\t\t\topt = int(input(\"\\nYou choose queue, what do you want to do?\\n1.- Check if the queue is empty\\n2.- Enqueue\\n3.- Dequeue\\n4.- Peek\\n5.- Exit\\nYour answer: \"))\n\t\t\tif opt==1:\n\t\t\t\tprint(\"\\nYou choosed 'Check if the queue is empty'\")\n\t\t\t\t# is_empty method returns a boolean, checking the value to know if the queue os empty or not\n\t\t\t\tif(myQueue.is_empty()):\n\t\t\t\t\tprint(\"\\nThe queue is empty\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\nThe queue is not empty\")\n\t\t\telif opt==2:\n\t\t\t\tprint(\"You choosed 'Enqueue'\")\n\t\t\t\tdata = input(\"Intruduce the new element\\nYour answer:\")\n\t\t\t\t# The enqueue method ask for the value to add as a parameter\n\t\t\t\tmyQueue.enqueue(data)\n\t\t\t\tprint(data, \" enqueued succesfully\")\n\t\t\telif opt==3:\n\t\t\t\tprint(\"You choosed 'Dequeue'\")\n\t\t\t\t# dequeue method returns the value dequeued\n\t\t\t\tdequeuedData = myQueue.dequeue()\n\t\t\t\tprint (dequeuedData, \" dequeued succesfully\")\n\t\t\telif opt==4:\n\t\t\t\tprint(\"You choosed 'Peek'\")\n\t\t\t\t# peek method returns the the firts value added\n\t\t\t\tdataFront = myQueue.peek()\n\t\t\t\tprint(\"The value is: \", dataFront)\n\t\t\telif opt==5:\n\t\t\t\tprint(\"You choose exit\\n\")\n\t\t\t\tbreak\n\n\t# Options for stack\n\telif option==2:\n\t\twhile True:\n\t\t\topt = int(input(\"\\nYou choose stack, what do you want to do?\\n1.- Check if is_empty\\n2.- push\\n3.- pop\\n4.- peek\\n5.- Exit\\nYour answer: \"))\n\t\t\tif opt==1:\n\t\t\t\tprint(\"\\nYou choosed 'Check if the stack is empty'\")\n\t\t\t\t# is_empty method returns a boolean, checking the value to know if the queue os empty or not\n\t\t\t\tif(myStack.is_empty()):\n\t\t\t\t\tprint(\"\\nThe stack is empty\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\nThe stack is not empty\")\n\t\t\telif opt==2:\n\t\t\t\tprint(\"You choosed 'Push'\")\n\t\t\t\tdata = input(\"Intruduce the new element\\nYour answer:\")\n\t\t\t\t# The push method ask for the value to add as a parameter\n\t\t\t\tmyStack.push(data)\n\t\t\t\tprint(data, \" stack succesfully\")\n\t\t\telif opt==3:\n\t\t\t\tprint(\"You choosed 'pop'\")\n\t\t\t\t# pop method returns the value deleted\n\t\t\t\tpopeed_data = myStack.pop()\n\t\t\t\tprint (popeed_data, \" popped succesfully\")\n\t\t\telif opt==4:\n\t\t\t\tprint(\"You choosed 'Peek'\")\n\t\t\t\t# peek method returns the the last value added\n\t\t\t\ttopData = myStack.peek()\n\t\t\t\tprint(\"The value is: \", topData)\n\t\t\telif opt==5:\n\t\t\t\tprint(\"You choose exit\\n\")\n\t\t\t\tbreak\n\t\t\t\n\t# Options for linked list\n\telif option==3:\n\t\twhile True:\n\t\t\toptQ = int(input(\"\\nYou choose linked list, what do you want to do?\\n1.- Insert at beginning\\n2.- Insert at end\\n3.- Insert after node\\n4.- Delete node\\n5.- Display\\n6.- Exit\\nYour answer: \"))\n\t\t\tif optQ==1:\n\t\t\t\tprint(\"You choose 'Insert at beginning'\")\n\t\t\t\tdata = input(\"Introduce the new element\\nYour answer: \")\n\t\t\t\tmyLinkedList.insert_at_beginning(data)\n\t\t\t\tprint(data, \" enqueued correctly\")\n\t\t\telif optQ==2:\n\t\t\t\tprint(\"You choose 'Insert at end'\")\n\t\t\t\tdata = input(\"Introduce the new element\\nYour answer: \")\n\t\t\t\tmyLinkedList.insert_at_end(data)\n\t\t\t\tprint(data, \" enqueued correctly at the end\")\n\t\t\telif optQ==3:\n\t\t\t\tprint(\"You choose 'Insert after node'\")\n\t\t\t\ttarget_data = input(\"After which elemente you want to add the element\\nYour answer: \")\n\t\t\t\tdata = 
input(\"Introduce the new element\\nYour answer:\")\n\t\t\t\tmyLinkedList.insert_after_node(target_data, data)\n\t\t\t\tprint(data, \"enqueued after \", target_data, \" correctly\")\n\t\t\telif optQ==4:\n\t\t\t\tprint(\"You choose 'delete node'\")\n\t\t\t\ttarget_data = input(\"Which element do you want to delete?\\nYour answer: \")\n\t\t\t\tmyLinkedList.delete_node(target_data)\n\t\t\t\tprint(target_data, \"Deleted correctly\")\n\t\t\telif optQ==5:\n\t\t\t\tprint(\"You choose 'Display'\")\n\t\t\t\tmyLinkedList.display()\n\t\t\telif optQ==6:\n\t\t\t\tprint(\"You choose 'Exit'\")\n\t\t\t\tbreak\n\t\t\n\t# Option for exit\n\telif option==4:\n\t\tprint(\"Thank u! we love u teacher\")\n\t\tbreak\n\t","repo_name":"eduardoCorpus7193/xochitl-2-proyecto-u2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9582672149","text":"# Cree un programa que incorpore el módulo sys, al cual desde la terminal\n# se le puedan pasar tres parámetros. El programa debe tomar los parámetros\n# e indicar en la terminal si son múltiplos de dos.\n\nimport sys\n\nnum1 = int(sys.argv[1])\nnum2 = int(sys.argv[2])\nnum3 = int(sys.argv[3])\n\nif num1 % 2 == 0 and num2 % 2 == 0 and num3 % 2 == 0:\n print(\"Los 3 numeros son multiplos de 2\")\nelif num1 % 2 != 0 and num2 % 2 == 0 and num3 % 2 == 0:\n print(f\"{num1} no es multiplo de 2\")\nelif num1 % 2 == 0 and num2 % 2 != 0 and num3 % 2 == 0:\n print(f\"{num2} no es multiplo de 2\")\nelif num1 % 2 == 0 and num2 % 2 == 0 and num3 % 2 != 0:\n print(f\"{num3} no es multiplo de 2\")\nelif num1 % 2 != 0 and num2 % 2 != 0 and num3 % 2 == 0:\n print(f\"{num1} y {num2} no son multiplos de 2\")\nelif num1 % 2 == 0 and num2 % 2 == 0 and num3 % 2 != 0:\n print(f\"{num1} y {num3} no son multiplos de 2\")\nelif num1 % 2 == 0 and num2 % 2 != 0 and num3 % 2 != 0:\n print(f\"{num2} y {num3} no son multiplos de 2\")\nelse:\n print(\"Ningun numero es multiplo de 2\")\n","repo_name":"IgnacioG2000/python-course","sub_path":"Unidad_1/ejercicios_unidad_1/1_2.py","file_name":"1_2.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30811485032","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# https://stackoverflow.com/questions/55210627/regex-for-iban-mask/55210903#55210903\n\nalphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nibans = ['IBAN NL 91ABNA0417463300',\n 'IBAN NL91ABNA0417164300',\n 'Iban: NL 69 ABNA 4020325616']\n\n\ndef normalize(item):\n stage1 = \"\".join(item.split()).upper()\n stage2 = ''\n for item in stage1:\n if item in alphabet:\n stage2 = stage2 + item\n\n return stage2.split('IBAN')[1]\n\n\nif __name__ == '__main__':\n print('\\n'.join('{} {}'.format(i[2:4], i[8:]) for i in [normalize(iban) for iban in ibans]))\n","repo_name":"caa06d9c/Examples","sub_path":"Python/basic/regexp/iban/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"9667391008","text":"from django.core.files.uploadedfile import SimpleUploadedFile\nimport httpretty\n\nfrom ..models import Overlay\nfrom ..maps import google_maps\n\nPLUME_FILE_FIXTURE = \"test_fixtures/plume.png\"\nSATALLITE_MAP_FILE_FIXTURE = \"test_fixtures/satallite_map_fixture.png\"\n\ndef create_overlay(name, latitude=Overlay.GHG_OFFICE_LATITIUDE, 
longtitude=Overlay.GHG_OFFICE_LONGTITUDE, process=True):\n plume = SimpleUploadedFile(name=PLUME_FILE_FIXTURE, content=open(PLUME_FILE_FIXTURE, 'rb').read(), content_type='image/png')\n overlay = Overlay(\n name=name,\n latitude=latitude,\n longtitude=longtitude,\n plume=plume,\n )\n overlay.full_clean()\n\n if process:\n httpretty.enable() # enable HTTPretty so that it will monkey patch the socket module\n httpretty.register_uri(\n httpretty.GET,\n google_maps.GoogleMaps.BASE_URL,\n body= _satallite_map_contents()\n )\n overlay.process_and_save()\n httpretty.disable()\n httpretty.reset()\n else:\n overlay.save()\n return overlay\n\ndef _satallite_map_contents():\n return open(SATALLITE_MAP_FILE_FIXTURE, 'rb').read()","repo_name":"tahnok/plumify","sub_path":"overlays/tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8335660777","text":"import pygame\n\nfrom lists.ezquestions import child_safety_questions_ez\nfrom lists.medquestions import child_safety_questions_med\nfrom lists.hardquestions import child_safety_questions_hard\nfrom sprites.enemy import Enemy\nfrom sprites.player import Player\nimport random\npygame.font.init()\nscreen = pygame.display.set_mode((1280, 720))\nfont = pygame.font.Font(None, 36)\nbg = pygame.image.load(\"assets/bg.png\").convert_alpha()\n# Define colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nHOVER_COLOR = (200, 200, 200) \nHOVER_SCALE = 1.05\n\n\nclass ezQuestions():\n def __init__(self) -> None:\n self.ezquestions = list(child_safety_questions_ez.values())\n self.question_index = 0\n self.current_question_index = 0\n self.hovered_option = None\n self.player = Player(100, 100)\n self.enemy = Enemy(200, 200)\n\n def display_question(self, question_data, hovered_option):\n question = question_data[\"question\"]\n options = question_data[\"options\"]\n\n question_text = font.render(question, True, BLACK)\n question_rect = question_text.get_rect(topleft=(50, 50))\n pygame.draw.rect(screen, WHITE, question_rect.inflate(20, 10), border_radius=10)\n screen.blit(question_text, question_rect)\n\n y_position = 150\n for option_index, option in enumerate(options):\n option_text = font.render(option, True, BLACK)\n option_rect = option_text.get_rect(topleft=(50, y_position))\n\n if self.hovered_option == option_index:\n # Apply a colorful hover animation\n pygame.draw.rect(screen, HOVER_COLOR, option_rect.inflate(20, 10), border_radius=10)\n option_text = font.render(option, True, WHITE) # Change text color when hovered\n scaled_width = int(option_rect.width * HOVER_SCALE)\n scaled_height = int(option_rect.height * HOVER_SCALE)\n scaled_text = pygame.transform.scale(option_text, (scaled_width, scaled_height))\n option_rect = scaled_text.get_rect(topleft=(50, y_position))\n else:\n pygame.draw.rect(screen, WHITE, option_rect.inflate(20, 10), border_radius=10)\n\n screen.blit(option_text, option_rect)\n y_position += 60\n def get_hovered_option(self):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n \n if self.current_question_index < len(self.ezquestions):\n\n self.hovered_option = self.ezquestions[self.current_question_index][\"correct_answer\"]\n\n for option_index, option_y in enumerate(range(150, 350, 50)):\n if option_y <= mouse_y < option_y + 50:\n \n self.hovered_option = option_index\n break\n def reset_game(self):\n self.current_question_index = 0\n self.player.x = 200\n self.enemy.x = 10\n 
random.shuffle(self.ezquestions) \n def finish(self):\n if self.player.x > 1280:\n screen.blit(screen, (0,0))\n pygame.display.update()\n pygame.time.delay(2000) # Display the \"You Win\" screen for 2 seconds\n self.reset_game() # Reset the game\n screen.blit(bg, (0, 0)) # Clear the screen\n\n # Check if you have more questions to display\n if self.current_question_index < len(self.ezquestions):\n self.display_question(self.ezquestions[self.current_question_index], self.hovered_option)\n\n def get_clicked_option(self):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n \n if self.current_question_index < len(self.ezquestions):\n correct_answer = self.ezquestions[self.current_question_index][\"correct_answer\"]\n if 150 <= mouse_y <= 200:\n global chosen_option\n chosen_option = self.ezquestions[self.current_question_index][\"options\"][0]\n elif 200 <= mouse_y <= 250:\n chosen_option = self.ezquestions[self.current_question_index][\"options\"][1]\n elif 250 <= mouse_y <= 300:\n chosen_option = self.ezquestions[self.current_question_index][\"options\"][2]\n elif 300 <= mouse_y <= 350:\n chosen_option = self.ezquestions[self.current_question_index][\"options\"][3]\n else:\n chosen_option = None\n \n if chosen_option == correct_answer and 150 <= mouse_y <= 350:\n self.current_question_index += 1\n return True\n \n\n else:\n return False\n ","repo_name":"pogrammar/Child-Safety-Trivia","sub_path":"questionconfig.py","file_name":"questionconfig.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35746540057","text":"import numpy as np\n\ndef loadData(fname):\n data = []\n with open(fname, 'r') as f:\n for l in f:\n data.append(l.split(\" \"))\n return np.array(data, float)\n\ndef logReg(npArr, t, lRate):\n w = np.array([0,0], float)\n x = npArr[:, [0,1]]\n y = npArr[:, 2]\n for i in range(t):\n g = 0\n for j in range(npArr.shape[0]):\n g +=y[j]*x[j]/(1+np.exp(y[j]*np.dot(w, x[j])))\n g = -g/npArr.shape[0]\n v = -g\n w += lRate*v\n return w\n","repo_name":"Rathcke/uni","sub_path":"ml/ass3/src/logistic_regr.py","file_name":"logistic_regr.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"1838089457","text":"#!/usr/bin/python3\n\"\"\"script used to get the content of a website\"\"\"\nimport urllib.request\n\n\ndef main():\n \"\"\"Start of the program\"\"\"\n url = \"https://alx-intranet.hbtn.io/status\"\n with urllib.request.urlopen(url) as response:\n html = response.read()\n print(\"Body response:\")\n print(\"- type: \" + str(type(html)))\n print(\"- content: \" + str(html))\n print(\"- utf8 content: \" + html.decode())\n\n\n\"\"\"\"\nBody response:$\n - type: $\n - content: b'OK'$\n - utf8 content: OK$\n\"\"\"\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ahmedelia/alx-higher_level_programming","sub_path":"0x11-python-network_1/0-hbtn_status.py","file_name":"0-hbtn_status.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34521726644","text":"from utils import CifarDataset, trainer\r\nimport torch\r\nfrom torch.utils.data.dataloader import DataLoader\r\nimport time\r\nimport torch.optim as optim\r\nimport torch.nn as nn\r\nimport os\r\nimport subprocess\r\n\r\ndef main(params):\r\n #global variables\r\n result = {}\r\n model_index = 0\r\n model_names = ['vgg11_cifar10', 
'vgg13_cifar10', 'vgg19_cifar10']\r\n model_name = model_names[model_index]\r\n\r\n # check device\r\n device = torch.device('cuda') if torch.cuda.is_available() and params['device'] == 'gpu' else torch.device('cpu')\r\n print(\"using %s\"%(device))\r\n\r\n # start to supervise\r\n subprocess.Popen('nvidia-smi dmon -d 1 -s u > ' + model_name + '_gpu' + '.out', shell=True)\r\n time.sleep(10)\r\n\r\n # start to prepare data\r\n start_prepare_data = int(time.time() * 1000)\r\n # step 1: download zipfile from minio\r\n # debug:download_from_minio('data', params['data_object'])\r\n # step 2: prepare dataset\r\n dataset = CifarDataset(params['data_object'])\r\n # step 3: prepare dataloader\r\n dataloader = DataLoader(dataset, batch_size=params['batch_size'])\r\n\r\n # start to prepare model\r\n start_prepare_model = int(time.time() * 1000)\r\n # step 1: download model weight from minio\r\n # debug: download_from_minio('models', '%s.pth' % model_name)\r\n # step 2: load model\r\n model = torch.load('%s.pth' % model_name)\r\n model = model.to(device)\r\n model.train()\r\n\r\n # start to train\r\n start_train = int(time.time() * 1000)\r\n # step 1: define loss function and optimizer\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\r\n # step 2: train the network\r\n trainer(model, dataloader, criterion, optimizer, device)\r\n\r\n end_train = int(time.time() * 1000)\r\n\r\n result.update({\r\n 'device': device == torch.device('cuda'),\r\n 'prepare_data': start_prepare_model - start_prepare_data,\r\n 'prepare_model': start_train - start_prepare_model,\r\n 'predict': end_train - start_train\r\n })\r\n\r\n # finish supervising\r\n time.sleep(10)\r\n os.system('bash ./Kill.sh')\r\n\r\n return result\r\n\r\nif __name__ == \"__main__\":\r\n response = main({\"data_object\": \"./\", \"device\": \"gpu\", \"batch_size\": 16})\r\n print(response)\r\n","repo_name":"jasong-ovo/jasongAISIG","sub_path":"supervisingTask/trainingCode/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12571765112","text":"import timeit\nimport sys\nfrom cStringIO import StringIO\n\nimport redirfile\n\n\nclass RedirSys(object):\n def __init__(self, fd=redirfile.STDOUT):\n self.fd = fd\n if fd not in (1,2):\n raise ValueError\n self.streamname = [None, 'stdout', 'stderr'][fd]\n \n self.started = False\n\n def start(self):\n if not self.started:\n self.buffer = StringIO()\n self.oldstream = getattr(sys, self.streamname)\n setattr(sys, self.streamname, self.buffer)\n self.started = True\n\n def stop(self):\n if self.started:\n setattr(sys, self.streamname, self.oldstream)\n self.started = False\n return self.buffer.getvalue()\n else:\n return None\n\ntfile = timeit.Timer(\"rf.start(); print text; rf.stop()\", \nsetup=\"\"\"\nfrom redirfile import Redirector\nrf = Redirector()\ntext = 'A'*4192\n\"\"\",\n)\ntsys = timeit.Timer(\"rf.start(); print text; rf.stop()\",\nsetup=\"\"\"\nfrom timetest import RedirSys\nrf = RedirSys()\ntext = 'A'*4192\n\"\"\",\n)\ntfile2 = timeit.Timer(\"rf.start(); print text; rf.stop()\",\nsetup=\"\"\"\nfrom redirfile import RedirectorOneFile\nrf = RedirectorOneFile()\ntext = 'A'*4192\n\"\"\",\n)\n","repo_name":"rkern/redir","sub_path":"timetest.py","file_name":"timetest.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} 
+{"seq_id":"73202276692","text":"#\n# Dan C\n# Data Scienct Boot Camp\n# python-challenge PyBank\n#\nimport os\nimport csv\n\n# Setup the path and open the Results file\npybank = os.path.join('.','analysis', 'PyBank.txt')\npybankwriter = open(pybank,'w')\n\ncsvpath = os.path.join('.', 'Resources', 'budget_data.csv')\n\n# Open the CSV file, read a row and push past the header \nwith open(csvpath) as csvfile:\n\n csvreader = csv.reader(csvfile, delimiter=',')\n csv_header = next(csvreader)\n\n # Create some vars and zero them\n pnl_total = 0\n change_sum = 0\n previous_pnl = 0\n cnt = 0\n\n # Create 2 dicts - one for the whole dataset, the other for the change deltas\n pnls = {}\n avg = {}\n\n # Process the cvs file, count th rows, sum total pnl\n for row in csvreader:\n cnt += 1\n pnls[row[0]] = int(row[1])\n pnl_total = pnl_total + int(row[1])\n\n # Load up the monthly change dict \n if cnt > 1 :\n avg[row[0]] =int(row[1]) - previous_pnl\n change_sum = change_sum + int(row[1]) - previous_pnl\n previous_pnl = pnls.get(row[0])\n \n \n # Print to the console \n print(\"\\n Financial Analysis \\n\",\"-----------------------------\")\n print(\" Total Months:\", csvreader.line_num - 1)\n print(\" Total: \", '${0}'.format(pnl_total))\n print(\" Average Change: \", \"${:.2f}\".format(change_sum / (csvreader.line_num - 1)))\n grt_inc = max(avg, key=avg.get)\n print(\" Greatest Increase in Profits:\",grt_inc,\"(${0})\".format(avg[grt_inc]))\n grt_dec = min(avg, key=avg.get)\n print(\" Greatest Decrease in Profits:\",grt_dec,\"(${0})\".format(avg[grt_dec]))\n \n # Print to the txt file\n print(\"\\n Financial Analysis \\n\",\"-----------------------------\", file = pybankwriter)\n print(\" Total Months:\", csvreader.line_num - 1, file = pybankwriter)\n print(\" Total: \", '${0}'.format(pnl_total), file = pybankwriter)\n print(\" Average Change: \", \"${:.2f}\".format(change_sum / (csvreader.line_num - 1)), file = pybankwriter)\n print(\" Greatest Increase in Profits:\",grt_inc,\"(${0})\".format(avg[grt_inc]), file = pybankwriter)\n print(\" Greatest Decrease in Profits:\",grt_dec,\"(${0})\".format(avg[grt_dec]), file = pybankwriter)","repo_name":"usa4148/python_challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33405476241","text":"import os\nimport pandas as pd\nimport numpy as np\n\nfrom Terminal import Terminal\n\n\nclass DataFrameEditorTerminal(Terminal):\n def __init__(self):\n super().__init__()\n\n self.filepath = None\n self.df = None\n self.index = None\n\n self.initialize_commands()\n self.save_status = True\n \n def initialize_commands(self):\n super().initialize_commands()\n \n self.add_command(\"f\", self.process_filepath_input, \"Change filepath to arg if given, else show current filepath.\")\n self.add_command(\"s\", self.change_save_status, \"Turn on saving to file if arg is 1, else turn off if arg is 0.\")\n self.add_command(\"i\", self.process_index_input, \"Change DataFrame index being edited if arg given, else show current.\")\n self.add_command(\"c\", self.print_columns, \"Print sorted list of columns in the df.\")\n self.add_command(\"nan\", self.print_nan_columns, \"Print columns of this row that are NaN.\")\n\n def process_filepath_input(self, s=None):\n if s is None:\n return self.filepath\n\n if not s.endswith(\".csv\"):\n print(\"Only CSV is supported right now.\")\n return\n\n if os.path.exists(s):\n try:\n df = 
pd.read_csv(s, index_col=\"Index\")\n except Exception as e:\n print(\"Exception raised in reading this csv:\")\n print(e.__class__.__name__, e)\n return\n self.filepath = s\n self.df = df\n else:\n self.filepath = s\n self.df = pd.DataFrame()\n\n def process_index_input(self, s=None):\n if s is None:\n return self.index\n if self.df is None:\n print(\"You have no df.\")\n return\n\n if s not in self.df.index:\n print(\"That row does not exist. New row will be added when you populate a column.\")\n\n self.index = s\n\n def change_save_status(self, a=None):\n return self.change_binary_attribute(\"save_status\", a)\n\n def print_columns(self):\n if self.df is None:\n print(\"You have no df.\")\n return\n\n for col in sorted(self.df.columns.values):\n print(col)\n\n def print_nan_columns(self):\n if self.df is None:\n print(\"You have no df.\")\n return\n if self.index is None:\n print(\"You have no row selected.\")\n return\n cols = [x for x in self.df.columns.values if np.isnan(self.df.loc[self.index, x])]\n for col in cols:\n print(col)\n\n def process_normal_input(self, s):\n if \"=\" not in s:\n if s in self.df.columns:\n return self.df.loc[self.index, s]\n else:\n print(\"Column not found. You can set new values in the form column_name=value\")\n return\n try:\n col, val = s.split(\"=\")\n except ValueError:\n print(\"input should be in the form column_name=value\")\n return\n\n col = col.strip()\n if col not in self.df.columns:\n print(\"Column not found. It will be created when you set a value.\")\n try:\n val = float(val)\n except ValueError:\n print(\"invalid value for float: {}\".format(val))\n return\n\n self.df.loc[self.index, col] = val\n self.save()\n\n def save(self):\n self.df = self.df.set_index(self.df.index.rename(\"Index\"))\n self.df.to_csv(self.filepath)\n\n\nif __name__ == \"__main__\":\n terminal = DataFrameEditorTerminal()\n terminal.run()","repo_name":"Kuhron/programming","sub_path":"DataFrameEditorTerminal.py","file_name":"DataFrameEditorTerminal.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"26221072592","text":"\"\"\"Change pypi & conda source mirrors.\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport time\nfrom urllib.parse import urlparse\n\nimport click\n\n\ndef modify_pypi_mirror():\n # https://pypi.org/\n\n mirrors = [\n \"https://pypi.douban.com/simple\",\n \"https://mirrors.ustc.edu.cn/pypi/web/simple/\",\n \"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/\",\n \"https://mirrors.aliyun.com/pypi/simple/\", # very slow\n ]\n\n packages = [\"httpie\", \"scapy\"]\n\n uninstall_args = [\"pip\", \"uninstall\", \"-y\", *packages]\n\n install_args = [\"pip\", \"install\", *packages, \"--no-cache-dir\", \"-i\"]\n\n min_cost = 0\n mirror = mirrors[0]\n\n for index in range(len(mirrors)):\n subprocess.check_call(uninstall_args)\n\n ts = time.perf_counter()\n subprocess.check_call([*install_args, mirrors[index]])\n te = time.perf_counter()\n\n cost = te - ts\n click.echo(f\"[{cost:.02f}] {mirrors[index]}\")\n\n if min_cost == 0 or cost < min_cost:\n min_cost = cost\n mirror = mirrors[index]\n\n if min_cost < 10:\n break\n\n click.echo(f\"[faster] {mirror}\")\n\n user = os.path.expanduser(\"~\")\n conf = os.path.join(user, \".pip/pip.conf\")\n\n if sys.platform.startswith(\"win\"):\n conf = os.path.join(user, \"AppData/Roaming/pip/pip.ini\")\n\n os.makedirs(os.path.dirname(conf), exist_ok=True)\n\n with open(conf, \"w\", encoding=\"utf-8\", 
newline=\"\\n\") as fp:\n fp.write(\n \"[global]\\n\"\n f\"index-url={mirror}\\n\"\n \"[install]\\n\"\n f\"trusted-host={urlparse(mirror).netloc}\\n\",\n )\n\n\ndef modify_conda_mirror():\n # https://www.anaconda.com/\n\n conf = os.path.join(os.path.expanduser(\"~\"), \".condarc\")\n\n with open(conf, \"w\", encoding=\"utf-8\", newline=\"\\n\") as fp:\n fp.write(\n \"channels:\\n\"\n \" - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/\\n\"\n \" - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/\\n\"\n \" - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/\\n\"\n \" - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/\\n\"\n \" - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch3d/\\n\"\n \"ssl_verify: false\\n\"\n \"auto_activate_base: false\\n\"\n )\n\n\n@click.command(help=\"Change pypi & conda mirrors.\")\ndef modify_python_mirror():\n modify_pypi_mirror(), modify_conda_mirror()\n","repo_name":"fujiawei-dev/toolkit-py","sub_path":"toolkit/scaffold/mirror/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5573818885","text":"from django import forms\nfrom orders.models import Order\n\n\nclass OrderForm(forms.ModelForm):\n class Meta:\n model = Order\n fields = ('full_name', 'email', 'phone_number',\n 'street_address1', 'street_address2', 'town_or_city',\n 'postcode', 'country',)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Add placeholders and classes, remove auto generated labels and set \\\n autofocus on first field\n \"\"\"\n super().__init__(*args, **kwargs)\n placeholders = {\n 'full_name': 'Full Name',\n 'email': 'Email Address',\n 'phone_number': 'Phone Number',\n 'postcode': 'Postal Code',\n 'town_or_city': 'Town or City',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n }\n self.fields['full_name'].widget.attrs['autofocus'] = True\n\n for field in self.fields:\n if field != 'country':\n # If the field is required, add a star\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n # set placeholder values as per above\n self.fields[field].widget.attrs['placeholder'] = placeholder\n # Add the css class we haven't created yet\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'\n # Remove labels\n self.fields[field].label = False\n","repo_name":"hartnetl/ridley-me-this-2","sub_path":"checkout/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"17484861883","text":"# © 2018 - today Numigi (tm) and all its contributors (https://bit.ly/numigiens)\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\nfrom odoo.exceptions import ValidationError\nfrom odoo.tests import common\n\n\nclass TestSendDeclarationSurveyByEmail(common.SavepointCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.survey = cls.env.ref('hr_employee_declaration.demo_survey')\n cls.mail_template = cls.env.ref('hr_employee_declaration.demo_mail_template')\n cls.mail_template.auto_delete = False\n cls.employee_user = cls.env['res.users'].create({\n 'name': 'Employee',\n 'login': 'employee',\n 'email': 'employee@example.com',\n })\n cls.employee = cls.env['hr.employee'].create({\n 'name': 'John Doe',\n 'user_id': cls.employee_user.id,\n 
'declaration_survey_id': cls.survey.id,\n 'declaration_mail_template_id': cls.mail_template.id,\n })\n cls.manager_user = cls.env['res.users'].create({\n 'name': 'Employee',\n 'login': 'manager',\n 'email': 'manager@example.com',\n })\n cls.manager = cls.env['hr.employee'].create({\n 'name': 'Manager',\n 'user_id': cls.manager_user.id,\n })\n\n def test_ifSendToIsEmpty_sendSurveyToEmployee(self):\n self.employee.send_declaration_survey_by_email()\n declaration = self.employee.declaration_ids\n\n assert declaration.partner_id\n assert declaration.partner_id == self.employee.user_id.partner_id\n\n def test_ifSendToIsFilled_sendSurveyToTargetEmployee(self):\n self.employee.declaration_recipient_id = self.manager\n self.employee.send_declaration_survey_by_email()\n declaration = self.employee.declaration_ids\n\n assert declaration.partner_id\n assert declaration.partner_id == self.manager.user_id.partner_id\n\n def test_ifMailTemplateIdNotSelected_raiseValidationError(self):\n self.employee.declaration_mail_template_id = False\n with self.assertRaises(ValidationError):\n self.employee.send_declaration_survey_by_email()\n\n def test_ifSurveyNotSelected_raiseValidationError(self):\n self.employee.declaration_survey_id = False\n with self.assertRaises(ValidationError):\n self.employee.send_declaration_survey_by_email()\n\n def test_ifEmployeeHasNoUser_raiseValidationError(self):\n self.employee.user_id = False\n with self.assertRaises(ValidationError):\n self.employee.send_declaration_survey_by_email()\n\n def test_ifCustomRecipientHasNoUser_raiseValidationError(self):\n self.employee.declaration_recipient_id = self.manager\n self.manager.user_id = False\n with self.assertRaises(ValidationError):\n self.employee.send_declaration_survey_by_email()\n\n def test_token_is_added_to_email(self):\n self.employee.send_declaration_survey_by_email()\n\n mail = self.env['mail.mail'].search([\n ('partner_ids', '=', self.employee_user.partner_id.id),\n ], limit=1, order='id desc')\n\n assert self.employee.declaration_ids.token\n assert self.employee.declaration_ids.token in mail.body\n assert self.employee.declaration_ids.token in mail.body_html\n\n def test_mako_fields_are_converted_in_body(self):\n self.employee.send_declaration_survey_by_email()\n\n mail = self.env['mail.mail'].search([\n ('partner_ids', '=', self.employee_user.partner_id.id),\n ], limit=1, order='id desc')\n\n assert self.employee.display_name in mail.body\n\n def test_mako_fields_are_converted_in_subject(self):\n self.employee.declaration_recipient_id = self.manager\n self.employee.send_declaration_survey_by_email()\n\n mail = self.env['mail.mail'].search([\n ('partner_ids', '=', self.manager_user.partner_id.id),\n ], limit=1, order='id desc')\n\n assert self.manager.display_name in mail.subject\n","repo_name":"Numigi/odoo-hr-addons","sub_path":"hr_employee_declaration/tests/test_send_declaration_survey_by_email.py","file_name":"test_send_declaration_survey_by_email.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"42109839966","text":"def get_prime(n):\n prime = [True] * n\n for i in range(2, int(n**0.5) + 1):\n if prime[i] == True:\n for j in range(i+i, n, i):\n prime[j] = False\n return [i for i in range(2, n) if prime[i] == True]\n\ndef solution(n):\n answer = len(get_prime(n+1))\n \n return answer","repo_name":"KyoungnamMin/ProblemSolve","sub_path":"Programmers/소수 찾기 에라토스체.py","file_name":"소수 찾기 
에라토스체.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12722593343","text":"from typing import Optional, Dict, Any, List\nimport logging\n\nfrom syne_tune.backend import LocalBackend\nfrom syne_tune.callbacks.hyperband_remove_checkpoints_callback import (\n HyperbandRemoveCheckpointsCommon,\n)\nfrom syne_tune.constants import ST_TUNER_TIME\nfrom syne_tune.experiments import load_experiment\nfrom syne_tune.optimizer.baselines import MOBSTER\nfrom syne_tune.results_callback import ExtraResultsComposer, StoreResultsCallback\nfrom syne_tune.util import find_first_of_type\nfrom syne_tune import Tuner, StoppingCriterion\n\nfrom benchmarking.benchmark_definitions.mlp_on_fashionmnist import (\n mlp_fashionmnist_benchmark,\n)\n\n\n# This is used to monitor what the checkpoint removal mechanism is doing, and\n# writing out results. This is optional, the mechanism works without this.\nclass CPRemovalExtraResults(ExtraResultsComposer):\n def __call__(self, tuner: Tuner) -> Optional[Dict[str, Any]]:\n callback = find_first_of_type(tuner.callbacks, HyperbandRemoveCheckpointsCommon)\n return None if callback is None else callback.extra_results()\n\n def keys(self) -> List[str]:\n return HyperbandRemoveCheckpointsCommon.extra_results_keys()\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.DEBUG)\n random_seed = 31415927\n n_workers = 4\n max_num_checkpoints = 10\n # This time may be too short to see positive effects:\n max_wallclock_time = 1800\n # Monitor how checkpoint removal is doing over time, appending this\n # information to results.csv.zip?\n monitor_cp_removal_in_results = True\n\n # We pick the MLP on FashionMNIST benchmark\n benchmark = mlp_fashionmnist_benchmark()\n\n # Local backend\n # By setting ``delete_checkpoints=True``, we ask for checkpoints to be removed\n # once a trial cannot be resumed anymore\n trial_backend = LocalBackend(\n entry_point=str(benchmark.script),\n delete_checkpoints=True,\n )\n\n # MOBSTER (model-based ASHA) with promotion scheduling (pause and resume).\n # Checkpoints are written for each paused trial, and these are not removed,\n # because in principle, every paused trial may be resumed in the future.\n # If checkpoints are large, this may fill up your disk.\n # Here, we use speculative checkpoint removal to keep the number of checkpoints\n # to at most ``max_num_checkpoints``. To this end, paused trials are ranked by\n # expected cost of removing their checkpoint.\n scheduler = MOBSTER(\n benchmark.config_space,\n type=\"promotion\",\n max_resource_attr=benchmark.max_resource_attr,\n resource_attr=benchmark.resource_attr,\n mode=benchmark.mode,\n metric=benchmark.metric,\n random_seed=random_seed,\n early_checkpoint_removal_kwargs=dict(\n max_num_checkpoints=max_num_checkpoints,\n ),\n )\n\n stop_criterion = StoppingCriterion(max_wallclock_time=max_wallclock_time)\n # The tuner activates early checkpoint removal iff\n # ``trial_backend.delete_checkpoints``. In this case, it requests details\n # from the scheduler (which is ``early_checkpoint_removal_kwargs`` in our\n # case). 
Early checkpoint removal is done by appending a callback to those\n # normally used with the tuner.\n if monitor_cp_removal_in_results:\n # We can monitor how well checkpoint removal is working by storing\n # extra results (this is optional)\n extra_results_composer = CPRemovalExtraResults()\n callbacks = [\n StoreResultsCallback(extra_results_composer=extra_results_composer)\n ]\n else:\n extra_results_composer = None\n callbacks = None\n tuner = Tuner(\n trial_backend=trial_backend,\n scheduler=scheduler,\n stop_criterion=stop_criterion,\n n_workers=n_workers,\n callbacks=callbacks,\n )\n tuner.run()\n\n if monitor_cp_removal_in_results:\n # We have monitored how checkpoint removal has been doing over time. Here,\n # we just look at the information at the end of the experiment\n results_df = load_experiment(tuner.name).results\n final_pos = results_df.loc[:, ST_TUNER_TIME].argmax()\n final_row = dict(results_df.loc[final_pos])\n extra_results_at_end = {\n name: final_row[name] for name in extra_results_composer.keys()\n }\n logging.info(f\"Extra results at end of experiment:\\n{extra_results_at_end}\")\n\n # We can obtain additional details from the callback, which is the last one\n # in ``tuner``\n callback = find_first_of_type(tuner.callbacks, HyperbandRemoveCheckpointsCommon)\n trials_resumed = callback.trials_resumed_without_checkpoint()\n if trials_resumed:\n logging.info(\n f\"The following {len(trials_resumed)} trials were resumed without a checkpoint:\\n{trials_resumed}\"\n )\n else:\n logging.info(\"No trials were resumed without a checkpoint\")\n","repo_name":"awslabs/syne-tune","sub_path":"examples/launch_fashionmnist_checkpoint_removal.py","file_name":"launch_fashionmnist_checkpoint_removal.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"39561928598","text":"import requests\nfrom bs4 import BeautifulSoup\n\nFALL_2010 = 1109\nFALL_2013 = 1139\nFALL_2019 = 1199\nWINTER_2020 = 1201\nUNI = 'University of Waterloo'\n\ndef extract_info(university_name, professor_name):\n url = requests.get('https://www.ratemyprofessors.com/search.jsp?queryoption=HEADER&queryBy=teacherName&schoolName={}&schoolID=&query={}'.format(university_name.replace(' ', '+'), professor_name.replace(' ','+')))\n html = url.text\n tid_url = ''\n soup = BeautifulSoup(html, features='lxml')\n line = soup.find_all('li', class_=\"listing PROFESSOR\")\n if len(line) == 0:\n return 'No score found!'\n line = line[0].find_all('a', href=True)\n tid_url = line[0]['href']\n if tid_url == '':\n return 'No score found'\n #print(tid_url)\n url = requests.get('https://www.ratemyprofessors.com' + str(tid_url))\n html = url.text\n soup = BeautifulSoup(html, features='lxml')\n line = soup.find_all('div', class_=\"RatingValue__Numerator-qw8sqy-2 gxuTRq\")\n if len(line) == 0:\n return 'No score found'\n return line[0].text\n\ndef generate_terms(start, end):\n term_to_generate = start\n list_of_terms = []\n while term_to_generate <= end:\n list_of_terms.append(term_to_generate)\n term_to_generate_str = str(term_to_generate)\n if term_to_generate_str[-1] == '9':\n term_to_generate = term_to_generate + 2\n else:\n term_to_generate = term_to_generate + 4\n return list_of_terms\n\ndef term_to_string(val):\n term = str(val)\n ret = ''\n year = 0\n if term[0] == '0':\n year = 1900\n else:\n year = 2000\n if term[3] == '1':\n ret = 'Winter'\n elif term[3] == '5':\n ret = 'Spring'\n elif term[3] == '9':\n ret = 'Fall'\n year = year 
+ int(term[1:3])\n ret = ret + ' ' + str(year)\n return ret\n\ndef read_inputs():\n level = ''\n subject = ''\n coursenum = ''\n while (level != 'U' and level != 'G'):\n print(\"Please enter either U for Undergraduate course schedule or G for Graduate course schedule\")\n level = input()\n if level == 'U':\n level = 'under'\n else:\n level = 'grad'\n print(\"Please enter the subject code for the course you wish to search (e.g. CS, ECON, MATH... etc)\")\n subject = input()\n subject = subject.upper()\n print(\"Please enter the course number for the course you wish to search (e.g. 135, 101, 245, ... etc)\")\n coursenum = input()\n return level, subject, coursenum\n\ndef process_requests(level, subject, coursenum):\n fall = {}\n winter = {}\n spring = {}\n fall_last_taught = {}\n winter_last_taught = {}\n spring_last_taught = {}\n term = 0\n base_url = 'https://info.uwaterloo.ca/cgi-bin/cgiwrap/infocour/salook.pl?sess='\n other_url = '&level=%s&subject=%s&cournum=%s' % (level, subject, coursenum)\n list_of_terms = generate_terms(FALL_2013, WINTER_2020)\n for sess in list_of_terms:\n url = requests.get(base_url + str(sess) + other_url)\n html = url.text\n soup = BeautifulSoup(html, features='lxml')\n line = soup.find_all('tr')\n list_of_profs = []\n for each in line:\n line2 = each.find_all('td')\n if 'LEC' in str(line2) and 'TST' not in str(line2):\n line3 = ''\n for each2 in line2:\n line3 = each2.text\n if ',' in str(line3):\n prof = str(line3)\n if prof not in list_of_profs:\n list_of_profs.append(prof)\n if term == 0:\n for prof in list_of_profs:\n if prof in fall:\n fall[prof] = fall[prof] + 1\n else:\n fall[prof] = 1\n fall_last_taught[prof] = sess\n elif term == 1:\n for prof in list_of_profs:\n if prof in winter:\n winter[prof] = winter[prof] + 1\n else:\n winter[prof] = 1\n winter_last_taught[prof] = sess\n elif term == 2:\n for prof in list_of_profs:\n if prof in spring:\n spring[prof] = spring[prof] + 1\n else:\n spring[prof] = 1\n spring_last_taught[prof] = sess\n else:\n print(\"NO\")\n term = (term + 1) % 3\n\n print('Here\\'s the trend of ' + subject + coursenum + ' from Fall 2013 to Winter 2020')\n print('\\n' + 'FALL: ' + '\\n')\n for prof in sorted(fall, key=fall.get, reverse=True):\n prof_split = prof.split(',')\n score = extract_info(UNI, prof_split[1] + ' ' + prof_split[0])\n print(prof + ' : ' + str(fall[prof]) + '(' + str(score) + ')')\n print('(Last taught: ' + term_to_string(fall_last_taught[prof]) + ')')\n print('\\n' + 'WINTER: ' + '\\n')\n for prof in sorted(winter, key=winter.get, reverse=True):\n prof_split = prof.split(',')\n score = extract_info(UNI, prof_split[1] + ' ' + prof_split[0])\n print(prof + ' : ' + str(winter[prof]) + '(' + str(score) + ')')\n print('(Last taught: ' + term_to_string(winter_last_taught[prof]) + ')')\n print('\\n' + 'SPRING: ' + '\\n')\n for prof in sorted(spring, key=spring.get, reverse=True):\n prof_split = prof.split(',')\n score = extract_info(UNI, prof_split[1] + ' ' + prof_split[0])\n print(prof + ' : ' + str(spring[prof]) + '(' + str(score) + ')')\n print('(Last taught: ' + term_to_string(spring_last_taught[prof]) + ')')\n \ndef main():\n level, subject, coursenum = read_inputs()\n process_requests(level, subject, coursenum)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MingyuP/UWClassTrend","sub_path":"UWClassTrend.py","file_name":"UWClassTrend.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6800434674","text":"from datetime import datetime\r\n\r\nfrom Domain.tranzactie import Tranzactie\r\n\r\n\r\nclass TranzactieValidator:\r\n def valideaza(self, tranzactie: Tranzactie):\r\n try:\r\n datetime.strptime(tranzactie.datasiora, '%d.%m.%Y %H:%M')\r\n except ValueError:\r\n raise ValueError(\"Formatul datei trebuie sa fie: DD.MM.YYYY H:M\")\r\n","repo_name":"oargaclaudia/Python-Projects","sub_path":"lab-8910-oargaclaudia/Domain/tranzactieValidator.py","file_name":"tranzactieValidator.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16386281951","text":"'''\nLoad data from tsv files with column description from json file to elasticsearch. Create index for data (dlrmetadata),\ncolumn descriptions (columndescription) and data table (dataoverview). Upload new data to index / update existing data.\n'''\n\nfrom elasticsearch import helpers, Elasticsearch\nimport json\nimport glob, os, sys\nimport argparse\nimport datetime\nimport pandas as pd\nimport numpy as np\n\nfrom outlierEvenMore import timeConv, delMissesAndOutliers, markMissesAndOutliers\n\n\ndef getfieldnames(f, data):\n\treturn [w.replace('.','') for w in data['columns']]\n\n\ndef getConstants(f, data):\n\t'''\n\tadd constants from .json file\n\t'''\n\tfieldnames = []\n\tvalues = []\n\tfor const in data['constants']:\n\t\tfieldnames += [const['id']]\n\t\tvalues += [const['value']]\n\tconst = dict(zip(fieldnames, values))\n\treturn const\n\n\n#ToDo : delete get_location since not used anymore\ndef get_location(row):\n\tfor _,item in row.iteritems():\n\t\tif type(item)==str and item.startswith(\"POLYGON((\"):\n\t\t\t#POLYGON((lon lat, lon2 lat2, ...., lonN latN))\n\t\t\tlocation = [[float(x) for x in pair.strip().split(\" \")] for pair in item[9:-2].split(\",\")]\n\t\t\treturn {\"type\":\"polygon\", \"coordinates\":[location]}\n\t\t\t#return {\"type\": \"polygon\", \"coordinates\": [[[1.0,1.0],[1.0,10.0],[10.0,10.0],[10.0,1.0],[1.0,1.0]]]}\n\t\telif type(item)==str and item.startswith(\"POINT(\"):\n\t\t\tlocation = [float(x) for x in item[6:-1].split(\" \")]\n\t\t\treturn {\"type\": \"point\", \"coordinates\":[location]}\n\t\treturn {\"type\": \"polygon\", \"coordinates\": [[[1.0,1.0],[1.0,10.0],[10.0,10.0],[10.0,1.0],[1.0,1.0]]]}\n\n\n# ToDo: clean_row since not used anymore\ndef clean_row(row):\n\tfor idx,item in row.iteritems():\n\t\tif type(item)==str and item.startswith(\"POLYGON((\"):\n\t\t\trow[idx].replace('N(', 'N (')\n\t\telif type(item)==str and item.startswith(\"POINT(\"):\n\t\t\trow[idx].replace('T(', 'T (')\n\t\treturn row\n\n\ndef getpolygonmean(polygon):\n\tmeanlon, meanlat = None, None\n\tif type(polygon) == str and polygon.startswith(\"POLYGON((\"):\n\t p = polygon.strip(')POLYGON(').split(',')\n\t coords = np.array([[float(x) for x in ort.strip().split(' ')] for ort in p])\n\t meanlon, meanlat = coords.mean(axis=0)\n\telif type(polygon) == str and polygon.startswith(\"POINT(\"):\n\t\tmeanlon, meanlat = [float(x) for x in polygon[6:-1].split(\" \")]\n\treturn [meanlon, meanlat]\n\n\ndef bulk_action(df, INDEX_NAME, TYPE):\n\t'''\tPrepare generator for bulk upload\n\tInput:\n\t\tdf: pandas dataframe\n\t\tINDEX_NAME: elasticsearch index (dlrmetadata)\n\t\tTYPE: elasticsearch document type (doc)\n\t'''\n\tfor idx, row in df.iterrows():\n\t\trow = clean_row(row)\n\t\tyield {\n\t\t\t'_op_type': 'index',\n\t\t\t'_index': INDEX_NAME,\n\t\t\t'_id': row[0],\n\t\t\t'_type': TYPE,\n\t\t\t'_source': 
row.to_json()\n\t\t\t\t#'location': get_location(row)\n\t\t}\n\n\ndef addDocument(data, meta, filename, INDEX_NAME, TYPE, es):\n\t'''\tBulk upload documents to dlrmetadata index\n\tINPUT:\n\t\tdata: opened tsv file\n\t\tmeta: opened json file\n\t\tfilename: string with filename\n\t\tINDEX_NAME: elasticsearch index to add data to\n\t\tTYPE: elasticsearch type for index\n\t\tes: elasticsearch client\n\t'''\n\tdata.seek(0)\n\tmeta.seek(0)\n\tmetadata = json.loads(meta.read().decode('utf-8').replace('\\0', ''))\n\tmeta.seek(0)\n\tfieldnames = getfieldnames(meta, metadata)\n\tmeta.seek(0)\n\tconstants = getConstants(meta, metadata)\n\tdf = pd.read_csv(data, names=fieldnames, sep='\\t', low_memory=False)\n\t#df = timeConv(df)\n\t#df = markMissesAndOutliers(df)\n\n\t# add filename to match constants and data and mission0 for simple selection later on\n\tdf['filename'] = filename\n\tif \"mission0\" in constants:\n\t\tdf[\"mission0\"] = constants[\"mission0\"]\n\tprint('DATA PREPARED FOR UPLOAD ....')\n\n\tfor idx in df.columns:\n\t\t# column isglobal0 is not only Bool type as stated in the description; not clear what the values mean ...\n\t\tif idx == 'isglobal0':\n\t\t\tdf.drop(columns='isglobal0', inplace=True)\n\t\telif type(df[idx][0]) == str and df[idx][0].startswith(\"POLYGON((\"):\n\t\t\t# Elasticsearch can't handle a polygon of the form 'POLYGON((0 0, 0 0, 0 0, 0 0, 0 0))', therefore changed to POINT\n\t\t\tdf[idx] = df[idx].apply(lambda x: x.replace(\"POLYGON((0 0, 0 0, 0 0, 0 0, 0 0))\", \"POINT(0 0)\") if type(x) == str else x)\n\t\t\tmeanlong, meanlat = np.array([getpolygonmean(x) for x in df[idx]]).T\n\t\t\tdf['polygonmeanlon'] = meanlong\n\t\t\tdf['polygonmeanlat'] = meanlat\n\n\t# Bulk upload files and check if successful\n\tsuccessid, failedid = 0, 0\n\tfailedids = []\n\tfor success, info in helpers.parallel_bulk(es, bulk_action(df, INDEX_NAME, TYPE),\n\t thread_count=4, chunk_size=50, raise_on_error=True):\n\n\n\t\tsuccessid += info['index']['_shards']['successful']\n\t\tfailedid += info['index']['_shards']['failed']\n\t\tif info['index']['_shards']['failed'] > 0:\n\t\t\tfailedids += [info['index']['_id']]\n\t\t#if not success:\n\t\t#\tfailed += 1\n\t\t#else:\n\t\t#\tsuccess += 1\n\tprint(data, 'added to elasticsearch index ', INDEX_NAME, '\\n',\n\t 'success: ', successid, 'failed: ', failedid)\n\treturn successid, failedid, failedids\n\n\ndef updateFile(datafile, metafile, filename, es):\n\t'''\n\tFunction is called when upload file button is clicked. Creates indexes for data (dlrmetadata), column descriptions\n\t(columndescription) and data table on data.html page (dataoverview). Uploads new data to index / updates existing data\n\tif filenames are equal.\n\tInput:\n\t\tdatafile: .tsv file containing data\n\t\tmetafile: .json file containing datafile information, esp. 
column names\n\t\tfilename: string with filename, since datafile and metafile are already open\n\t\tes: elasticsearch client\n\t'''\n\n\t# Get config files\n\tindexnames = json.load(open('../../elasticsearch/elconfig.json'))\n\n\t# Column description index creation; if it already exists, it is deleted and created again\n\n\tif not es.indices.exists(indexnames['COLUMNDESCRIPTION']) or indexnames['UPDATE_COLUMNDESCRIPTION'] == 'True':\n\t\tprint('Start uploading column description')\n\t\ttry:\n\t\t\tcolumnDescription = json.load(open('../../elasticsearch/_columnDescription.json'))\n\t\t\tfor entry in columnDescription:\n\t\t\t\tes.index(index=indexnames['COLUMNDESCRIPTION'], doc_type='doc', body=entry)\n\t\texcept (FileNotFoundError, FileExistsError):\n\t\t\tprint('_columnDescription must be located in vis_ttt/elasticsearch; relocate the file to continue')\n\t\t\tsys.exit(0)\n\n\n\t''' Index for data from .tsv file is created if it does not exist. To save disk space and speed up indexing we set number of\n\tshards to 1 and number of replicas to 0 as well as polygon precision set to 100km. For polygons ignore_malformed\n\tis necessary since some polygons are not well defined.\n\tDisk space required for polygon depending on precision (original file size: 2mB):\n\tprecision 100 km : 5 mb\n\tprecision 10 km : 27 mb\n\tprecision 1 km : too much (1117 docs.count: 375mb)\n\t'''\n\tprint('STARTED WITH ', filename, ' ....')\n\n\t# Create index for data if it does not exist\n\tif not es.indices.exists(indexnames['DATA']):\n\t\tdlrmetadatabody = {\n\t\t\t\t\"settings\": {\n\t\t\t\t\t\"number_of_shards\": 1,\n\t\t\t\t\t\"number_of_replicas\": 0,\n\t\t\t\t},\n\t\t\t\t\"mappings\":{\n\t\t\t\t\t\"doc\":{\n\t\t\t\t\t\t\"properties\":{\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t}\n\t\tcolumn_types_dict = {\n\t\t\t\"GeoObject\": {\n\t\t\t\t\"type\": \"text\" #if polygons are not used they could be uploaded as string\n\t\t\t\t#\"type\": \"geo_shape\",\n\t\t\t\t#\"precision\": \"100km\",\n\t\t\t\t#\"ignore_malformed\": True\n\t\t\t},\n\t\t\t\"Date\": {\n\t\t\t\t\"type\": \"date\",\n\t\t\t\t\"format\": \"yyyy-MM-dd HH:mm:ss.SSS\",\n\t\t\t\t\"ignore_malformed\": True\n\t\t\t},\n\t\t\t\"Double\": {\n\t\t\t\t\"type\": \"double\"\n\t\t\t},\n\t\t\t\"Integer\": {\n\t\t\t\t\"type\": \"integer\"\n\t\t\t},\n\t\t\t\"Character\": {\n\t\t\t\t\"type\": \"text\"\n\t\t\t},\n\t\t\t\"Identifier\": {\n\t\t\t\t\"type\": \"keyword\"\n\t\t\t},\n\t\t\t\"Boolean\": {\n\t\t\t\t\"type\": \"boolean\"\n\t\t\t},\n\t\t\t\"Text\": {\n\t\t\t\t\"type\": \"text\"\n\t\t\t}\n\t\t}\n\n\t\t# Insert column description depending on type into mappings\n\t\tcoldesc = json.load(open('../../elasticsearch/_columnDescription.json'))\n\t\tfor col in coldesc:\n\t\t\tif 'id' in col:\n\t\t\t\tcolumn_type = col['type']\n\t\t\t\tdlrmetadatabody['mappings']['doc']['properties'][str(col['id'])] = column_types_dict[column_type]\n\t\tes.indices.create(index=indexnames['DATA'], body=dlrmetadatabody)\n\t\tprint('DLRMETADATA INDEX CREATED ....')\n\n\t# Index exists or was created, now set variables for speed up\n\t#es.indices.put_settings(index='dlrmetadata', body={'index': {\"refresh_interval\" : '-1'}})\n\n\t# If file is updated, delete old data\n\tif es.indices.exists(indexnames['DATA']):\n\t\tes.delete_by_query(index=indexnames['DATA'], doc_type='doc', body={'query': {'match': {'filename': filename}}})\n\t\tprint('OLD DOCUMENTS DELETED ....')\n\n\t# Upload new documents\n\tprint('START INDEXING TO DLRMETADATA ....')\n\tsuccess, failed, failedids = addDocument(datafile, metafile, filename, 
INDEX_NAME=indexnames['DATA'], TYPE='doc', es=es)\n\n\t# Now update data overview\n\tmetadata = json.loads(metafile.read().decode('utf-8').replace('\\0', ''))\n\tdatafile.seek(0)\n\n\t# Delete data from table if exists\n\tif es.indices.exists(indexnames['DATAOVERVIEW']):\n\t\tes.delete_by_query(index=indexnames['DATAOVERVIEW'], doc_type='doc', body={'query': {'match': {'filename': filename}}})\n\n\t# Create metadata about filename\n\tnow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\tconstants = getConstants(metafile, metadata)\n\tfilenames = {'filename': filename.split('.')[0],\n\t 'size': len(datafile.read()),\n\t 'addDate': now,\n\t 'updateDate': now,\n\t 'success': success,\n\t 'failed': failed,\n\t 'failedids': failedids}\n\tfilenames.update(constants)\n\tes.index(index=indexnames['DATAOVERVIEW'], doc_type='doc', body=filenames)\n\tprint('ADDED ', filenames['filename'], ' TO DATAOVERVIEW ....')\n\n\tprint('FINISHED ', filename, '....')","repo_name":"maschill/vis_ttt","sub_path":"code/elasticSearch.py","file_name":"elasticSearch.py","file_ext":"py","file_size_in_byte":9469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5976952973","text":"import sys, pygame\n\nfrom alphabeta import alphabeta\nfrom connect4 import State, done, score, successor, draw\nfrom constants import *\n\n# ser = serial.Serial()\n# ser.timeout = 1\n# ser.port = '/dev/ttyACM0' \n# ser.open()\n\ndef get_play_options(state):\n options = []\n for j in range(0, state.width):\n for i in range(state.height - 1, -1, -1):\n if state.board[i][j] != EMPTY_SLOT: continue\n options.append((i, j))\n break\n return options\n\ndef end_game(status, current_state):\n if status == GAME_MIN_WINNER:\n sys.stdout.write('You win!\\n')\n elif status == GAME_MAX_WINNER:\n sys.stdout.write('Algorithm wins!\\n')\n elif status == GAME_DRAW:\n sys.stdout.write('It\\'s a draw!\\n')\n\n input()\n sys.exit(1)\n \ndef update_game(current_state, screen):\n status = done(current_state)\n\n draw(current_state, screen, grid_width, grid_height,\n field_size, circle_size)\n pygame.display.flip()\n\n sys.stdout.write(str(current_state) + '\\n')\n \n if status != GAME_NO_WINNER:\n end_game(status, current_state)\n\n# pygame parameters\ngrid_width = 510\ngrid_height = 510\n\nfield_size = 100\ncircle_size = 40\n\ndef human_vs_computer():\n current_state = State(width = 5, height = 5)\n\n pygame.init()\n\n screen = pygame.display.set_mode((grid_width, grid_height))\n game_over = False\n\n while not game_over:\n\n draw(current_state, screen, grid_width, grid_height,\n field_size, circle_size)\n\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n\n # Human Turn\n sys.stdout.write('\\n')\n print(current_state)\n options = get_play_options(current_state)\n for i in range(0, current_state.width):\n sys.stdout.write(' ' + str(i + 1) + ' ')\n sys.stdout.write('\\n ')\n\n # while True:\n # ard = ser.readline().decode().strip('\\r\\n')\n\n # if (ard != ''):\n # break\n\n # col = int(ard) - 1\n col = int(input()) - 1 \n\n for i, j in options:\n if col != j: continue\n current_state.board[i][j] = MIN_PLAYER\n\n update_game(current_state, screen)\n \n # AI Turn\n current_state, _score = alphabeta(current_state, 5, MAX_PLAYER, score, done, successor)\n update_game(current_state, screen)\n\n #draw(current_state, screen, grid_width, grid_height,\n # field_size, circle_size)\n\n pygame.quit()\n\n \nif __name__ == '__main__':\n 
human_vs_computer()\n","repo_name":"sara-knezevic/arduino_connect4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74862719253","text":"import json\nimport unittest\nfrom ddt import ddt, data, unpack\n\nfrom data_catalog.query_translation import ElasticSearchQueryTranslator, \\\n ElasticSearchFilterExtractor, ElasticSearchBaseQueryCreator, InvalidQueryError\nfrom unittest import TestCase\n\n\n@ddt\nclass FilterExtractorTests(TestCase):\n\n def setUp(self):\n self.filter_extractor = ElasticSearchFilterExtractor()\n\n # first uuids (list), then input filters (list),\n # then output query filters (json)\n # then output post_filters (json)\n # then dataset_filtering value (True, False, None)\n example_singleFilter_org = (\n ['org-id-001'],\n [{'format': ['csv']}],\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-001'}},\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n None\n )\n\n example_singleFilter_onlyPublic = (\n ['org-id-001'],\n [{'format': ['csv']}],\n {\n 'and': [\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n True\n )\n\n example_singleFilter_onlyPrivate = (\n ['org-id-001'],\n [{'format': ['csv']}],\n {\n 'and': [\n {'term': {'orgUUID': 'org-id-001'}},\n {'term': {'isPublic': 'false'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n False\n )\n\n example_multivaluedFilterQuery_org = (\n ['org-id-002'],\n [\n {'category': ['health', 'finance']}\n ],\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-002'}},\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'terms': {'category': ['health', 'finance']}}\n ]\n },\n None\n )\n\n example_multivaluedFilterQuery_onlyPublic = (\n ['org-id-002'],\n [\n {'category': ['health', 'finance']}\n ],\n {\n 'and': [\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'terms': {'category': ['health', 'finance']}}\n ]\n },\n True\n )\n\n example_multivaluedFilterQuery_onlyPrivate = (\n ['org-id-002'],\n [\n {'category': ['health', 'finance']}\n ],\n {\n 'and': [\n {'term': {'orgUUID': 'org-id-002'}},\n {'term': {'isPublic': 'false'}}\n ]\n },\n {\n 'and': [\n {'terms': {'category': ['health', 'finance']}}\n ]\n },\n False\n )\n\n example_multipleFilterQuery_org = (\n ['org-id-003'],\n [\n {'format': ['csv']},\n {'category': ['health']}\n ],\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-003'}},\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}},\n {'term': {'category': 'health'}}\n ]\n },\n None\n )\n\n example_multipleFilterQuery_onlyPublic = (\n ['org-id-003'],\n [\n {'format': ['csv']},\n {'category': ['health']}\n ],\n {\n 'and': [\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}},\n {'term': {'category': 'health'}}\n ]\n },\n True\n )\n\n example_multipleFilterQuery_onlyPrivate = (\n ['org-id-003'],\n [\n {'format': ['csv']},\n {'category': ['health']}\n ],\n {\n 'and': [\n {'term': {'orgUUID': 'org-id-003'}},\n {'term': {'isPublic': 'false'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}},\n {'term': {'category': 'health'}}\n ]\n },\n False\n )\n\n example_upperCaseFilterValue_org = (\n ['org-id-004'],\n [\n {'format': ['CSV']}\n ],\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-004'}},\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n None\n )\n\n 
example_upperCaseFilterValue_onlyPublic = (\n ['org-id-004', 'public'],\n [\n {'format': ['CSV']}\n ],\n {\n 'and': [\n {'term': {'isPublic': 'true'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n True\n )\n\n example_upperCaseFilterValue_onlyPrivate = (\n ['org-id-004'],\n [\n {'format': ['CSV']}\n ],\n {\n 'and': [\n {'term': {'orgUUID': 'org-id-004'}},\n {'term': {'isPublic': 'false'}}\n ]\n },\n {\n 'and': [\n {'term': {'format': 'csv'}}\n ]\n },\n False\n )\n\n example_fromToTimeQuery_org = (\n ['org-id-005'],\n [\n {'creationTime': ['2014-05-18', '2014-11-03']}\n ],\n {\n 'and': [\n {'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-005'}},\n {'term': {'isPublic': 'true'}}\n ]\n }\n\n ]\n },\n {},\n None\n )\n\n example_fromToTimeQuery_onlyPublic = (\n ['org-id-005'],\n [\n {'creationTime': ['2014-05-18', '2014-11-03']}\n ],\n {\n 'and': [\n {'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},\n {'term': {'isPublic': 'true'}}\n ]\n },\n {},\n True\n )\n\n example_fromToTimeQuery_onlyPrivate = (\n ['org-id-005'],\n [\n {'creationTime': ['2014-05-18', '2014-11-03']}\n ],\n {\n 'and': [\n {'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},\n {'term': {'orgUUID': 'org-id-005'}},\n {'term': {'isPublic': 'false'}}\n ]\n },\n {},\n False\n )\n\n example_beforeTimeQuery_org = (\n ['org-id-006'],\n [\n {'creationTime': [-1, '2014-11-03']}\n ],\n {\n 'and': [\n {'range': {'creationTime': {'to': '2014-11-03'}}},\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-006'}},\n {'term': {'isPublic': 'true'}}\n ]\n }\n\n ]\n },\n {},\n None\n )\n\n example_afterTimeQuery_org = (\n ['org-id-007'],\n [\n {'creationTime': ['2014-05-18', -1]}\n ],\n {\n 'and': [\n {'range': {'creationTime': {'from': '2014-05-18'}}},\n {\n 'or': [\n {'term': {'orgUUID': 'org-id-007'}},\n {'term': {'isPublic': 'true'}}\n ]\n }\n\n ]\n },\n {},\n None\n )\n\n @data(example_singleFilter_org,\n example_singleFilter_onlyPublic,\n example_singleFilter_onlyPrivate,\n example_multivaluedFilterQuery_org,\n example_multivaluedFilterQuery_onlyPublic,\n example_multivaluedFilterQuery_onlyPrivate,\n example_multipleFilterQuery_org,\n example_multipleFilterQuery_onlyPublic,\n example_multipleFilterQuery_onlyPrivate,\n example_upperCaseFilterValue_org,\n example_upperCaseFilterValue_onlyPublic,\n example_upperCaseFilterValue_onlyPrivate,\n example_fromToTimeQuery_org,\n example_fromToTimeQuery_onlyPublic,\n example_fromToTimeQuery_onlyPrivate,\n example_beforeTimeQuery_org,\n example_afterTimeQuery_org\n )\n @unpack\n def test_filterExtraction_properFilter_filterExtracted(self,\n org_uuid_list,\n input_filters,\n query_filters,\n post_filters,\n dataset_filtering):\n self._assert_filter_extraction_ddt(org_uuid_list,\n input_filters,\n query_filters,\n post_filters,\n dataset_filtering)\n\n example_nonListAsFilterValues = (\n {'filters': [{'filter name': 'filter value'}]},\n ['org-id-008'],\n True\n )\n\n example_nonDictAsFilter = (\n {'filters': ['not a dictionary']},\n ['org-id-09'],\n True\n )\n\n example_invalidFilterName = (\n {'filters': [{'nonexistent_mapping_field': ['some value']}]},\n ['org-id-010'],\n True\n )\n\n example_wrongNumberTimeParameters = (\n {'filters': [{'creationTime': ['2014-11-03', '2014-11-04', '2014-11-05']}]},\n ['org-id-011'],\n True\n )\n\n # @data(example_nonListAsFilterValues,\n # example_nonDictAsFilter,\n # example_invalidFilterName,\n # example_wrongNumberTimeParameters)\n # 
@unpack\n # def test_filterExtractionErrors_improperFilter_invalidQueryError(self,\n # invalid_filters,\n # org_uuid_list,\n # dataset_filtering):\n # with self.assertRaises(InvalidQueryError):\n # self.filter_extractor.extract_filter(invalid_filters, org_uuid_list, dataset_filtering)\n\n def _assert_filter_extraction_ddt(self,\n org_uuid_list,\n input_filters,\n test_query_filter,\n test_post_filter,\n dataset_filtering):\n \"\"\"input_filters -- Dictionary of list of dictionaries in a form:\n {'filters': [\n {filter_name: [filter_value_1, ...]},\n {filter_name2: [filter_value_2_1, ...]}\n ]}\"\"\"\n filters = {'filters': input_filters}\n output_filter, post_filter = self.filter_extractor.extract_filter(filters, org_uuid_list, dataset_filtering, False)\n self.assertDictEqual(test_query_filter, output_filter)\n self.assertDictEqual(test_post_filter, post_filter)\n\n\nclass ElasticSearchBaseQueryCreationTests(TestCase):\n MATCH_ALL = {'match_all': {}}\n\n def setUp(self):\n self.query_creator = ElasticSearchBaseQueryCreator()\n\n def test_baseQueryCreation_textQueryProvided_baseQueryCreated(self):\n TEXT = 'some text query'\n proper_base_query = {\n 'bool': {\n 'should': [\n {\n 'wildcard': {\n 'title': {\n 'value': '*{}*'.format(TEXT),\n 'boost': 3\n }\n }\n },\n {\n 'match': {\n 'dataSample': {\n 'query': TEXT,\n 'boost': 2\n }\n }\n },\n {\n 'match': {\n 'sourceUri': {\n 'query': TEXT,\n }\n }\n }\n ]\n }\n }\n\n self.assertDictEqual(\n proper_base_query,\n self.query_creator.create_base_query({'query': TEXT}))\n\n def test_baseQueryCreation_noQueryElement_matchAllReturned(self):\n self.assertDictEqual(\n self.MATCH_ALL,\n self.query_creator.create_base_query({}))\n\n def test_baseQueryCreation_emptyQuery_matchAllReturned(self):\n self.assertDictEqual(\n self.MATCH_ALL,\n self.query_creator.create_base_query({'query': ''}))\n\n\nclass ElasticSearchQueryTranslationTests(TestCase):\n def setUp(self):\n self.translator = ElasticSearchQueryTranslator()\n self.org_uuid = ['orgid007']\n\n def test_queryTranslation_sizeInQuery_sizeAddedToOutput(self):\n SIZE = 123\n size_query = json.dumps({'size': SIZE})\n\n translated_query = self.translator.translate(size_query, self.org_uuid, None, False)\n\n self.assertEqual(SIZE, json.loads(translated_query)['size'])\n\n def test_queryTranslation_fromInQuery_fromAddedToOutput(self):\n FROM = 345\n from_query = json.dumps({'from': FROM})\n\n translated_query = self.translator.translate(from_query, self.org_uuid, True, False)\n\n self.assertEqual(FROM, json.loads(translated_query)['from'])\n\n def test_combiningQueryAndFilter_queryWithFilter_filteredQueryCreated(self):\n FAKE_BASE_QUERY = {'yup': 'totally fake'}\n FAKE_FILTER = {'uhuh': 'this filter is also fake'}\n FAKE_POST_FILTER = {'hello': 'fake filter'}\n expected_query = {\n 'query': {\n 'filtered': {\n 'filter': FAKE_FILTER,\n 'query': FAKE_BASE_QUERY\n }\n },\n 'post_filter': FAKE_POST_FILTER,\n 'aggregations': {\n 'categories': {\n 'terms': {\n 'size': 100,\n 'field': 'category'\n }\n },\n 'formats': {\n 'terms': {\n 'field': 'format'\n }\n }\n }\n }\n\n output_query = self.translator._combine_query_and_filters(FAKE_BASE_QUERY, FAKE_FILTER, FAKE_POST_FILTER)\n\n self.assertDictEqual(expected_query, output_query)\n\n def test_queryTranslation_queryIsNotJson_invalidQueryError(self):\n with self.assertRaises(InvalidQueryError):\n self.translator.translate('{\"this is not a proper JSON\"}', self.org_uuid, None, False)\n\n def test_decodingInputQuery_noneQuery_emptyDictReturned(self):\n 
self.assertDictEqual(\n {},\n self.translator._get_query_dict(None))\n\n def test_queryTranslation_fullFeaturedQuery_queryTranslated(self):\n input_query = {\n 'query': 'blabla',\n 'filters': [\n {'format': ['csv']}\n ],\n 'size': 3,\n 'from': 14\n }\n\n output_query_string = self.translator.translate(json.dumps(input_query), self.org_uuid, True, False)\n output_query = json.loads(output_query_string)\n\n self.assertIn('filtered', output_query['query'])\n self.assertIn('size', output_query)\n self.assertIn('from', output_query)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Bhanuprakash-ch/data-catalog","sub_path":"tests/test_query_translation.py","file_name":"test_query_translation.py","file_ext":"py","file_size_in_byte":15842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15512857781","text":"import glob\nfrom .models import *\nfrom .serializers import *\nfrom rest_framework.views import APIView\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom rest_framework.response import Response\nfrom rest_framework import status, renderers\nimport pandas as pd\nimport numpy as np\nfrom .Service.classifier import Classifier\nfrom .Service.features_extraction import Extractor\n\n\nclass CollectingData(APIView):\n parser_classes = (MultiPartParser, FormParser)\n\n def post(self, request, *args, **kwargs):\n try:\n res = request.data\n user = res['user']\n data = np.array(res.getlist('myArray[]'))\n data = data.reshape(-1, 5)\n cols = res.getlist('form[]')\n df = pd.DataFrame(data=data, columns=cols)\n print(df)\n path = get_file_path(user)\n df.to_csv(path)\n features = ExtractedFeatures(user=user, features_file=path)\n serializer = ExtractedFeatureSerializer(features)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except Exception as e:\n print('error', e)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass VerificationData(APIView):\n parser_classes = (MultiPartParser, FormParser)\n\n def post(self, request, *args, **kwargs):\n try:\n res = request.data\n user = res['user']\n data = np.array(res.getlist('myArray[coords][]'))\n data = data.reshape(-1, 5)\n cols = res.getlist('form[]')\n test_df = pd.DataFrame(data=data, columns=cols)\n training_Files = glob.glob(\"media/features/\" + user + \"/*\")\n training_features = Extractor.extract_features(training_Files)\n cls = Classifier(training_features)\n pred, score = cls.classify(test_df)\n answer = cls.decide(pred)\n if answer:\n return Response('Correct user', status=status.HTTP_201_CREATED)\n else:\n return Response('Incorrect user', status=status.HTTP_201_CREATED)\n except Exception as e:\n print('error', e)\n return Response(status=status.HTTP_400_BAD_REQUEST)","repo_name":"DoubleWSolusions/BehavioralVerificationSystem","sub_path":"backend/Verication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18669685832","text":"#using while loops to build a guessing game\nsecret_number = 9 #while loops can have else part\nguess_count = 0\nguess_limit = 3\nwhile guess_count < guess_limit: \n guess = int(input('Guess: '))\n guess_count += 1\n if guess == secret_number:\n print('You won!')\n break #to terminate our loop\nelse:\n print('Sorry, you failed')\n\n","repo_name":"Zakiah07/01-hello-world","sub_path":"16-guessing game.py","file_name":"16-guessing 
game.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26982697742","text":"import os.path\n\nfrom google.appengine.ext.webapp import template\n\nimport model\n\nclass Mail(object):\n @classmethod\n def _render(cls, tmpl, values):\n path = os.path.join(os.path.dirname(__file__), 'templates', tmpl)\n lines = template.render(path, values).split('\\n')\n return (lines[0], '\\n'.join(lines[1:]))\n\n @classmethod\n def generate_new_week_emails(cls, last_week_id):\n ups = model.UserPreference.get_all()\n for up in ups:\n last_key = model.WeekUpdate.key_for(up, last_week_id)\n wu = model.WeekUpdate.get(last_key)\n if wu is not None:\n last_body = wu.body\n else:\n last_body = 'No update last week.'\n\n d = dict(reminder=False, name=up.user.nickname(),\n last_week_body=last_body)\n\n (subject, body) = cls._render('new_week.txt', d)\n yield (up.user.email(), subject, body)\n\n @classmethod\n def generate_reminder_emails(cls, current_week_id, last_week_id):\n ups = model.UserPreference.get_all_without_updates_for_week(\n current_week_id)\n\n for up in ups:\n last_key = model.WeekUpdate.key_for(up, last_week_id)\n wu = model.WeekUpdate.get(last_key)\n if wu is not None:\n last_body = wu.body\n else:\n last_body = 'No update last week.'\n\n d = dict(reminder=True, name=up.user.nickname(),\n last_week_body=last_body)\n\n (subject, body) = cls._render('new_week.txt', d)\n yield (up.user.email(), subject, body)\n\n @classmethod\n def generate_summary_emails(cls, week_id):\n wus = model.WeekUpdate.get_all_for_week_id(week_id)\n updates = []\n emails = []\n for wu in wus:\n name = wu.user_preference.user.nickname()\n email = wu.user_preference.user.email()\n body = wu.body\n updates.append(dict(name=name, email=email, body=body))\n emails.append(email)\n\n (subject, body) = cls._render('summary.txt', dict(updates=updates))\n for email in emails:\n yield (email, subject, body)\n\n","repo_name":"skrul/upto","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"18667407404","text":"import re\n\npipe_pattern = re.compile(\"[|]\")\ndef escape_pipes_filenames(path):\n return re.sub(pipe_pattern, \"\\|\", path)\n\nclass SlurmScriptGenerator(object):\n def __init__(\n self,\n jobname=\"somejerb\",\n nodes=1,\n tasks_per_node=1,\n cpus_per_task=1,\n mem_per_cpu=2,\n time=10,\n account=\"tyjames1\",\n partition=\"standard\"\n ):\n\n self.jobname = jobname\n self.header = {\n \"#SBATCH --job-name\": jobname,\n \"#SBATCH --mail-type\": \"BEGIN,END\",\n \"#SBATCH --nodes\": nodes,\n \"#SBATCH --ntasks-per-node\": tasks_per_node,\n \"#SBATCH --cpus-per-task\": cpus_per_task,\n \"#SBATCH --mem-per-cpu\": f\"{mem_per_cpu}g\",\n \"#SBATCH --time\": f\"{time}:00:00\",\n \"#SBATCH --account\": account,\n \"#SBATCH --partition\": partition\n }\n\n self.commands = list()\n\n def add_command(self, cmd) -> None:\n if isinstance(cmd, list):\n self.commands.append(\" \".join(cmd).strip())\n elif isinstance(cmd, str):\n self.commands.append(cmd.strip())\n else:\n raise TypeError(\"Command must be list or str.\")\n\n return None\n\n def __repr__(self):\n nl = \"\\n\"\n stringify = [f\"{k}={v}\" for k,v in self.header.items()]\n str_header = f\"{nl.join(stringify)}\"\n return f\"#!/bin/bash{nl}{str_header}{nl*2}{nl.join(self.commands)}\"\n\n def write(self):\n with 
open(f\"{self.jobname}.sh\", 'w') as f:\n f.write(self.__repr__())\n","repo_name":"Michigan-Mycology/Chytrid-Phylogenomics","sub_path":"scripts/slurm/scriptgen.py","file_name":"scriptgen.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"22856329062","text":"from wpilib.command import Command\n\nimport robot\n\n\nclass ArcFollowerCommand(Command):\n\n def __init__(self, xTwo, yTwo, slopeStart, slopeEnd, xOne=0.0, yOne=0.0): # NOTE: Give slope as an integer or fraction, NO DECIMALS!\n super().__init__('Arc Follower')\n\n '''\n\n Experimental command for a standard tank system with a NavX.\n\n xTwo: The end point's x-coordinate\n yTwo: The end point's y-coordinate\n slopeStart: The starting 'angle' of the robot. Think of this as rise over run! 75 (the default) is about a straight, vertical line.\n slopeEnd: The ending 'angle' of the robot. See slopeStart.\n xOne: The start point's x-coordinate, typically 0.0.\n yOne: The start point's y-coordinate, typically 0.0.\n\n '''\n\n self.requires(robot.drivetrain)\n\n if xOne == xTwo:\n raise Exception('Use the drive command for a vertical line! . . . I really hope it\\'s a vertical line . . . ')\n\n special = True\n\n if '/' in str(slopeStart):\n rise, run = str(slopeStart).split('/') # honor the denominator of a rise/run fraction\n yPrimeOne = float(rise) / float(run)\n else:\n yPrimeOne = float(slopeStart)\n\n if '/' in str(slopeEnd):\n rise, run = str(slopeEnd).split('/')\n yPrimeTwo = float(rise) / float(run)\n else:\n yPrimeTwo = float(slopeEnd)\n\n if xOne == 0.0:\n special = False\n\n a, b, c, d = robot.drivetrain.generatePolynomial(xOne, yOne, xTwo, yTwo, yPrimeOne, yPrimeTwo, special)\n\n eq = robot.drivetrain.getEquation(a, b, c, d)\n\n self.arcLength, self.derivative = robot.drivetrain.calcArcLength(xOne, xTwo, eq)\n\n self.finalX = xTwo\n\n def initialize(self):\n robot.drivetrain.resetEncoders()\n robot.drivetrain.resetGyro()\n robot.drivetrain.zeroDisplacement()\n\n robot.drivetrain.assignDerivative(self.derivative)\n robot.drivetrain.assignArcLength(self.arcLength)\n robot.drivetrain.assignFinalX(self.finalX)\n\n def execute(self):\n robot.drivetrain.angleControlDrive(robot.drivetrain.getHeadingDifference())\n\n def isFinished(self):\n return ((robot.drivetrain.rotationsToInches(robot.drivetrain.getPositions()[0]) / 12) >= self.arcLength)\n\n def end(self):\n robot.drivetrain.stop()\n","repo_name":"FRC2539/2021Revolver","sub_path":"commands/drivetrain/arcfollowercommand.py","file_name":"arcfollowercommand.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"14784526828","text":"class Dimensions:\n MIN_DIMENSION = 10\n MAX_DIMENSION = 1000\n\n def __init__(self, a, b, c):\n self.__a = a\n self.__b = b\n self.__c = c\n\n @property\n def a(self):\n return self.__a\n\n @a.setter\n def a(self, val):\n self.__a = val\n\n @property\n def b(self):\n return self.__b\n\n @b.setter\n def b(self, val):\n self.__b = val\n\n @property\n def c(self):\n return self.__c\n\n @c.setter\n def c(self, val):\n self.__c = val\n\n def __setattr__(self, key, value):\n if key == \"MIN_DIMENSION\" or key == \"MAX_DIMENSION\":\n raise AttributeError(\"Changing the MIN_DIMENSION and MAX_DIMENSION attributes is forbidden.\")\n # Dimension attributes are updated only with numeric values within range;\n # invalid assignments are silently ignored.\n if key in ('_Dimensions__a', '_Dimensions__b', '_Dimensions__c') and type(value) in (int, float) and self.MIN_DIMENSION <= value <= self.MAX_DIMENSION:\n object.__setattr__(self, key, value)\n\nd = Dimensions(10.5, 20.1, 30)\nd.a = 8\nd.b = 15\na, b, c = d.a, d.b, d.c # a=10.5, b=15, c=30 (8 is below MIN_DIMENSION, so a keeps its value)\nd.MAX_DIMENSION = 10 # raises AttributeError\nprint(d.__dict__)","repo_name":"iliaman1/OOP_ot_Balakireva","sub_path":"3 magic methods/3.1 setattr, getatttribute, getattr, delattr/task 7.py","file_name":"task 7.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39243464881","text":"#This file is a construction of a multi-layer perceptron\nimport numpy as np\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n#############importing keras##########\nfrom keras import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras import optimizers\n\n#################Start################\n# Import the Iris dataset\niris = datasets.load_iris()\nX = iris.data[:100, :] # Features: take all feature columns from the first 100 samples.\ny = iris.target[:100]\n\n#Build model\nmodel = Sequential() # Sequential stacks layers one after another\nmodel.add(Dense(units=1, input_dim=X.shape[1], activation='sigmoid')) # add units (neurons) with the number of inputs\n#model.add(Activation('sigmoid')) # activation type\n#model.summary()\n\n# Compile model\nsgd = optimizers.SGD(lr=0.01) # set the learning rate\nmodel.compile(optimizer=sgd, loss='mean_squared_error')\n\n#fitting\nmodel.fit(X, y, epochs=50, batch_size=5)\n######################################\n\n\n###################################### Different layers #######\n\nmodel2= Sequential()\nmodel2.add(Dense(units=2, input_dim=X.shape[1], activation='relu'))\nmodel2.add(Dense(units=5, activation='relu'))\nmodel2.add(Dense(units=1, activation='sigmoid'))\nmodel2.summary()\n\n# Compile model\nsgd = optimizers.SGD(lr=0.01) # set the learning rate\nmodel2.compile(optimizer=sgd, loss='mean_squared_error')\n\n#fitting\nhist = model2.fit(X, y, epochs=50, batch_size=5)\n\ny_pred = model2.predict(X)\n\n\nplt.plot(y_pred) # view the predictions\nplt.plot(y)\nplt.show()\n\nyp_r = np.round(y_pred)\nplt.plot(yp_r)\n\nplt.plot(hist.history['loss']) # view the evolution of the average loss.\n","repo_name":"spiralizing/DeepCourse","sub_path":"MultilayerPerceptron/BinaryClassifications/Keras_MLP.py","file_name":"Keras_MLP.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39101591592","text":"\r\nfrom pathlib import Path\r\n# from tkinter import *\r\nfrom tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage\r\n\r\n\r\nOUTPUT_PATH = Path(__file__).parent\r\nASSETS_PATH = OUTPUT_PATH / Path(r\"E:\\PROGRAMMING\\Python\\GUI\\Practice 1 (f)\\build\\assets\")\r\n\r\n\r\ndef relative_to_assets(path: str) -> Path:\r\n return ASSETS_PATH / Path(path)\r\n\r\n\r\nwindow = Tk()\r\n\r\nwindow.geometry(\"870x600\")\r\nwindow.configure(bg = \"#FFFFFF\")\r\n\r\n\r\ncanvas = Canvas(\r\n window,\r\n bg = \"#FFFFFF\",\r\n height = 600,\r\n width = 870,\r\n bd = 0,\r\n highlightthickness = 0,\r\n relief = \"ridge\"\r\n)\r\n\r\ncanvas.place(x = 0, y = 0)\r\ncanvas.create_rectangle(\r\n 0.0,\r\n 0.0,\r\n 870.0,\r\n 96.0,\r\n fill=\"#A00AAD\",\r\n outline=\"\")\r\n\r\ncanvas.create_text(\r\n 79.0,\r\n 
16.0,\r\n anchor=\"nw\",\r\n text=\"ZeeBook\",\r\n fill=\"#FFFFFF\",\r\n font=(\"MontserratRoman Bold\", 48 * -1)\r\n)\r\n\r\nimage_image_1 = PhotoImage(\r\n file=relative_to_assets(\"image_1.png\"))\r\nimage_1 = canvas.create_image(\r\n 224.0,\r\n 157.0,\r\n image=image_image_1\r\n)\r\n\r\nimage_image_2 = PhotoImage(\r\n file=relative_to_assets(\"image_2.png\"))\r\nimage_2 = canvas.create_image(\r\n 432.0,\r\n 261.0,\r\n image=image_image_2\r\n)\r\n\r\nimage_image_3 = PhotoImage(\r\n file=relative_to_assets(\"image_3.png\"))\r\nimage_3 = canvas.create_image(\r\n 641.0,\r\n 157.0,\r\n image=image_image_3\r\n)\r\n\r\ncanvas.create_text(\r\n 63.0,\r\n 130.0,\r\n anchor=\"nw\",\r\n text=\"Income\",\r\n fill=\"#711D1D\",\r\n font=(\"MontserratRoman Bold\", 22 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 100.0,\r\n 234.0,\r\n anchor=\"nw\",\r\n text=\"Balance\",\r\n fill=\"#164D64\",\r\n font=(\"MontserratRoman Bold\", 22 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 480.0,\r\n 130.0,\r\n anchor=\"nw\",\r\n text=\"Expense\",\r\n fill=\"#135024\",\r\n font=(\"MontserratRoman Bold\", 22 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 61.0,\r\n 160.0,\r\n anchor=\"nw\",\r\n text=\" $500 USD\",\r\n fill=\"#6D0A0A\",\r\n font=(\"MontserratRoman SemiBold\", 16 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 96.0,\r\n 264.0,\r\n anchor=\"nw\",\r\n text=\" $240 USD\",\r\n fill=\"#164D64\",\r\n font=(\"MontserratRoman SemiBold\", 16 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 478.0,\r\n 160.0,\r\n anchor=\"nw\",\r\n text=\" $1000 USD\",\r\n fill=\"#135024\",\r\n font=(\"MontserratRoman SemiBold\", 16 * -1)\r\n)\r\n\r\nimage_image_4 = PhotoImage(\r\n file=relative_to_assets(\"image_4.png\"))\r\nimage_4 = canvas.create_image(\r\n 710.0,\r\n 453.0,\r\n image=image_image_4\r\n)\r\n\r\ncanvas.create_text(\r\n 42.0,\r\n 321.0,\r\n anchor=\"nw\",\r\n text=\"Add Expense\",\r\n fill=\"#700579\",\r\n font=(\"MontserratRoman Bold\", 24 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 47.0,\r\n 352.0,\r\n anchor=\"nw\",\r\n text=\"Name\",\r\n fill=\"#700579\",\r\n font=(\"MontserratRoman SemiBold\", 16 * -1)\r\n)\r\n\r\ncanvas.create_text(\r\n 47.0,\r\n 435.0,\r\n anchor=\"nw\",\r\n text=\"Name\",\r\n fill=\"#700579\",\r\n font=(\"MontserratRoman SemiBold\", 16 * -1)\r\n)\r\n\r\nentry_image_1 = PhotoImage(\r\n file=relative_to_assets(\"entry_1.png\"))\r\nentry_bg_1 = canvas.create_image(\r\n 203.0,\r\n 401.5,\r\n image=entry_image_1\r\n)\r\nentry_1 = Entry(\r\n bd=0,\r\n bg=\"#D9D9D9\",\r\n fg=\"#000716\",\r\n highlightthickness=0\r\n)\r\nentry_1.place(\r\n x=60.5,\r\n y=379.0,\r\n width=285.0,\r\n height=43.0\r\n)\r\n\r\nentry_image_2 = PhotoImage(\r\n file=relative_to_assets(\"entry_2.png\"))\r\nentry_bg_2 = canvas.create_image(\r\n 203.0,\r\n 481.5,\r\n image=entry_image_2\r\n)\r\nentry_2 = Entry(\r\n bd=0,\r\n bg=\"#D9D9D9\",\r\n fg=\"#000716\",\r\n highlightthickness=0\r\n)\r\nentry_2.place(\r\n x=60.5,\r\n y=459.0,\r\n width=285.0,\r\n height=43.0\r\n)\r\n\r\nbutton_image_1 = PhotoImage(\r\n file=relative_to_assets(\"button_1.png\"))\r\nbutton_1 = Button(\r\n image=button_image_1,\r\n borderwidth=0,\r\n highlightthickness=0,\r\n command=lambda: print(\"button_1 clicked\"),\r\n relief=\"flat\"\r\n)\r\nbutton_1.place(\r\n x=42.0,\r\n y=521.0,\r\n width=326.0,\r\n height=57.0\r\n)\r\nwindow.resizable(False, 
False)\r\nwindow.mainloop()\r\n","repo_name":"i-osama/Simple-Tkinter-Dashboard","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32199678219","text":"row1 = [\"X\",\"X\",\"X\"]\nrow2 = [\"X\",\"X\",\"X\"]\nrow3 = [\"X\",\"X\",\"X\"]\n\nmap = [row1,row2,row3]\nprint(f\" 1 2 3\\n1 {row1}\\n2 {row2}\\n3 {row3}\")\nrowPosition = int(input(\"Where do you wanna put the treasure? (row): (1,2,3): \"))-1\ncolPosition = int(input(\"Where do you wanna put the treasure? (column): (1,2,3): \"))-1\nmap[rowPosition][colPosition] = \"O\"\nprint(f\" 1 2 3\\n1 {row1}\\n2 {row2}\\n3 {row3}\")\n","repo_name":"safakyilmaz-mis/python","sub_path":"putTreasue.py","file_name":"putTreasue.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20546681751","text":"\n\nimport numpy as np\nimport cv2\n\nfrom simple_waymo_open_dataset_reader import label_pb2\nfrom utilities.config import bev_config, lidar_config, obj_config\n\n\nclass ObjectPixel:\n def __init__(self):\n id: int\n x: float\n y: float\n z: float\n w: float\n l: float\n yaw: float\n\n\nclass Bev:\n def __init__(self):\n pass\n\n def convert_from_metric_into_px_coords(self, object_3d):\n ob_3d = object_3d\n obj_px = ObjectPixel()\n obj_px.id = ob_3d.type\n\n obj_px.x = (ob_3d.center_y - lidar_config.lim_y[0]) / \\\n (lidar_config.diff_y) * bev_config.width\n\n obj_px.y = (ob_3d.center_x - lidar_config.lim_x[0]) / \\\n (lidar_config.diff_x) * bev_config.height\n\n obj_px.z = ob_3d.center_z - lidar_config.lim_z[0]\n obj_px.w = ob_3d.width / (lidar_config.diff_y) * bev_config.width\n obj_px.l = ob_3d.length / (lidar_config.diff_x) * bev_config.height\n obj_px.yaw = -ob_3d.heading\n\n return obj_px\n\n def get_object_corners_within_bev_image(self, obj_px):\n bev_corners = np.zeros((4, 2), dtype=np.float32)\n\n cos_yaw = np.cos(obj_px.yaw)\n sin_yaw = np.sin(obj_px.yaw)\n\n bev_corners[0, 0] = obj_px.x - obj_px.w / 2 * cos_yaw - obj_px.l / 2 * sin_yaw # front left\n bev_corners[0, 1] = obj_px.y - obj_px.w / 2 * sin_yaw + obj_px.l / 2 * cos_yaw\n bev_corners[1, 0] = obj_px.x - obj_px.w / 2 * cos_yaw + obj_px.l / 2 * sin_yaw # rear left\n bev_corners[1, 1] = obj_px.y - obj_px.w / 2 * sin_yaw - obj_px.l / 2 * cos_yaw\n bev_corners[2, 0] = obj_px.x + obj_px.w / 2 * cos_yaw + obj_px.l / 2 * sin_yaw # rear right\n bev_corners[2, 1] = obj_px.y + obj_px.w / 2 * sin_yaw - obj_px.l / 2 * cos_yaw\n bev_corners[3, 0] = obj_px.x + obj_px.w / 2 * cos_yaw - obj_px.l / 2 * sin_yaw # front right\n bev_corners[3, 1] = obj_px.y + obj_px.w / 2 * sin_yaw + obj_px.l / 2 * cos_yaw\n\n return bev_corners\n\n def project_detections_into_bev(self, bev_map, objects_3d, color=[]):\n\n for object_3d in objects_3d:\n # extract detection\n if object_3d.type != label_pb2.Label.TYPE_VEHICLE:\n continue\n obj_px = self.convert_from_metric_into_px_coords(object_3d)\n\n # draw object bounding box into birds-eye view\n if not color:\n color = obj_config.colors[int(obj_px.id)]\n\n bev_corners = self.get_object_corners_within_bev_image(obj_px)\n\n # draw object as box\n corners_int = bev_corners.reshape(-1, 1, 2).astype(int)\n cv2.polylines(bev_map, [corners_int], True, color, 2)\n\n # draw colored line to identify object front\n corners_int = corners_int.reshape(-1, 2)\n\n cv2.line(bev_map, (corners_int[0, 0], corners_int[0, 1]),\n (corners_int[3, 0], 
corners_int[3, 1]),\n (255, 255, 0), 2)\n","repo_name":"davidscmx/camera_lidar_fusion","sub_path":"sensors/bev.py","file_name":"bev.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27660034214","text":"from django.core.exceptions import PermissionDenied\n\n\nclass PublicOrReadPermMixin:\n\n def get_object(self, queryset=None):\n obj = super().get_object(queryset)\n if not obj.is_public():\n if not self.request.user.is_authenticated:\n raise PermissionDenied\n if not self.request.user.has_perm('%s.read_%s' % (obj._meta.app_label, obj._meta.model_name)):\n raise PermissionDenied\n return obj\n","repo_name":"wmleung2/Code-LMs","sub_path":"Evaluation/test_sets/Python/GeotrekCE/Geotrek-admin/geotrek__common__permissions.py","file_name":"geotrek__common__permissions.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"20331331042","text":"import json\nfrom datetime import date, datetime\n\nimport app.models as m\nfrom app.extensions import db\n\n# TODO These tests can be reduced to mixin tests..\n\ndef test_workout_update():\n workout = m.Workout(date_proposed=date.today())\n\n exercise1 = m.Exercise(name='ABC')\n exercise2 = m.Exercise(name='DEF')\n exercise3 = m.Exercise(name='GHI')\n\n exercises = [exercise1, exercise2, exercise3]\n for index, ex in enumerate(exercises):\n s1 = m.SetEntry(reps=10, set_num=0, weight=0)\n s2 = m.SetEntry(reps=10, set_num=1, weight=0)\n s3 = m.SetEntry(reps=10, set_num=2, weight=0)\n entry = m.ExerciseEntry(ex_num=index, exercise=ex, sets=[s1, s2, s3])\n workout.entries.append(entry)\n\n user = m.User(username='abc', password='def')\n user.workouts = [workout]\n\n db.session.add(user)\n db.session.commit()\n\n saved_workout = m.Workout.query.first()\n assert saved_workout is not None\n\n data, errors = saved_workout.dump()\n\n data['comment'] = 'Great stuff!'\n\n # These don't get updated.\n data['date_proposed'] = None\n data['date_completed'] = None\n\n data['entries'][0]['sets'][1]['weight'] = 100\n\n saved_workout.update(**data)\n updated, errors = saved_workout.dump()\n\n assert updated['date_completed'] is None\n assert updated['date_proposed'] is not None\n assert updated['comment'] == data['comment']\n assert updated['entries'][0]['sets'][1]['weight'] == 100\n\n\ndef test_db_relationships():\n workout = m.Workout(date_proposed=date.today())\n\n exercise1 = m.Exercise(name='ABC')\n exercise2 = m.Exercise(name='DEF')\n exercise3 = m.Exercise(name='GHI')\n\n exercises = [exercise1, exercise2, exercise3]\n for index, ex in enumerate(exercises):\n s1 = m.SetEntry(reps=10, set_num=0, weight=0)\n s2 = m.SetEntry(reps=10, set_num=1, weight=0)\n s3 = m.SetEntry(reps=10, set_num=2, weight=0)\n entry = m.ExerciseEntry(ex_num=index, exercise=ex, sets=[s1, s2, s3])\n workout.entries.append(entry)\n\n user = m.User(username='abc', password='def')\n user.workouts = [workout]\n\n db.session.add(user)\n db.session.commit()\n\n workout = m.Workout.query.first()\n assert workout is not None\n assert workout.date_proposed == date.today()\n assert workout.date_completed is None\n assert workout.comment is None\n\n for entry_index, entry in enumerate(workout.entries):\n assert entry.exercise in exercises\n assert entry.ex_num == entry_index\n assert entry.exercise_id in [e.id for e in exercises]\n assert len(entry.sets) == 3\n for index, s in enumerate(entry.sets):\n assert 
s.set_num == index\n assert s.reps == 10\n assert s.weight == 0\n\n\ndef test_json():\n workout = m.Workout(date_proposed=date.today())\n\n exercise1 = m.Exercise(name='ABC')\n exercise2 = m.Exercise(name='DEF')\n exercise3 = m.Exercise(name='GHI')\n\n exercises = [exercise1, exercise2,exercise3]\n for index, ex in enumerate(exercises):\n s1 = m.SetEntry(reps=10, set_num=0, weight=0)\n s2 = m.SetEntry(reps=10, set_num=1, weight=0)\n s3 = m.SetEntry(reps=10, set_num=2, weight=0)\n entry = m.ExerciseEntry(ex_num=index, exercise=ex, sets=[s1, s2, s3])\n workout.entries.append(entry)\n\n user = m.User(username='abc', password='def')\n user.workouts = [workout]\n\n db.session.add(user)\n db.session.commit()\n\n data, errors = workout.dump()\n assert data['date_proposed'] == date.today().isoformat()\n assert data['date_completed'] is None\n assert data['comment'] is None\n assert data['id'] == 1\n\n sorted(workout.entries, key=lambda x: x.ex_num)\n\n combined_exercises = zip(data['entries'], workout.entries)\n for index, [json_entry, db_entry] in enumerate(combined_exercises):\n assert index == db_entry.ex_num\n assert json_entry['exercise']['name'] == db_entry.exercise.name\n\n assert len(json_entry['sets']) == 3\n combined_sets = zip(json_entry['sets'], db_entry.sets)\n for index, [json_set, db_set] in enumerate(combined_sets):\n assert index == db_set.set_num\n assert json_set['reps'] == db_set.reps\n assert json_set['weight'] == db_set.weight\n","repo_name":"SeanBE/logs-api","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30881511046","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom codecs import open\n\nwith open('README.rst', encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name = 'postmortem',\n version = '0.8.0',\n author = 'Ken Kundert',\n author_email = 'postmortem@nurdletech.com',\n description = 'Produces a package of information for dependents and partners to be opened upon death.',\n long_description = readme,\n long_description_content_type = 'text/x-rst',\n url = 'https://github.com/kenkundert/postmortem',\n download_url = 'https://github.com/kenkundert/postmortem/tarball/master',\n license = 'GPLv3+',\n scripts = 'postmortem'.split(),\n install_requires = [\n 'appdirs',\n 'avendesora>=1.14',\n 'arrow',\n 'docopt',\n 'inform>=1.16',\n 'nestedtext>=3.0',\n 'python-gnupg>=0.4.4',\n # Be careful. There's a package called 'gnupg' that's an \n # incompatible fork of 'python-gnupg'. 
If both are installed, the \n # user will probably have compatibility issues.\n 'voluptuous',\n ],\n python_requires='>=3.8',\n zip_safe = True,\n keywords = 'postmortem'.split(),\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Utilities',\n ],\n)\n","repo_name":"KenKundert/postmortem","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"28565918621","text":"from subprocess import Popen\nfrom guibot.guibot import GuiBot\nimport time\nimport netrc\nimport sys\nimport pyautogui\npyautogui.FAILSAFE = False\n\nclass WebEx:\n def __init__(self, args):\n self.website = args.website\n self.id = args.meeting_id\n self.profile = args.profile\n self.capture = args.capture\n self.timer = args.time\n self.host = args.host\n self.browser_type = args.browser\n \n if self.capture:\n self.filter = args.filter\n self.count = args.trials\n\n def launch_driver(self):\n proc = Popen(['google-chrome'])\n time.sleep(5)\n\n if self.browser_type == 'chrome':\n pyautogui.typewrite('chrome://webrtc-internals')\n pyautogui.hotkey('enter')\n pyautogui.hotkey('ctrl', 't')\n pyautogui.typewrite(self.id)\n pyautogui.hotkey('enter')\n\n def launch_app(self):\n guibot = GuiBot()\n guibot.add_path('autogui_images')\n\n print(\"in launch app\")\n time.sleep(5)\n if guibot.exists('webex_1_join_meeting.png'):\n guibot.click('webex_1_join_meeting.png')\n\n time.sleep(5)\n\n if guibot.exists('webex_2_next_btn.png'):\n guibot.click('webex_2_next_btn.png')\n time.sleep(5)\n pyautogui.typewrite('vcqoe')\n pyautogui.hotkey('enter')\n time.sleep(5)\n if guibot.exists('webex_ok_btn.png'):\n guibot.click('webex_ok_btn.png')\n \n time.sleep(5)\n \n if guibot.exists('webex_3_join_meeting.png'):\n guibot.click('webex_3_join_meeting.png')\n return\n\n def exit(self):\n\n guibot = GuiBot()\n guibot.add_path('autogui_images')\n\n if self.browser_type == 'chrome':\n pyautogui.hotkey('ctrl', 'tab')\n time.sleep(1)\n guibot.click('dump_webrtc.png')\n guibot.click('webrtc_download.png')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'w')\n time.sleep(1)\n if guibot.exists('webex_dl_close_btn.png'):\n guibot.click('webex_dl_close_btn.png')\n time.sleep(1)\n pyautogui.move(0, 100)\n if guibot.exists('webex_4_end_call.png'):\n guibot.click('webex_4_end_call.png')\n time.sleep(1)\n if guibot.exists('webex_5_leave_meeting.png'):\n guibot.click('webex_5_leave_meeting.png')\n time.sleep(5)\n pyautogui.hotkey('ctrl', 'w')\n\n else:\n pyautogui.move(0, 100)\n if guibot.exists('webex_4_end_call.png'):\n guibot.click('webex_4_end_call.png')\n time.sleep(1)\n if guibot.exists('webex_5_leave_meeting.png'):\n guibot.click('webex_5_leave_meeting.png')\n time.sleep(3)\n res = Popen('pkill chrome', shell=True)\n\n return\n","repo_name":"noise-lab/vcaml","sub_path":"src/data_collection/in-lab/applications/webex.py","file_name":"webex.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"8632825077","text":"from flask import Flask, jsonify\nimport 
sys\nimport re\nimport glob\n# import ipaddress\nfrom ipaddress import IPv4Interface\n\ndef classify(s):\n \"\"\"\n :param s: String to classify\n :return: Dict describing the matched line, or the string \"UNCLASSIFIED\"\n \"\"\"\n m = re.match('^ ip address ([0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+) ([0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+)', s)\n\n if m:\n return {\"ip\":IPv4Interface(str(m.group(1)) + \"/\" + str(m.group(2)))}\n\n m = re.match(\"^interface (.+)\", s)\n if m:\n return {\"int\":m.group(1)}\n\n m = re.match(\"^hostname (.+)\", s)\n if m:\n return {\"host\":m.group(1)}\n\n return \"UNCLASSIFIED\"\n\nip_addresses = []\ninterfaces = []\nhosts = []\n\nfor current_file_name in glob.glob(\"C:\\\\PY_WORK\\\\*.txt\"):\n\n with open(current_file_name) as f:\n for current_line in f:\n c = classify(current_line)\n if \"ip\" in c:\n ip_addresses.append(c)\n if \"int\" in c:\n interfaces.append(c)\n if \"host\" in c:\n hosts.append(c)\n\nprint(ip_addresses)\n#print(interfaces)\na = hosts\n\nprint(a)\n\n#####################################\napp = Flask(__name__)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return \"Good day! You are using the hosts directory.\"\n\n@app.route('/config')\ndef configs():\n return str(a)\n\n\n#@app.route('/hostname/<name1>/<name2>')\n#def hostname(name1, name2):\n # return \"Access recorded for \" + name1 + \", \" + name2\n#@app.route('/config/hostname')\n#def config_hostname():\n # return ip_addresses\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"SVKuznetsov/p4ne","sub_path":"Lab2.2/WEB_SERVER.py","file_name":"WEB_SERVER.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
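The hosts-directory script above leans on ipaddress.IPv4Interface to pair an address with its netmask; a minimal sketch of that parsing in isolation (addresses are illustrative):

from ipaddress import IPv4Interface

# IPv4Interface accepts "address/netmask" strings directly, which is why the
# classify() helper can join the two regex groups with a slash.
iface = IPv4Interface('192.168.1.10/255.255.255.0')
print(iface.ip)       # 192.168.1.10
print(iface.network)  # 192.168.1.0/24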
{"seq_id":"22222812677","text":"import json\nfrom base64 import decodestring\nfrom datetime import datetime\n\nimport nose.tools as nt\n\nfrom .. import jsonutil\nfrom ..jsonutil import json_clean, encode_images\nfrom ipython_genutils.py3compat import unicode_to_str, str_to_bytes, iteritems\n\n\nclass Int(int):\n def __str__(self):\n return 'Int(%i)' % self\n\ndef test():\n # list of input/expected output. Use None for the expected output if it\n # can be the same as the input.\n pairs = [(1, None), # start with scalars\n (1.0, None),\n ('a', None),\n (True, None),\n (False, None),\n (None, None),\n # Containers\n ([1, 2], None),\n ((1, 2), [1, 2]),\n (set([1, 2]), [1, 2]),\n (dict(x=1), None),\n ({'x': 1, 'y':[1,2,3], '1':'int'}, None),\n # More exotic objects\n ((x for x in range(3)), [0, 1, 2]),\n (iter([1, 2]), [1, 2]),\n (Int(5), 5),\n (datetime(1991, 7, 3, 12, 00), \"1991-07-03T12:00:00.000000\"),\n ]\n \n for val, jval in pairs:\n if jval is None:\n jval = val\n out = json_clean(val)\n # validate our cleanup\n nt.assert_equal(out, jval)\n # and ensure that what we return, indeed encodes cleanly\n json.loads(json.dumps(out))\n\n\ndef test_encode_images():\n # invalid data, but the header and footer are from real files\n pngdata = b'\\x89PNG\\r\\n\\x1a\\nblahblahnotactuallyvalidIEND\\xaeB`\\x82'\n jpegdata = b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIFblahblahjpeg(\\xa0\\x0f\\xff\\xd9'\n pdfdata = b'%PDF-1.\\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'\n \n fmt = {\n 'image/png' : pngdata,\n 'image/jpeg' : jpegdata,\n 'application/pdf' : pdfdata\n }\n encoded = encode_images(fmt)\n for key, value in iteritems(fmt):\n # encoded has unicode, want bytes\n decoded = decodestring(encoded[key].encode('ascii'))\n nt.assert_equal(decoded, value)\n encoded2 = encode_images(encoded)\n nt.assert_equal(encoded, encoded2)\n \n b64_str = {}\n for key, encoded in iteritems(encoded):\n b64_str[key] = unicode_to_str(encoded)\n encoded3 = encode_images(b64_str)\n nt.assert_equal(encoded3, b64_str)\n for key, value in iteritems(fmt):\n # encoded3 has str, want bytes\n decoded = decodestring(str_to_bytes(encoded3[key]))\n nt.assert_equal(decoded, value)\n\ndef test_lambda():\n with nt.assert_raises(ValueError):\n json_clean(lambda : 1)\n\n\ndef test_exception():\n bad_dicts = [{1:'number', '1':'string'},\n {True:'bool', 'True':'string'},\n ]\n for d in bad_dicts:\n nt.assert_raises(ValueError, json_clean, d)\n\n\ndef test_unicode_dict():\n data = {u'üniço∂e': u'üniço∂e'}\n clean = jsonutil.json_clean(data)\n nt.assert_equal(data, clean)\n","repo_name":"pyparallel/pyparallel","sub_path":"Lib/site-packages/ipykernel-4.1.1-py3.3.egg/ipykernel/tests/test_jsonutil.py","file_name":"test_jsonutil.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":579,"dataset":"github-code","pt":"67"}
{"seq_id":"15512857781","text":"# Convert a decimal number to its Roman-numeral form\r\n# (the original loop skeleton divides and takes remainders, which fits this direction).\r\nvalues = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\r\nsymbols = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']\r\nn = int(input(\"Enter the decimal number: \"))\r\nroman = ''\r\n\r\nfor i in range(len(values)):\r\n m = n // values[i]\r\n roman += symbols[i] * m\r\n n = n % values[i]\r\nprint(roman)","repo_name":"kumarswatantra2000/pallu3","sub_path":"python/asc.py","file_name":"asc.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
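For completeness, the inverse direction (Roman numerals back to decimal) can be sketched as follows; subtractive pairs such as IV and CM fall out of comparing neighboring symbol values (this helper is illustrative, not part of either file):

def roman_to_int(s):
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for i, ch in enumerate(s):
        # Subtract when a smaller symbol precedes a larger one (e.g. IV, CM).
        if i + 1 < len(s) and vals[s[i + 1]] > vals[ch]:
            total -= vals[ch]
        else:
            total += vals[ch]
    return total

assert roman_to_int('MCMXCIV') == 1994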
{"seq_id":"6826471107","text":"accounts = [\n\t{ 'client_name': 'Igor', 'account_number': 11234543, 'balance': 203004099.2 },\n\t{ 'client_name': 'Vladimir', 'account_number': 43546731, 'balance': 5204100071.23 },\n\t{ 'client_name': 'Sergei', 'account_number': 23456311, 'balance': 1353600.0 }\n]\n\n# Create function that returns the name and balance of cash on an account in a list\ndef get_name_and_balance(a, num):\n for item in a:\n if item['account_number'] == num:\n print(item['client_name'] + \",\", item['balance'])\n\n# simplified!\nget_name_and_balance(accounts, 11234543)\n\n# The output should be: \"Igor\", \"203004099.2\"\n\n# Create function that transfers an amount of cash from one account to another\n# it should have three parameters:\n#\n# - from account_number\n# - to account_number\n# - amount to transfer\n#\n# Print \"404 - account not found\" if any of the account numbers don't exist\n\ndef transfer_amount(_from, _to, amount):\n account_numbers = []\n for item in accounts:\n account_numbers.append(item['account_number'])\n if _from not in account_numbers or _to not in account_numbers:\n print(\"404 - account not found\")\n else:\n for item in accounts:\n if item['account_number'] == _from:\n item['balance'] -= amount\n elif item['account_number'] == _to:\n item['balance'] += amount\n print(accounts)\n\n\ntransfer_amount(43546731, 23456311, 500.0)\n#After printing the \"accounts\" it should look like:\n# accounts = [\n#\t{ 'client_name': 'Igor', 'account_number': 11234543, 'balance': 203004099.2 },\n#\t{ 'client_name': 'Vladimir', 'account_number': 43546731, 'balance': 5204099571.23 },\n#\t{ 'client_name': 'Sergei', 'account_number': 23456311, 'balance': 1354100.0 }\n#]","repo_name":"green-fox-academy/Komaxor","sub_path":"week3/thu/27_bank_transfer.py","file_name":"27_bank_transfer.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"33483932715","text":"from django_filters.rest_framework import CharFilter, FilterSet, filters\n\nfrom recipes.models import Ingredients, Recipes, Tags\n\n\nclass IngredientsFilter(FilterSet):\n \"\"\"Filter class for Ingredients objects.\"\"\"\n\n name = CharFilter(field_name=\"name\", lookup_expr=\"istartswith\")\n\n class Meta:\n model = Ingredients\n fields = (\"name\",)\n\n\nclass RecipeFilter(FilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n field_name=\"tags__slug\",\n to_field_name=\"slug\",\n queryset=Tags.objects.all(),\n )\n\n is_favorited = filters.BooleanFilter(method=\"filter_is_favorited\")\n is_in_shopping_cart = filters.BooleanFilter(\n method=\"filter_is_in_shopping_cart\"\n )\n\n class Meta:\n model = Recipes\n fields = (\n \"tags\",\n \"author\",\n )\n\n def filter_is_favorited(self, queryset, name, value):\n # Filter only when the flag is set and the user is authenticated;\n # otherwise return the queryset unchanged.\n user = self.request.user\n if value and not user.is_anonymous:\n return queryset.filter(favorite__user=user)\n return queryset\n\n def filter_is_in_shopping_cart(self, queryset, name, value):\n user = self.request.user\n if value and not user.is_anonymous:\n return queryset.filter(cart__user=user)\n return queryset\n","repo_name":"FULLLIFE4rever/foodgram-project-react","sub_path":"backend/api/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"204301811","text":"import os\nimport pytest\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\nrules = [\n 'iptables -t nat -I PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 443',\n 'iptables -t nat -I OUTPUT -p tcp -o lo --dport 80 -j REDIRECT --to-port 443',\n 'iptables -A INPUT -p tcp -m tcp --dport 22 -j ACCEPT',\n 'iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT',\n 'iptables -A INPUT -p tcp -m tcp --dport 443 -j ACCEPT',\n 'iptables -A INPUT -m limit --limit 15/minute -j LOG --log-level 7 --log-prefix \"Dropped by firewall: \"'\n]\n\njails = {\n 'sshd': [\n '[sshd]', 'enabled = true', 'port = ssh', 'filter = sshd',\n 'findtime = 600', 'maxretry = 
5', 'bantime = 3600'\n ]\n}\n\nkernel_opts = [\n 'kernel.sysrq = 0', 'kernel.core_uses_pid = 1', 'net.ipv4.ip_forward = 0',\n 'net.ipv4.tcp_syncookies = 1', 'net.ipv4.tcp_synack_retries = 5',\n 'net.ipv4.conf.all.send_redirects = 0',\n 'net.ipv4.conf.default.send_redirects = 0',\n 'net.ipv4.conf.all.accept_source_route = 0',\n 'net.ipv4.conf.all.accept_redirects = 0',\n 'net.ipv4.conf.all.secure_redirects = 0',\n 'net.ipv4.conf.all.log_martians = 1',\n 'net.ipv4.conf.default.accept_source_route = 0',\n 'net.ipv4.conf.default.accept_redirects = 0',\n 'net.ipv4.conf.default.secure_redirects = 0',\n 'net.ipv4.icmp_echo_ignore_broadcasts = 1',\n 'net.ipv4.conf.all.rp_filter = 1', 'net.ipv4.conf.default.rp_filter = 1',\n 'net.ipv6.conf.default.router_solicitations = 0',\n 'net.ipv6.conf.default.accept_ra_rtr_pref = 0',\n 'net.ipv6.conf.default.accept_ra_pinfo = 0',\n 'net.ipv6.conf.default.accept_ra_defrtr = 0',\n 'net.ipv6.conf.default.autoconf = 0',\n 'net.ipv6.conf.default.dad_transmits = 0',\n 'net.ipv6.conf.default.max_addresses = 1', 'fs.file-max = 65535',\n 'kernel.pid_max = 65536', 'net.ipv4.ip_local_port_range = 2000 65000',\n 'net.ipv4.tcp_rfc1337 = 1', 'vm.swappiness = 5'\n]\n\nsshd_opts = [\n 'PasswordAuthentication no', 'PermitEmptyPasswords no',\n 'PermitRootLogin without-password', 'PermitUserEnvironment yes',\n 'PubkeyAuthentication yes'\n]\n\n\n@pytest.mark.parametrize('pkg', ['fail2ban', 'iptables', 'sudo'])\ndef test_pkg(host, pkg):\n package = host.package(pkg)\n assert package.is_installed\n\n\n@pytest.mark.parametrize('svc', ['firewall', 'ssh', 'fail2ban'])\ndef test_svc(host, svc):\n service = host.service(svc)\n assert service.is_running\n assert service.is_enabled\n\n\n@pytest.mark.parametrize('rule', rules)\ndef test_firewall(host, rule):\n firewall_rules = host.file('/etc/firewall')\n assert firewall_rules.exists\n assert firewall_rules.contains(rule)\n\n\n@pytest.mark.parametrize('jail', jails)\ndef test_fail2ban_jails(host, jail):\n jails = host.file('/etc/fail2ban/jail.local')\n assert jails.exists\n for line in jail:\n assert jails.contains(line)\n\n\n@pytest.mark.parametrize('opt', kernel_opts)\ndef test_kernel_hardened(host, opt):\n kernel_config = host.file('/etc/sysctl.conf')\n assert kernel_config.exists\n assert kernel_config.contains(opt)\n\n\n@pytest.mark.parametrize('opt', sshd_opts)\ndef test_sshd_opts(host, opt):\n sshd_config = host.file('/etc/ssh/sshd_config')\n assert sshd_config.exists\n assert sshd_config.contains(opt)\n","repo_name":"alexandrebouthinon/ansible-roles","sub_path":"base.security/molecule/default/tests/test_role.py","file_name":"test_role.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71441272535","text":"from googleapiclient.discovery import build\nimport json\nfrom filesManager import fileManager\nfrom makeCsv import makeCSV\n\nclass getYoutubeChannel:\n\n def getChannelInfo(self,fileName,keyWord):\n fileM = fileManager()\n APIKEY = fileM.readFile('','.key')\n YOUTUBE_API_SERVICE_NAME='youtube'\n YOUTUBE_API_VERSION='v3'\n youtube=build(\n YOUTUBE_API_SERVICE_NAME,\n YOUTUBE_API_VERSION,\n developerKey = APIKEY\n )\n\n try:\n searchResponse=youtube.search().list(\n q=keyWord,\n part='id,snippet',\n maxResults=25\n ).execute()\n except Exception as e:\n print('An error occurred while fetching search results.')\n return\n channelDataList = []\n with open(fileName,'w',encoding='utf-8') as f:\n for searchResults in 
searchResponse.get('items',[]):\n if searchResults['id']['kind'] != 'youtube#channel':\n continue\n\n channelId = searchResults['id']['channelId']\n channelResponse = youtube.channels().list(\n part = 'statistics,snippet',\n id = channelId\n ).execute()\n\n channelData = {}\n channelData['channelName'] = searchResults['snippet']['channelTitle']\n channelData['videoCount'] = channelResponse['items'][0]['statistics']['videoCount']\n channelData['viewCount'] = channelResponse['items'][0]['statistics']['viewCount']\n channelData['subscribers'] = channelResponse['items'][0]['statistics']['subscriberCount']\n channelData['country'] = channelResponse['items'][0]['snippet'].get('country','')\n\n channelDataList.append(channelData)\n mkCSV = makeCSV() # CSV exporter instantiated here but not used yet\n\n print(searchResults)\n print(json.dumps(channelResponse,indent=2,ensure_ascii=False),file=f)\n \n","repo_name":"jtakumi/getYoutubeChannel","sub_path":"getYoutubeChannel.py","file_name":"getYoutubeChannel.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"41128347883","text":"from django import template\nimport cloudinaryResponsive\n\nregister = template.Library()\n\n# custom filter to access dict values by keys when a key is a variable\n@register.filter(name='key')\ndef key(d, key_name):\n try:\n value = d[key_name]\n except KeyError:\n from django.conf import settings\n value = settings.TEMPLATE_STRING_IF_INVALID\n return value\n\n# custom tag to render <picture> element from Cloudinary pictureID\n@register.simple_tag\ndef pictureCloudinary(publicID, sizes, srcset, alt, portrait=False):\n \"\"\"\n constructs HTML5 <picture> element with passed\n sizes, srcset and alt parameters from Cloudinary publicID image\n\n typical form of the returned result:\n <picture>\n <source srcset=\"...\" sizes=\"...\">\n <img src=\"...\" alt=\"...\">\n </picture>\n\n if 'portrait' is set:\n <picture>\n <source media=\"(orientation: portrait)\" srcset=\"...\" sizes=\"...\">\n <source srcset=\"...\" sizes=\"...\">\n <img src=\"...\" alt=\"...\">\n </picture>\n \"\"\"\n # list from srcset string\n srcset = [i for i in srcset.split()]\n return cloudinaryResponsive.pictureElement(publicID, sizes, srcset, alt, portrait)\n","repo_name":"sergeynikiforov/sergey-nikiforov.com","sub_path":"sn_app/templatetags/sn_app_extras.py","file_name":"sn_app_extras.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73811272533","text":"from collections import *\nimport sys\nN = int(input())\nS = list(map(int,input().split()))\n\n# build the frequency table\nnum = Counter(S)\n\n# if there are no duplicates, every count is 1 and the number of keys matches the number of elements of S\nfor i in num.values():\n if(i > 1):\n print('NO')\n sys.exit()\nprint('YES')","repo_name":"carbscountry/atcoder_practice","sub_path":"5-3-bucket/distinct/distinct.py","file_name":"distinct.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
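The duplicate check above can also be expressed without Counter: a set collapses repeats, so comparing sizes detects any duplicate (illustrative one-liner, same YES/NO convention as the original):

S = [3, 1, 4, 1, 5]
print('NO' if len(set(S)) < len(S) else 'YES')  # -> NO, since 1 repeats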
{"seq_id":"40275321977","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 28 17:43:35 2021\n\n@author: marco\n\"\"\"\n\nimport numpy as np\nimport os\nimport csv\nimport tensorflow as tf\nfrom keras import optimizers\nfrom keras.layers import Input\nfrom keras.models import Model, load_model\nfrom keras.layers import Dense, Reshape\nfrom keras.layers import Convolution1D, MaxPooling1D, BatchNormalization, Conv2D, MaxPooling2D\nfrom keras.layers import Lambda, concatenate\nfrom tensorflow.keras.initializers import Constant\nfrom keras.utils import np_utils\n\nnum_points = 8192\n# number of categories\nk = 12\n# epoch number\nepo = 50\n# batch size\nbatch_size = 32\n# define optimizer\nadam = optimizers.Adam(lr=0.001, decay=0.7)\n# label colors\nclass2color = {0:\t[0,255,0], #green ceiling\n 1:\t[0,0,255], #blue floor\n 2:\t[0,255,255], #turquoise wall\n 3: [255,255,0], #yellow light\n 4: [255,0,255], #purple bookshelf\n 5: [255,0,0], #red chair\n 6: [100, 100, 100], #grey table\n 7: [0, 75, 0], #darkgreen vase\n 8: [255,125,0], #orange lamp\n 9: [150,255,0], #lightgreen laptop\n 10: [0,150,255], #lightblue monitor\n 11: [0, 0, 0], #black sofa\n 12: [75,0,0], #darkred\n} \n\n\ndef sample_pc(data, indices):\n points = None\n labels = None\n \n for i in range(len(indices)):\n start = indices[i][0]\n end = indices[i][1]\n #if end - start < 1024:\n # continue\n p, l = sample_pc_cluster(data[start:end, :])\n if points is None or labels is None:\n points = p\n labels = l\n else:\n points = np.vstack((points, p))\n labels = np.vstack((labels, l))\n point_array = points.reshape(-1, num_points, 9)\n label_array = labels.reshape(-1, num_points, 1)\n return (point_array, label_array)\n\ndef load_csv(csv_filename):\n file = np.genfromtxt(csv_filename, delimiter=';')\n cluster_indices = []\n start = 0\n cur = 0\n i = 0\n for i in range(file.shape[0]):\n if file[i, 0] == 1000:\n cur = i\n if cur != start:\n cluster_indices.append((start, cur))\n start = i+1\n elif cur == start:\n start = i+1\n \n return (file, cluster_indices)\n\ndef sample_pc_cluster(cluster_data):\n selected_points = []\n if cluster_data.shape[0] > num_points*2:\n index = np.random.choice(len(cluster_data), num_points*2, replace=False)\n else:\n index = np.random.choice(len(cluster_data), num_points*2, replace=True)\n for i in range(len(index)):\n selected_points.append(cluster_data[index[i]])\n selected_points = np.array(selected_points)\n data = np.take(selected_points, [0,1,2,3,4,5,6,7,8], axis=1)\n label = np.take(selected_points, [9], axis=1)\n return (data, label)\n\ndef write_processed_data(data, path, filename):\n \n with open(os.path.join(path, filename), 'w') as output:\n writer = csv.writer(output, delimiter=';', lineterminator='\\n')\n for i in range(data.shape[0]):\n writer.writerow(np.around(data[i, :], 4))\n \ndef write_stats(iou, acc, path, filename):\n mean_iou = 0\n for i in range(k):\n mean_iou += iou[i]\n mean_iou = mean_iou / k\n \n # csv.writerow expects an iterable of fields, so labels and scalars are\n # wrapped in single-element lists (a bare string would be split per character).\n with open(os.path.join(path, filename), 'w') as output:\n writer = csv.writer(output, delimiter=';', lineterminator='\\n')\n writer.writerow([\"IoU per class:\"])\n writer.writerow(iou)\n writer.writerow([\"Mean IoU:\"])\n writer.writerow([mean_iou])\n writer.writerow([\"Accuracy:\"])\n writer.writerow([acc])\n \ndef change_color(data, labels):\n for i in range(data.shape[0]):\n col = int(labels[i])\n data[i, 3] = class2color[col][0] / 255\n data[i, 4] = class2color[col][1] / 255\n data[i, 5] = class2color[col][2] / 255\n \ndef evaluate(gt_labels, pred):\n \n print(gt_labels.shape)\n print(pred.shape)\n true_pos = [0 for _ in range(k)]\n false_pos = [0 for _ in range(k)]\n gt = [0 for _ in range(k)]\n \n for i in range(gt_labels.shape[0]):\n gt[int(gt_labels[i])] += 1\n if int(gt_labels[i]) == int(pred[i]):\n true_pos[int(gt_labels[i])] += 1\n else:\n false_pos[int(pred[i])] += 1\n iou_list = []\n acc = 0\n for j in range(k):\n if float(gt[j]) == 0:\n iou_list.append(-1)\n continue\n iou = true_pos[j]/float(gt[j] + false_pos[j])\n iou_list.append(iou)\n acc += true_pos[j]\n \n acc = acc / float(gt_labels.shape[0])\n \n return iou_list, acc\n\n\ndef main():\n path = os.path.dirname(os.path.realpath(__file__))\n model_path = os.path.join(path, \"eval_models\")\n data_path = 
os.path.join(path, \"eval_data\")\n pred_path = os.path.join(path, \"predicted\")\n filenames = [d for d in os.listdir(data_path)]\n models_for_eval = [x for x in os.listdir(model_path)]\n print(models_for_eval)\n \n for m in models_for_eval:\n p = os.path.join(model_path, m)\n model = load_model(p, compile=True)\n iou_list = [0 for _ in range(k)]\n mean_acc = 0\n output_path = os.path.join(pred_path, m)\n if not os.path.exists(output_path): \n os.mkdir(output_path)\n for d in range(len(filenames)):\n progress = (d/len(filenames))*100\n print(\"Fileread started. Current progress: \" + str(progress) +\"%\")\n cur_points, cluster_indices = load_csv(os.path.join(data_path, filenames[d]))\n eval_points, eval_labels = sample_pc(cur_points, cluster_indices)\n predictions = model.predict(eval_points)\n predictions = predictions.reshape(-1, k)\n predictions = predictions.argmax(1)\n eval_labels = eval_labels.reshape(-1, 1)\n iou, acc = evaluate(eval_labels, predictions)\n for i in range(k):\n if iou[i] >= 0:\n iou_list[i] += iou[i]\n mean_acc += acc\n \n if d%10 == 0:\n eval_points = eval_points.reshape(-1, 9)\n output = np.empty_like(eval_points[:, :6])\n np.copyto(output, eval_points[:, :6])\n write_processed_data(output, output_path, \"OG_\" + m + \"_\" + filenames[d])\n change_color(output, eval_labels)\n write_processed_data(output, output_path, \"GT_\" + m + \"_\" + filenames[d]) \n change_color(output, predictions)\n write_processed_data(output, output_path, \"Pred_\" + m + \"_\" + filenames[d])\n for i in range(k):\n iou_list[i] = iou_list[i] / len(filenames)\n write_stats(iou_list, mean_acc/len(filenames), output_path, \"Stats_Model_\" + m + \".txt\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"m-117/POG_a_3D_PointCloud_Generator","sub_path":"PointNet/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"8070268006","text":"import datetime\nimport logging\nimport re\n\nfrom sqlalchemy import Column, ForeignKeyConstraint, Integer, String\nfrom sqlalchemy.orm import relationship\n\nfrom challenge.exceptions import CreditCartdInvalid, ParamInvalid, ExpirationDateExceeded\n\nfrom .payments import STATUS_SUCCESS, Payments\n\nlog = logging.getLogger(__file__)\n\nBRANDS = {\n 'Visa': re.compile(r'^4[0-9]{6,}$'),\n 'Mastercard': re.compile(\n r'^(?:5[1-5][0-9]{5,}|222[1-9][0-9]{3,}|22[3-9][0-9]{4,}|2[3-6][0-9]{5,}|27[01][0-9]{4,}|2720[0-9]{3,})$',\n re.VERBOSE),\n 'American Express': re.compile(r'^3[47][0-9]{5,}$'),\n 'Diners Club': re.compile(r'^3(?:0[0-5]|[68][0-9])[0-9]{4,}$'),\n 'Discover': re.compile(r'^6(?:011|5[0-9]{2})[0-9]{3,}$'),\n 'JCB': re.compile('^(?:2131|1800|35[0-9]{3})[0-9]{3,}$')}\n\n\nclass CreditCard(Payments):\n \"\"\"Model table creditCard payments.\"\"\"\n\n __tablename__ = 'credits_cards'\n __type__ = 'card'\n\n id = Column(Integer, primary_key=True)\n card_name = Column(String(65), nullable=False)\n card_number = Column(String(19), nullable=False)\n card_expiration_date = Column(String(7), nullable=False)\n card_flag = Column(String(35), nullable=False)\n card_cvv = Column(Integer, nullable=False)\n\n payment = relationship(\"Payments\",\n uselist=False,\n single_parent=True,\n cascade=\"all, delete, delete-orphan\",\n post_update=True\n )\n\n __table_args__ = (ForeignKeyConstraint([id], [\"payments.id\"]),\n {})\n __mapper_args__ = {'polymorphic_identity': 'cd'}\n\n def __init__(self):\n Payments.__init__(self)\n
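\n # Illustrative sketch of the Luhn mod-10 rule implemented below (example\n # numbers are assumptions, not from the source): every second digit from the\n # right is doubled and digit-summed, and the grand total must be divisible\n # by 10, so '4532015112830366' passes while '4532015112830367' fails.\n @staticmethod\n def 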
__check_validate_credit_card(card_number):\n \"\"\"Validate Credit Card.\"\"\"\n def digits_of(n):\n return [int(d) for d in str(n)]\n digits = digits_of(card_number)\n odd_digits = digits[-1::-2]\n even_digits = digits[-2::-2]\n checksum = 0\n checksum += sum(odd_digits)\n for d in even_digits:\n checksum += sum(digits_of(d * 2))\n return checksum % 10 == 0\n\n def __validate_credit_card(self, p_number):\n \"\"\"Validate credit card.\"\"\"\n if not p_number or not self.__check_validate_credit_card(p_number):\n raise CreditCartdInvalid(\"Credit Card Invalid\")\n\n @staticmethod\n def __check_provider(number):\n \"\"\"Checks a credit card number and returns a matching brand name.\"\"\"\n for brand, regexp in BRANDS.items():\n if regexp.match(number):\n return brand\n return 'Not Identified'\n\n def __validate_date_expiration(self, p_value):\n try:\n\n date_format = '%m/%y'\n result = datetime.datetime.strptime(p_value, date_format)\n now = datetime.datetime.now()\n if now > result:\n raise ExpirationDateExceeded(\"Expiration Date Exceeded\")\n except ValueError:\n raise ParamInvalid(\"time data '%s' does not match format '%%m/%%y'\" % p_value)\n\n def __set_card_name(self, p_value):\n \"\"\"Set card name.\"\"\"\n if not p_value:\n raise ParamInvalid(\"Card Name can't be null.\")\n self.card_name = p_value\n\n def __set_card_number(self, p_value):\n \"\"\"Set card number.\"\"\"\n self.__validate_credit_card(p_value)\n self.card_number = p_value\n\n def __set_flag_card(self, p_value):\n \"\"\"Set Flag card.\"\"\"\n self.card_flag = self.__check_provider(p_value)\n\n def __set_card_expiration_date(self, p_value):\n \"\"\"Set card expiration.\"\"\"\n if not p_value:\n raise ParamInvalid(\"Card Expiration date can't be null.\")\n self.__validate_date_expiration(p_value)\n self.card_expiration_date = p_value\n\n def __set_card_cvv(self, p_value):\n \"\"\"Set card cvv.\"\"\"\n if not p_value:\n raise ParamInvalid(\"Card CVV invalid. %s\" % p_value)\n self.card_cvv = p_value\n\n def create(self, kwargs):\n \"\"\"Create Payment credit_card.\"\"\"\n name_buyer = kwargs.get(\"name_buyer\")\n card_name = kwargs.get(\"card_name\")\n card_number = kwargs.get(\"card_number\").strip(\" \")\n card_expiration_date = kwargs.get(\"card_expiration_date\")\n card_cvv = kwargs.get(\"card_cvv\")\n kwargs.update({\"status\": STATUS_SUCCESS})\n self.add_params_default(kwargs)\n self.__set_card_number(card_number)\n self.__set_flag_card(card_number)\n self.__set_card_cvv(card_cvv)\n self.__set_card_expiration_date(card_expiration_date)\n self.__set_card_name(card_name)\n self._commit(self)\n log.info('Credit card payment succeeded. 
%s' % name_buyer)\n return {\"card_flag\": self.card_flag,\n \"id\": self.id}\n\n def get_info_dict(self):\n \"\"\"Get Info object.\"\"\"\n default_params = super(CreditCard, self).get_info_dict()\n result = {\"card_name\": self.card_name,\n \"type\": \"Credit Card\",\n \"card_number\": self.card_number,\n \"card_flag\": self.card_flag,\n \"card_expiration_date\": self.card_expiration_date,\n \"card_cvv\": self.card_cvv}\n result.update(default_params)\n return result\n","repo_name":"laerteallan/challange","sub_path":"challenge/models/credit_card.py","file_name":"credit_card.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73426839893","text":"import re\n\n# Match one or more digits \np = re.compile(r'\\d+')\nDIR = 'C:\\\\Users\\\\alexi_000\\\\OneDrive\\\\Documents\\\\thesis\\\\code\\\\crepo\\\\TwofoldStarter\\\\Results'\nFILE = '\\\\twofold_2starters_10_40.txt'\nfile = open(DIR + FILE, \"r\")\nspaceSize = r'\\;'\ncomma = ', ' + spaceSize\nlatexFile = DIR + '\\\\starters.tex'\noutput = open(latexFile, \"w\")\nprevOrder = -1\nfor line in file:\n #print('In line ', line)\n nums = p.findall(line)\n #print(nums)\n order = int(nums[0])\n if(order != prevOrder):\n output.write('\\n')\n prevOrder = order\n \n paramSum = 0\n params = []\n i = 1\n latexCode = '$OP('\n #print('order is ', order)\n \n \n while(paramSum != order):\n #print('paramSum is ', paramSum)\n param = int(nums[i])\n paramSum += param\n params.append(param)\n i += 1\n\n starter = nums[len(params)+1:]\n #print('starter section is ', starter)\n latexCode += str(params[0])\n \n for j in range(1, len(params)):\n latexCode += comma\n latexCode += str(params[j])\n\n latexCode += '): \\:'\n start = 0\n \n for j in range(0, len(params)):\n latexCode += '('\n if(j > 0):\n start += int(params[j-1])\n\n if(int(starter[start]) == order -1):\n latexCode += '\\infty'\n else:\n latexCode += starter[start]\n \n for k in range(1, params[j]):\n vertex = starter[start + k]\n latexCode += comma\n \n if(int(vertex) == order-1):\n latexCode += '\\infty'\n else:\n latexCode += vertex\n\n latexCode += ')'\n\n latexCode += '$\\\\\\\\'\n print(latexCode)\n output.write(latexCode + '\\n')\n \n \n \n\n \n","repo_name":"aviidlee/twofold_oberwolfach","sub_path":"TwofoldStarter/src/results_to_latex.py","file_name":"results_to_latex.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"16447856049","text":"from flask import Blueprint, request\nimport os\nimport openai\nfrom pymongo import MongoClient\nfrom novellamemory.novellaGPT import NovellaGPT\n\nchat_bp = Blueprint('chat', __name__)\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client['novella']\n\nopenai.api_key = os.getenv('NOVELLA_API_KEY')\nopenai.api_base = os.getenv('NOVELLA_API_BASE')\n\ndef getBrainstormHistoryById(id = \"\"):\n chatCollection = db['chat']\n if (id == ''):\n return chatCollection.find_one()['brainstorm']['memory']\n else:\n return chatCollection.find_one({\"_id\": id})['brainstorm']['memory']\n\ndef getChapterBrainstormByTitle(title, id = \"\"):\n chapterHistory = getChapterHistoryById(id)\n print(\"title:\", title)\n selectedChapter = list(filter(lambda item: item[\"title\"] == title, chapterHistory))[0]\n return chapterHistory, selectedChapter\n\ndef getChapterHistoryById(id = \"\"):\n
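 # Assumption: the collection holds a single chat document, so an unfiltered\n # find_one() returns it, while find_one({\"_id\": id}) fetches a specific one.\n chatCollection = db['chat']\n if (id == ''):\n return 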
chatCollection.find_one()['chapters']\n else:\n return chatCollection.find_one({\"_id\": id})['chapters']\n \n\ndef summaryBrainstorm(messages, prompt = \"Summarize the story so far with a title and detailed story progress, and divide the story into 4 acts.\"):\n # summarize the story so far\n new_messages = messages + [{\"content\": prompt, \"role\" : \"user\"}]\n summaryResponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=new_messages,\n )\n return summaryResponse\n\n@chat_bp.route(\"\", methods=(\"GET\", \"POST\"))\ndef getBrainstormResponse():\n messages = getBrainstormHistoryById(id = '')\n if request.method == 'GET':\n return {\n \"memory\": messages, \n \"summary\": db['chat'].find_one()['brainstorm']['summary'],\n \"chapters\": db['chat'].find_one()['chapters']\n }, 200\n data = request.get_json()\n content = data['content']\n messages.append({\"role\": \"user\", \"content\": content})\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=messages\n )\n messages.append({\"content\": response.choices[0].message.content, \"role\" : \"assistant\"})\n db['chat'].update_one({}, { \"$set\": { \"brainstorm.memory\": messages } })\n # Add suggestion list\n messages.append({\"content\": \"Suggest the next brainstorming step in less than 40 characters\", \"role\" : \"user\"})\n suggestionResponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=messages,\n n=4\n )\n suggestionList = [suggestionResponse.choices[i].message.content for i in range(4)]\n # Summarize the story so far and save it to the db.\n summaryResponse = summaryBrainstorm(messages)\n db['chat'].update_one({}, { \"$set\": { \"brainstorm.summary\": summaryResponse.choices[0].message.content } })\n\n return {\"content\": response.choices[0].message.content, \"role\" : \"assistant\", \"suggestionList\" : suggestionList}, 200\n\ndef updateStorySummaryByChapter(selectedChapter):\n chapterHistory = db['story'].find_one({})['chapters']\n db['story'].update_one({}, { \"$set\": { \"chapters\": [{**chapter, \"description\": selectedChapter['summary']} if chapter['title'] == selectedChapter['title'] else chapter for chapter in chapterHistory] } })\n\n@chat_bp.route(\"/chapter\", methods=(\"GET\", \"POST\"))\ndef getChapterBrainstormResponse():\n data = request.get_json()\n chapterHistory, selectedChapter = getChapterBrainstormByTitle(data['title'])\n messages = selectedChapter['memory']\n\n content = data['content']\n messages.append({\"role\": \"user\", \"content\": content})\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=messages\n )\n messages.append({\"content\": response.choices[0].message.content, \"role\" : \"assistant\"})\n selectedChapter['memory'] = messages\n selectedChapter['summary'] = summaryBrainstorm(messages, prompt=\"Write a short description for this chapter.\").choices[0].message.content\n db['chat'].update_one({}, { \"$set\": { \"chapters\": [selectedChapter if chapter['title'] == selectedChapter['title'] else chapter for chapter in chapterHistory] } })\n updateStorySummaryByChapter(selectedChapter)\n\n # Add suggestion list\n messages.append({\"content\": \"Suggest the next brainstorming step for this chapter in less than 40 characters\", \"role\" : \"user\"})\n suggestionResponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=messages,\n n=4\n )\n # Collect the four suggestion variants.\n suggestionList = [suggestionResponse.choices[i].message.content for i in range(4)]\n return {\"content\": 
response.choices[0].message.content, \"role\" : \"assistant\", \"suggestionList\" : suggestionList, \"summary\": selectedChapter['summary']}, 200\n\n\n# @chat_bp.route(\"\", methods=(\"GET\", \"POST\"))\n# def getBrainstormResponse():\n# messages = getBrainstormHistoryById(id = '')\n# if request.method == 'GET':\n# return {\"memory\": messages}, 200\n# data = request.get_json()\n# content = data['content']\n# messages.append({\"role\": \"user\", \"content\": content})\n# chat = NovellaGPT()\n# response = chat.predict(content)\n# messages.append({\"content\": response, \"role\" : \"assistant\"})\n# db['chat'].update_one({}, { \"$set\": { \"memory\": messages } })\n\n# return {\"content\": response, \"role\" : \"assistant\"}, 200","repo_name":"tuyentran0500/novella","sub_path":"backend/api/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"42382121840","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport re\nimport traceback\nimport unicodedata\nimport werkzeug.exceptions\nimport werkzeug.routing\nimport werkzeug.urls\n\n# optional python-slugify import (https://github.com/un33k/python-slugify)\ntry:\n import slugify as slugify_lib\nexcept ImportError:\n slugify_lib = None\n\nimport flectra\nfrom flectra import api, models, registry, exceptions, tools, http\nfrom flectra.addons.base.models.ir_http import RequestUID, ModelConverter\nfrom flectra.addons.base.models.qweb import QWebException\nfrom flectra.http import request\nfrom flectra.osv import expression\nfrom flectra.tools import config, ustr, pycompat\n\nfrom ..geoipresolver import GeoIPResolver\n\n_logger = logging.getLogger(__name__)\n\n# global resolver (GeoIP API is thread-safe, for multithreaded workers)\n# This avoids blowing up open files limit\nflectra._geoip_resolver = None\n\n# ------------------------------------------------------------\n# Slug API\n# ------------------------------------------------------------\n\ndef _guess_mimetype(ext=False, default='text/html'):\n exts = {\n '.css': 'text/css',\n '.less': 'text/less',\n '.scss': 'text/scss',\n '.js': 'text/javascript',\n '.xml': 'text/xml',\n '.csv': 'text/csv',\n '.html': 'text/html',\n }\n return ext is not False and exts.get(ext, default) or exts\n\n\ndef slugify_one(s, max_length=0):\n \"\"\" Transform a string to a slug that can be used in a url path.\n This method will first try to do the job with python-slugify if present.\n Otherwise it will process the string by stripping leading and ending spaces,\n converting unicode chars to ascii, lowering all chars and replacing spaces\n and underscore with hyphen \"-\".\n :param s: str\n :param max_length: int\n :rtype: str\n \"\"\"\n s = ustr(s)\n if slugify_lib:\n # There are 2 different libraries; only python-slugify is supported\n try:\n return slugify_lib.slugify(s, max_length=max_length)\n except TypeError:\n pass\n uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')\n slug_str = re.sub(r'[\\W_]', ' ', uni).strip().lower()\n slug_str = re.sub(r'[-\\s]+', '-', slug_str)\n return slug_str[:max_length] if max_length > 0 else slug_str\n\n\ndef slugify(s, max_length=0, path=False):\n if not path:\n return slugify_one(s, max_length=max_length)\n else:\n res = []\n for u in s.split('/'):\n if slugify_one(u, max_length=max_length) != '':\n res.append(slugify_one(u, max_length=max_length))\n # check if supported extension\n path_no_ext, ext = 
os.path.splitext(s)\n if ext and ext in _guess_mimetype():\n res[-1] = slugify_one(path_no_ext) + ext\n return '/'.join(res)\n\n\ndef slug(value):\n if isinstance(value, models.BaseModel):\n if not value.id:\n raise ValueError(\"Cannot slug non-existent record %s\" % value)\n # [(id, name)] = value.name_get()\n identifier, name = value.id, getattr(value, 'seo_name', False) or value.display_name\n else:\n # assume name_search result tuple\n identifier, name = value\n slugname = slugify(name or '').strip().strip('-')\n if not slugname:\n return str(identifier)\n return \"%s-%d\" % (slugname, identifier)\n\n\n# NOTE: as the pattern is used as is for the ModelConverter (ir_http.py), do not use any flags\n_UNSLUG_RE = re.compile(r'(?:(\\w{1,2}|\\w[A-Za-z0-9-_]+?\\w)-)?(-?\\d+)(?=$|/)')\n\n\ndef unslug(s):\n \"\"\"Extract slug and id from a string.\n Always return a 2-tuple (str|None, int|None)\n \"\"\"\n m = _UNSLUG_RE.match(s)\n if not m:\n return None, None\n return m.group(1), int(m.group(2))\n\n\ndef unslug_url(s):\n \"\"\" From \"/blog/my-super-blog-1\" to \"/blog/1\" \"\"\"\n parts = s.split('/')\n if parts:\n unslug_val = unslug(parts[-1])\n if unslug_val[1]:\n parts[-1] = str(unslug_val[1])\n return '/'.join(parts)\n return s\n\n\n# ------------------------------------------------------------\n# Language tools\n# ------------------------------------------------------------\n\ndef url_lang(path_or_uri, lang_code=None):\n ''' Given a relative URL, make it absolute and add the required lang or\n remove useless lang.\n Nothing will be done for absolute or invalid URL.\n If there is only one language installed, the lang will not be handled\n unless forced with `lang` parameter.\n\n :param lang_code: Must be the lang `code`. It could also be something\n else, such as `'[lang]'` (used for url_return).\n '''\n Lang = request.env['res.lang']\n location = pycompat.to_text(path_or_uri).strip()\n force_lang = lang_code is not None\n try:\n url = werkzeug.urls.url_parse(location)\n except ValueError:\n # e.g. Invalid IPv6 URL, `werkzeug.urls.url_parse('http://]')`\n url = False\n # relative URL with either a path or a force_lang\n if url and not url.netloc and not url.scheme and (url.path or force_lang):\n location = werkzeug.urls.url_join(request.httprequest.path, location)\n lang_url_codes = [url_code for _, url_code, *_ in Lang.get_available()]\n lang_code = pycompat.to_text(lang_code or request.context['lang'])\n lang_url_code = Lang._lang_code_to_urlcode(lang_code)\n lang_url_code = lang_url_code if lang_url_code in lang_url_codes else lang_code\n\n if (len(lang_url_codes) > 1 or force_lang) and is_multilang_url(location, lang_url_codes):\n ps = location.split(u'/')\n default_lg = request.env['ir.http']._get_default_lang()\n if ps[1] in lang_url_codes:\n # Replace the language only if we explicitly provide a language to url_for\n if force_lang:\n ps[1] = lang_url_code\n # Remove the default language unless it's explicitly provided\n elif ps[1] == default_lg.url_code:\n ps.pop(1)\n # Insert the context language or the provided language\n elif lang_url_code != default_lg.url_code or force_lang:\n ps.insert(1, lang_url_code)\n location = u'/'.join(ps)\n return location\n\n\ndef url_for(url_from, lang_code=None, no_rewrite=False):\n ''' Return the url with the rewriting applied.\n Nothing will be done for absolute URL, invalid URL, or short URL from 1 char.\n\n :param url_from: The URL to convert.\n :param lang_code: Must be the lang `code`. 
It could also be something\n else, such as `'[lang]'` (used for url_return).\n :param no_rewrite: don't try to match route with website.rewrite.\n '''\n new_url = False\n\n # don't try to match route if we know that no rewrite has been loaded.\n routing = getattr(request, 'website_routing', None) # not modular, but not overridable\n if not getattr(request.env['ir.http'], '_rewrite_len', {}).get(routing):\n no_rewrite = True\n\n path, _, qs = (url_from or '').partition('?')\n\n if (not no_rewrite and path and (\n len(path) > 1\n and path.startswith('/')\n and '/static/' not in path\n and not path.startswith('/web/')\n )):\n new_url = request.env['ir.http'].url_rewrite(path)\n new_url = new_url if not qs else new_url + '?%s' % qs\n\n return url_lang(new_url or url_from, lang_code=lang_code)\n\n\ndef is_multilang_url(local_url, lang_url_codes=None):\n ''' Check if the given URL content is supposed to be translated.\n To be considered as translatable, the URL should either:\n 1. Match a POST (non-GET actually) controller that is `website=True` and\n either `multilang` specified to True or if not specified, with `type='http'`.\n 2. If not matching 1., everything not under /static/ or /web/ will be translatable\n '''\n if not lang_url_codes:\n lang_url_codes = [url_code for _, url_code, *_ in request.env['res.lang'].get_available()]\n spath = local_url.split('/')\n # if a language is already in the path, remove it\n if spath[1] in lang_url_codes:\n spath.pop(1)\n local_url = '/'.join(spath)\n\n url = local_url.partition('#')[0].split('?')\n path = url[0]\n\n # Consider /static/ and /web/ files as non-multilang\n if '/static/' in path or path.startswith('/web/'):\n return False\n\n query_string = url[1] if len(url) > 1 else None\n\n # Try to match an endpoint in werkzeug's routing table\n try:\n func = request.env['ir.http']._get_endpoint_qargs(path, query_args=query_string)\n # /page/xxx has no endpoint/func but is multilang\n return (not func or (\n func.routing.get('website', False)\n and func.routing.get('multilang', func.routing['type'] == 'http')\n ))\n except Exception as exception:\n _logger.warning(exception)\n return False\n\n\nclass ModelConverter(ModelConverter):\n\n def __init__(self, url_map, model=False, domain='[]'):\n super(ModelConverter, self).__init__(url_map, model)\n self.domain = domain\n self.regex = _UNSLUG_RE.pattern\n\n def to_url(self, value):\n return slug(value)\n\n def to_python(self, value):\n matching = re.match(self.regex, value)\n _uid = RequestUID(value=value, match=matching, converter=self)\n record_id = int(matching.group(2))\n env = api.Environment(request.cr, _uid, request.context)\n if record_id < 0:\n # limited support for negative IDs due to our slug pattern, assume abs() if not found\n if not env[self.model].browse(record_id).exists():\n record_id = abs(record_id)\n return env[self.model].with_context(_converter_value=value).browse(record_id)\n\n\nclass IrHttp(models.AbstractModel):\n _inherit = ['ir.http']\n\n rerouting_limit = 10\n\n @classmethod\n def _get_converters(cls):\n \"\"\" Get the list of converters for the custom url patterns werkzeug needs to\n match a Rule. 
This override adds the website ones.\n \"\"\"\n return dict(\n super(IrHttp, cls)._get_converters(),\n model=ModelConverter,\n )\n\n @classmethod\n def _get_default_lang(cls):\n lang_code = request.env['ir.default'].sudo().get('res.partner', 'lang')\n if lang_code:\n return request.env['res.lang']._lang_get(lang_code)\n return request.env['res.lang'].search([], limit=1)\n\n @api.model\n def get_frontend_session_info(self):\n session_info = super(IrHttp, self).get_frontend_session_info()\n\n IrHttpModel = request.env['ir.http'].sudo()\n modules = IrHttpModel.get_translation_frontend_modules()\n user_context = request.session.get_context() if request.session.uid else {}\n lang = user_context.get('lang')\n translation_hash = request.env['ir.translation'].get_web_translations_hash(modules, lang)\n\n session_info.update({\n 'translationURL': '/website/translations',\n 'cache_hashes': {\n 'translations': translation_hash,\n },\n })\n return session_info\n\n @api.model\n def get_translation_frontend_modules(self):\n Modules = request.env['ir.module.module'].sudo()\n extra_modules_domain = self._get_translation_frontend_modules_domain()\n extra_modules_name = self._get_translation_frontend_modules_name()\n if extra_modules_domain:\n new = Modules.search(\n expression.AND([extra_modules_domain, [('state', '=', 'installed')]])\n ).mapped('name')\n extra_modules_name += new\n return extra_modules_name\n\n @classmethod\n def _get_translation_frontend_modules_domain(cls):\n \"\"\" Return a domain to list the modules adding web-translations and\n dynamic resources that may be used in frontend views\n \"\"\"\n return []\n\n @classmethod\n def _get_translation_frontend_modules_name(cls):\n \"\"\" Return a list of module names where web-translations and\n dynamic resources may be used in frontend views\n \"\"\"\n return ['web']\n\n bots = \"bot|crawl|slurp|spider|curl|wget|facebookexternalhit\".split(\"|\")\n\n @classmethod\n def is_a_bot(cls):\n # We deliberately don't use regexp and ustr here\n # timeit has been done to check the optimum method\n user_agent = request.httprequest.environ.get('HTTP_USER_AGENT', '').lower()\n try:\n return any(bot in user_agent for bot in cls.bots)\n except UnicodeDecodeError:\n return any(bot in user_agent.encode('ascii', 'ignore') for bot in cls.bots)\n\n @classmethod\n def _get_frontend_langs(cls):\n return [code for code, _ in request.env['res.lang'].get_installed()]\n\n @classmethod\n def get_nearest_lang(cls, lang_code):\n \"\"\" Try to find a similar lang. 
Eg: fr_BE and fr_FR\n :param lang_code: the lang `code` (en_US)\n \"\"\"\n if not lang_code:\n return False\n short_match = False\n short = lang_code.partition('_')[0]\n for code in cls._get_frontend_langs():\n if code == lang_code:\n return code\n if not short_match and code.startswith(short):\n short_match = code\n return short_match\n\n @classmethod\n def _geoip_setup_resolver(cls):\n # Lazy init of GeoIP resolver\n if flectra._geoip_resolver is not None:\n return\n geofile = config.get('geoip_database')\n try:\n flectra._geoip_resolver = GeoIPResolver.open(geofile) or False\n except Exception as e:\n _logger.warning('Cannot load GeoIP: %s', ustr(e))\n\n @classmethod\n def _geoip_resolve(cls):\n if 'geoip' not in request.session:\n record = {}\n if flectra._geoip_resolver and request.httprequest.remote_addr:\n record = flectra._geoip_resolver.resolve(request.httprequest.remote_addr) or {}\n request.session['geoip'] = record\n\n @classmethod\n def _add_dispatch_parameters(cls, func):\n Lang = request.env['res.lang']\n # only called for is_frontend request\n if request.routing_iteration == 1:\n context = dict(request.context)\n path = request.httprequest.path.split('/')\n is_a_bot = cls.is_a_bot()\n\n lang_codes = [code for code, *_ in Lang.get_available()]\n nearest_lang = not func and cls.get_nearest_lang(Lang._lang_get_code(path[1]))\n cook_lang = request.httprequest.cookies.get('frontend_lang')\n cook_lang = cook_lang in lang_codes and cook_lang\n\n if nearest_lang:\n lang = Lang._lang_get(nearest_lang)\n else:\n nearest_ctx_lg = not is_a_bot and cls.get_nearest_lang(request.env.context.get('lang'))\n nearest_ctx_lg = nearest_ctx_lg in lang_codes and nearest_ctx_lg\n preferred_lang = Lang._lang_get(cook_lang or nearest_ctx_lg)\n lang = preferred_lang or cls._get_default_lang()\n\n request.lang = lang\n context['lang'] = lang._get_cached('code')\n\n # bind modified context\n request.context = context\n\n @classmethod\n def _dispatch(cls):\n \"\"\" Before executing the endpoint method, add website params on request, such as\n - current website (record)\n - multilang support (set on cookies)\n - geoip dict data are added in the session\n Then follow the parent dispatching.\n Reminder: Do not use `request.env` before the authentication phase, otherwise the env\n set on request will be created with uid=None (and it is a lazy property)\n \"\"\"\n request.routing_iteration = getattr(request, 'routing_iteration', 0) + 1\n\n func = None\n routing_error = None\n\n # handle // in url\n if request.httprequest.method == 'GET' and '//' in request.httprequest.path:\n new_url = request.httprequest.path.replace('//', '/') + '?' + request.httprequest.query_string.decode('utf-8')\n return werkzeug.utils.redirect(new_url, 301)\n\n # locate the controller method\n try:\n rule, arguments = cls._match(request.httprequest.path)\n func = rule.endpoint\n request.is_frontend = func.routing.get('website', False)\n except werkzeug.exceptions.NotFound as e:\n # either we have a language-prefixed route, or a real 404\n # in all cases, website processes them except if the second element is static\n # Checking static first avoids generating an expensive 404 web page, since\n # most of the time the browser is just loading a nonexistent asset or image; a standard 404 is enough.\n # An earlier check would be difficult since we don't want to break data modules\n path_components = request.httprequest.path.split('/')\n request.is_frontend = len(path_components) < 3 or path_components[2] != 'static' or not '.' 
in path_components[-1]\n routing_error = e\n\n request.is_frontend_multilang = not func or (func and request.is_frontend and func.routing.get('multilang', func.routing['type'] == 'http'))\n\n # check authentication level\n try:\n if func:\n cls._authenticate(func)\n elif request.uid is None and request.is_frontend:\n cls._auth_method_public()\n except Exception as e:\n return cls._handle_exception(e)\n\n cls._geoip_setup_resolver()\n cls._geoip_resolve()\n\n # For website routes (only), add website params on `request`\n if request.is_frontend:\n request.redirect = lambda url, code=302: werkzeug.utils.redirect(url_for(url), code)\n\n cls._add_dispatch_parameters(func)\n\n path = request.httprequest.path.split('/')\n default_lg_id = cls._get_default_lang()\n if request.routing_iteration == 1:\n is_a_bot = cls.is_a_bot()\n nearest_lang = not func and cls.get_nearest_lang(request.env['res.lang']._lang_get_code(path[1]))\n url_lg = nearest_lang and path[1]\n\n # The default lang should never be in the URL, and a wrong lang\n # should never be in the URL.\n wrong_url_lg = url_lg and (url_lg != request.lang.url_code or url_lg == default_lg_id.url_code)\n # The lang is missing from the URL if multi lang is enabled for\n # the route and the current lang is not the default lang.\n # POST requests are excluded from this condition.\n missing_url_lg = not url_lg and request.is_frontend_multilang and request.lang != default_lg_id and request.httprequest.method != 'POST'\n # Bots should never be redirected when the lang is missing\n # because it is the only way for them to index the default lang.\n if wrong_url_lg or (missing_url_lg and not is_a_bot):\n if url_lg:\n path.pop(1)\n if request.lang != default_lg_id:\n path.insert(1, request.lang.url_code)\n path = '/'.join(path) or '/'\n routing_error = None\n redirect = request.redirect(path + '?' + request.httprequest.query_string.decode('utf-8'))\n redirect.set_cookie('frontend_lang', request.lang.code)\n return redirect\n elif url_lg:\n request.uid = None\n path.pop(1)\n routing_error = None\n return cls.reroute('/'.join(path) or '/')\n elif missing_url_lg and is_a_bot:\n # Ensure that if the URL without lang is not redirected, the\n # current lang is indeed the default lang, because it is the\n # lang that bots should index in that case.\n request.lang = default_lg_id\n request.context = dict(request.context, lang=default_lg_id.code)\n\n if request.lang == default_lg_id:\n context = dict(request.context)\n context['edit_translations'] = False\n request.context = context\n\n if routing_error:\n return cls._handle_exception(routing_error)\n\n # removed cache for auth public\n result = super(IrHttp, cls)._dispatch()\n\n cook_lang = request.httprequest.cookies.get('frontend_lang')\n if request.is_frontend and cook_lang != request.lang.code and hasattr(result, 'set_cookie'):\n result.set_cookie('frontend_lang', request.lang.code)\n\n return result\n\n @classmethod\n def reroute(cls, path):\n if isinstance(path, str):\n path = path.encode(\"utf-8\")\n path = path.decode(\"latin1\", \"replace\")\n\n if not hasattr(request, 'rerouting'):\n request.rerouting = [request.httprequest.path]\n if path in request.rerouting:\n raise Exception(\"Rerouting loop is forbidden\")\n request.rerouting.append(path)\n if len(request.rerouting) > cls.rerouting_limit:\n raise Exception(\"Rerouting limit exceeded\")\n request.httprequest.environ['PATH_INFO'] = path\n # void werkzeug cached_property. 
TODO: find a proper way to do this\n for key in ('full_path', 'url', 'base_url'):\n request.httprequest.__dict__.pop(key, None)\n # since werkzeug 2.0 `path` became an attribute and is not a cached property anymore\n if hasattr(type(request.httprequest), 'path'): # cached property\n request.httprequest.__dict__.pop('path', None)\n else: # direct attribute\n request.httprequest.path = '/' + path.lstrip('/')\n\n return cls._dispatch()\n\n @classmethod\n def _postprocess_args(cls, arguments, rule):\n super(IrHttp, cls)._postprocess_args(arguments, rule)\n\n try:\n _, path = rule.build(arguments)\n assert path is not None\n except flectra.exceptions.MissingError:\n return cls._handle_exception(werkzeug.exceptions.NotFound())\n except Exception as e:\n return cls._handle_exception(e)\n\n if getattr(request, 'is_frontend_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):\n generated_path = werkzeug.urls.url_unquote_plus(path)\n current_path = werkzeug.urls.url_unquote_plus(request.httprequest.path)\n if generated_path != current_path:\n if request.lang != cls._get_default_lang():\n path = '/' + request.lang.url_code + path\n if request.httprequest.query_string:\n path += '?' + request.httprequest.query_string.decode('utf-8')\n return werkzeug.utils.redirect(path, code=301)\n\n @classmethod\n def _get_exception_code_values(cls, exception):\n \"\"\" Return a tuple with the error code followed by the values matching the exception\"\"\"\n code = 500 # default code\n values = dict(\n exception=exception,\n traceback=traceback.format_exc(),\n )\n if isinstance(exception, exceptions.UserError):\n values['error_message'] = exception.args[0]\n code = 400\n if isinstance(exception, exceptions.AccessError):\n code = 403\n\n elif isinstance(exception, QWebException):\n values.update(qweb_exception=exception)\n\n if type(exception.error) == exceptions.AccessError:\n code = 403\n\n elif isinstance(exception, werkzeug.exceptions.HTTPException):\n code = exception.code\n\n values.update(\n status_message=werkzeug.http.HTTP_STATUS_CODES.get(code, ''),\n status_code=code,\n )\n\n return (code, values)\n\n @classmethod\n def _get_values_500_error(cls, env, values, exception):\n values['view'] = env[\"ir.ui.view\"]\n return values\n\n @classmethod\n def _get_error_html(cls, env, code, values):\n return code, env['ir.ui.view']._render_template('http_routing.%s' % code, values)\n\n @classmethod\n def _handle_exception(cls, exception):\n is_frontend_request = bool(getattr(request, 'is_frontend', False))\n if not is_frontend_request:\n # Don't touch non-frontend requests' exception handling\n return super(IrHttp, cls)._handle_exception(exception)\n try:\n response = super(IrHttp, cls)._handle_exception(exception)\n\n if isinstance(response, Exception):\n exception = response\n else:\n # if the parent explicitly returns a plain response, then we don't touch it\n return response\n except Exception as e:\n if 'werkzeug' in config['dev_mode']:\n raise e\n exception = e\n\n code, values = cls._get_exception_code_values(exception)\n\n if code is None:\n # Hand-crafted HTTPException likely coming from abort(),\n # usually for a redirect response -> return it directly\n return exception\n\n if not request.uid:\n cls._auth_method_public()\n\n # We rollback the current transaction before initializing a new\n # cursor to avoid potential deadlocks.\n\n # If the current (failed) transaction was holding a lock, the new\n # cursor might have to wait for this lock to be released further\n # down the line. 
However, this will only happen after the\n # request is done (and in fact it won't happen). As a result, the\n # current thread/worker is frozen until its timeout is reached.\n\n # So rolling back the transaction will release any potential lock\n # and, since we are in a case where an exception was raised, the\n # transaction shouldn't be committed in the first place.\n request.env.cr.rollback()\n\n with registry(request.env.cr.dbname).cursor() as cr:\n env = api.Environment(cr, request.uid, request.env.context)\n if code == 500:\n _logger.error(\"500 Internal Server Error:\\n\\n%s\", values['traceback'])\n values = cls._get_values_500_error(env, values, exception)\n elif code == 403:\n _logger.warning(\"403 Forbidden:\\n\\n%s\", values['traceback'])\n elif code == 400:\n _logger.warning(\"400 Bad Request:\\n\\n%s\", values['traceback'])\n try:\n code, html = cls._get_error_html(env, code, values)\n except Exception:\n code, html = 418, env['ir.ui.view']._render_template('http_routing.http_error', values)\n\n return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')\n\n @api.model\n @tools.ormcache('path')\n def url_rewrite(self, path):\n new_url = False\n router = http.root.get_db_router(request.db).bind('')\n try:\n _ = router.match(path, method='POST')\n except werkzeug.exceptions.MethodNotAllowed:\n _ = router.match(path, method='GET')\n except werkzeug.routing.RequestRedirect as e:\n # get path from http://{path}?{current query string}\n new_url = e.new_url.split('?')[0][7:]\n except werkzeug.exceptions.NotFound:\n new_url = path\n except Exception as e:\n raise e\n\n return new_url or path\n\n # merge with def url_rewrite in master/14.1\n @api.model\n @tools.cache('path', 'query_args')\n def _get_endpoint_qargs(self, path, query_args=None):\n router = http.root.get_db_router(request.db).bind('')\n endpoint = False\n try:\n endpoint = router.match(path, method='POST', query_args=query_args)\n except werkzeug.exceptions.MethodNotAllowed:\n endpoint = router.match(path, method='GET', query_args=query_args)\n except werkzeug.routing.RequestRedirect as e:\n new_url = e.new_url[7:] # remove scheme\n assert new_url != path\n endpoint = self._get_endpoint_qargs(new_url, query_args)\n endpoint = endpoint and [endpoint]\n except werkzeug.exceptions.NotFound:\n pass # endpoint = False\n return endpoint and endpoint[0]\n","repo_name":"flectra-hq/flectra","sub_path":"addons/http_routing/models/ir_http.py","file_name":"ir_http.py","file_ext":"py","file_size_in_byte":28554,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"5856446348","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nn = 40000\nZ = np.random.randn(n)\n\nplt.step(sorted(Z), np.arange(1, n + 1) / float(n), label=\"Gaussian\")\n\n# ============================ Question 10.b ============================\nfor k in [1, 8, 64, 512]:\n Zk = np.sum(np.sign(np.random.randn(n, k)) * np.sqrt(1.0 / k), axis=1)\n plt.step(sorted(Zk), np.arange(1, n + 1) / float(n), label=f\"k = {k}\")\n# =======================================================================\n\nplt.xlim(-3,3)\nplt.xlabel(\"Observations\")\nplt.ylabel(\"Probability\")\nplt.legend()\nplt.show()\n","repo_name":"yarengokhn/machineLearningExamples","sub_path":"HW0/q10.py","file_name":"q10.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7717555131","text":"import numpy as 
np\n\n\ndef ErrorRateAt95Recall(labels, scores):\n distances = (1.0 / (scores + 1e-08))\n recall_point = 0.95\n labels = labels[np.argsort(distances)]\n threshold_index = np.argmax((np.cumsum(labels) >= (recall_point * np.sum(labels))))\n FP = np.sum((labels[:threshold_index] == 0))\n TN = np.sum((labels[threshold_index:] == 0))\n return (float(FP) / float((FP + TN)))\n","repo_name":"menna161/API-Wizard","sub_path":"Dataset/Dataset/np.argmax/snippets/snippet44610.py","file_name":"snippet44610.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34147458802","text":"import requests\nimport json\nfrom pprint import pprint\nfrom datetime import datetime\n\nclass PublicCowinAPIsWrapper():\n\n def __init__(self):\n self.api_server = \"https://cdn-api.co-vin.in/api/\"\n self.headers = {\n 'accept': 'application/json',\n 'content-Type': 'application/json',\n 'authority': 'cdn-api.co-vin.in',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',\n 'origin': 'https://apisetu.gov.in',\n 'referer': 'https://apisetu.gov.in/public/marketplace/api/cowin',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n 'sec-ch-ua': ' Not A;Brand\";v=\"99\", \"Chromium\";v=\"90\", \"Google Chrome\";v=\"90\"'\n }\n self.txnId = None\n self.mobile = '9545665253'\n #self.userOTP = None\n self.today_date = datetime.now().strftime('%d-%m-%Y')\n \n def makeRequest(self, api_url_path, data={}, method='GET', raiseOnNotOK=True):\n\n url=self.api_server+api_url_path\n print('Hitting url: %s'%url)\n\n if method == 'GET':\n response = requests.get(url, headers=self.headers, params=data)\n elif method == 'POST':\n data = json.dumps(data) if data else {}\n response = requests.post(url, headers=self.headers, data=data)\n else:\n raise Exception('Unsupported request method: %s received.'%method)\n\n if response.status_code != 200:\n if raiseOnNotOK:\n raise Exception('Response Code:%s, Response: %s'%(response.status_code, response.text))\n else:\n return response.text\n response = response.json()\n if 'txnId' in response:\n self.txnId = response['txnId']\n return response\n\n def generateOTP(self):\n api_url_path = 'v2/auth/public/generateOTP'\n data = {'mobile': self.mobile}\n response = self.makeRequest(api_url_path, data=data, method='POST')\n #pprint(response)\n return response\n\n def confirmOTP(self, otp):\n api_url_path = 'v2/auth/public/confirmOTP'\n data = {'otp': otp, 'txnId': self.txnId}\n print('Sending data: %s'%data)\n response = self.makeRequest(api_url_path, data=data, method='POST')\n #pprint(response)\n return response\n\n def getStates(self):\n api_url_path = 'v2/admin/location/states'\n response = self.makeRequest(api_url_path, data={}, method='GET')\n\n ## List state with IDs & pretty prints them as well.\n states = response['states']\n print(states)\n state_names = [state['state_name'] for state in states]\n longest_state = max(state_names, key=lambda x:len(x))\n #print('Longest state: %s'%longest_state)\n print('State\\t\\t\\t\\tStateID')\n print('')\n print('-'*50)\n for state in states:\n state_name = state['state_name']\n state_id = state['state_id']\n row = '%s%s\\t%s'%(state_name, (len(longest_state) - len(state_name))*' ', state_id)\n print(row)\n return response\n\n def getDistricts(self, state_id):\n api_url_path = 
'v2/admin/location/districts/%s'%state_id\n response = self.makeRequest(api_url_path, data={}, method='GET')\n districts = response['districts']\n district_names = [district['district_name'] for district in districts]\n longest_district = max(district_names, key=lambda x:len(x))\n print('Longest district_name: %s'%longest_district)\n\n print('District\\tDistrict ID')\n print('')\n print('-'*50)\n for district in districts:\n district_name = district['district_name']\n district_id = district['district_id']\n row = '%s%s\\t%s'%(district_name, (len(longest_district) - len(district_name))*' ', district_id)\n print(row)\n return response\n\n def _prettyPrintSessionData(self, session):\n session_keys = ['name', 'address', 'available_capacity', 'available_capacity_dose1', 'available_capacity_dose2', 'slots']\n print('-'*50)\n data = {}\n for key in session_keys:\n if key == 'slots':\n data[key] = ', '.join(session[key])\n else:\n data[key] = session[key]\n pprint(data)\n print('-'*50)\n\n def _prettyPrintCentersData(self, center):\n center_keys = ['name', 'address']\n session_keys = ['available_capacity_dose1', 'available_capacity_dose2', 'slots', 'date']\n data = {}\n for key in center_keys:\n data[key] = center[key]\n sessions = center['sessions']\n pprint(data)\n for session in sessions:\n pprint('Date: %s, Dose1: %s, Dose2: %s'%(session['date'], session['available_capacity_dose1'], session['available_capacity_dose2']))\n\n def getVaccinationSessionsByDistrict(self, district_id, printAvailableSessions=False):\n api_url_path = 'v2/appointment/sessions/public/findByDistrict'\n data = {'district_id': district_id, 'date': self.today_date}\n response = self.makeRequest(api_url_path, data=data, method='GET')\n sessions = response['sessions']\n if printAvailableSessions:\n for session in sessions:\n if any([session['available_capacity_dose1'], session['available_capacity_dose2']]):\n self._prettyPrintSessionData(session)\n return response\n\n def getVaccinationSessionsByPIN(self, pin_code, printAvailableSessions=False):\n api_url_path = 'v2/appointment/sessions/public/findByPin'\n data = {'pincode': pin_code, 'date': self.today_date}\n response = self.makeRequest(api_url_path, data=data, method='GET')\n sessions = response['sessions']\n if printAvailableSessions:\n for session in sessions:\n if any([session['available_capacity_dose1'], session['available_capacity_dose2']]):\n self._prettyPrintSessionData(session)\n return response \n\n def getVaccinationSessionsCalendarByPIN(self, pin_code):\n api_url_path = 'v2/appointment/sessions/public/calendarByPin'\n data = {'pincode': pin_code, 'date': self.today_date}\n response = self.makeRequest(api_url_path, data=data, method='GET')\n centers = response['centers']\n for center in centers:\n self._prettyPrintCentersData(center)\n print('')\n return response\n\n def getVaccinationSessionsCalendarByDistrict(self, district_id):\n api_url_path = 'v2/appointment/sessions/public/calendarByDistrict'\n data = {'district_id': district_id, 'date': self.today_date}\n response = self.makeRequest(api_url_path, data=data, method='GET')\n centers = response['centers']\n for center in centers:\n self._prettyPrintCentersData(center)\n print('')\n return response \n\n ## Orchestrates the process.\n def go(self):\n \n \"\"\"\n response = self.generateOTP()\n print('OTP successfully sent.')\n\n userOtp = input('Please enter OTP received on mobile:%s ->'%self.mobile)\n response = self.confirmOTP(otp=userOtp)\n print(response)\n \"\"\"\n search_input = int(input('Search by: 
\n 1. PIN \\n 2. State & District\\n Enter 1 or 2\\n'))\n pprint(search_input)\n if search_input == 2:\n response = self.getStates()\n states = response['states']\n state_ids = [state['state_id'] for state in states]\n while True:\n state_id = int(input('Please enter state_id from the above list: '))\n if state_id not in state_ids:\n print('Not a valid stateID: %s. Please re-enter'%state_id)\n else:\n self.state_id = state_id\n break\n response = self.getDistricts(self.state_id)\n districts = response['districts']\n district_ids = [district['district_id'] for district in districts]\n while True:\n district_id = int(input('Please enter district_id from the above list: '))\n if district_id not in district_ids:\n print('Not a valid districtID: %s. Please re-enter'%district_id)\n else:\n self.district_id = district_id\n break\n\n print('Received districtID:%s'%(self.district_id))\n self.getVaccinationSessionsByDistrict(district_id = self.district_id, printAvailableSessions=True)\n #self.getVaccinationSessionsCalendarByDistrict(district_id = self.district_id)\n elif search_input == 1:\n pin_code = input('Enter Pin code: ')\n self.getVaccinationSessionsByPIN(pin_code=pin_code, printAvailableSessions=True)\n #self.getVaccinationSessionsCalendarByPIN(pin_code=pin_code)\n else:\n raise Exception('Invalid search Input: %s'%search_input)\n\ncowin = PublicCowinAPIsWrapper()\n#cowin.district_id = 363\ncowin.go()","repo_name":"GauravKK08/cowin-vaccine-availability","sub_path":"cowin_public_apis.py","file_name":"cowin_public_apis.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"6445938470","text":"from fastapi import FastAPI, Request, HTTPException\nimport uvicorn\nimport pandas as pd\nimport pickle\nimport json\n\n\napp = FastAPI()\n\n\ndef load_pipeline(path: str):\n \"\"\"\n This is a helper function to load the pipeline artefacts.\n \"\"\"\n with open(path, \"rb\") as f:\n return pickle.load(f)\n\n\npipeline = load_pipeline(\"/opt/ml/model/pipeline.pkl\")\n\n\n@app.get(\"/ping\")\ndef read_ping():\n \"\"\"\n This function is for ping health check of the service.\n \"\"\"\n return {\"ping\": \"pong\"}\n\n\n@app.post(\"/invocations\")\nasync def invocations(request: Request):\n \"\"\"\n This function predicts the target value of the incoming data point\n and returns the prediction.\n \"\"\"\n try:\n body = await request.body()\n data = body.decode(\"utf-8\")\n data = json.loads(data)\n data_df = pd.DataFrame(data, index=[0])\n prediction = pipeline.predict(data_df)\n return {\"Survived\": int(prediction[0])}\n\n except UnicodeDecodeError as e:\n raise HTTPException(status_code=400, detail=f\"Unicode decode error: {e}, Raw body: {body}\")\n\n except Exception as e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8080)\n","repo_name":"ashotnersisyan-lab/Homeworks","sub_path":"7.Deployment_of_Model_Serving_Endpoint_to_the_Cloud_(AWS)/online_deployment_aws/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"28459039684","text":"import praw\nreddit = praw.Reddit('bot')\n\nimport time\nimport datetime\n\nurl = \"https://old.reddit.com/r/cs40_2022fall/comments/yoc6la/rcs40_2022fall_lounge/\"\nsubmission = reddit.submission(url=url)\n\nfor i in range(1000000):\n 
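# Assumption: Reddit throttles rapid commenting; the except branch below backs\n # off for a few seconds whenever PRAW raises an APIException.\n 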
print(datetime.datetime.now(), ': made a comment, i=',i)\n try:\n submission.comments[0].reply('this is a reply to a comment')\n except praw.exceptions.APIException:\n print('sleeping for 5 seconds')\n time.sleep(5)\n\n #time.sleep(5)\n","repo_name":"ben-smith23/cmc-csci040","sub_path":"topic_10_Python_RedditBots/praw_posting.py","file_name":"praw_posting.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"19482493537","text":"import os\n#import tkinter\nfrom tkinter import *\nimport subprocess\nfrom utils import genHash\nfrom face_api import AzureAPI\n\nclass GUI_prototype(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.grid()\n self.createWidgets()\n self.myAPI = AzureAPI()\n self.UserId = None \n\n def Register(self):\n if self.UserId is None:\n os.system(\"python3 camera_actual.py\")\n for i in range(1, 10):\n temp_name = \"{}_{:.0f}.jpg\".format('user1/user_photo', i)\n faceid = self.myAPI.GetFaceId(temp_name)\n if faceid == \"\":\n return \"Face Not Detected!\"\n if faceid == \"api_error\":\n return \"API connect Fail!\"\n self.UserId = faceid\n return \"Face saved!\"\n else: \n return \"You are not the owner!\"\n\n def Checkout(self):\n if self.UserId is None:\n return \"Have not registered yet!\"\n os.system(\"python3 camera_actual.py\") \n for i in range(1, 10):\n temp_name = \"{}_{:.0f}.jpg\".format('user1/user_photo', 11-i)\n cur_id = self.myAPI.GetFaceId(temp_name)\n os.remove(temp_name)\n if cur_id == \"\":\n return \"Face Not Detected!\"\n if cur_id == \"api_error\":\n return \"API connect Fail!\"\n if self.myAPI.VerifyFaceId(self.UserId, cur_id):\n self.UserId = None\n return \"Checkout Successfully!\"\n else:\n return \"Verify fail. 
Please try again!\"\n\n def register(self):\n self.displayText[\"text\"] = self.Register()\n\n def checkout(self):\n self.displayText[\"text\"] = self.Checkout()\n\n def createWidgets(self):\n self.register_button = Button(self)\n self.register_button[\"text\"] = \"Register\"\n self.register_button.grid(row=2, column=1)\n self.register_button[\"command\"] = self.register\n \n self.start_button = Button(self)\n self.start_button[\"text\"] = \"start chase\"\n self.start_button.grid(row=2, column=2)\n #self.start_button[\"command\"] = \n \n self.checkout_button = Button(self)\n self.checkout_button[\"text\"] = \"Checkout\"\n self.checkout_button.grid(row=2, column=3)\n self.checkout_button[\"command\"] = self.checkout\n\n self.displayText = Label(self)\n self.displayText[\"text\"] = \"Copyright NTUME fishfanfan\"\n self.displayText.grid(row=3, column=0, columnspan=7)\n \nif __name__ == '__main__':\n root = Tk(className=\"Face chase\")\n app = GUI_prototype(master=root)\n app.mainloop()","repo_name":"B05208038/MakeNTU_2019_G27","sub_path":"face_recog/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"9656717094","text":"class Node:\n\n def __init__(self, name: str):\n self.name: str = name\n self.edges: list[tuple[str, int]] = []\n self.heuristic: int = 0\n self.prev: Node = None\n self.estimated_cost: int = 0\n self.true_cost: int = 0\n self.path_cost: int = 0 # cost to get to this node from prev\n\n def add(self, dest: str, cost: int):\n edge = (dest, cost)\n self.edges.append(edge)\n","repo_name":"ben8622/cse5360-ai","sub_path":"task1/classes/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"10577732670","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nimport cv2\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\nrecognizer = cv2.face.createLBPHFaceRecognizer()\nrecognizer.load(\"trainner.yml\")\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n #Capture frame by frame\n ret, frame = cap.read()\n \n #converting the image to gray and finding the face\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, scaleFactor=5.5, minNeighbors=5)\n for(x, y, w, h) in faces:\n roi_color = frame[y:y+h, x:x+w]\n # LBPH recognition runs on the grayscale ROI, not the color crop\n roi_gray = gray[y:y+h, x:x+w]\n img_item = \"my-image.png\"\n \n id_, conf = recognizer.predict(roi_gray)\n if conf>=45 and conf<= 85:\n print(id_)\n \n \n cv2.imwrite(img_item, roi_color)\n \n color = (255, 0, 0)#BGR 0-255\n stroke=2\n endx = x + w\n endy = y + h\n cv2.rectangle(frame,(x,y),(endx, endy), color, stroke)\n \n \n #Display the resulting frame\n cv2.imshow('myframe',frame)\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"MeghnaHooda/Face-Recognition","sub_path":"FaceRecognition.py","file_name":"FaceRecognition.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"72146421333","text":"#_*_ coding:utf-8 _*_\n\n##import os\ndef reverse(x):\n flag = 1 if x >= 0 else -1\n a = abs(x)\n new_x = flag * int(str(a)[::-1])\n return new_x if new_x >= -9223372036854775808 and new_x <= 9223372036854775807 else 0\n\na = int(input(\"Please enter an integer: \"))\nprint(reverse(a))\n#os.system('pause')\n","repo_name":"daodaoawaker/LeetCode","sub_path":"LeetCode_007/Reverse 
Integer_3.py","file_name":"Reverse Integer_3.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"21963916323","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS, cross_origin\nimport db\n\napp = Flask(__name__)\napi = Api(app)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\nclass UserManager(Resource):\n @staticmethod\n @cross_origin()\n def get():\n connection,cursor = db.create_connection()\n query = request.args['query']\n print(query)\n response = db.execute_query(connection=connection,cursor=cursor,query_param=query)\n print(response)\n return jsonify(response[0][2])\n\n\napi.add_resource(UserManager, '/api/search')\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n","repo_name":"Harshetamahajan/recipefinder_backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"937765278","text":"from __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom builtins import map\n\nimport logging\nfrom datetime import timedelta\n\nfrom tecs.rinex.basic import ObservationData\nfrom tecs.rinex.basic import RinexError\nfrom tecs.rinex.common import validate_epoch, sec2sec_ms\nfrom tecs.rinex.header import RinexVersionType, TimeOfFirstObs, \\\n ApproxPositionXYX, Interval, SysNObsTypes\n\nNAME = 'tecs.rinex.o.v3'\n\n\nclass Obs3(ObservationData):\n \"\"\"RINEX v3.00 observation data\n \"\"\"\n VERSION = 3.0\n\n _epoch_id = '>'\n _obs_rec_len = 16\n\n def __init__(self, fobj, filename):\n super(Obs3, self).__init__(fobj, filename)\n\n self._name = NAME + '.Obs3'\n\n # header labels\n self.ver_type = RinexVersionType(self.VERSION)\n self.tofo = TimeOfFirstObs(self.VERSION)\n self.xyz = ApproxPositionXYX(self.VERSION)\n self.interval = Interval(self.VERSION)\n self.sys_n_obs = SysNObsTypes(self.VERSION)\n\n header = []\n for line in self._fobj:\n label = line[60:].rstrip()\n if label == 'END OF HEADER':\n break\n header.append(line)\n\n self._parse_header(header)\n\n self._det_interval()\n\n self._data_chunks = []\n self._det_data_chunks()\n\n def _next_rec(self, f_obj):\n pass\n\n def _det_data_chunks(self):\n \"\"\"_det_data_chunks(self) -> None\n\n determine the data chunks list.\n \"\"\"\n ns_obs = [len(self.sys_n_obs.value[d]) for d in self.sys_n_obs.value]\n max_n_obs = max(ns_obs)\n\n rl = self._obs_rec_len\n\n start = 3\n stop = max_n_obs * rl + rl\n steps = range(start, stop, rl)\n\n chunks = []\n for i in range(len(steps) - 1):\n chunks.append((steps[i], steps[i + 1]))\n\n self._data_chunks = tuple(chunks)\n\n def _det_interval(self):\n err_msg = 'invalid interval value: {}'\n logger = logging.getLogger(self._name + '._det_interval')\n\n epoch_records = [\n self._next_epoch(),\n self._next_epoch()\n ]\n\n while epoch_records[0] == epoch_records[1]:\n epoch_records[1] = self._next_epoch()\n\n for er in epoch_records:\n if er is None:\n raise RinexError(self.filename, err_msg.format('None'))\n\n dt = epoch_records[1] - epoch_records[0]\n dt = dt.total_seconds()\n\n if dt <= 0:\n raise RinexError(self.filename, err_msg.format(dt))\n\n if self.interval.value != dt:\n msg_wi = 'Wrong interval value in the header of {}: {}; ' \\\n 'using {} instead.'\n logger.warning(msg_wi.format(self.filename,
self.interval.value,\n dt))\n\n self.interval.value = '{:10.3f}'.format(dt)\n\n self._fobj.seek(0)\n for line in self._fobj:\n line = line[60:].rstrip()\n if line == 'END OF HEADER':\n break\n\n def _next_epoch(self):\n \"\"\"_next_epoch() -> None\n\n retrieves datetime of the next epoch record.\n\n Notes\n -----\n changes self._fobj position.\n \"\"\"\n epoch = None\n for line in self._fobj:\n if not line[0] == self._epoch_id:\n continue\n epoch, flag = self._parse_epoch_record(line)[0:2]\n # special event: no epoch\n if flag > 1:\n continue\n break\n return epoch\n\n # noinspection PyProtectedMember\n def _parse_header(self, header):\n super(Obs3, self)._parse_header(header)\n\n sys_n_obs_slice = ''\n for line in header:\n label = line[60:].rstrip()\n if self.sys_n_obs.label == label:\n sys_n_obs_slice += line\n continue\n\n self.sys_n_obs.value = sys_n_obs_slice\n\n def _parse_epoch_record(self, epoch_record):\n \"\"\"parse_epoch_record(epoch_record)\n\n Parameters\n ----------\n epoch_record : str\n\n Returns\n -------\n epoch_components : tuple\n (epoch, epoch_flag, num_of_sat, clock_offset)\n\n with\n epoch: datetime,\n epoch_flag: int,\n num_of_sat: int\n clock_offset: datetime.timedelta\n\n Notes\n -----\n timestamps accurate to microsecond\n \"\"\"\n\n if not epoch_record[0] == self._epoch_id:\n msg = 'not epoch record: {rec}'.format(rec=epoch_record)\n raise RinexError(self.filename, msg)\n\n # month, day, hour, min; year + ...\n epoch = [epoch_record[i:i + 3] for i in range(6, 17, 3)]\n epoch = [epoch_record[1:6]] + epoch\n\n sec = epoch_record[18:29]\n\n try:\n sec = float(sec)\n sec, micro_sec = sec2sec_ms(sec)\n\n epoch += [sec, micro_sec]\n epoch = list(map(int, epoch))\n\n epoch = validate_epoch(epoch)\n except ValueError:\n epoch = None\n\n try:\n epoch_flag = int(epoch_record[31])\n except (IndexError, ValueError):\n msg = \"Can't extract epoch flag from {rec}\".format(rec=epoch_record)\n raise RinexError(self.filename, msg)\n\n try:\n num_of_sat = int(epoch_record[32:35])\n except ValueError:\n msg = \"Can't extract\" \\\n \" number of satellites from {rec}\".format(rec=epoch_record)\n raise RinexError(self.filename, msg)\n\n try:\n sec = float(epoch_record[42:])\n sec, micro_sec = sec2sec_ms(sec)\n clock_offset = timedelta(0, sec, micro_sec)\n except ValueError:\n clock_offset = timedelta(0)\n\n return epoch, epoch_flag, num_of_sat, clock_offset\n\n def _parse_obs_record(self, record):\n \"\"\"parse_obs_record(record) -> sat, obs_values\n\n Parameters\n ----------\n record : str\n\n Returns\n -------\n sat : str\n satellite\n obs_values : tuple\n (obs_values_1, ..., obs_values_n)\n with obs_values_x = (obs_value, lli_value, sig_strength_value)\n \"\"\"\n\n sat = record[0:3]\n if not sat:\n msg = \"Can't extract satellite from {rec}\".format(rec=record)\n raise RinexError(self.filename, msg)\n\n sat = sat.replace(' ', '0')\n\n if sat[0] not in self.sys_n_obs.value:\n msg = 'There is no such satellite system definition in header:' \\\n ' {ss}.'.format(ss=sat[0])\n raise RinexError(self.filename, msg)\n\n data_record = []\n empty = (0,) * 3\n\n obs_num = len(self.sys_n_obs.value[sat[0]])\n\n for n in range(obs_num):\n s, e = self._data_chunks[n]\n chunk = record[s:e]\n\n if not chunk or chunk.isspace():\n data_record.append(empty)\n continue\n\n val = chunk[:14]\n try:\n if not val or val.isspace():\n val = 0.0\n else:\n val = float(val)\n except ValueError:\n val = 0.0\n\n feature = []\n for i in 14, 15:\n try:\n v = chunk[i]\n if v.isspace():\n v = 0\n else:\n v = 
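`_parse_epoch_record` above splits the fractional RINEX seconds field with `sec2sec_ms` from `tecs.rinex.common`, which is imported but not shown. A stand-in inferred from its call sites (float seconds in, `(seconds, microseconds)` out) might look like this; the spillover guard is my addition, not necessarily in the real helper:

```python
# Hypothetical stand-in for tecs.rinex.common.sec2sec_ms, inferred from
# how _det_interval and _parse_epoch_record consume its return value.
def sec2sec_ms(seconds):
    whole = int(seconds)
    micro = int(round((seconds - whole) * 1e6))
    if micro == 1000000:  # guard: rounding can spill into the next second
        whole, micro = whole + 1, 0
    return whole, micro

print(sec2sec_ms(29.999))  # (29, 999000)
print(sec2sec_ms(0.5))     # (0, 500000)
```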
int(v)\n except (IndexError, ValueError):\n v = 0\n feature.append(v)\n\n data_record.append((val, feature[0], feature[1]))\n\n return sat, tuple(data_record)\n\n # noinspection PyUnusedLocal\n def _handle_event(self, epoch, epoch_flag, special_records):\n logger = logging.getLogger(self._name + '._handle_event')\n\n # header information follows\n if epoch_flag == 4:\n self._parse_header(special_records)\n # TODO add another event types handlers\n # in that case 'epoch' param may be useful here\n else:\n msg = 'Missed event type: {flag}'.format(flag=epoch_flag)\n logger.error(msg)\n\n def _handle_power_failure(self, epoch):\n logger = logging.getLogger(self._name + '_handle_power_failure')\n msg = '{file} {epoch}: ' \\\n 'power failure between previous and current epoch.'\n msg = msg.format(file=self.filename, epoch=epoch)\n logger.info(msg)\n\n def read_records(self):\n \"\"\"read_records() -> generator\n\n iterate over data records it the file; return (epoch, sat, dataset).\n \"\"\"\n epoch, epoch_flag, num_of_sat, clock_offset = (None,) * 4\n special_records = []\n\n for line in self._fobj:\n if line[0] == self._epoch_id:\n epoch, epoch_flag, num_of_sat, clock_offset = \\\n self._parse_epoch_record(line)\n continue\n\n if epoch_flag > 1:\n if num_of_sat > 0:\n special_records.append(line)\n num_of_sat -= 1\n\n if num_of_sat == 0:\n self._handle_event(epoch, epoch_flag, special_records)\n\n continue\n\n if epoch_flag == 1:\n self._handle_power_failure(epoch)\n epoch_flag = 0\n\n if num_of_sat > 0:\n num_of_sat -= 1\n sat, dataset = self._parse_obs_record(line)\n\n if not sat[0] in self.sys_n_obs.value:\n msg = 'No such satellite {}'.format(sat)\n raise RinexError(self.filename, msg)\n\n # imitation of o.v2.Obs return\n sat_obs = self.sys_n_obs.value[sat[0]]\n assert len(dataset) == len(sat_obs)\n\n rec = {}\n for i, val in enumerate(dataset):\n rec[sat_obs[i]] = val\n\n yield epoch, sat, rec\n\n\nclass Obs301(Obs3):\n \"\"\"Obs301\n \"\"\"\n VERSION = 3.01\n\n def __init__(self, fobj, filename):\n super(Obs301, self).__init__(fobj, filename)\n\n\nclass Obs302(Obs301):\n \"\"\"Obs302\n \"\"\"\n VERSION = 3.02\n\n def __init__(self, fobj, filename):\n super(Obs302, self).__init__(fobj, filename)\n\n\nclass Obs303(Obs302):\n \"\"\"Obs303\n \"\"\"\n VERSION = 3.03\n\n def __init__(self, fobj, filename):\n super(Obs303, self).__init__(fobj, filename)\n","repo_name":"gnss-lab/tec-suite","sub_path":"tecs/rinex/v3/o.py","file_name":"o.py","file_ext":"py","file_size_in_byte":10479,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"6541395811","text":"\"\"\"\nМодуль для работы с музыкой\n\"\"\"\n\nfrom discord import VoiceClient, FFmpegPCMAudio\nfrom settings import FFMPEG_OPTIONS\nfrom bot.schemas import AudioSource, YoutubeVideo\nfrom bot.utils import youtube_utils\n\n\nclass AudioQueue(list):\n \"\"\"\n Очередь музыки\n \"\"\"\n\n _global_queue: dict[str, 'AudioQueue'] = {}\n \"Глобальный словарь очередей для всех серверов\"\n\n @classmethod\n def get_queue(cls, guild_id: int) -> 'AudioQueue':\n \"\"\"Получить очередь для отдельного се��вера\"\"\"\n\n _guild_id = str(guild_id)\n queue = cls._global_queue.get(_guild_id)\n\n # Создать очередь, если её нету\n if queue is None:\n queue = AudioQueue(guild_id)\n cls._global_queue[_guild_id] = queue\n\n return queue\n\n @classmethod\n def del_queue(cls, guild_id: int):\n \"\"\"Удалить очередь из глобального списка для очистки памяти\"\"\"\n\n _guild_id = str(guild_id)\n\n if _guild_id in 
cls._global_queue:\n del cls._global_queue[_guild_id]\n\n def __init__(self, guild_id: int) -> None:\n super().__init__()\n\n self.guild_id: int = guild_id\n \"ID сервера\"\n self.on_replay: bool = False\n \"Автоповтор музыки\"\n self._current: AudioSource | None = None\n \"Текущая музыка\"\n self._latest: AudioSource | None = None\n \"Последняя проигранная музыка\"\n\n @property\n def full_queue(self) -> list[AudioSource]:\n \"\"\"Очередь с учётом текущей музыки\"\"\"\n\n if self.current is None:\n return self.copy()\n\n return [self.current, *self]\n\n def delete(self):\n \"\"\"Удалить очередь из глобального списка для очистки памяти\"\"\"\n AudioQueue.del_queue(self.guild_id)\n\n def skip(self, count: int = 1):\n \"\"\"Пропустить музыку\"\"\"\n\n for _ in range(count):\n if len(self) == 0:\n self.current = None\n break\n\n self.current = self.pop(0)\n\n def next(self) -> AudioSource | None:\n \"\"\"Следующая музыка\"\"\"\n\n if not self.on_replay or self.current is None:\n self.skip()\n\n return self.current\n\n def set_next(self, audio: AudioSource | list[AudioSource]):\n \"\"\"Установить следующую музыку\"\"\"\n\n if isinstance(audio, list):\n for i, aud in enumerate(audio):\n self.insert(i, aud)\n else:\n self.insert(0, audio)\n\n @property\n def latest(self) -> AudioSource | None:\n \"\"\"Последняя проигранная музыка\"\"\"\n return self._latest\n\n @property\n def current(self) -> AudioSource | None:\n \"\"\"Текущая музыка\"\"\"\n return self._current\n\n @current.setter\n def current(self, value: AudioSource | None):\n self._latest = self.current or self._latest or value\n self._current = value\n\n\nclass AudioController:\n \"\"\"\n Контроллер проигрывания музыки.\n\n Являет собой обёртку над `VoiceClient` для удобного управления воспроизведением.\n\n Поддерживает следующий функционал:\n\n - Очередь музыки\n - Автоповтор музыки\n - Управление проигрыванием (play, stop, skip)\n \"\"\"\n\n _controllers = {}\n\n @classmethod\n def get_controller(cls, voice_client: VoiceClient) -> 'AudioController':\n \"\"\"Получить контроллер для отдельного сервера\"\"\"\n\n cont = cls._controllers.get(voice_client.guild.id)\n\n # Создать контроллер, если его нету\n if cont is None:\n cont = AudioController(voice_client)\n cls._controllers[voice_client.guild.id] = cont\n\n return cont\n\n def __init__(self, voice_client: VoiceClient) -> None:\n self.voice_client = voice_client\n self._loop_running = False\n \"Флаг для остановки цикла проигрывания музыки\"\n\n def _play_loop(self, error: any = None) -> None:\n \"\"\"\n Рекурсивная функция проигрывания музыки.\n\n Передаётся в аргумент `after` метода `VoiceClient.play`\n \"\"\"\n\n # Флаг для остановки цикла\n if not self._loop_running:\n return\n\n next_audio = self.queue.next()\n\n # Остановить цикл, если очередь пуста\n if next_audio is None:\n self._loop_running = False\n return\n\n self._play_music(next_audio)\n\n def _play_music(self, audio: AudioSource):\n \"\"\"Проиграть музыку\"\"\"\n\n ffmpeg_options = FFMPEG_OPTIONS.copy()\n\n # Использовать фильтр для спонсорских сегментов (интеграция SponsorBlock)\n if isinstance(audio, YoutubeVideo):\n ffmpeg_options.setdefault('options', '')\n segments = youtube_utils.get_skip_segments(audio.id)\n\n if segments is not None:\n opts = youtube_utils.get_ffmpeg_sponsor_filter(segments, audio.duration)\n ffmpeg_options['options'] += ' ' + opts\n\n self.voice_client.play(\n FFmpegPCMAudio(audio.source_url, **ffmpeg_options),\n after=self._play_loop\n )\n\n def play(self):\n \"\"\"Проиграть музыку\"\"\"\n\n 
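The AudioQueue above keeps one list-subclass queue per guild in a class-level registry. A quick illustrative run of its semantics, with plain strings standing in for AudioSource objects (the queue never inspects them):

```python
# Illustrative: per-guild registry plus skip/replay semantics of AudioQueue.
queue = AudioQueue.get_queue(1234)
assert AudioQueue.get_queue(1234) is queue  # one queue per guild id

queue.extend(["track-a", "track-b"])  # it is a list subclass
queue.next()                          # pops "track-a" into .current
print(queue.current, queue.latest)    # track-a track-a

queue.on_replay = True
queue.next()                          # replay: current is left unchanged
print(queue.current)                  # track-a

queue.delete()                        # remove it from the global registry
```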
self._loop_running = True\n self._play_loop()\n\n def stop(self):\n \"\"\"Остановить проигрывание музыки\"\"\"\n\n self._loop_running = False\n self.voice_client.stop()\n\n def skip(self, count: int = 1):\n \"\"\"Пропустить музыку\"\"\"\n\n self.queue.skip(count - 1)\n\n if self.voice_client.is_playing():\n self.voice_client.stop()\n else:\n self.play()\n\n def play_now(self, audio: AudioSource | list[AudioSource]):\n \"\"\"Проиграть музыку сразу\"\"\"\n\n self.queue.set_next(audio)\n\n if self.voice_client.is_playing():\n self.voice_client.stop()\n else:\n self.play()\n\n @property\n def queue(self) -> AudioQueue:\n \"\"\"Очередь музыки\"\"\"\n\n return AudioQueue.get_queue(self.voice_client.guild.id)\n","repo_name":"cubicbyte/musicbot","sub_path":"bot/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15422949842","text":"import numpy as np\nfrom Bio.SubsMat import MatrixInfo\n\nblosum = MatrixInfo.blosum62\n# which is a dictionary in the format:\n# ('W', 'F'): -1\n# i.e. when aligning the amino acids W and F, the score is -1\n\n\nclass Aligner:\n\n def __init__(self, seq1, seq2, gapPenalty):\n self.seq1 = seq1\n self.seq2 = seq2\n self.gapPenalty = gapPenalty\n self.alignMatrix = np.zeros(\n (len(self.seq1)+1, len(self.seq2)+1), dtype=int)\n self.traceBackMatrix = np.zeros(\n (len(self.seq1)+1, len(self.seq2)+1), dtype='U4')\n self.indexToTrace = {\n 0: \"d\",\n 1: \"l\",\n 2: \"u\"\n }\n self.finalScore = 0\n self.identity = 0\n\n # get the best possible value according to the recursion formula given\n # this is the only method that has been modified when compared to e2-1a.py\n def getValue(self, i, j):\n \n # the try except is because we have things like\n # (M, L), but we don't have (L, M), so we need \n # to try both possibilities\n try:\n blosumVal = blosum[(self.seq1[i-1], self.seq2[j-1])]\n except:\n blosumVal = blosum[(self.seq2[j-1], self.seq1[i-1])]\n \n possibleValues = [\n self.alignMatrix[i-1][j-1] + blosumVal,\n self.alignMatrix[i][j-1] + self.gapPenalty,\n self.alignMatrix[i-1][j] + self.gapPenalty\n ]\n\n return max(possibleValues), possibleValues.index(max(possibleValues))\n\n\n # align the sequences building the matrixes\n def align(self):\n\n for row in self.traceBackMatrix:\n row[0] = \"u\"\n\n for i in range(len(self.traceBackMatrix[0])):\n self.traceBackMatrix[0][i] = \"l\"\n self.traceBackMatrix[0][0] = \"f\"\n\n accGap = 0\n for row in self.alignMatrix:\n row[0] = accGap\n accGap += self.gapPenalty\n\n accGap = 0\n for i in range(len(self.alignMatrix[0])):\n self.alignMatrix[0][i] = accGap\n accGap += self.gapPenalty\n\n for i, j in np.ndindex(self.alignMatrix.shape):\n if i == 0:\n continue\n if j == 0:\n continue\n\n self.alignMatrix[i][j], index = self.getValue(i, j)\n self.traceBackMatrix[i][j] = self.indexToTrace[index]\n\n self.finalScore = self.alignMatrix[len(self.seq1)][len(self.seq2)]\n self.makeAlignment()\n\n \n # make the textual alignment\n def makeAlignment(self):\n\n s1 = ''\n s2 = ''\n\n i = len(self.seq1)\n j = len(self.seq2)\n alignType = self.traceBackMatrix[i][j]\n\n while alignType != 'f':\n\n if alignType == 'd':\n s1 = self.seq1[i-1] + s1\n s2 = self.seq2[j-1] + s2\n i -= 1\n j -= 1\n\n if alignType == 'l':\n s1 = '-' + s1\n s2 = self.seq2[j-1] + s2\n j -= 1\n\n if alignType == 'u':\n s1 = self.seq1[i-1] + s1\n s2 = '-' + s2\n i -= 1\n\n alignType = self.traceBackMatrix[i][j]\n\n self.s1 = s1\n self.s2 = 
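`getValue` above probes `MatrixInfo.blosum62` with try/except because Biopython stores each unordered residue pair under a single key. The same lookup can be written without exceptions via `dict.get`, assuming the same Biopython version this file already imports:

```python
# Equivalent symmetric lookup: blosum62 holds ('W', 'F') or ('F', 'W'),
# never both, so probe the swapped key as the fallback.
from Bio.SubsMat import MatrixInfo

def blosum_score(a, b):
    m = MatrixInfo.blosum62
    return m.get((a, b), m.get((b, a)))

assert blosum_score("W", "F") == blosum_score("F", "W")
print(blosum_score("W", "F"))
```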
s2\n\n self.getIdentity()\n\n\n # get the number of matches/total number (identity)\n def getIdentity(self):\n\n ident = 0\n\n for i in range(len(self.s1)):\n if self.s1[i] == self.s2[i]:\n ident += 1\n\n totalPositions = max([len(self.seq1), len(self.seq2)])\n self.identity = ident/totalPositions\n\n def printResults(self):\n print(self.alignMatrix, '\\n')\n print(self.s1)\n print(self.s2, '\\n')\n print('Final Score:', self.finalScore)\n print('Identity:', self.identity)\n\n\n\n# open the human sequence\nfile1 = open(\"hemoglobins/human.txt\", \"r\")\nhuman = file1.read()\nfile1.close()\n\nanimals = {}\n\nanimalList = [\"chicken\", \"cow\", \"deer\", \"horse\", \"pig\", \"trout\", \"wolf\"]\nanimals = {}\n\n# open each animal sequence\nfor animal in animalList:\n file1 = open(\"hemoglobins/\"+animal+\".txt\", \"r\")\n animalSequence = file1.read()\n file1.close()\n animals[animal] = animalSequence\n\n\ngap = -4\n\nscores = {}\n\n# compare all animal sequences against the human one\nfor animal in animalList:\n\n aligner = Aligner(human, animals[animal], gap)\n aligner.align()\n scores[\"human vs \"+animal] = (aligner.finalScore, aligner.identity)\n\n print(\"Results for human vs \"+animal+\":\\n\")\n aligner.printResults()\n print(\"\\n\")\n\n\n# get the best score obtained by the comparsions using identity as a tie breaker\nbestVal = (0, 0)\nbestKey = ''\ndraws = []\n\nfor key, value in scores.items():\n\n if value[0] > bestVal[0]:\n bestVal = value\n bestKey = key\n draws = []\n\n elif value[0] == bestVal[0]:\n if value[1] > bestVal[1]:\n bestVal = value\n bestKey = key\n draws = []\n\n elif value[1] == bestVal[1]:\n draws.append(key)\n\nif draws == []:\n print(\"\\nThe best score was achieved when \"+bestKey +\n \" were compared. \\nObtained score:\", scores[bestKey][0], \"\\nObtained identity:\", scores[bestKey][1])\n\nelse:\n print(\"\\nMore than one sequence aligment produced the same final score and identity.\\n\")\n print(bestKey, \"- (score, identity):\", scores[bestKey])\n\n for d in draws:\n print(d, \"- (score, identity):\", scores[d])\n\n\n\n\n","repo_name":"colombelli/biocomp","sub_path":"list 2/part 1/e2-2a.py","file_name":"e2-2a.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7045004094","text":"from saleor.graphql.tests.utils import get_graphql_content\n\nORDER_DISCOUNT_ADD_MUTATION = \"\"\"\nmutation OrderDiscountAdd($input: OrderDiscountCommonInput!, $id: ID!) {\n orderDiscountAdd(input:$input, orderId: $id) {\n errors{\n message\n field\n }\n order {\n errors{message field}\n id\n discounts {\n id\n value\n valueType\n type\n }\n }\n }\n}\n\"\"\"\n\n\ndef order_discount_add(\n api_client,\n id,\n input,\n):\n variables = {\"id\": id, \"input\": input}\n\n response = api_client.post_graphql(\n ORDER_DISCOUNT_ADD_MUTATION,\n variables=variables,\n )\n content = get_graphql_content(response)\n data = content[\"data\"][\"orderDiscountAdd\"]\n order_id = data[\"order\"][\"id\"]\n errors = data[\"errors\"]\n\n assert errors == []\n assert order_id is not None\n\n return data\n","repo_name":"saleor/saleor","sub_path":"saleor/tests/e2e/orders/utils/order_discount_add.py","file_name":"order_discount_add.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"2455124347","text":"# 3. 
Write a program that finds the position of the second occurrence of a string\n# in a list, or reports that it is absent.\n\n# *Example:*\n\n# - list: [\"qwe\", \"asd\", \"zxc\", \"qwe\", \"ertqwe\"], search: \"qwe\", answer: 3\n# - list: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\", \"йцу\"], search: \"йцу\", answer: 5\n# - list: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\"], search: \"йцу\", answer: -1\n# - list: [\"123\", \"234\", 123, \"567\"], search: \"123\", answer: -1\n# - list: [], search: \"123\", answer: -1\n\nsome_list = []\nfor i in range(4):\n    some_list.append(input())\n\nprint(some_list)\n\nel = input('enter a string')\n# list.index() raises ValueError when the element is missing; look for a\n# second occurrence right after the first hit, falling back to -1\ntry:\n    first = some_list.index(el)\n    second = some_list.index(el, first + 1)\nexcept ValueError:\n    second = -1\nprint(second)","repo_name":"yurafast/python","sub_path":"Seminar-3/task-3_2.py","file_name":"task-3_2.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} {"seq_id":"31700312139","text":"x,y=map(int,input().split())\r\nsumx=0\r\nsumy=0\r\nfor i in range(1,x):\r\n    if(x%i==0):\r\n        sumx=sumx+i\r\nfor j in range(1,y):\r\n    if(y%j==0):\r\n        sumy=sumy+j\r\n# a true amicable pair needs both sums to match, not just one of them\r\nif sumx==y and sumy==x:\r\n    print(x,\"and \",y,\"are amicable numbers\")\r\nelse:\r\n    print(x,\"and \",y,\"are not amicable numbers\")","repo_name":"kumarvadivel/python-training","sub_path":"sample18.py","file_name":"sample18.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"40864006820","text":"import time\nimport urllib\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nstart_url = \"https://en.wikipedia.org/wiki/Special:Random\"\ntarget_url = \"https://en.wikipedia.org/wiki/Philosophy\"\n\ndef crawl(search_history, target_url, max_steps=25):\n    if search_history[-1] == target_url:\n        print(\"We've found it, Yeah!!!!!!\")\n        return False\n    elif len(search_history) > max_steps:\n        print(\"It is a loooooooooop!!!!\")\n        return False\n    elif search_history[-1] in search_history[:-1]:\n        print(\"OMG! .Maybe we can not find it . I am going to play my ps5!\")\n        return False\n    else:\n        return True\n\ndef find_first_link(url):\n    response = requests.get(url)\n    html = response.text\n    soup = BeautifulSoup(html, 'html.parser')\n\n    content_div = soup.find(id='mw-content-text').find(class_='mw-parser-output')\n\n    name_link = None\n\n    for element in content_div.find_all('p', recursive=False):\n        if element.find('a', recursive=False):\n            name_link = element.find('a', recursive=False).get('href')\n            break\n\n    if not name_link:\n        return\n\n    first_link = urllib.parse.urljoin('https://en.wikipedia.org/', name_link)\n\n    return first_link\n\n\ncrawl_log = [start_url]\n\nwhile crawl(crawl_log, target_url):\n    print (crawl_log[-1])\n\n    first_link = find_first_link(crawl_log[-1])\n    if not first_link:\n        print('There is no first link here. 
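The divisor-summing loops in sample18.py above can be factored into one helper; the classic pair (220, 284) makes a convenient self-check:

```python
# Same proper-divisor sum as the loops above, checked on the textbook
# amicable pair (220, 284).
def sum_proper_divisors(n):
    return sum(i for i in range(1, n) if n % i == 0)

x, y = 220, 284
print(sum_proper_divisors(x) == y and sum_proper_divisors(y) == x)  # True
```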
I am going home')\n        break\n\n    crawl_log.append(first_link)\n\n    time.sleep(0.1)\n","repo_name":"williamjiamin/py_web_scrapper","sub_path":"py_intro_wiki_scrapper.py","file_name":"py_intro_wiki_scrapper.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} {"seq_id":"25819178520","text":"class Solution:\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        # initializing the hashmap with each course mapping to an empty list\n        preMap = { i:[] for i in range(numCourses) }\n        \n        # updating the hashmap with mapping the prereqs to courses\n        for crs, pre in prerequisites:\n            preMap[crs].append(pre)\n        \n        # marking all the visited course node\n        visitSet = set()\n        \n        # creating the recursive dfs function, passing only the current node we are visiting\n        def dfs(crs):\n            if crs in visitSet:\n                return False\n            if preMap[crs] == []:\n                return True\n            \n            visitSet.add(crs)\n            \n            for pre in preMap[crs]:\n                if not dfs(pre):\n                    return False\n            visitSet.remove(crs) # just like all other graph probs, remove the visited node\n            preMap[crs] = [] # and set that to empty list if we come back to this node we'll know it is taken\n\n            return True\n        \n        # now just calling the dfs on each node \n        for crs in range(numCourses):\n            if not dfs(crs): return False\n        return True\n","repo_name":"ikthedar/LeetCode-Blind-Solutions","sub_path":"Graph Problems/Course Schedule.py","file_name":"Course Schedule.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"11346448595","text":"#!/home/justin/.venv/learn/bin/python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom keras.models import load_model\nfrom keras.datasets import mnist\n\nnum = 5\n\n(x1, _), (x2, _) = mnist.load_data()\nx = np.concatenate([x1, x2]).reshape([-1, 28*28])/255\n\npca = PCA()\npca.fit(x)\n\nwhile True:\n    # np.random.normal() takes a standard deviation, so use sqrt(variance)\n    rand = np.random.normal(0, np.sqrt(pca.explained_variance_), size=[num, pca.n_components_])\n\n    # inverse_transform() already adds pca.mean_ back, so don't add it twice\n    x_out = pca.inverse_transform(rand).reshape([-1, 28, 28])\n\n    for i in range(num):\n        plt.subplot(1, num, i+1)\n        plt.imshow(x_out[i])\n    plt.show()\n","repo_name":"JustinCWeiler/python-tidbits","sub_path":"learn/autoencoder/mnist_pca.py","file_name":"mnist_pca.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"26091737316","text":"import os\nfrom django.core.files.temp import NamedTemporaryFile\nfrom traceback import print_tb\nfrom unicodedata import category\nfrom urllib import request\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom .forms import UserCreateForm, LoginForm,UserChangeForm\nfrom postapp.forms import PostForm\nfrom .models import CustomUser\nfrom django.contrib.auth import login, logout, authenticate\nfrom postapp.models import Category\nfrom postapp.models import Post, Like\nfrom django.http import HttpResponse, JsonResponse\nfrom django.core import serializers\nfrom django.forms.models import model_to_dict\nimport json\nimport datetime\nfrom django.core.files import File # you need this somewhere\nimport urllib\n# class Login(LoginView):\n#     template_name = 'registrations/login.html'\n\ndef loginview(request):\n    \n    if request.method == 'POST':\n        postrequest = request.POST\n        form =LoginForm(data=postrequest)\n
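The `canFinish` solution above detects prerequisite cycles with DFS; a tiny check on two course graphs (this assumes the LeetCode-style harness where `List` is already imported for the annotation):

```python
# Sanity check for the DFS cycle detection in Course Schedule above.
solver = Solution()
print(solver.canFinish(2, [[1, 0]]))          # True: take course 0, then 1
print(solver.canFinish(2, [[1, 0], [0, 1]]))  # False: mutual prerequisites
```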
        print(form)\n        if form.is_valid():\n            user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])\n            print(form.cleaned_data['username'],form.cleaned_data['password'])\n            if user is not None:\n                login(request=request, user=user)\n                \n                message = f'Dear {user.username}! You have been logged in'\n                request.session['message']= message\n                return redirect('home')\n            else:\n                message = 'Login failed!'\n                request.session['message']= message\n                return redirect('login')\n        \n\n\n    return render(request=request, template_name='registrations/login.html', )\ndef logoutview(request):\n    \n    message = f'Dear {request.user.first_name}! You have been logged out'\n    request.session['message']= message\n    logout(request=request)\n    return redirect('home')\n\ndef CreateUser(request):\n    if request.method == 'POST':\n        if request.POST.get('password1') == request.POST.get('password2'): \n            user_form = UserCreateForm(data = request.POST)\n            print(user_form)\n            if user_form.is_valid():\n                user = user_form.save() \n                user.set_password(request.POST.get('password1'))\n                try:\n                    user.save() \n                except:\n                    print('err')\n                message = 'The registration was successful. You can log in'\n                request.session['message']= message\n            # if form.is_valid():\n            #     form.save()\n            return redirect('home')\n        else:\n            return redirect('registration')\n    else:\n        form = UserCreateForm()\n    return render(request=request, template_name='registrations/registration.html', context={'form':form})\n    \ndef profile(request, slug, cat_id=None):\n    catsecond = None\n    if Category.objects.all().count() <7 :\n        catfirst = Category.objects.all()\n    else:\n        catfirst = Category.objects.all()[:6]\n        catsecond = Category.objects.all()[6:]\n    categories ={ \n        'catfirst':catfirst,\n        'catsecond':catsecond,\n        \n    }\n    if cat_id == None:\n        \n        posts = CustomUser.objects.get(slug=slug).post_set.all()\n    else:\n        print(type(cat_id))\n        category = Category.objects.get(id=cat_id)\n        posts = CustomUser.objects.get(slug=slug).post_set.filter(category=category)\n    is_ajax = request.headers.get('X-Requested-With')== 'XMLHttpRequest'\n    print(is_ajax)\n    if request.method == 'GET' and is_ajax:\n        print(request.GET.get('test'))\n        category = Category.objects.get(id=request.GET.get('cat_id'))\n        posts = Post.objects.filter(category=category)\n        data = serializers.serialize('json', posts)\n        print(posts)\n        \n        return HttpResponse(data,\n                        content_type=\"application/json\")\n    print(posts)\n    context ={'posts':posts, 'categories':categories,'userprofile':CustomUser.objects.get(slug=slug)}\n    return render(request=request, template_name='profile/profile.html', context=context)\n\ndef editpost(request, slug, id=None):\n    \n    categories = Category.objects.all()\n    author = CustomUser.objects.get(slug=slug)\n    # print(request.user.username,CustomUser.objects.get(slug=slug).username )\n    post=None\n    \n    if request.user.slug == author.slug and id!=None:\n        post = author.post_set.get(id=id)\n    \n    # print('_____test____')\n    if request.method == 'POST':\n        if post:\n            form = PostForm(request.POST, request.FILES, instance=post)\n            \n            if form.is_valid():\n                \n                form.save()\n                return redirect('profile', author.slug)\n        else:\n            form = PostForm(request.POST,request.FILES)\n            if form.is_valid():\n                post = form.save(commit=False)\n                post.author = author\n                post.save()\n                return redirect('profile', author.slug)\n        title = request.POST.get('title')\n        anons = request.POST.get('anons')\n        discreption = request.POST.get('discreption')\n        category = request.POST.get('categories')\n        image = request.FILES\n        print(image)\n\n    is_ajax = 
request.headers.get('X-Requested-With')== 'XMLHttpRequest'\n # print(request.FILES, '______POST')\n if is_ajax:\n print(request.POST)\n title = request.POST.get(\"title\")\n discreption = request.POST.get(\"discreption\")\n anons = request.POST.get('anons')\n category = request.POST.get('categories')\n print(title,anons,discreption,category)\n \n print('ajax')\n return JsonResponse({'result':True,})\n \n\n # print(request.GET.get('test'))\n context = {'categories':categories,'post':post,'userprofile':CustomUser.objects.get(slug=slug)}\n return render(request=request, template_name='profile/editpost.html', context=context)\n\ndef removepost(request, slug, id):\n post = Post.objects.filter(author=CustomUser.objects.get(slug=slug), id=id)\n if post:\n print(post)\n post.delete()\n else:\n print(\"I don't get your message\")\n\n return redirect('profile', slug=slug)\ndef detailpost(request,slug, id):\n pass\ndef editprofile(request, slug):\n catsecond = None\n form = UserChangeForm()\n object = CustomUser.objects.get(slug=slug)\n categoriessellect = Category.objects.all()\n\n if Category.objects.all().count() <7 :\n catfirst = Category.objects.all()\n else:\n catfirst = Category.objects.all()[:6]\n catsecond = Category.objects.all()[6:]\n categories ={ \n 'catfirst':catfirst,\n 'catsecond':catsecond,\n \n }\n if request.method == 'POST':\n print(request.POST.get('date_joined'))\n _mutable =request.POST._mutable\n request.POST._mutable = True\n request.POST['date_joined']=datetime.datetime.now()\n\n\n \n request.POST._mutable = _mutable\n form = UserChangeForm(request.POST, request.FILES, instance=object)\n \n print(form)\n if form.is_valid():\n print('_____ valid _____')\n profileuser = CustomUser.objects.filter(slug=slug)\n profileuser.update(\n username = request.POST.get('username'),\n email = request.POST.get('email'),\n bio = request.POST.get('bio'),\n avatar = request.POST.get('avatar'),\n phonenumber = request.POST.get('phonenumber'),\n first_name = request.POST.get('first_name'),\n last_name = request.POST.get('last_name'),\n )\n # if 'image' in request.FILES:\n \n # profileuser.update(avatar=request.FILES['image']) \n \n \n\n return redirect('profile', slug)\n \n\n is_ajax = request.headers.get('X-Requested-With')== 'XMLHttpRequest'\n # print(request.FILES, '______POST')\n if is_ajax:\n print(request.POST)\n title = request.POST.get(\"title\")\n discreption = request.POST.get(\"discreption\")\n anons = request.POST.get('anons')\n category = request.POST.get('categories')\n print(title,anons,discreption,category)\n \n print('ajax')\n return JsonResponse({'result':True,})\n \n\n # print(request.GET.get('test'))\n context = {'categories':categories,'categoriessellect':categoriessellect,'object':object, 'userprofile':object, 'form':form}\n return render(request=request, template_name='profile/editprofile.html', context=context)\n\ndef detailpostprofile(request, slug, id):\n author = CustomUser.objects.get(slug=slug)\n \n catsecond=None\n if id:\n post = Post.objects.get(id=id)\n if Category.objects.all().count() <7 :\n catfirst = Category.objects.all()\n else:\n catfirst = Category.objects.all()[:6]\n catsecond = Category.objects.all()[6:]\n categories ={ \n 'catfirst':catfirst,\n 'catsecond':catsecond,\n \n }\n if request.method=='POST':\n likeObject = Like.objects.filter(post = post, author=author)\n if likeObject and likeObject[0].like==True:\n likeObject.update(like=False)\n elif likeObject and likeObject[0].like==False:\n likeObject.delete()\n else:\n 
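These views repeat the raw header comparison `request.headers.get('X-Requested-With') == 'XMLHttpRequest'` in several places. A small hypothetical helper (not in the repo) would keep the check in one spot:

```python
# Hypothetical helper for the repeated AJAX check in these Django views;
# HttpRequest.headers exists from Django 2.2 onward.
def is_ajax(request):
    return request.headers.get("X-Requested-With") == "XMLHttpRequest"

# usage inside a view:
#     if request.method == "GET" and is_ajax(request):
#         return JsonResponse({"result": True})
```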
Like.objects.create(author=author, post = post, like=True)\n return redirect('detailpostprofile', slug=slug, id=id)\n try:\n like = Like.objects.get(post= post, author=author)\n except:\n like=None\n\n\n context = {'categories':categories, 'userprofile':CustomUser.objects.get(slug=slug), 'post':post, 'like':like}\n return render(request=request, template_name='profile/detailpostprofile.html', context=context)","repo_name":"Kamoliddin0606/python_5_news_project","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18770566067","text":"import os\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom urllib.parse import urlparse\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic import (\n ListView,\n DetailView,\n DeleteView,\n FormView,\n TemplateView,\n)\n\nfrom app.cache import albums_cache\nfrom app.forms import UserEncodingCreateForm\nfrom app.models import Album, UserEncoding\nfrom face_detection.models import FaceEncoding\nfrom face_detection.services import detector\n\n\nclass TokenAuth(View):\n def get(self, request, *args, **kwargs):\n if \"token\" in request.GET:\n response = requests.get(\n f\"{settings.BASE_HOST}/api/v1/members/me\",\n headers={\"Authorization\": f'Token {request.GET[\"token\"]}'},\n ).json()\n user = User.objects.get(pk=response[\"pk\"])\n\n login(request, user=user)\n request.session[\"token\"] = request.GET[\"token\"]\n return redirect(\"index\")\n\n\n@method_decorator(staff_member_required, \"dispatch\")\n@method_decorator(login_required, \"dispatch\")\nclass AlbumsIndexView(ListView):\n template_name = \"app/albums/index.html\"\n model = Album\n context_object_name = \"albums\"\n ordering = \"-pk\"\n\n\n@method_decorator(login_required, \"dispatch\")\nclass AlbumsDetailView(DetailView):\n template_name = \"app/albums/detail.html\"\n model = Album\n context_object_name = \"album\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n data = requests.get(\n f'{settings.BASE_HOST}/api/v1/photos/albums/{context[\"album\"].pk}',\n headers={\"Authorization\": f'Token {self.request.session[\"token\"]}'},\n ).json()\n\n for photo in data[\"photos\"]:\n parsed = urlparse(photo['file']['full'])\n split = os.path.split(parsed.path)\n photo.update({\n 'album_name': f\"{data['title']} {data['date']}\",\n 'download': f\"{parsed.scheme}://{parsed.hostname}{split[0].replace('media/private', 'members')}/download/{split[1]}\"\n })\n\n context[\"title\"] = data[\"title\"]\n context[\"date\"] = data[\"date\"]\n context[\"photos\"] = data[\"photos\"]\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass RandomAlbumView(View):\n def dispatch(self, request, *args, **kwargs):\n album = Album.objects.order_by(\"?\").first()\n return redirect(\"albums:detail\", pk=album.pk)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass MyPhotosView(TemplateView):\n template_name = 
\"app/myphotos.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n photos = {}\n encodings = FaceEncoding.objects.order_by(\"-album_id\").filter(\n matches__user=self.request.user\n )\n albums = {x.album_id for x in encodings if x.album_id not in albums_cache}\n\n if encodings.exists():\n s = requests.Session()\n s.headers.update(\n {\"Authorization\": f'Token {self.request.session[\"token\"]}'}\n )\n\n def get_url(album_id):\n if album_id not in albums_cache:\n albums_cache[album_id] = s.get(\n f\"{settings.BASE_HOST}/api/v1/photos/albums/{album_id}/\"\n ).json()\n\n with ThreadPoolExecutor(max_workers=20) as pool:\n pool.map(get_url, albums)\n for encoding in encodings:\n data = albums_cache[encoding.album_id]\n for x in filter(lambda x: x[\"pk\"] == encoding.image_id, data[\"photos\"]):\n parsed = urlparse(x['file']['full'])\n split = os.path.split(parsed.path)\n x.update({\n 'album_name': f\"{data['title']} {data['date']}\",\n 'download': f\"{parsed.scheme}://{parsed.hostname}{split[0].replace('media/private', 'members')}/download/{split[1]}\"\n })\n photos[f\"{x['album']}-{x['pk']}\"] = x\n s.close()\n\n context[\"title\"] = \"Photos of you\"\n context[\"photos\"] = photos.values()\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserEncodingIndexView(ListView):\n template_name = \"app/encodings/index.html\"\n model = UserEncoding\n context_object_name = \"encodings\"\n\n def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserEncodingCreateView(FormView):\n template_name = \"app/encodings/create.html\"\n form_class = UserEncodingCreateForm\n success_url = reverse_lazy(\"encodings:index\")\n\n def form_valid(self, form):\n encodings = detector.obtain_encodings(\n None, None, form.cleaned_data[\"upload_image\"].file\n )\n\n for encoding in encodings:\n user_enc = UserEncoding.objects.create(\n encoding=encoding,\n description=form.cleaned_data.get(\"description\", \"\"),\n user=self.request.user,\n )\n user_enc.calculate_matches()\n return HttpResponseRedirect(self.get_success_url())\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserEncodingDeleteView(DeleteView):\n template_name = \"app/encodings/delete.html\"\n model = UserEncoding\n success_url = reverse_lazy(\"encodings:index\")\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n","repo_name":"svthalia/face-detect-app","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4461974632","text":"import os\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\n'''\nThe output of Siamese network would be similarity score, which indicates a pair of gait cycle are from the same user or not (binary classification)\n'''\ndef get_data(data_path, batch_size):\n\n data = np.load(data_path, allow_pickle = True)\n num_class = len(data)\n\n pairs = []\n pair_idx = []\n labels = []\n\n for user_idx, user_data in enumerate(data):\n\n user_label = np.zeros(batch_size)\n 
user_label[:len(user_label)//2] = 1\n\n num_data = len(user_data)\n\n for idx in range(batch_size):\n\n batch = []\n batch_idx = [user_idx]\n random_idx = random.randint(0, num_data-1)\n batch.append(user_data[random_idx])\n\n if idx < len(user_label)//2:\n random_idx = random.randint(0, num_data-1)\n batch.append(user_data[random_idx])\n batch_idx.append(user_idx)\n else:\n random_user = (user_idx + random.randint(1,num_class-1)) % num_class\n random_user_data = data[random_user]\n\n random_idx = random.randint(0, len(random_user_data)-1)\n batch.append(random_user_data[random_idx])\n batch_idx.append(random_user)\n\n pairs.append(batch)\n pair_idx.append(batch_idx)\n labels.append(user_label)\n\n\n return np.array(pairs), np.array(pair_idx), np.array(labels).ravel().reshape(-1, 1)\n\ndef shuffle_data(data, index, label, ratio):\n\n data, index, label = shuffle(data, index, label)\n\n train_data, train_index, train_label = data[:int(len(data)*ratio)], index[:int(len(data)*ratio)], label[:int(len(data)*ratio)]\n test_data, test_index, test_label = data[int(len(data)*ratio):], index[int(len(data)*ratio):], label[int(len(data)*ratio):]\n\n return train_data, train_index, train_label, test_data, test_index, test_label\n\ndef train_test_split(data, index, label, ratio):\n\n train_data, train_index, train_label, test_data, test_index, test_label = shuffle_data(data, index, label, ratio)\n\n while np.any(np.unique(train_index) != np.unique(index)):\n\n train_data, train_index, train_label, test_data, test_index, test_label = shuffle_data(data, index, label, ratio)\n\n return train_data, train_index, train_label, test_data, test_index, test_label\n\ndef Layer(X, num_output, initializer, keep_prob, W_name, b_name):\n\n _, num_feature = X.shape\n\n W = tf.get_variable(W_name, shape = [num_feature, num_output], dtype = tf.float32, initializer = initializer)\n b = tf.Variable(tf.random_normal([num_output]), name = b_name)\n L = tf.matmul(X, W) + b\n L = tf.nn.relu(L)\n L = tf.nn.dropout(L, keep_prob = keep_prob)\n\n return L\n\ndef siamese(input_data, keep_prob, reuse = False):\n\n l1_dim = 2000\n l2_dim = 3000\n l3_dim = 3000\n\n initializer = tf.contrib.layers.xavier_initializer()\n\n with tf.variable_scope('Layer1', reuse = reuse) as scope:\n model = Layer(input_data, l1_dim, initializer, keep_prob, 'W1', 'b1')\n\n with tf.variable_scope('Layer2', reuse = reuse) as scope:\n model = Layer(model, l2_dim, initializer, keep_prob, 'W2', 'b2')\n\n with tf.variable_scope('Layer3', reuse = reuse) as scope:\n model = Layer(model, l3_dim, initializer, keep_prob, 'W3', 'b3')\n\n return model\n\nwalk_data_path = 'data/walk/filtered_interpolation.pkl'\nbatch_size = 80\nnum_iter = 500\ninitial_learning_rate = 10**(-4)\n\npairs, pair_idx, labels = get_data(walk_data_path, batch_size)\nnum_pairs, pair_size, cycle_length, num_feature = pairs.shape\nprint(labels.shape)\ntrain_data, train_index, train_label, test_data, test_index, test_label = train_test_split(pairs, pair_idx, labels, 0.7)\nprint(train_data.shape, train_index.shape, train_label.shape)\n\nleft = tf.placeholder(tf.float32, shape = [None, cycle_length, num_feature], name = 'left')\nright = tf.placeholder(tf.float32, shape = [None, cycle_length, num_feature], name = 'right')\n\nnew_left = tf.reshape(left, [-1, cycle_length*num_feature])\nnew_right = tf.reshape(right, [-1, cycle_length*num_feature])\n\nY = tf.placeholder(tf.float32, shape = [None, 1])\n\nkeep_prob = tf.placeholder(tf.float32)\n\n\nleft_model = siamese(new_left, keep_prob, 
False)\nright_model = siamese(new_right, keep_prob, True)\n\nwith tf.variable_scope('Difference'):\n difference = tf.math.abs(left_model - right_model)\n\nwith tf.variable_scope('Dense'):\n\n W = tf.get_variable('W', shape = [difference.shape[-1], 1], dtype = tf.float32, initializer = tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.random_normal([1]), name = 'b')\n L = tf.matmul(difference, W) + b\n\nwith tf.name_scope('Training'):\n\n cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = L, labels = Y))\n\n global_step = tf.Variable(0)\n learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, 100, 0.9)\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n\n similarity_score = tf.nn.sigmoid(L)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(similarity_score), Y), tf.float32))\n\nwith tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n\n for iter in range(num_iter):\n\n c, a, _ = sess.run([cost, accuracy, optimizer], feed_dict = {left: train_data[:, 0, :, :], right: train_data[:, 1, :, :], Y: train_label, keep_prob: 0.7})\n print('Cost: {}, Accuracy: {}'.format(c, a))\n\n acc = sess.run(accuracy, feed_dict = {left: test_data[:, 0, :, :], right: test_data[:, 1, :, :], Y: test_label, keep_prob: 1.0})\n print(acc)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#end\n","repo_name":"binayakranjan/DEEP-GAIT","sub_path":"siamese.py","file_name":"siamese.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71313735893","text":"#https://gitpython.readthedocs.io/en/stable/tutorial.html\n# > python3.6.2\n\nimport git\n\nPATH = \"E:\\\\divyaprojects\\\\visualpathdevops\\\\class17\\\\sample_repo\"\nGIT_CLONE_URL = \"https://gitlab.com/pavan-projects/april_come.git\"\nBRANCH_NAME = \"automation_branch\"\n\n#clone step\n\ntry:\n repo = git.Repo.clone_from(GIT_CLONE_URL, PATH)\nexcept git.exc.GitCommandError:\n repo = git.Repo(PATH)\n\n\ntry:\n repo.git.checkout(\"-b\", \"devops_branch\")\nexcept git.exc.GitCommandError:\n repo.git.checkout(\"devops_branch\")\n\n\ntry:\n repo.git.add(\"--all\")\n repo.git.commit('-m', \"sample commit\", author=\"parameswara.kuna@gmail.com\")\nexcept git.exc.GitCommandError:\n print(\"noting to add\")\n\n\n\norigin = repo.remote(name=\"origin\")\norigin.push()\n\n\n\n\n\n\n\n","repo_name":"girwarkishor/python-devops","sub_path":"class17/git_automation.py","file_name":"git_automation.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10913805061","text":"import errno\nimport fcntl\nimport json\nimport os\nimport time\nfrom subprocess import PIPE\nfrom subprocess import Popen\n\nimport mock\nimport service_configuration_lib\nfrom behave import given\nfrom behave import then\nfrom behave import when\nfrom itest_utils import get_service_connection_string\nfrom kazoo.exceptions import NodeExistsError\nfrom steps.setup_steps import modify_configs\n\nfrom paasta_tools.marathon_tools import list_all_marathon_app_ids\nfrom paasta_tools.marathon_tools import load_marathon_service_config_no_cache\nfrom paasta_tools.utils import decompose_job_id\nfrom paasta_tools.utils import SystemPaastaConfig\nfrom paasta_tools.utils import ZookeeperPool\n\n\n@given('paasta-deployd is running')\ndef start_deployd(context):\n try:\n os.makedirs('/nail/etc/services')\n except OSError 
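The scoring head above takes `|left - right|`, a dense projection, and a sigmoid to turn two gait embeddings into a same-user probability. The arithmetic, reduced to NumPy with invented shapes so it runs outside TF1:

```python
# Numeric sketch of the Siamese scoring head: L1 distance -> dense -> sigmoid.
import numpy as np

def similarity(emb_left, emb_right, W, b):
    diff = np.abs(emb_left - emb_right)   # |f(a) - f(b)|, elementwise
    logit = diff @ W + b                  # dense layer: [dim] -> scalar
    return 1.0 / (1.0 + np.exp(-logit))   # sigmoid -> P(same user)

rng = np.random.default_rng(0)
dim = 8
W, b = rng.normal(size=(dim, 1)), 0.0
a = rng.normal(size=(1, dim))
print(similarity(a, a, W, b).item())   # 0.5: zero distance, untrained head
print(similarity(a, -a, W, b).item())  # drifts away from 0.5
```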
as e:\n if e.errno == errno.EEXIST:\n pass\n with ZookeeperPool() as zk:\n try:\n zk.create('/autoscaling')\n except NodeExistsError:\n pass\n context.zk_hosts = '%s/mesos-testcluster' % get_service_connection_string('zookeeper')\n context.soa_dir = '/nail/etc/services'\n if not hasattr(context, 'daemon'):\n context.daemon = Popen('paasta-deployd', stderr=PIPE)\n output = context.daemon.stderr.readline().decode('utf-8')\n start = time.time()\n timeout = start + 60\n while \"Startup finished!\" not in output:\n output = context.daemon.stderr.readline().decode('utf-8')\n if not output:\n raise Exception(\"deployd exited prematurely\")\n print(output.rstrip('\\n'))\n if time.time() > timeout:\n raise Exception(\"deployd never ran\")\n time.sleep(5)\n\n\n@then('paasta-deployd can be stopped')\ndef stop_deployd(context):\n context.daemon.terminate()\n context.daemon.wait()\n\n\n@then('a second deployd does not become leader')\ndef start_second_deployd(context):\n context.daemon1 = Popen('paasta-deployd', stderr=PIPE)\n output = context.daemon1.stderr.readline().decode('utf-8')\n fd = context.daemon1.stderr\n fl = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n for i in range(0, 5):\n try:\n output = context.daemon1.stderr.readline().decode('utf-8')\n print(output.rstrip('\\n'))\n assert 'This node is elected as leader' not in output\n except IOError:\n pass\n time.sleep(1)\n\n\n@then('a second deployd becomes leader')\ndef second_deployd_is_leader(context):\n try:\n output = context.daemon1.stderr.readline().decode('utf-8')\n except IOError:\n output = ''\n start = time.time()\n timeout = start + 60\n while \"This node is elected as leader\" not in output:\n try:\n output = context.daemon1.stderr.readline().decode('utf-8')\n except IOError:\n output = ''\n if output:\n print(output.rstrip('\\n'))\n if time.time() > timeout:\n raise Exception(\"Timed out waiting for second deployd leader\")\n time.sleep(1)\n context.daemon1.terminate()\n context.daemon1.wait()\n\n\n@then('we should see \"{service_instance}\" listed in marathon after {seconds:d} seconds')\ndef check_app_running(context, service_instance, seconds):\n service, instance, _, _ = decompose_job_id(service_instance)\n service_configuration_lib._yaml_cache = {}\n context.marathon_config = load_marathon_service_config_no_cache(service, instance, context.cluster)\n context.app_id = context.marathon_config.format_marathon_app_dict()['id']\n step = 5\n attempts = 0\n context.current_client = context.marathon_clients.get_current_client_for_service(context.marathon_config)\n while (attempts * step) < seconds:\n if context.app_id in list_all_marathon_app_ids(context.current_client):\n break\n time.sleep(step)\n attempts += 1\n assert context.app_id in list_all_marathon_app_ids(context.current_client)\n context.old_app_id = context.app_id\n\n\n@then('we should not see the old version listed in marathon after {seconds:d} seconds')\ndef check_app_not_running(context, seconds):\n step = 5\n attempts = 0\n while (attempts * step) < seconds:\n if context.old_app_id not in list_all_marathon_app_ids(context.current_client):\n return\n time.sleep(step)\n attempts += 1\n assert context.old_app_id not in list_all_marathon_app_ids(context.current_client)\n\n\n@then('we set a new command for our service instance to {cmd}')\ndef set_cmd(context, cmd):\n context.cmd = cmd\n\n\n@then('the appid for \"{service_instance}\" should have changed')\ndef check_sha_changed(context, service_instance):\n service, instance, _, _ = 
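`start_second_deployd` above flips the daemon's stderr to O_NONBLOCK so the test loop can poll it without hanging. The same trick in isolation, reading the raw fd with `os.read`, which raises `BlockingIOError` when no data is ready (POSIX only):

```python
# Standalone sketch of non-blocking pipe polling, as used in the test above.
import fcntl
import os
import subprocess
import time

proc = subprocess.Popen(["bash", "-c", "sleep 1; echo hi >&2"],
                        stderr=subprocess.PIPE)
fd = proc.stderr.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

for _ in range(6):
    try:
        chunk = os.read(fd, 4096)   # b'' would mean EOF
    except BlockingIOError:         # nothing written yet
        chunk = None
    print("got:", chunk)
    time.sleep(0.3)
proc.wait()
```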
decompose_job_id(service_instance)\n service_configuration_lib._yaml_cache = {}\n context.marathon_config = load_marathon_service_config_no_cache(service, instance, context.cluster)\n assert context.app_id != context.marathon_config.format_marathon_app_dict()['id']\n\n\n@given('we have a secret called \"{secret_name}\" for the service \"{service}\" with signature \"{signature}\"')\ndef create_secret_json_file(context, secret_name, service, signature):\n secret = {\n 'environments': {\n 'devc': {\n 'ciphertext': 'ScrambledNonsense',\n 'signature': signature,\n },\n },\n }\n if not os.path.exists(os.path.join(context.soa_dir, service, \"secrets\")):\n os.makedirs(os.path.join(context.soa_dir, service, \"secrets\"))\n\n with open(os.path.join(context.soa_dir, service, \"secrets\", f\"{secret_name}.json\"), \"w\") as secret_file:\n json.dump(secret, secret_file)\n\n\n@given(\n 'we set the an environment variable called \"{var}\" to \"{val}\" for '\n 'service \"{service}\" and instance \"{instance}\" for framework \"{framework}\"',\n)\ndef add_env_var(context, var, val, service, instance, framework):\n field = 'env'\n value = {var: val}\n modify_configs(context, field, framework, service, instance, value)\n\n\n@when('we set some arbitrary data at \"{zookeeper_path}\" in ZK')\ndef zookeeper_write_bogus_key(context, zookeeper_path):\n with mock.patch.object(SystemPaastaConfig, 'get_zk_hosts', autospec=True, return_value=context.zk_hosts):\n with ZookeeperPool() as zookeeper_client:\n zookeeper_client.ensure_path(zookeeper_path)\n zookeeper_client.set(zookeeper_path, b\"WHATEVER\")\n\n\n@given('we remove autoscaling ZK keys for test-service')\ndef zookeeper_rmr_keys(context):\n context.zk_hosts = '%s/mesos-testcluster' % get_service_connection_string('zookeeper')\n with mock.patch.object(SystemPaastaConfig, 'get_zk_hosts', autospec=True, return_value=context.zk_hosts):\n with ZookeeperPool() as zookeeper_client:\n zookeeper_client.delete(\"/autoscaling/test-service\", recursive=True)\n","repo_name":"eric-erki/An-open-distributed-platform-as-a-service","sub_path":"paasta_itests/steps/paasta_deployd_steps.py","file_name":"paasta_deployd_steps.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18891398176","text":"import pkgutil\nimport tkinter as tk\nimport tkinter.filedialog\nimport tkinter.messagebox\nimport tkinter.simpledialog\nfrom math import floor\nfrom tkinter import ttk\n\nimport ttkthemes\nfrom typing import Any\nfrom typing import Callable\nfrom typing import MutableMapping\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\nfrom typing import cast as _cast\n\nfrom .common import ADDRESS_BITS\nfrom .common import BYTE_ENCODINGS\nfrom .common import LINE_LENGTHS\nfrom .common import PROGRAM_TITLE\nfrom .common import BaseInstanceManager\nfrom .common import BaseEditorWidget\nfrom .common import BaseEngine\nfrom .common import BaseUserInterface\nfrom .common import CellCoord\nfrom .common import CellCoords\nfrom .common import CharCoord\nfrom .common import CharCoords\nfrom .common import CursorMode\nfrom .common import EngineStatus\nfrom .common import FloatCoords\nfrom .common import SelectionMode\nfrom .engine import Engine\nfrom .utils import HEX_SET\nfrom .utils import ValueFormatEnum\nfrom .utils import parse_int\nfrom bytesparse.base import Address\n\n\n# 
=====================================================================================================================\n\nPixelCoord = int\nPixelCoords = Tuple[PixelCoord, PixelCoord]\n\nCanvasObject = int\n\n\n_THEME: str = 'black'\n_COLOR_BG: str = 'SystemWindow'\n_COLOR_FG: str = 'SystemWindowText'\n_COLOR_OG: str = 'grey25'\n_COLOR_CUR: str = 'red'\n_COLOR_SEL_BG: str = 'SystemHighlight'\n_COLOR_SEL_FG: str = 'SystemHighlightText'\n_COLOR_SEL_OG: str = 'grey75'\n\n_TOOLTIP_FONT: Union[Tuple[str, int], str] = 'TkTooltipFont'\n_TOOLTIP_FG: str = 'SystemButtonText'\n_TOOLTIP_BG: str = 'lightyellow'\n_TOOLTIP_CLEARANCE: PixelCoords = (5, 50)\n\n\ndef _is_shift_in_event(event: Any = None) -> bool:\n return (event.state & 1) != 0 if event else False\n\n\ndef _fix_global_colors(root: ttkthemes.ThemedTk) -> None:\n global _COLOR_BG\n global _COLOR_FG\n global _COLOR_OG\n global _COLOR_SEL_FG\n global _COLOR_SEL_BG\n global _COLOR_SEL_OG\n global _TOOLTIP_FG\n global _TOOLTIP_BG\n ttk_style = ttk.Style()\n\n bg_color = ttk_style.lookup('TLabelFrame', 'background') or _COLOR_BG\n _COLOR_BG = bg_color\n bg_rgb = root.winfo_rgb(bg_color)\n\n fg_color = ttk_style.lookup('TLabelFrame', 'foreground') or _COLOR_FG\n _COLOR_FG = fg_color\n fg_rgb = root.winfo_rgb(fg_color)\n\n _COLOR_OG = mix_color_hex(*fg_rgb, *bg_rgb, 0.25)\n\n sel_bg_color = ttk_style.lookup('TEntry', 'selectbackground') or _COLOR_SEL_BG\n _COLOR_SEL_BG = sel_bg_color\n sel_bg_rgb = root.winfo_rgb(sel_bg_color)\n\n sel_fg_color = ttk_style.lookup('TEntry', 'selectforeground') or _COLOR_SEL_FG\n _COLOR_SEL_FG = sel_fg_color\n sel_fg_rgb = root.winfo_rgb(sel_fg_color)\n\n _COLOR_SEL_OG = mix_color_hex(*sel_fg_rgb, *sel_bg_rgb, 0.25)\n\n _TOOLTIP_FG = _COLOR_SEL_FG\n _TOOLTIP_BG = _COLOR_SEL_BG\n\n\ndef mix_color_hex(x_r, x_g, x_b, y_r, y_g, y_b, m) -> str:\n r = (max(0, min(int(((1 - m) * x_r) + (m * y_r)), 65535)) + 128) // 256\n g = (max(0, min(int(((1 - m) * x_g) + (m * y_g)), 65535)) + 128) // 256\n b = (max(0, min(int(((1 - m) * x_b) + (m * y_b)), 65535)) + 128) // 256\n c = f'#{r:02X}{g:02X}{b:02X}'\n return c\n\n\n# =====================================================================================================================\n\n_image_cache = {}\n\n\ndef load_image(path: str) -> tk.PhotoImage:\n global _image_cache\n image = _image_cache.get(path)\n if image is None:\n # See: https://stackoverflow.com/a/58941536\n data = pkgutil.get_data(__name__, path)\n image = tk.PhotoImage(data=data)\n _image_cache[path] = image\n return image\n\n\n# =====================================================================================================================\n\ndef __merge_extensions(hexrec_format):\n extensions = hexrec_format.Record.EXTENSIONS\n return ';'.join(f'*{ext}' for ext in extensions)\n\n\nif 1: # import location hack\n import hexrec.formats.binary as _hf_binary\n import hexrec.formats.intel as _hf_intel\n import hexrec.formats.mos as _hf_mos\n import hexrec.formats.motorola as _hf_motorola\n import hexrec.formats.tektronix as _hf_tektronix\n\nFILE_TYPES = (\n ('Binary', __merge_extensions(_hf_binary)),\n ('Intel HEX', __merge_extensions(_hf_intel)),\n ('MOS', __merge_extensions(_hf_mos)),\n ('Motorola S-record', __merge_extensions(_hf_motorola)),\n ('Tektronix', __merge_extensions(_hf_tektronix)),\n ('All files', '*'),\n)\n\n\n# =====================================================================================================================\n\nclass Tooltip:\n\n def __init__(\n self,\n widget: ttk.Widget,\n 
text: str = '',\n time: int = 2000,\n font: Union[Tuple[str, int], str] = _TOOLTIP_FONT,\n fg: Optional[str] = None,\n bg: Optional[str] = None,\n clearance_x: int = _TOOLTIP_CLEARANCE[0],\n clearance_y: int = _TOOLTIP_CLEARANCE[1],\n ):\n self._widget = widget\n self._text = text\n self._time = time\n self._font = font\n self._fg = fg or _TOOLTIP_FG\n self._bg = bg or _TOOLTIP_BG\n self._clearance_x = clearance_x\n self._clearance_y = clearance_y\n self._tooltip: Optional[tk.Toplevel] = None\n\n widget.focus_displayof()\n widget.bind('', self._enter)\n widget.bind('', self._leave)\n\n @property\n def widget(self) -> ttk.Widget:\n return self._widget\n\n def config(\n self,\n text: Optional[str] = None,\n time: Optional[int] = None,\n font: Optional[str] = None,\n fg: Optional[str] = None,\n bg: Optional[str] = None,\n clearance_x: Optional[int] = None,\n clearance_y: Optional[int] = None,\n ) -> None:\n if text is not None:\n self._text = text\n if time is not None:\n self._time = time\n if font is not None:\n self._font = font\n if fg is not None:\n self._fg = fg\n if bg is not None:\n self._bg = bg\n if clearance_x is not None:\n self._clearance_x = clearance_x\n if clearance_y is not None:\n self._clearance_y = clearance_y\n\n def _enter(self, event=None):\n widget = self._widget\n tooltip = self._tooltip\n\n if tooltip is None:\n tooltip = tk.Toplevel(widget)\n self._tooltip = tooltip\n tooltip.overrideredirect(True)\n\n label = tk.Label(tooltip, text=self._text, fg=self._fg, bg=self._bg,\n relief=tk.RIDGE, borderwidth=1, font=self._font)\n label.pack(ipadx=5)\n\n tooltip.update_idletasks()\n\n screen_w = widget.winfo_screenwidth()\n screen_h = widget.winfo_screenheight()\n\n widget_x = widget.winfo_rootx()\n widget_y = widget.winfo_rooty()\n widget_w = widget.winfo_width()\n widget_h = widget.winfo_height()\n\n tooltip_w = tooltip.winfo_width()\n tooltip_h = tooltip.winfo_height()\n\n clearance_x = self._clearance_x\n clearance_y = self._clearance_y\n\n if widget_x + widget_w + clearance_x + tooltip_w < screen_w:\n x = widget_x + clearance_x # widget match\n else:\n x = widget_x + clearance_x - tooltip_w # widget left\n\n if widget_y + clearance_y + tooltip_h < screen_h:\n y = widget_y + widget_h # below widget\n else:\n y = widget_y - tooltip_h # above widget\n\n tooltip.wm_geometry(f'{x:+d}{y:+d}')\n\n if self._time:\n tooltip.after(self._time, self._leave)\n\n def _leave(self, event=None):\n tooltip = self._tooltip\n if tooltip is not None:\n tooltip.destroy()\n self._tooltip = None\n\n\n# =====================================================================================================================\n\nclass ToolbarTray(ttk.Frame):\n\n def __init__(self, parent, text_kwargs=None, **kwargs):\n d = dict(\n # highlightthickness=0, # missing with ttk\n takefocus=0,\n )\n d.update(kwargs)\n kwargs = d\n super().__init__(parent, **kwargs)\n self.pack_propagate(False)\n\n if text_kwargs is None:\n text_kwargs = {}\n text_kwargs.setdefault('width', 1)\n text_kwargs.setdefault('height', 1)\n text_kwargs.setdefault('padx', 0)\n text_kwargs.setdefault('pady', 0)\n # text_kwargs.setdefault('highlightthickness', 0) # missing with ttk\n text_kwargs.setdefault('insertborderwidth', 0)\n text_kwargs.setdefault('selectborderwidth', 0)\n text_kwargs.setdefault('bg', _COLOR_BG)\n text_kwargs.setdefault('takefocus', 0)\n text_kwargs.setdefault('spacing1', 0)\n text_kwargs.setdefault('spacing2', 0)\n text_kwargs.setdefault('spacing3', 0)\n if 'borderwidth' not in text_kwargs and 'bd' not in 
text_kwargs:\n text_kwargs.setdefault('borderwidth', 0)\n\n container = tk.Text(self, **text_kwargs)\n self._container = container\n container.pack(side=tk.TOP, expand=True, fill=tk.BOTH)\n self._bg = text_kwargs['bg']\n\n container.configure(bg=self._bg, cursor='arrow', state=tk.DISABLED)\n container.bind('<Key>', lambda _: 'break')\n container.bind('<1>', lambda _: 'break')\n\n self.bind('<Configure>', self._on_configure)\n\n def add_widget(self, widget):\n self._container.window_create(tk.INSERT, window=widget)\n\n def _on_configure(self, event=None, force=False):\n self.update_idletasks()\n container = self._container\n container.configure(bg=self._bg)\n\n borderwidth = self.cget('borderwidth')\n widget_height = self.winfo_height()\n content_height = container.count('1.0', tk.END, 'update', 'ypixels')\n height = content_height + (borderwidth * 2)\n\n if widget_height != height or force:\n self.configure(height=height)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n\nclass Toolbar(ttk.Frame):\n\n def __init__(self, parent, **kwargs):\n if 'borderwidth' not in kwargs and 'bd' not in kwargs:\n kwargs.setdefault('borderwidth', 1)\n kwargs.setdefault('relief', tk.RIDGE)\n super().__init__(parent, **kwargs)\n\n self._widgets: MutableMapping[Any, ttk.Widget] = {}\n self._tooltips: MutableMapping[Any, Tooltip] = {}\n\n @property\n def widget_count(self) -> int:\n return len(self._widgets)\n\n def get_widget(self, name: str) -> ttk.Widget:\n return self._widgets[name]\n\n def add_widget(\n self, widget: ttk.Widget,\n key: Any = None,\n tooltip: Optional[str] = None,\n ) -> ttk.Widget:\n\n if tooltip and key is None:\n key = tooltip # fall back to the tooltip text as the lookup key\n self._widgets[key] = widget\n if tooltip:\n self._tooltips[key] = Tooltip(widget, text=tooltip)\n return widget\n\n def add_button(\n self,\n key: Any = None,\n tooltip: Optional[str] = None,\n **kwargs\n ) -> ttk.Widget:\n\n if tooltip and not key:\n key = tooltip\n kwargs.setdefault('style', 'Toolbutton')\n kwargs.setdefault('takefocus', 0)\n widget = ttk.Button(self, **kwargs)\n self.add_widget(widget, key=key, tooltip=tooltip)\n return widget\n\n def add_separator(self, **kwargs) -> ttk.Widget:\n kwargs.setdefault('orient', tk.VERTICAL)\n widget = ttk.Separator(self, **kwargs)\n key = -self.widget_count # separators have negative integer key\n self.add_widget(widget, key=key)\n return widget\n\n def finalize(self, pad_x: int = 4, pad_y: int = 1, pad_y_sep: int = 4) -> None:\n last = self.widget_count - 1\n\n for index, (key, widget) in enumerate(self._widgets.items()):\n pad_l = 0\n pad_r = 0\n pad_v = pad_y\n sticky = None\n\n if isinstance(key, int):\n if key < 0:\n pad_l = pad_x\n pad_r = pad_x\n pad_v = pad_y_sep\n sticky = tk.NS\n\n else:\n pad_l = pad_x if index <= 0 else 0\n pad_r = pad_x if index >= last else 0\n pad_v = pad_y\n sticky = tk.NS\n\n widget.grid(row=0, column=index, padx=(pad_l, pad_r), pady=pad_v, sticky=sticky)\n\n\n# =====================================================================================================================\n\nclass EditorWidget(BaseEditorWidget, ttk.Frame):\n\n def __init__(\n self,\n parent,\n engine: BaseEngine,\n status: EngineStatus,\n width: PixelCoord = 200,\n height: PixelCoord = 100,\n pad_x: int = 4,\n pad_y: int = 2,\n **kwargs: Any,\n ):\n self._engine = engine\n self._status = status # read-only\n\n kwargs.setdefault('padding', (0, 0))\n ttk.Frame.__init__(self, parent, width=width, height=height, **kwargs)\n\n
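# Initialization order matters below: __init_misc() computes the font\n # metrics and view width that every other __init_* step depends on, and\n # the key/mouse bindings are installed last, once all the canvases exist.\n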
BaseEditorWidget.__init__(self)\n\n self.__init_misc(pad_x, pad_y)\n self.__init_address_bar()\n self.__init_offset_bar()\n self.__init_cells_view()\n self.__init_chars_view()\n self.__init_cursor()\n self.__init_layout()\n self.__init_bindings()\n\n self.on_cells_focus_out()\n\n def __init_misc(self, pad_x: PixelCoord, pad_y: PixelCoord) -> None:\n self._pad_x = pad_x\n self._pad_y = pad_y\n\n font = tk.font.Font(font=('Consolas', 10))\n font_w = font.measure('#')\n font_h = font.metrics('linespace')\n\n self._font = font\n self._font_w = font_w\n self._font_h = font_h\n\n status = self._status\n line_length = status.line_length\n cell_format_length = status.cell_format_length\n cell_spacing = status.cell_spacing\n offset_format_length = status.offset_format_length\n offset_spacing = status.offset_spacing\n\n offset_w = pad_x + (font_w * (line_length * (offset_format_length + offset_spacing) - 1)) + pad_x\n view_w = pad_x + (font_w * (line_length * (cell_format_length + cell_spacing) - 1)) + pad_x\n self._view_w = max(offset_w, view_w)\n\n self._sel_start_address_prev: Address = -1 # dummy\n self._sel_endin_address_prev: Address = -1 # dummy\n\n def __init_address_bar(self) -> None:\n pad_x, pad_y = self._pad_x, self._pad_y\n font_w, font_h = self._font_w, self._font_h\n address_format_length = self._status.address_format_length\n\n address_skip_label = ttk.Label(self, anchor=tk.NW, font=self._font, padding=(pad_x, pad_y), borderwidth=0)\n self._address_skip_label = address_skip_label\n Tooltip(address_skip_label, text='Address skip')\n\n address_canvas_w = pad_x + (font_w * address_format_length) + pad_x\n address_canvas_h = pad_y + (font_h * 16) + pad_y\n self._address_canvas_size: PixelCoords = (address_canvas_w, address_canvas_h)\n\n address_canvas = tk.Canvas(self, width=address_canvas_w, height=address_canvas_h,\n bg=_COLOR_BG, borderwidth=0, highlightthickness=0)\n self._address_canvas = address_canvas\n self._address_canvas_w: PixelCoord = address_canvas_w\n\n self._addrs_text_id: MutableMapping[CellCoord, CanvasObject] = {}\n\n def __init_offset_bar(self) -> None:\n pad_x, pad_y = self._pad_x, self._pad_y\n font_w, font_h = self._font_w, self._font_h\n font = self._font\n\n offset_w = self._view_w\n offset_h = pad_y + font_h + pad_y\n self._offset_canvas_size: PixelCoords = (offset_w, offset_h)\n\n offset_canvas = tk.Canvas(self, width=offset_w, height=offset_h,\n bg=_COLOR_BG, borderwidth=0, highlightthickness=0,\n scrollregion=(0, 0, offset_w, 1))\n self._offset_canvas = offset_canvas\n\n offset_text_id: CanvasObject = offset_canvas.create_text(1 + pad_x, pad_y, text='', anchor=tk.NW,\n font=font, fill=_COLOR_FG)\n self._offset_text_id = offset_text_id\n\n def __init_cells_view(self) -> None:\n pad_y = self._pad_y\n font_h = self._font_h\n view_w = self._view_w\n view_h = pad_y + (font_h * 16) + pad_y\n self._cells_pixel_size: PixelCoords = (view_w, view_h)\n self._cells_pixel_x: PixelCoord = 0 # dummy\n self._cells_pixel_y: PixelCoord = 0 # dummy\n self._cells_pixel_y_prev: PixelCoord = -1 # dummy\n\n cells_canvas = tk.Canvas(self, width=view_w, height=view_h, borderwidth=1, highlightthickness=0,\n relief=tk.SUNKEN, bg=_COLOR_BG, cursor='xterm',\n scrollregion=(0, 0, view_w, 1), takefocus=1)\n self._cells_canvas = cells_canvas\n\n self._cells_text_id: MutableMapping[CellCoords, CanvasObject] = {}\n self._cells_rect_id: MutableMapping[CellCoords, CanvasObject] = {}\n\n cells_vbar = ttk.Scrollbar(self, orient=tk.VERTICAL)\n cells_vbar.set(0, 1)\n 
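# The vertical bar is driven manually instead of through yscrollcommand:\n # _on_vbar() turns scrollbar commands into a pixel offset for the view,\n # while update_vbar() recomputes the thumb from the visible address range.\n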
cells_vbar.configure(command=self._on_vbar)\n self._cells_vbar: ttk.Scrollbar = cells_vbar\n\n cells_hbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)\n cells_hbar.configure(command=self._on_hbar)\n cells_canvas.configure(xscrollcommand=cells_hbar.set)\n self._offset_canvas.configure(xscrollcommand=cells_hbar.set)\n self._cells_hbar: ttk.Scrollbar = cells_hbar\n\n # Cell status cache, faster than Tk\n self._cells_dirty: set = set()\n self._cells_pixel: MutableMapping[CellCoords, PixelCoords] = {}\n self._cells_selected: set = set()\n self._cells_text_str: MutableMapping[CellCoords, str] = {}\n\n def __init_chars_view(self) -> None:\n pad_x = self._pad_x\n font_w = self._font_w\n status = self._status\n line_length = status.line_length\n\n chars_w = pad_x + (font_w * line_length) + pad_x\n chars_h = self._cells_pixel_size[1]\n chars_canvas = tk.Canvas(self, width=chars_w, height=chars_h, borderwidth=1, highlightthickness=0,\n relief=tk.SUNKEN, bg=_COLOR_BG, cursor='xterm',\n scrollregion=(0, 0, chars_w, 1), takefocus=1)\n self._chars_canvas = chars_canvas\n\n self._chars_text_id: MutableMapping[CellCoords, CanvasObject] = {}\n self._chars_rect_id: MutableMapping[CellCoords, CanvasObject] = {}\n\n chars_hbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)\n chars_hbar.configure(command=chars_canvas.xview)\n chars_canvas.configure(xscrollcommand=chars_hbar.set)\n self._chars_hbar: ttk.Scrollbar = chars_hbar\n\n self._chars_title = ttk.Label(self, text='Text', anchor=tk.W)\n\n def __init_layout(self) -> None:\n self._address_skip_label.grid(row=0, column=0, sticky=tk.EW)\n self._offset_canvas.grid(row=0, column=1, sticky=tk.EW)\n self._chars_title.grid(row=0, column=3, sticky=tk.EW)\n\n self._address_canvas.grid(row=1, column=0, sticky=tk.NSEW)\n self._cells_canvas.grid(row=1, column=1, sticky=tk.NSEW)\n self._cells_vbar.grid(row=1, column=2, sticky=tk.NS)\n self._chars_canvas.grid(row=1, column=3, sticky=tk.NSEW)\n\n self._cells_hbar.grid(row=2, column=1, sticky=tk.EW)\n self._chars_hbar.grid(row=2, column=3, sticky=tk.EW)\n\n self.rowconfigure(1, weight=1)\n self.columnconfigure(1, weight=6, minsize=64)\n self.columnconfigure(3, weight=1, minsize=64)\n\n def __init_cursor(self) -> None:\n color = _COLOR_FG\n\n self._cells_cursor_color: str = color\n cells_cursor_id = self._cells_canvas.create_line(-2, -2, -1, -1, width=2, fill=color, tags='cursor')\n self._cells_cursor_id: CanvasObject = cells_cursor_id\n\n self._chars_cursor_color: str = color\n chars_cursor_id = self._chars_canvas.create_line(-2, -2, -1, -1, width=2, fill=color, tags='cursor')\n self._chars_cursor_id: CanvasObject = chars_cursor_id\n\n def __init_bindings(self) -> None:\n\n control_bindings = {\n '': self.on_key_reserve_cell,\n '': self.on_key_delete_cell,\n '': self.on_key_clear_cell,\n '': self.on_key_clear_back,\n '': self.on_key_clear_next,\n '': self.on_key_delete,\n '$': self.on_key_fill,\n '%': self.on_key_flood,\n\n '': self.on_key_cut,\n '': self.on_key_cut,\n\n '': self.on_key_copy,\n '': self.on_key_copy,\n '': self.on_key_copy,\n\n '': self.on_key_paste,\n '': self.on_key_paste,\n '': self.on_key_paste,\n\n '': self.on_key_crop,\n\n '': self.on_key_move_focus,\n '': self.on_key_move_apply,\n\n '': self.on_key_scroll_line_up,\n '': self.on_key_scroll_page_up,\n\n '': self.on_key_scroll_line_down,\n '': self.on_key_scroll_page_down,\n\n '': self.on_key_scroll_top,\n '': self.on_key_scroll_bottom,\n\n '': self.on_key_move_left_digit,\n '': self.on_key_move_left_digit,\n\n '': self.on_key_move_left_byte,\n '': 
self.on_key_move_left_byte,\n\n '': self.on_key_goto_block_previous,\n '': self.on_key_goto_block_previous,\n\n '': self.on_key_move_right_digit,\n '': self.on_key_move_right_digit,\n\n '': self.on_key_move_right_byte,\n '': self.on_key_move_right_byte,\n\n '': self.on_key_goto_block_next,\n '': self.on_key_goto_block_next,\n\n '': self.on_key_move_line_up,\n '': self.on_key_move_line_up,\n\n '': self.on_key_move_page_up,\n '': self.on_key_move_page_up,\n\n '': self.on_key_move_line_down,\n '': self.on_key_move_line_down,\n\n '': self.on_key_move_page_down,\n '': self.on_key_move_page_down,\n\n '': self.on_key_goto_line_start,\n '': self.on_key_goto_line_start,\n\n '': self.on_key_goto_line_endin,\n '': self.on_key_goto_line_endin,\n\n '': self.on_key_goto_memory_focus,\n '': self.on_key_goto_memory_apply,\n\n '': self.on_key_goto_memory_start,\n '': self.on_key_goto_memory_start,\n\n '': self.on_key_goto_memory_endin,\n '': self.on_key_goto_memory_endin,\n\n '': self.on_key_goto_memory_endex,\n '': self.on_key_goto_memory_endex,\n\n '': self.on_key_goto_block_start,\n '': self.on_key_goto_block_start,\n\n '': self.on_key_goto_block_endin,\n '': self.on_key_goto_block_endin,\n\n '': self.on_key_copy_address,\n '': self.on_key_set_address,\n '': self.on_key_select_all,\n '': self.on_key_select_range,\n '': self.on_key_escape_selection,\n '': self.on_key_switch_cursor_mode,\n '': self.on_key_redraw,\n\n '': self.on_key_undo,\n '': self.on_key_undo,\n '': self.on_key_redo,\n '': self.on_key_redo,\n '': self.on_key_redo,\n }\n\n mouse_bindings = {\n '<Button-1>': self.on_cells_selection_press,\n '<Double-Button-1>': self.on_cells_selection_double,\n '<B1-Motion>': self.on_cells_selection_motion,\n '<Shift-B1-Motion>': self.on_cells_selection_motion,\n '<ButtonRelease-1>': self.on_cells_selection_release,\n '<MouseWheel>': self.on_cells_chars_wheel,\n }\n\n # Bind data view canvas actions\n cells_canvas = self.cells_canvas\n\n for key, handler in control_bindings.items():\n cells_canvas.bind(key, handler)\n\n for key, handler in mouse_bindings.items():\n cells_canvas.bind(key, handler)\n\n for key in HEX_SET:\n cells_canvas.bind(key, self.on_key_digit_cells)\n\n cells_canvas.bind('<FocusIn>', self.on_cells_focus_in)\n cells_canvas.bind('<FocusOut>', self.on_cells_focus_out)\n\n # Bind address canvas actions\n address_canvas = self.address_canvas\n address_canvas.bind('<MouseWheel>', self.on_cells_chars_wheel)\n\n # Bind chars canvas actions\n control_bindings.update({\n '<Left>': self.on_key_move_left_byte,\n '<Right>': self.on_key_move_right_byte,\n })\n\n mouse_bindings = {\n '<Button-1>': self.on_chars_selection_press,\n '<Double-Button-1>': self.on_chars_selection_double,\n '<B1-Motion>': self.on_chars_selection_motion,\n '<Shift-B1-Motion>': self.on_chars_selection_motion,\n '<ButtonRelease-1>': self.on_chars_selection_release,\n '<MouseWheel>': self.on_cells_chars_wheel,\n }\n\n chars_canvas = self.chars_canvas\n\n for key, handler in control_bindings.items():\n chars_canvas.bind(key, handler)\n\n for key, handler in mouse_bindings.items():\n chars_canvas.bind(key, handler)\n\n chars_canvas.bind('<Key>', self.on_key_digit_chars)\n chars_canvas.bind('<FocusIn>', self.on_chars_focus_in)\n chars_canvas.bind('<FocusOut>', self.on_chars_focus_out)\n\n # Bind widget actions\n self.bind('<Configure>', self.on_configure)\n\n def focus_set(self) -> None:\n self.focus_set_cells()\n\n def focus_set_cells(self) -> None:\n self._cells_canvas.focus_set()\n\n def focus_set_chars(self) -> None:\n if self._chars_visible:\n self._chars_canvas.focus_set()\n else:\n self.focus_set_cells()\n\n @property\n def cells_canvas(self) -> tk.Canvas:\n return self._cells_canvas\n\n @property\n def address_canvas(self) -> tk.Canvas:\n return
self._address_canvas\n\n @property\n def offset_canvas(self) -> tk.Canvas:\n return self._offset_canvas\n\n @property\n def chars_canvas(self) -> tk.Canvas:\n return self._chars_canvas\n\n @BaseEditorWidget.chars_visible.setter\n def chars_visible(self, visible: bool) -> None:\n visible = bool(visible)\n\n if self._chars_visible < visible:\n self._chars_visible = visible\n self._chars_title.grid()\n self._chars_canvas.grid()\n self._chars_hbar.grid()\n self.columnconfigure(3, weight=1, minsize=64)\n self.redraw()\n\n elif self._chars_visible > visible:\n self._chars_visible = visible\n self._chars_title.grid_remove()\n self._chars_canvas.grid_remove()\n self._chars_hbar.grid_remove()\n self.columnconfigure(3, weight=0, minsize=0)\n self.redraw()\n\n def get_half_page_height(self) -> CellCoord:\n cell_y = self._cells_canvas.winfo_height() // (self._font_h * 2)\n return cell_y\n\n def _on_hbar(self, *args):\n self._cells_canvas.xview(*args)\n self._offset_canvas.xview(*args)\n # self._cells_pixel_x = self._cells_canvas.canvasx(0)\n view_ratio_x = self._offset_canvas.xview()[0]\n self._cells_pixel_x = floor(self._cells_pixel_size[0] * view_ratio_x)\n\n def _on_vbar(self, mode, *args):\n cells_pixel_y = cells_pixel_y_prev = self._cells_pixel_y\n font_w, font_h = self._font_w, self._font_h\n\n if mode == tk.MOVETO:\n offset, = args\n\n elif mode == tk.SCROLL:\n step, what = args\n step = int(step)\n\n if what == tk.UNITS:\n if step > 0:\n for _ in range(step):\n remainder = cells_pixel_y % font_h\n cells_pixel_y += font_h - remainder\n elif step < 0:\n for _ in range(-step):\n remainder = cells_pixel_y % font_h\n cells_pixel_y -= remainder if remainder else font_h\n\n elif what == tk.PAGES:\n page_h = font_h * 0x100\n if step > 0:\n for _ in range(step):\n remainder = cells_pixel_y % page_h\n cells_pixel_y += page_h - remainder\n elif step < 0:\n for _ in range(-step):\n remainder = cells_pixel_y % page_h\n cells_pixel_y -= remainder if remainder else page_h\n\n if cells_pixel_y_prev != cells_pixel_y:\n self._cells_pixel_y = cells_pixel_y\n self.update_view()\n self.update_vbar()\n\n def on_key_digit_cells(self, event=None):\n self.after_idle(self._on_key_digit_cells, event)\n\n def _on_key_digit_cells(self, event=None):\n digit_char = event.char\n if digit_char.isprintable():\n self._engine.on_key_digit_cells(digit_char)\n\n def on_key_digit_chars(self, event=None):\n self.after_idle(self._on_key_digit_chars, event)\n\n def _on_key_digit_chars(self, event=None):\n digit_char = event.char\n if digit_char.isprintable():\n self._engine.on_key_digit_chars(digit_char)\n\n def on_key_reserve_cell(self, event=None):\n self._engine.on_key_reserve_cell()\n\n def on_key_delete_cell(self, event=None):\n self._engine.on_key_delete_cell()\n\n def on_key_clear_cell(self, event=None):\n self._engine.on_key_clear_cell()\n\n def on_key_clear_back(self, event=None):\n self._engine.on_key_clear_back()\n\n def on_key_clear_next(self, event=None):\n self._engine.on_key_clear_next()\n\n def on_key_delete(self, event=None):\n self._engine.on_key_delete()\n\n def on_key_fill(self, event=None):\n self._engine.on_key_fill()\n\n def on_key_flood(self, event=None):\n self._engine.on_key_flood()\n\n def on_key_cut(self, event=None):\n self._engine.on_key_cut()\n\n def on_key_copy(self, event=None):\n self._engine.on_key_copy()\n\n def on_key_paste(self, event=None):\n self._engine.on_key_paste()\n\n def on_key_crop(self, event=None):\n self._engine.on_key_crop()\n\n def on_key_move_focus(self, event=None):\n 
self._engine.on_key_move_focus()\n\n def on_key_move_apply(self, event=None):\n self._engine.on_key_move_apply()\n\n def on_key_scroll_line_up(self, event=None):\n self._engine.on_key_scroll_line_up()\n\n def on_key_scroll_page_up(self, event=None):\n self._engine.on_key_scroll_page_up()\n\n def on_key_scroll_line_down(self, event=None):\n self._engine.on_key_scroll_line_down()\n\n def on_key_scroll_page_down(self, event=None):\n self._engine.on_key_scroll_page_down()\n\n def on_key_scroll_top(self, event=None):\n self._engine.on_key_scroll_top()\n\n def on_key_scroll_bottom(self, event=None):\n self._engine.on_key_scroll_bottom()\n\n def on_key_move_left_digit(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_left_digit(shift)\n\n def on_key_move_right_digit(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_right_digit(shift)\n\n def on_key_move_left_byte(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_left_byte(shift)\n\n def on_key_move_right_byte(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_right_byte(shift)\n\n def on_key_move_line_up(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_line_up(shift)\n\n def on_key_move_page_up(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_page_up(shift)\n\n def on_key_move_line_down(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_line_down(shift)\n\n def on_key_move_page_down(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_move_page_down(shift)\n\n def on_key_goto_line_start(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_line_start(shift)\n\n def on_key_goto_line_endin(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_line_endin(shift)\n\n def on_key_goto_memory_apply(self, event=None):\n self._engine.on_key_goto_memory_apply()\n\n def on_key_goto_memory_focus(self, event=None):\n self._engine.on_key_goto_memory_focus()\n\n def on_key_goto_memory_start(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_memory_start(shift)\n\n def on_key_goto_memory_endin(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_memory_endin(shift)\n\n def on_key_goto_memory_endex(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_memory_endex(shift)\n\n def on_key_goto_block_previous(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_block_previous(shift)\n\n def on_key_goto_block_next(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_block_next(shift)\n\n def on_key_goto_block_start(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_block_start(shift)\n\n def on_key_goto_block_endin(self, event=None):\n shift = _is_shift_in_event(event)\n self._engine.on_key_goto_block_endin(shift)\n\n def on_key_copy_address(self, event=None):\n self._engine.on_key_copy_address()\n\n def on_key_set_address(self, event=None):\n self._engine.on_key_set_address()\n\n def on_key_select_all(self, event=None):\n self._engine.on_key_select_all()\n\n def on_key_select_range(self, event=None):\n self._engine.on_key_select_range()\n\n def on_key_escape_selection(self, event=None):\n self._engine.on_key_escape_selection()\n\n def on_key_switch_cursor_mode(self, event=None):\n 
self._engine.on_key_switch_cursor_mode()\n\n def on_key_redraw(self, event=None):\n self._engine.on_key_redraw()\n\n def on_key_undo(self, event=None):\n self._engine.on_key_undo()\n\n def on_key_redo(self, event=None):\n self._engine.on_key_redo()\n\n def on_cells_selection_press(self, event=None):\n cell_x, cell_y, digit = self.event_to_cursor_coords(event)\n self._engine.on_cells_selection_press(cell_x, cell_y, digit)\n\n def on_cells_selection_double(self, event=None):\n cell_x, cell_y, digit = self.event_to_cursor_coords(event)\n self._engine.on_cells_selection_double(cell_x, cell_y, digit)\n\n def on_cells_selection_motion(self, event=None):\n cell_x, cell_y, digit = self.event_to_cursor_coords(event)\n self._engine.on_cells_selection_motion(cell_x, cell_y, digit)\n\n def on_cells_selection_release(self, event=None):\n cell_x, cell_y, digit = self.event_to_cursor_coords(event)\n self._engine.on_cells_selection_release(cell_x, cell_y, digit)\n\n def on_chars_selection_press(self, event=None):\n char_x, char_y = self.event_to_char_coords(event)\n self._engine.on_chars_selection_press(char_x, char_y)\n\n def on_chars_selection_double(self, event=None):\n char_x, char_y = self.event_to_char_coords(event)\n self._engine.on_chars_selection_double(char_x, char_y)\n\n def on_chars_selection_motion(self, event=None):\n char_x, char_y = self.event_to_char_coords(event)\n self._engine.on_chars_selection_motion(char_x, char_y)\n\n def on_chars_selection_release(self, event=None):\n char_x, char_y = self.event_to_char_coords(event)\n self._engine.on_chars_selection_release(char_x, char_y)\n\n def on_cells_chars_wheel(self, event=None):\n self.scroll_wheel(event)\n\n def on_configure(self, event=None):\n self.update_idletasks()\n\n view_ratio_x = self._offset_canvas.xview()[0]\n self._cells_pixel_x = floor(self._cells_pixel_size[0] * view_ratio_x)\n\n self.update_vbar()\n self.update_view()\n\n def on_cells_focus_in(self, event=None):\n self._cells_cursor_color = _COLOR_CUR\n self.update_cursor()\n\n def on_cells_focus_out(self, event=None):\n self._cells_cursor_color = _COLOR_FG\n self.update_cursor()\n\n def on_chars_focus_in(self, event=None):\n self._chars_cursor_color = _COLOR_CUR\n self.update_cursor()\n\n def on_chars_focus_out(self, event=None):\n self._chars_cursor_color = _COLOR_FG\n self.update_cursor()\n\n def _on_wheel(self, event=None):\n step = -int(event.delta) // self._font_h\n self._on_vbar(tk.SCROLL, step, tk.UNITS)\n\n def get_cell_bounds_y(self) -> Tuple[CellCoord, CellCoord]:\n pad_y = self._pad_y\n pixel_h = self.cells_canvas.winfo_height()\n cell_start_y = floor(self.pixel_to_cell_coords(0, pad_y)[1])\n cell_endex_y = floor(self.pixel_to_cell_coords(0, pixel_h - pad_y)[1])\n return cell_start_y, cell_endex_y\n\n def mark_dirty_cell(\n self,\n cell_x: CellCoord,\n cell_y: CellCoord,\n ):\n self._cells_dirty.add((cell_x, cell_y))\n\n def mark_dirty_all(self):\n self._cells_dirty.update(self._cells_text_id.keys())\n\n def mark_dirty_inline(\n self,\n start_x: Optional[CellCoord] = None,\n start_y: Optional[CellCoord] = None,\n endin_x: Optional[CellCoord] = None,\n endin_y: Optional[CellCoord] = None,\n ):\n if start_y is None:\n start_y = self._cell_start[1]\n if endin_y is None:\n endin_y = self._cell_endex[1] - 1\n\n if start_y <= endin_y:\n if start_x is None:\n start_x = self._cell_start[0]\n if endin_x is None:\n endin_x = self._cell_endex[0] - 1\n\n status = self._status\n line_length = status.line_length\n cells_dirty = self._cells_dirty\n\n if start_y == endin_y:\n 
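# Single-line span: dirty only the run from start_x through endin_x.\n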
cells_dirty.update((x, start_y) for x in range(start_x, endin_x + 1))\n else:\n cells_dirty.update((x, start_y) for x in range(start_x, line_length))\n cells_dirty.update((x, y) for y in range(start_y + 1, endin_y) for x in range(0, line_length))\n cells_dirty.update((x, endin_y) for x in range(0, endin_x + 1))\n\n def mark_dirty_range(\n self,\n start_address: Optional[Address] = None,\n endex_address: Optional[Address] = None,\n ):\n status = self._status\n\n if start_address is None:\n start_x, start_y = None, None\n else:\n start_x, start_y = status.address_to_cell_coords(start_address)\n\n if endex_address is None:\n endin_x, endin_y = None, None\n else:\n endin_x, endin_y = status.address_to_cell_coords(max(start_address, endex_address - 1))\n\n self.mark_dirty_inline(start_x, start_y, endin_x, endin_y)\n\n def update_vbar(self):\n status = self._status\n memory = status.memory\n\n memory_start = memory.start\n memory_endex = memory.endex\n memory_start_y = status.address_to_cell_coords(memory_start)[1]\n memory_endex_y = status.address_to_cell_coords(max(memory_start, memory_endex - 1))[1] + 1\n\n pixel_w, pixel_h = (self._cells_pixel_size[0], self._cells_canvas.winfo_height())\n cell_start_y = self.pixel_to_cell_coords(0, 0)[1]\n cell_endin_y = self.pixel_to_cell_coords(pixel_w - 1, pixel_h - 1)[1]\n\n ratio_start = (cell_start_y - memory_start_y) / (memory_endex_y - memory_start_y)\n ratio_endin = (cell_endin_y - memory_start_y) / (memory_endex_y - memory_start_y)\n vbar_start = max(0., min(ratio_start, 1.))\n vbar_endin = max(0., min(ratio_endin, 1.))\n\n self._cells_vbar.set(vbar_start, vbar_endin)\n\n def update_view(\n self,\n force_geometry: bool = False,\n force_selection: bool = False,\n force_content: bool = False,\n ):\n status = self._status\n cells_canvas = self._cells_canvas\n\n # Resize canvas if required\n pad_x, pad_y = self._pad_x, self._pad_y\n font_w, font_h = self._font_w, self._font_h\n cell_format_length = status.cell_format_length\n line_length = status.line_length\n view_w = pad_x + (font_w * (line_length * (cell_format_length + status.cell_spacing) - 1)) + pad_x\n pixel_w = self._cells_pixel_size[0]\n if view_w != pixel_w:\n pixel_w, pixel_h = view_w, cells_canvas.winfo_height()\n cells_pixel_size = pixel_w, pixel_h\n cells_canvas.configure(width=pixel_w, height=pixel_h)\n else:\n cells_pixel_size = (pixel_w, cells_canvas.winfo_height())\n pixel_w, pixel_h = cells_pixel_size\n\n cell_start_x, cell_start_y = self.pixel_to_cell_coords(0, 0)\n cell_start_x, cell_start_y = max(0, floor(cell_start_x)), floor(cell_start_y)\n cell_endex_x, cell_endex_y = self.pixel_to_cell_coords(pixel_w, pixel_h)\n cell_endex_x, cell_endex_y = min(floor(cell_endex_x) + 1, line_length), floor(cell_endex_y) + 1\n self._cell_start = (cell_start_x, cell_start_y)\n self._cell_endex = (cell_endex_x, cell_endex_y)\n self._address_start = status.cell_coords_to_address(cell_start_x, cell_start_y)\n self._address_endex = status.cell_coords_to_address(cell_endex_x, cell_endex_y)\n\n changed_geometry = (force_geometry or\n self._cells_pixel_size != cells_pixel_size or\n self._cells_pixel_y_prev != self._cells_pixel_y)\n\n changed_selection = (force_selection or\n self._sel_start_address_prev != status.sel_start_address or\n self._sel_endin_address_prev != status.sel_endin_address)\n\n changed_content = force_content\n\n if changed_geometry:\n self._update_geometry()\n\n if changed_geometry or changed_content:\n self._update_content()\n\n if changed_geometry or changed_selection:\n 
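# _update_background() diffs the selection, touching only the cells whose\n # selected state actually changed.\n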
self._update_background()\n\n self._cells_dirty.clear()\n self.update_cursor()\n\n self._cells_pixel_size = cells_pixel_size\n self._cells_pixel_y_prev = self._cells_pixel_y\n self._sel_start_address_prev = status.sel_start_address\n self._sel_endin_address_prev = status.sel_endin_address\n\n def _update_geometry(self):\n status = self._status\n cell_start_x, cell_start_y = self._cell_start\n cell_endex_x, cell_endex_y = self._cell_endex\n\n address_canvas = self._address_canvas\n chars_canvas = self._chars_canvas\n cells_canvas = self._cells_canvas\n line_length = status.line_length\n\n addrs_text_id = self._addrs_text_id\n cells_text_id = self._cells_text_id\n cells_rect_id = self._cells_rect_id\n chars_text_id = self._chars_text_id\n chars_rect_id = self._chars_rect_id\n\n cells_key_keep = set()\n cells_key_miss = set()\n addrs_key_keep = set()\n addrs_key_miss = set()\n\n # Mark missing cells and addresses, and those to be kept\n for y in range(cell_start_y, cell_endex_y):\n for x in range(cell_start_x, cell_endex_x):\n x_y = (x, y)\n if x_y in cells_text_id:\n cells_key_keep.add(x_y)\n else:\n cells_key_miss.add(x_y)\n if y in addrs_text_id:\n addrs_key_keep.add(y)\n else:\n addrs_key_miss.add(y)\n\n cells_key_trash = [x_y for x_y in cells_text_id if x_y not in cells_key_keep]\n addrs_key_trash = [y for y in addrs_text_id if y not in addrs_key_keep]\n\n font_w, font_h = self._font_w, self._font_h\n pad_x, pad_y = self._pad_x, self._pad_y\n\n # Update address skip\n address_format = status.address_format_string\n address_skip = status.address_skip\n text = address_format.format(address_skip)\n self._address_skip_label.configure(text=text)\n\n # Instance missing addresses\n for y in addrs_key_miss:\n address = status.cell_coords_to_address(0, y)\n text = address_format.format(address)\n addr_pixel_x = pad_x\n addr_pixel_y = pad_y + (y * font_h) - self._cells_pixel_y\n if addrs_key_trash:\n addr_text_id = addrs_text_id.pop(addrs_key_trash.pop())\n address_canvas.coords(addr_text_id, addr_pixel_x, addr_pixel_y)\n address_canvas.itemconfigure(addr_text_id, text=text)\n else:\n addr_text_id = address_canvas.create_text(addr_pixel_x, addr_pixel_y, text=text, anchor=tk.NW,\n font=self._font, fill=_COLOR_FG)\n addrs_text_id[y] = addr_text_id\n\n # Remove trashed addresses\n for y in addrs_key_trash:\n address_canvas.delete(addrs_text_id.pop(y))\n\n # Update kept addresses\n for y in addrs_key_keep:\n addr_pixel_x = pad_x\n addr_pixel_y = pad_y + (y * font_h) - self._cells_pixel_y\n address_canvas.coords(addrs_text_id[y], addr_pixel_x, addr_pixel_y)\n\n # Instance missing cells\n cells_dirty = self._cells_dirty\n cells_pixel = self._cells_pixel\n cells_selected = self._cells_selected\n cells_text = self._cells_text_str\n font = self._font\n cell_format_length = status.cell_format_length\n rect_w_tail = cell_format_length * font_w\n rect_w_body = rect_w_tail + (font_w * status.cell_spacing)\n rect_h = font_h\n cell_x_endin = line_length - 1\n cell_text = '?' 
* status.cell_format_length\n char_text = '?'\n chars_visible = self._chars_visible\n char_text_id = None\n char_rect_id = None\n\n for x_y in cells_key_miss:\n cell_pixel_x, cell_pixel_y = self.cell_coords_to_pixel(*x_y)\n char_pixel_x, char_pixel_y = self.char_coords_to_pixel(*x_y)\n rect_w = rect_w_body if x_y[0] < cell_x_endin else rect_w_tail\n\n if cells_key_trash:\n key = cells_key_trash.pop()\n\n cell_text_id = cells_text_id.pop(key)\n cells_canvas.coords(cell_text_id, cell_pixel_x, cell_pixel_y)\n cells_canvas.itemconfigure(cell_text_id, text=cell_text)\n\n cell_rect_id = cells_rect_id.pop(key)\n cells_canvas.itemconfigure(cell_rect_id, state=tk.HIDDEN)\n cells_canvas.coords(cell_rect_id,\n cell_pixel_x, cell_pixel_y,\n cell_pixel_x + rect_w, cell_pixel_y + rect_h)\n\n if chars_visible:\n char_text_id = chars_text_id.pop(key)\n chars_canvas.coords(char_text_id, char_pixel_x, char_pixel_y)\n chars_canvas.itemconfigure(char_text_id, text=char_text)\n\n char_rect_id = chars_rect_id.pop(key)\n chars_canvas.itemconfigure(char_rect_id, state=tk.HIDDEN)\n chars_canvas.coords(char_rect_id,\n char_pixel_x, char_pixel_y,\n char_pixel_x + font_w, char_pixel_y + rect_h)\n\n else:\n cell_text_id = cells_canvas.create_text(cell_pixel_x, cell_pixel_y,\n tags='cell_text', text=cell_text,\n anchor=tk.NW, font=font, fill=_COLOR_FG)\n\n cell_rect_id = cells_canvas.create_rectangle(cell_pixel_x, cell_pixel_y,\n cell_pixel_x + rect_w, cell_pixel_y + rect_h,\n tags='cell_rect', outline='', fill=_COLOR_SEL_BG,\n state=tk.HIDDEN)\n\n if chars_visible:\n char_text_id = chars_canvas.create_text(char_pixel_x, char_pixel_y,\n tags='char_text', text=char_text,\n anchor=tk.NW, font=font, fill=_COLOR_FG)\n\n char_rect_id = chars_canvas.create_rectangle(char_pixel_x, char_pixel_y,\n char_pixel_x + font_w, char_pixel_y + font_h,\n tags='char_rect', outline='', fill=_COLOR_SEL_BG,\n state=tk.HIDDEN)\n\n cells_text_id[x_y] = cell_text_id\n cells_rect_id[x_y] = cell_rect_id\n if chars_visible:\n chars_text_id[x_y] = char_text_id\n chars_rect_id[x_y] = char_rect_id\n\n cells_dirty.add(x_y)\n cells_pixel[x_y] = (-1, -1) # invalidate\n cells_selected.add(x_y)\n cells_text[x_y] = '' # invalidate\n\n cells_canvas.tag_raise('cell_text')\n cells_canvas.tag_lower('cell_rect')\n if chars_visible:\n chars_canvas.tag_raise('char_text')\n chars_canvas.tag_lower('char_rect')\n\n # Remove trashed cells\n for x_y in cells_key_trash:\n cells_canvas.delete(cells_text_id.pop(x_y))\n cells_canvas.delete(cells_rect_id.pop(x_y))\n if chars_visible:\n chars_canvas.delete(chars_text_id.pop(x_y))\n chars_canvas.delete(chars_rect_id.pop(x_y))\n cells_dirty.discard(x_y)\n cells_pixel.pop(x_y)\n cells_selected.discard(x_y)\n cells_text.pop(x_y)\n\n # Update kept cells\n for x_y in cells_key_keep:\n cell_pixel = self.cell_coords_to_pixel(*x_y)\n char_pixel = self.char_coords_to_pixel(*x_y)\n\n if cells_pixel[x_y] != cell_pixel:\n cells_pixel[x_y] = cell_pixel\n cell_pixel_x, cell_pixel_y = cell_pixel\n char_pixel_x, char_pixel_y = char_pixel\n\n cells_canvas.coords(cells_text_id[x_y], cell_pixel_x, cell_pixel_y)\n\n rect_w = rect_w_body if x_y[0] < cell_x_endin else rect_w_tail\n cells_canvas.coords(cells_rect_id[x_y],\n cell_pixel_x, cell_pixel_y,\n cell_pixel_x + rect_w, cell_pixel_y + rect_h)\n\n if chars_visible:\n chars_canvas.coords(chars_text_id[x_y], char_pixel_x, char_pixel_y)\n\n chars_canvas.coords(chars_rect_id[x_y],\n char_pixel_x, char_pixel_y,\n char_pixel_x + font_w, char_pixel_y + font_h)\n\n # Update canvas sizes\n 
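# The offset header is one text item rebuilt from the format string; the\n # canvas widths and scroll regions are derived from the current address,\n # offset, and cell format lengths.\n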
offset_canvas = self._offset_canvas\n offset_spacing = status.offset_spacing\n offset_format_format = status.offset_format_string.format\n offset_format_spacing = ' ' * offset_spacing\n text = offset_format_spacing.join(offset_format_format(x) for x in range(line_length))\n text = offset_format_spacing[:-1] + text\n offset_canvas.itemconfigure(self._offset_text_id, text=text)\n\n offset_format_length = status.offset_format_length\n offset_canvas_w = pad_x + (font_w * (line_length * (offset_format_length + offset_spacing) - 1)) + pad_x\n view_w = pad_x + (font_w * (line_length * (cell_format_length + status.cell_spacing) - 1)) + pad_x\n view_w = offset_canvas_w = max(offset_canvas_w, view_w)\n offset_canvas.configure(width=offset_canvas_w, scrollregion=(0, 0, offset_canvas_w, 1))\n cells_canvas.configure(width=view_w, scrollregion=(0, 0, view_w, 1))\n\n chars_canvas_w = pad_x + (font_w * line_length) + pad_x\n chars_canvas.configure(width=chars_canvas_w, scrollregion=(0, 0, chars_canvas_w, 1))\n\n address_format_length = status.address_format_length\n address_canvas_w = pad_x + (font_w * address_format_length) + pad_x\n address_canvas.configure(width=address_canvas_w)\n\n def _update_content(self):\n status = self._status\n cell_start_x, cell_start_y = self._cell_start\n cell_endex_x, cell_endex_y = self._cell_endex\n\n cells_canvas = self._cells_canvas\n chars_canvas = self._chars_canvas\n chars_title = self._chars_title\n\n cells_text_id = self._cells_text_id\n cells_dirty = self._cells_dirty\n cells_text_str = self._cells_text_str\n chars_text_id = self._chars_text_id\n\n address = status.cell_coords_to_address(cell_start_x, cell_start_y)\n rover = status.memory.values(address, ...).__next__\n text_format = status.cell_format_string.format\n text_empty = '-' * status.cell_format_length\n char_empty = ' '\n chars_visible = self._chars_visible\n chars_table = status.chars_table\n\n chars_title.configure(text=f'Text / {status.chars_encoding}')\n\n for y in range(cell_start_y, cell_endex_y):\n for x in range(cell_start_x, cell_endex_x):\n value = rover()\n x_y = (x, y)\n\n if x_y in cells_dirty:\n text_before = cells_text_str[x_y]\n text_after = text_empty if value is None else text_format(value)\n\n if text_before != text_after:\n cells_text_str[x_y] = text_after\n cells_canvas.itemconfigure(cells_text_id[x_y], text=text_after)\n\n if chars_visible:\n c = char_empty if value is None else chars_table[value]\n chars_canvas.itemconfigure(chars_text_id[x_y], text=c)\n\n address += 1\n\n def _update_background(self):\n status = self._status\n cell_start_x, cell_start_y = self._cell_start\n cell_endex_x, cell_endex_y = self._cell_endex\n\n selection_mode = status.sel_mode\n sm_norm = SelectionMode.NORMAL\n sm_rect = SelectionMode.RECTANGLE\n sel_address_start = status.sel_start_address\n sel_address_endin = status.sel_endin_address\n sel_start_cell_x, sel_start_cell_y = status.sel_start_cell\n sel_endin_cell_x, sel_endin_cell_y = status.sel_endin_cell\n\n cells_canvas = self._cells_canvas\n chars_canvas = self._chars_canvas\n\n cells_dirty = self._cells_dirty\n cells_selected_before = self._cells_selected\n cells_selected_after = set()\n\n if selection_mode == sm_norm:\n # Straighten any backwards selections\n if sel_address_endin < sel_address_start:\n sel_address_endin, sel_address_start = sel_address_start, sel_address_endin\n\n # Mark those cells within the selected address range\n address = status.cell_coords_to_address(cell_start_x, cell_start_y)\n for y in range(cell_start_y, 
cell_endex_y):\n for x in range(cell_start_x, cell_endex_x):\n if sel_address_start <= address <= sel_address_endin:\n cells_selected_after.add((x, y))\n address += 1\n\n elif selection_mode == sm_rect:\n # Straighten any backwards selections\n if sel_endin_cell_x < sel_start_cell_x:\n sel_endin_cell_x, sel_start_cell_x = sel_start_cell_x, sel_endin_cell_x\n if sel_endin_cell_y < sel_start_cell_y:\n sel_endin_cell_y, sel_start_cell_y = sel_start_cell_y, sel_endin_cell_y\n\n # Mark those cells within the selected rectangle range\n for y in range(cell_start_y, cell_endex_y):\n for x in range(cell_start_x, cell_endex_x):\n if ((sel_start_cell_x <= x <= sel_endin_cell_x and\n sel_start_cell_y <= y <= sel_endin_cell_y)):\n cells_selected_after.add((x, y))\n\n # Update only those cells that changed selection state\n cells_text_id = self._cells_text_id\n cells_rect_id = self._cells_rect_id\n chars_text_id = self._chars_text_id\n chars_rect_id = self._chars_rect_id\n chars_visible = self._chars_visible\n palette = (_COLOR_FG, _COLOR_OG)\n palette_sel = (_COLOR_SEL_FG, _COLOR_SEL_OG)\n\n for y in range(cell_start_y, cell_endex_y):\n for x in range(cell_start_x, cell_endex_x):\n x_y = (x, y)\n selected_after = x_y in cells_selected_after\n\n if x_y in cells_dirty:\n selected_before = not selected_after # force update\n else:\n selected_before = x_y in cells_selected_before\n\n if selected_before < selected_after:\n color = palette_sel[x & 1]\n cells_canvas.itemconfigure(cells_text_id[x_y], fill=color)\n cells_canvas.itemconfigure(cells_rect_id[x_y], state=tk.NORMAL)\n if chars_visible:\n chars_canvas.itemconfigure(chars_text_id[x_y], fill=color)\n chars_canvas.itemconfigure(chars_rect_id[x_y], state=tk.NORMAL)\n\n elif selected_before > selected_after:\n color = palette[x & 1]\n cells_canvas.itemconfigure(cells_text_id[x_y], fill=color)\n cells_canvas.itemconfigure(cells_rect_id[x_y], state=tk.HIDDEN)\n if chars_visible:\n chars_canvas.itemconfigure(chars_text_id[x_y], fill=color)\n chars_canvas.itemconfigure(chars_rect_id[x_y], state=tk.HIDDEN)\n\n self._cells_selected = cells_selected_after\n\n def update_cursor(self):\n status = self._status\n cell_start_x, cell_start_y = self._cell_start\n cell_endex_x, cell_endex_y = self._cell_endex\n cursor_cell_x, cursor_cell_y = status.cursor_cell\n cells_canvas = self._cells_canvas\n chars_canvas = self._chars_canvas\n chars_visible = self._chars_visible\n\n if ((cell_start_x <= cursor_cell_x <= cell_endex_x and\n cell_start_y <= cursor_cell_y <= cell_endex_y)):\n\n cursor_pixel_x, cursor_pixel_y = self.cell_coords_to_pixel(cursor_cell_x, cursor_cell_y)\n font_w, font_h = self._font_w, self._font_h\n cursor_pixel_x += status.cursor_digit * font_w\n cells_canvas.itemconfigure(self._cells_cursor_id, fill=self._cells_cursor_color)\n chars_canvas.itemconfigure(self._chars_cursor_id, fill=self._chars_cursor_color)\n\n if status.cursor_mode == CursorMode.OVERWRITE:\n # Draw a box around the cursor character\n cells_canvas.coords(self._cells_cursor_id,\n cursor_pixel_x - 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y + font_h + 1,\n cursor_pixel_x + font_w + 1, cursor_pixel_y + font_h + 1,\n cursor_pixel_x + font_w + 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y - 1)\n\n if chars_visible:\n cursor_pixel_x, cursor_pixel_y = self.char_coords_to_pixel(cursor_cell_x, cursor_cell_y)\n chars_canvas.coords(self._chars_cursor_id,\n cursor_pixel_x - 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y + font_h + 1,\n cursor_pixel_x + 
font_w + 1, cursor_pixel_y + font_h + 1,\n cursor_pixel_x + font_w + 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y - 1)\n\n else:\n # Draw a vertical line on the left side of the cursor character\n cells_canvas.coords(self._cells_cursor_id,\n cursor_pixel_x - 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y + font_h + 1)\n\n if chars_visible:\n cursor_pixel_x, cursor_pixel_y = self.char_coords_to_pixel(cursor_cell_x, cursor_cell_y)\n chars_canvas.coords(self._chars_cursor_id,\n cursor_pixel_x - 1, cursor_pixel_y - 1,\n cursor_pixel_x - 1, cursor_pixel_y + font_h + 1)\n\n cells_canvas.tag_raise('cursor')\n chars_canvas.tag_raise('cursor')\n\n else:\n # Park to an invisible spot\n cells_canvas.coords(self._cells_cursor_id, -2, -2, -1, -1)\n if chars_visible:\n chars_canvas.coords(self._chars_cursor_id, -2, -2, -1, -1)\n\n def redraw(self):\n for cell_text_id in self._cells_text_id.values():\n self._cells_canvas.delete(cell_text_id)\n\n for cell_rect_id in self._cells_rect_id.values():\n self._cells_canvas.delete(cell_rect_id)\n\n for char_text_id in self._chars_text_id.values():\n self._chars_canvas.delete(char_text_id)\n\n for char_rect_id in self._chars_rect_id.values():\n self._chars_canvas.delete(char_rect_id)\n\n for addr_text_id in self._addrs_text_id.values():\n self._address_canvas.delete(addr_text_id)\n\n self._addrs_text_id.clear()\n self._cells_text_id.clear()\n self._cells_rect_id.clear()\n self._cells_dirty.clear()\n self._cells_pixel.clear()\n self._cells_selected.clear()\n self._cells_text_str.clear()\n self._chars_text_id.clear()\n self._chars_rect_id.clear()\n\n self.update_view(force_geometry=True, force_selection=True, force_content=True)\n self.update_vbar()\n\n def pixel_to_char_coords(self, pixel_x: PixelCoord, pixel_y: PixelCoord) -> FloatCoords:\n char_x = (pixel_x - self._pad_x) / self._font_w\n char_y = (pixel_y - self._pad_y + self._cells_pixel_y) / self._font_h\n return char_x, char_y\n\n def char_coords_to_pixel(self, char_x: CharCoord, char_y: CharCoord) -> PixelCoords:\n pixel_x = self._pad_x + (char_x * self._font_w)\n pixel_y = self._pad_y + (char_y * self._font_h) - self._cells_pixel_y\n return pixel_x, pixel_y\n\n def pixel_to_cell_coords(self, pixel_x: PixelCoord, pixel_y: PixelCoord) -> FloatCoords:\n status = self._status\n char_x, char_y = self.pixel_to_char_coords(pixel_x, pixel_y)\n cell_format_length = status.cell_format_length\n cell_spacing = status.cell_spacing\n cell_x = (char_x - (cell_spacing - 1)) / (cell_format_length + cell_spacing)\n cell_y = char_y\n return cell_x, cell_y\n\n def cell_coords_to_pixel(self, cell_x: CellCoord, cell_y: CellCoord) -> PixelCoords:\n status = self._status\n cell_format_length = status.cell_format_length\n cell_spacing = status.cell_spacing\n char_x = cell_x * (cell_format_length + cell_spacing) + (cell_spacing - 1)\n char_y = cell_y\n return self.char_coords_to_pixel(char_x, char_y)\n\n def pixel_to_cursor_coords(self, pixel_x: PixelCoord, pixel_y: PixelCoord) -> Tuple[CellCoord, CellCoord, int]:\n status = self._status\n char_x, char_y = self.pixel_to_char_coords(pixel_x, pixel_y)\n cell_format_length = status.cell_format_length\n cell_spacing = status.cell_spacing\n line_length = status.line_length\n\n cell_format_length_spaced = cell_format_length + cell_spacing\n digit_x_unspaced = char_x - (cell_spacing - 1)\n remainder = digit_x_unspaced % cell_format_length_spaced\n cell_x = floor(digit_x_unspaced / cell_format_length_spaced)\n cell_y = floor(char_y)\n\n if cell_x < 0:\n cell_x = 
0\n digit = 0\n elif cell_x >= line_length:\n cell_x = line_length - 1\n digit = cell_format_length - 1\n else:\n if remainder < 1:\n digit = 0\n elif remainder < cell_format_length + .5:\n digit = min(floor(remainder), cell_format_length - 1)\n elif cell_x < line_length - 1:\n cell_x += 1\n digit = 0\n else:\n digit = cell_format_length - 1\n\n return cell_x, cell_y, digit\n\n def event_to_cursor_coords(self, event) -> Tuple[CellCoord, CellCoord, int]:\n return self.pixel_to_cursor_coords(event.x + self._cells_pixel_x, event.y)\n\n def event_to_char_coords(self, event) -> CharCoords:\n chars_pixel_x = self._chars_canvas.canvasx(0)\n char_x, char_y = self.pixel_to_char_coords(event.x + chars_pixel_x, event.y)\n char_x = max(0, min(floor(char_x), self._status.line_length - 1))\n char_y = floor(char_y)\n return char_x, char_y\n\n def scroll_up(self, delta_y: int = 1) -> None:\n self._on_vbar(tk.SCROLL, -delta_y, tk.UNITS)\n\n def scroll_down(self, delta_y: int = 1) -> None:\n self._on_vbar(tk.SCROLL, +delta_y, tk.UNITS)\n\n def scroll_page_up(self) -> None:\n self.scroll_up(self.get_half_page_height())\n\n def scroll_page_down(self) -> None:\n self.scroll_down(self.get_half_page_height())\n\n def scroll_top(self, delta_y: CellCoord = 0) -> None:\n status = self._status\n cursor_cell_y = status.cursor_cell[1] - delta_y\n font_h = self._font_h\n cells_pixel_y = cursor_cell_y * font_h\n changed = (self._cells_pixel_y != cells_pixel_y)\n self._cells_pixel_y = cells_pixel_y\n self.update_view(force_geometry=changed)\n\n def scroll_bottom(self, delta_y: CellCoord = 0) -> None:\n status = self._status\n cursor_cell_y = status.cursor_cell[1] + delta_y\n font_h = self._font_h\n pad_y = self._pad_y\n pixel_h = self._cells_canvas.winfo_height() - (pad_y * 2)\n cells_pixel_y = ((cursor_cell_y + 1 - (pixel_h // font_h)) * font_h) - (pixel_h % font_h) + pad_y\n changed = (self._cells_pixel_y != cells_pixel_y)\n self._cells_pixel_y = cells_pixel_y\n self.update_view(force_geometry=changed)\n\n def scroll_wheel(self, event=None):\n step = -int(event.delta) // self._font_h\n self._on_vbar(tk.SCROLL, step, tk.UNITS)\n\n def ask_big_selection(self, size: Address) -> bool:\n answer = tk.messagebox.askquestion(\n 'Big selection',\n (f'{size} ({size:X}h) bytes are selected.\\n'\n f'Such a big selection could create problems.\\n'\n f'Continue?')\n )\n return answer == tk.YES\n\n\n# =====================================================================================================================\n\nclass UserInterface(BaseUserInterface):\n\n def __init__(\n self,\n manager: 'InstanceManager',\n engine_factory: Callable[..., BaseEngine],\n ) -> None:\n\n super().__init__(manager)\n self._root = manager.root\n\n self._engine_factory = engine_factory\n engine = engine_factory(self)\n self.engine = engine\n\n self.__init_top()\n self.__init_tkvars()\n self.__init_menus()\n self.__init_toolbars()\n self.__init_statusbar()\n self.__init_editor()\n\n self.update_title_by_file_path()\n self.update_menus_by_selection()\n\n self.top.deiconify()\n\n def quit(self):\n self.top.destroy()\n super().quit()\n\n def create_new(self) -> 'UserInterface':\n manager = _cast(InstanceManager, self._manager)\n ui = UserInterface(manager, self._engine_factory)\n return ui\n\n def __init_top(self):\n top = tk.Toplevel(self._root)\n self.top = top\n\n top.withdraw()\n top.protocol('WM_DELETE_WINDOW', self._on_delete_window)\n top.title(PROGRAM_TITLE)\n top.minsize(600, 400)\n\n def _on_delete_window(self):\n self.engine.on_file_exit()\n\n def
__init_tkvars(self):\n # Editor variables\n status = self.engine.status\n top = self.top\n\n self.line_length_tkvar = tk.IntVar(top, name='line_length', value=status.line_length)\n self.chars_visible_tkvar = tk.BooleanVar(top, name='chars_visible', value=True)\n self.chars_encoding_tkvar = tk.StringVar(top, name='chars_encoding', value='ascii')\n\n self.cell_mode_tkvar = tk.IntVar(top, name='cell_mode', value=int(status.cell_format_mode))\n self.cell_prefix_tkvar = tk.BooleanVar(top, name='cell_prefix', value=status.cell_format_prefix)\n self.cell_suffix_tkvar = tk.BooleanVar(top, name='cell_suffix', value=status.cell_format_suffix)\n self.cell_zeroed_tkvar = tk.BooleanVar(top, name='cell_zeroed', value=status.cell_format_zeroed)\n\n self.address_mode_tkvar = tk.IntVar(top, name='address_mode', value=int(status.address_format_mode))\n self.address_prefix_tkvar = tk.BooleanVar(top, name='address_prefix', value=status.address_format_prefix)\n self.address_suffix_tkvar = tk.BooleanVar(top, name='address_suffix', value=status.address_format_suffix)\n self.address_zeroed_tkvar = tk.BooleanVar(top, name='address_zeroed', value=status.address_format_zeroed)\n self.address_skip_tkvar = tk.IntVar(top, name='address_skip', value=status.address_skip)\n self.address_bits_tkvar = tk.IntVar(top, name='address_bits', value=status.address_bits)\n\n self.offset_mode_tkvar = tk.IntVar(top, name='offset_mode', value=int(status.offset_format_mode))\n self.offset_prefix_tkvar = tk.BooleanVar(top, name='offset_prefix', value=status.offset_format_prefix)\n self.offset_suffix_tkvar = tk.BooleanVar(top, name='offset_suffix', value=status.offset_format_suffix)\n self.offset_zeroed_tkvar = tk.BooleanVar(top, name='offset_zeroed', value=status.offset_format_zeroed)\n\n # Add variable tracing\n self.line_length_tkvar.trace_add('write', self.on_tkvar_line_length)\n self.chars_visible_tkvar.trace_add('write', self.on_tkvar_chars_visible)\n self.chars_encoding_tkvar.trace_add('write', self.on_tkvar_chars_encoding)\n\n self.cell_mode_tkvar.trace_add('write', self.on_tkvar_cell_mode)\n self.cell_prefix_tkvar.trace_add('write', self.on_tkvar_cell_prefix)\n self.cell_suffix_tkvar.trace_add('write', self.on_tkvar_cell_suffix)\n self.cell_zeroed_tkvar.trace_add('write', self.on_tkvar_cell_zeroed)\n\n self.address_mode_tkvar.trace_add('write', self.on_tkvar_address_mode)\n self.address_prefix_tkvar.trace_add('write', self.on_tkvar_address_prefix)\n self.address_suffix_tkvar.trace_add('write', self.on_tkvar_address_suffix)\n self.address_zeroed_tkvar.trace_add('write', self.on_tkvar_address_zeroed)\n self.address_skip_tkvar.trace_add('write', self.on_tkvar_address_skip)\n self.address_bits_tkvar.trace_add('write', self.on_tkvar_address_bits)\n\n self.offset_mode_tkvar.trace_add('write', self.on_tkvar_offset_mode)\n self.offset_prefix_tkvar.trace_add('write', self.on_tkvar_offset_prefix)\n self.offset_suffix_tkvar.trace_add('write', self.on_tkvar_offset_suffix)\n self.offset_zeroed_tkvar.trace_add('write', self.on_tkvar_offset_zeroed)\n\n # TODO: Find/replace variables\n self.find_text_tkvar = tk.StringVar(top, name='find_text')\n self.find_base_tkvar = tk.IntVar(top, name='find_base')\n self.replace_text_tkvar = tk.StringVar(top, name='replace_text')\n\n def __init_menus(self):\n menu_bar = tk.Menu(self.top, tearoff=False)\n self.menu_bar = menu_bar\n\n self.__init_menu_file()\n self.__init_menu_edit()\n self.__init_menu_view()\n self.__init_menu_navigation()\n self.__init_menu_help()\n\n menu_bar.add_cascade(label='File', 
underline=0, menu=self.menu_file)\n menu_bar.add_cascade(label='Edit', underline=0, menu=self.menu_edit)\n menu_bar.add_cascade(label='View', underline=0, menu=self.menu_view)\n menu_bar.add_cascade(label='Navigate', underline=0, menu=self.menu_nav)\n menu_bar.add_cascade(label='Help', underline=0, menu=self.menu_help)\n\n self.top.configure(menu=menu_bar)\n\n def __init_menu_file(self):\n self.menu_file = menu = tk.Menu(self.top, tearoff=False)\n\n menu.add_command(label='New', underline=0, accelerator='Ctrl+N', command=self.on_file_new,\n image=load_image('image/16x16/document_new_thick.png'), compound=tk.LEFT)\n\n menu.add_command(label='Open', underline=0, accelerator='Ctrl+O', command=self.on_file_open,\n image=load_image('image/16x16/fileopen.png'), compound=tk.LEFT)\n\n menu.add_command(label='Import', underline=0, accelerator='Ctrl+I', command=self.on_file_import,\n image=load_image('image/16x16/fileimport.png'), compound=tk.LEFT)\n\n menu.add_command(label='Save', underline=0, accelerator='Ctrl+S', command=self.on_file_save,\n image=load_image('image/16x16/filesave.png'), compound=tk.LEFT)\n\n menu.add_command(label='Save As', underline=0, accelerator='Ctrl+Shift+S', command=self.on_file_save_as,\n image=load_image('image/16x16/filesaveas.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Settings', underline=2, accelerator='Ctrl+Shift+T', state=tk.DISABLED,\n command=self.on_file_settings,\n image=load_image('image/16x16/configure.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Exit', underline=1, accelerator='Ctrl+W', command=self.on_file_exit,\n image=load_image('image/16x16/kill.png'), compound=tk.LEFT)\n\n def __init_menu_edit(self):\n self.menu_edit = menu = tk.Menu(self.top, tearoff=False)\n\n menu.add_command(label='Undo', underline=1, accelerator='Ctrl+Z', state=tk.DISABLED, command=self.on_edit_undo,\n image=load_image('image/16x16/undo.png'), compound=tk.LEFT)\n\n menu.add_command(label='Redo', underline=0, accelerator='Ctrl+Y', state=tk.DISABLED, command=self.on_edit_redo,\n image=load_image('image/16x16/redo.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Cut', underline=1, accelerator='Ctrl+X', command=self.on_edit_cut,\n image=load_image('image/16x16/editcut.png'), compound=tk.LEFT)\n\n menu.add_command(label='Copy', underline=0, accelerator='Ctrl+C', command=self.on_edit_copy,\n image=load_image('image/16x16/editcopy.png'), compound=tk.LEFT)\n\n menu.add_command(label='Paste', underline=0, accelerator='Ctrl+V', command=self.on_edit_paste,\n image=load_image('image/16x16/editpaste.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Cursor mode', underline=7, accelerator='Ins', command=self.on_edit_cursor_mode,\n image=load_image('image/16x16/edit.png'), compound=tk.LEFT)\n\n menu.add_command(label='Insert', underline=0, accelerator='+', command=self.on_edit_reserve,\n image=load_image('image/16x16/document_new.png'), compound=tk.LEFT)\n\n menu.add_command(label='Delete', underline=0, accelerator='- (Del)', command=self.on_edit_delete,\n image=load_image('image/16x16/editdelete.png'), compound=tk.LEFT)\n\n menu.add_command(label='Clear', underline=1, accelerator='. 
(Del)', command=self.on_edit_clear,\n image=load_image('image/16x16/eraser.png'), compound=tk.LEFT)\n\n menu.add_command(label='Fill', underline=0, accelerator='$', command=self.on_edit_fill,\n image=load_image('image/16x16/fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Flood', underline=2, accelerator='%', command=self.on_edit_flood,\n image=load_image('image/16x16/color_fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Crop', underline=0, accelerator='Ctrl+K', command=self.on_edit_crop,\n image=load_image('image/16x16/crop.png'), compound=tk.LEFT)\n\n menu.add_command(label='Move', underline=0, accelerator='Ctrl+M',\n command=self.on_edit_move_focus,\n image=load_image('image/16x16/move.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Select all', underline=7, accelerator='Ctrl+A', command=self.on_edit_select_all,\n image=load_image('image/16x16/select-all.png'), compound=tk.LEFT)\n\n menu.add_command(label='Select range', underline=7, accelerator='Ctrl+R',\n command=self.on_edit_select_range,\n image=load_image('image/16x16/select-range.png'), compound=tk.LEFT)\n\n menu.add_command(label='Copy current address', accelerator='Alt+Ins', command=self.on_edit_copy_address,\n image=load_image('image/16x16/copy-address.png'), compound=tk.LEFT)\n\n def __init_menu_view(self):\n self.menu_view = menu = tk.Menu(self.top, tearoff=False)\n\n # Line submenu\n line = tk.Menu(menu, tearoff=False)\n self.menu_line = line\n\n for value in LINE_LENGTHS:\n line.add_radiobutton(label=f'{value:3d}', variable=self.line_length_tkvar, value=value)\n\n line.add_separator()\n\n line.add_command(label='Custom', command=self.on_view_line_length_custom)\n\n # Address bits submenu\n bits = tk.Menu(menu, tearoff=False)\n self.menu_bits = bits\n\n for value in ADDRESS_BITS:\n bits.add_radiobutton(label=f'{value:3d}', variable=self.address_bits_tkvar, value=value)\n\n bits.add_separator()\n\n bits.add_command(label='Custom', command=self.on_view_address_bits_custom)\n\n # Encoding submenu\n encm = tk.Menu(menu, tearoff=False)\n self.menu_encoding = encm\n\n for i, encoding in enumerate(BYTE_ENCODINGS):\n encm.add_radiobutton(label=encoding, variable=self.chars_encoding_tkvar, value=encoding,\n columnbreak=(i and not i % 16))\n\n # Cell submenu\n cell = tk.Menu(menu, tearoff=False)\n self._cell = cell\n\n cell.add_radiobutton(label='Hex UPPER', underline=0, accelerator='Ctrl+Alt+H',\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n cell.add_radiobutton(label='Hex lower', underline=12, accelerator='Ctrl+Alt+Shift+H',\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n cell.add_radiobutton(label='Decimal', underline=0, accelerator='Ctrl+Alt+D',\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n cell.add_radiobutton(label='Octal', underline=0, accelerator='Ctrl+Alt+O',\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n cell.add_radiobutton(label='Binary', underline=0, accelerator='Ctrl+Alt+B',\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n cell.add_separator()\n\n cell.add_checkbutton(label='Prefix',
underline=0,\n variable=self.cell_prefix_tkvar, offvalue=False, onvalue=True)\n\n cell.add_checkbutton(label='Suffix', underline=0,\n variable=self.cell_suffix_tkvar, offvalue=False, onvalue=True)\n\n cell.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.cell_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Address submenu\n address = tk.Menu(menu, tearoff=False)\n self._address = address\n\n address.add_radiobutton(label='Hex UPPER', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n address.add_radiobutton(label='Hex lower', underline=12,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n address.add_radiobutton(label='Decimal', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n address.add_radiobutton(label='Octal', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n address.add_radiobutton(label='Binary', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n address.add_separator()\n\n address.add_checkbutton(label='Prefix', underline=0,\n variable=self.address_prefix_tkvar, offvalue=False, onvalue=True)\n\n address.add_checkbutton(label='Suffix', underline=0,\n variable=self.address_suffix_tkvar, offvalue=False, onvalue=True)\n\n address.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.address_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Offset submenu\n offset = tk.Menu(menu, tearoff=False)\n self._offset = offset\n\n offset.add_radiobutton(label='Hex UPPER', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n offset.add_radiobutton(label='Hex lower', underline=12,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n offset.add_radiobutton(label='Decimal', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n offset.add_radiobutton(label='Octal', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n offset.add_radiobutton(label='Binary', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n offset.add_separator()\n\n offset.add_checkbutton(label='Prefix', underline=0,\n variable=self.offset_prefix_tkvar, offvalue=False, onvalue=True)\n\n offset.add_checkbutton(label='Suffix', underline=0,\n variable=self.offset_suffix_tkvar, offvalue=False, onvalue=True)\n\n offset.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.offset_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Menu\n menu.add_cascade(label='Line length', underline=0, menu=line,\n image=load_image('image/16x16/text_left.png'), compound=tk.LEFT)\n\n menu.add_cascade(label='Address bits', underline=8, menu=bits,\n 
image=load_image('image/16x16/memory.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_cascade(label='Cell format', underline=0, menu=cell,\n image=load_image('image/16x16/memory-cell.png'), compound=tk.LEFT)\n\n menu.add_cascade(label='Address format', underline=0, menu=address,\n image=load_image('image/16x16/memory-address.png'), compound=tk.LEFT)\n\n menu.add_cascade(label='Offset format', underline=0, menu=offset,\n image=load_image('image/16x16/memory-offset.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_checkbutton(label='Characters', underline=1,\n variable=self.chars_visible_tkvar, offvalue=False, onvalue=True)\n\n menu.add_cascade(label='Encoding', underline=0, menu=encm,\n image=load_image('image/16x16/fonts.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Redraw', underline=0, accelerator='F5', command=self.on_view_redraw,\n image=load_image('image/16x16/hotsync.png'), compound=tk.LEFT)\n\n def __init_menu_navigation(self):\n self.menu_nav = menu = tk.Menu(self.top, tearoff=False)\n\n menu.add_command(label='Memory address', underline=7, accelerator='Ctrl+G',\n command=self.on_nav_goto_memory_address_start_focus,\n image=load_image('image/16x16/goto.png'), compound=tk.LEFT)\n\n menu.add_command(label='Memory start', underline=7, accelerator='Ctrl+Home',\n command=self.on_nav_goto_memory_start,\n image=load_image('image/16x16/top-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Memory end', underline=7, accelerator='Ctrl+End',\n command=self.on_nav_goto_memory_endin,\n image=load_image('image/16x16/bottom-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Memory end-ex', underline=12, accelerator='Ctrl+Alt+End',\n command=self.on_nav_goto_memory_endex)\n\n menu.add_command(label='Set address skip', underline=9, command=self.on_nav_address_skip,\n image=load_image('image/16x16/player_fwd.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Previous block', underline=6, accelerator='Alt+Left',\n command=self.on_nav_goto_block_previous,\n image=load_image('image/16x16/arrow-left.png'), compound=tk.LEFT)\n\n menu.add_command(label='Next block', underline=7, accelerator='Alt+Right',\n command=self.on_nav_goto_block_next,\n image=load_image('image/16x16/arrow-right.png'), compound=tk.LEFT)\n\n menu.add_command(label='Block start', underline=6, accelerator='Alt+Home',\n command=self.on_nav_goto_block_start,\n image=load_image('image/16x16/arrow-up-dash.png'), compound=tk.LEFT)\n\n menu.add_command(label='Block end', underline=7, accelerator='Alt+End',\n command=self.on_nav_goto_block_endin,\n image=load_image('image/16x16/arrow-down-dash.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Previous byte', underline=6, accelerator='Ctrl+Left',\n command=self.on_nav_goto_byte_previous,\n image=load_image('image/16x16/back-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Next byte', underline=6, accelerator='Ctrl+Right',\n command=self.on_nav_goto_byte_next,\n image=load_image('image/16x16/next-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Line start', underline=6, accelerator='Home',\n command=self.on_nav_goto_line_start,\n image=load_image('image/16x16/start-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Line end', underline=7, accelerator='End',\n command=self.on_nav_goto_line_endin,\n image=load_image('image/16x16/finish-light.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Scroll up', 
underline=7, accelerator='Ctrl+Up',\n command=self.on_nav_scroll_line_up,\n image=load_image('image/16x16/1uparrow.png'), compound=tk.LEFT)\n\n menu.add_command(label='Scroll down', underline=7, accelerator='Ctrl+Down',\n command=self.on_nav_scroll_line_down,\n image=load_image('image/16x16/1downarrow.png'), compound=tk.LEFT)\n\n menu.add_command(label='Scroll half-page up', underline=18, accelerator='Ctrl+PgUp',\n command=self.on_nav_scroll_page_up,\n image=load_image('image/16x16/2uparrow.png'), compound=tk.LEFT)\n\n menu.add_command(label='Scroll half-page down', underline=19, accelerator='Ctrl+PgDn',\n command=self.on_nav_scroll_page_down,\n image=load_image('image/16x16/2downarrow.png'), compound=tk.LEFT)\n\n menu.add_command(label='Scroll align top', underline=8, accelerator='Ctrl+Alt+PgUp',\n command=self.on_nav_scroll_top,\n image=load_image('image/16x16/top.png'), compound=tk.LEFT)\n\n menu.add_command(label='Scroll align bottom', underline=7, accelerator='Ctrl+Alt+PgDn',\n command=self.on_nav_scroll_bottom,\n image=load_image('image/16x16/bottom.png'), compound=tk.LEFT)\n\n def __init_menu_help(self):\n self.menu_help = menu = tk.Menu(self.top, tearoff=False)\n\n menu.add_command(label='About', underline=0, command=self.on_help_about,\n image=load_image('image/16x16/info.png'), compound=tk.LEFT)\n\n def __init_toolbars(self):\n toolbar_tray = ToolbarTray(self.top, padding=(0, 0), borderwidth=1, relief=tk.SUNKEN)\n self.toolbar_tray = toolbar_tray\n\n self.__init_toolbar_file()\n self.__init_toolbar_edit()\n self.__init_toolbar_address()\n self.__init_toolbar_blocks()\n\n toolbar_tray.add_widget(self.toolbar_file)\n toolbar_tray.add_widget(self.toolbar_edit)\n toolbar_tray.add_widget(self.toolbar_address)\n toolbar_tray.add_widget(self.toolbar_blocks)\n\n toolbar_tray.pack(side=tk.TOP, expand=False, fill=tk.X, anchor=tk.N)\n\n def __init_toolbar_file(self):\n self.toolbar_file = toolbar = Toolbar(self.toolbar_tray)\n\n toolbar.add_button(tooltip='New', image=load_image('image/22x22/filenew.png'),\n command=self.on_file_new)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Open', image=load_image('image/22x22/fileopen.png'),\n command=self.on_file_open)\n\n toolbar.add_button(tooltip='Import', image=load_image('image/22x22/fileimport.png'),\n command=self.on_file_import)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Save', image=load_image('image/22x22/filesave.png'),\n command=self.on_file_save)\n\n toolbar.add_button(tooltip='Save As', image=load_image('image/22x22/filesaveas.png'),\n command=self.on_file_save_as)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Settings', image=load_image('image/22x22/configure.png'),\n command=self.on_file_settings, state=tk.DISABLED)\n\n toolbar.finalize()\n\n def __init_toolbar_edit(self):\n self.toolbar_edit = toolbar = Toolbar(self.toolbar_tray)\n\n toolbar.add_button(tooltip='Cut', image=load_image('image/22x22/editcut.png'),\n command=self.on_edit_cut)\n\n toolbar.add_button(tooltip='Copy', image=load_image('image/22x22/editcopy.png'),\n command=self.on_edit_copy)\n\n toolbar.add_button(tooltip='Paste', image=load_image('image/22x22/editpaste.png'),\n command=self.on_edit_paste)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Insert', image=load_image('image/22x22/document_new.png'),\n command=self.on_edit_reserve)\n\n toolbar.add_button(tooltip='Delete', image=load_image('image/22x22/editdelete.png'),\n command=self.on_edit_delete)\n\n toolbar.add_button(tooltip='Clear', 
image=load_image('image/22x22/eraser.png'),\n command=self.on_edit_clear)\n\n toolbar.add_button(tooltip='Fill', image=load_image('image/22x22/fill.png'),\n command=self.on_edit_fill)\n\n toolbar.add_button(tooltip='Flood', image=load_image('image/22x22/color_fill.png'),\n command=self.on_edit_flood)\n\n toolbar.add_button(tooltip='Crop', image=load_image('image/22x22/crop.png'),\n command=self.on_edit_crop)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Undo', image=load_image('image/22x22/undo_dark.png'), state=tk.DISABLED,\n command=self.on_edit_undo)\n\n toolbar.add_button(tooltip='Redo', image=load_image('image/22x22/redo_dark.png'), state=tk.DISABLED,\n command=self.on_edit_redo)\n\n toolbar.finalize()\n\n def __init_toolbar_address(self):\n self.toolbar_address = toolbar = Toolbar(self.toolbar_tray)\n\n toolbar.add_button(tooltip='Move to address', image=load_image('image/22x22/move.png'), key='Move',\n command=self.on_edit_move_apply)\n\n toolbar.add_button(tooltip='Go to address', image=load_image('image/22x22/goto.png'),\n command=self.on_nav_goto_memory_address_start_apply)\n\n # Enter applies the address; Escape returns focus to the editor\n self.start_entry = ttk.Entry(toolbar, width=20, justify=tk.RIGHT)\n self.start_entry.bind('<Return>', self.on_nav_goto_memory_address_start_apply)\n self.start_entry.bind('<Escape>', self.on_nav_editor_focus)\n toolbar.add_widget(self.start_entry, key=toolbar.widget_count, tooltip='Start address')\n\n toolbar.add_button(tooltip='Set current', image=load_image('image/22x22/curfiledir.png'),\n command=self.on_nav_goto_memory_address_copy)\n\n self.endin_entry = ttk.Entry(toolbar, width=20, justify=tk.RIGHT)\n self.endin_entry.bind('<Return>', self.on_edit_select_range)\n self.endin_entry.bind('<Escape>', self.on_nav_editor_focus)\n toolbar.add_widget(self.endin_entry, key=toolbar.widget_count, tooltip='End address')\n\n toolbar.add_button(tooltip='Select range', image=load_image('image/22x22/7days.png'),\n command=self.on_edit_select_range)\n\n toolbar.finalize()\n\n def __init_toolbar_blocks(self):\n self.toolbar_blocks = toolbar = Toolbar(self.toolbar_tray)\n\n toolbar.add_button(tooltip='Memory start', image=load_image('image/22x22/top.png'),\n command=self.on_nav_goto_memory_start)\n\n toolbar.add_button(tooltip='Memory end', image=load_image('image/22x22/bottom.png'),\n command=self.on_nav_goto_memory_endin)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Previous block', image=load_image('image/22x22/arrow-left.png'),\n command=self.on_nav_goto_block_previous)\n\n toolbar.add_button(tooltip='Next block', image=load_image('image/22x22/arrow-right.png'),\n command=self.on_nav_goto_block_next)\n\n toolbar.add_separator()\n\n toolbar.add_button(tooltip='Block start', image=load_image('image/22x22/arrow-up-dash.png'),\n command=self.on_nav_goto_block_start)\n\n toolbar.add_button(tooltip='Block end', image=load_image('image/22x22/arrow-down-dash.png'),\n command=self.on_nav_goto_block_endin)\n\n toolbar.finalize()\n\n def __init_statusbar(self):\n self.statusbar_frame = sb_frame = ttk.Frame(self.top)\n self.statusbar_address = sb_address = ttk.Label(sb_frame, anchor=tk.W, relief=tk.SUNKEN, borderwidth=1)\n self.statusbar_selection = sb_selection = ttk.Label(sb_frame, anchor=tk.W, relief=tk.SUNKEN, borderwidth=1)\n self.statusbar_cursor = sb_cursor = ttk.Label(sb_frame, anchor=tk.W, relief=tk.SUNKEN, borderwidth=1)\n\n sb_address.grid(row=0, column=0, sticky=tk.EW)\n sb_selection.grid(row=0, column=1, sticky=tk.EW)\n sb_cursor.grid(row=0, column=2, sticky=tk.EW)\n\n sb_frame.rowconfigure(0, weight=0)\n 
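# address and selection panes get twice the horizontal stretch of the cursor-mode pane\n 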
sb_frame.columnconfigure(0, weight=2)\n sb_frame.columnconfigure(1, weight=2)\n sb_frame.columnconfigure(2, weight=1)\n\n sb_frame.pack(side=tk.BOTTOM, fill=tk.X)\n\n def __init_editor(self):\n engine = self.engine\n self.editor = editor = EditorWidget(self.top, engine, engine.status)\n editor.pack(side=tk.TOP, expand=True, fill=tk.BOTH)\n\n self.__init_popup_cell()\n self.__init_popup_address()\n self.__init_popup_offset()\n self.__init_popup_chars()\n\n def __init_popup_cell(self):\n menu = tk.Menu(tearoff=False)\n self.cells_popup = menu\n\n # View submenu\n view = tk.Menu(menu, tearoff=False)\n self.cells_popup_view = view\n\n view.add_radiobutton(label='Hex UPPER', underline=0,\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Hex lower', underline=12,\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Decimal', underline=0,\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Octal', underline=0,\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Binary', underline=0,\n variable=self.cell_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n view.add_separator()\n\n view.add_checkbutton(label='Prefix', underline=0,\n variable=self.cell_prefix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Suffix', underline=0,\n variable=self.cell_suffix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.cell_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Menu\n menu.add_cascade(label='Cell format', underline=0, menu=view,\n image=load_image('image/16x16/memory-cell.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Cut', underline=1, command=self.on_edit_cut,\n image=load_image('image/16x16/editcut.png'), compound=tk.LEFT)\n\n menu.add_command(label='Copy', underline=0, command=self.on_edit_copy,\n image=load_image('image/16x16/editcopy.png'), compound=tk.LEFT)\n\n menu.add_command(label='Paste', underline=0, command=self.on_edit_paste,\n image=load_image('image/16x16/editpaste.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Insert', underline=0, command=self.on_edit_reserve,\n image=load_image('image/16x16/document_new.png'), compound=tk.LEFT)\n\n menu.add_command(label='Delete', underline=0, command=self.on_edit_delete,\n image=load_image('image/16x16/editdelete.png'), compound=tk.LEFT)\n\n menu.add_command(label='Clear', underline=1, command=self.on_edit_clear,\n image=load_image('image/16x16/eraser.png'), compound=tk.LEFT)\n\n menu.add_command(label='Fill', underline=0, command=self.on_edit_fill,\n image=load_image('image/16x16/fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Flood', underline=2, command=self.on_edit_flood,\n image=load_image('image/16x16/color_fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Crop', underline=0, command=self.on_edit_crop,\n image=load_image('image/16x16/crop.png'), compound=tk.LEFT)\n\n menu.add_command(label='Move', underline=0, 
command=self.on_edit_move_focus,\n image=load_image('image/16x16/move.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Export', underline=0, command=self.on_edit_export,\n image=load_image('image/16x16/fileexport.png'), compound=tk.LEFT)\n\n self.editor.cells_canvas.bind('<Button-3>', self._on_popup_cell)\n\n def _on_popup_cell(self, event):\n try:\n self.cells_popup.tk_popup(event.x_root, event.y_root)\n finally:\n self.cells_popup.grab_release()\n\n def __init_popup_address(self):\n engine = self.engine\n\n menu = tk.Menu(tearoff=False)\n self.address_popup = menu\n\n # View submenu\n view = tk.Menu(menu, tearoff=False)\n self.address_popup_view = view\n\n view.add_radiobutton(label='Hex UPPER', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Hex lower', underline=12,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Decimal', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Octal', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Binary', underline=0,\n variable=self.address_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n view.add_separator()\n\n view.add_checkbutton(label='Prefix', underline=0,\n variable=self.address_prefix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Suffix', underline=0,\n variable=self.address_suffix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.address_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Address bits submenu\n bits = tk.Menu(menu, tearoff=False)\n self.address_popup_bits = bits\n\n for value in ADDRESS_BITS:\n bits.add_radiobutton(label=f'{value:3d}', variable=self.address_bits_tkvar, value=value)\n\n bits.add_separator()\n\n bits.add_command(label='Custom', command=self.on_view_address_bits_custom)\n\n # Menu\n menu.add_cascade(label='Address format', underline=0, menu=view,\n image=load_image('image/16x16/memory-address.png'), compound=tk.LEFT)\n\n menu.add_cascade(label='Address bits', underline=8, menu=bits,\n image=load_image('image/16x16/memory.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Memory address', underline=7, command=self.on_nav_goto_memory_address_start_focus,\n image=load_image('image/16x16/goto.png'), compound=tk.LEFT)\n\n menu.add_command(label='Memory start', underline=7, command=self.on_nav_goto_memory_start,\n image=load_image('image/16x16/top-light.png'), compound=tk.LEFT)\n\n menu.add_command(label='Memory end', underline=7, command=self.on_nav_goto_memory_endin,\n image=load_image('image/16x16/bottom-light.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Previous block', underline=6, command=self.on_nav_goto_block_previous,\n image=load_image('image/16x16/arrow-left.png'), compound=tk.LEFT)\n\n menu.add_command(label='Next block', underline=7, command=self.on_nav_goto_block_next,\n image=load_image('image/16x16/arrow-right.png'), compound=tk.LEFT)\n\n menu.add_command(label='Block start', underline=6, command=self.on_nav_goto_block_start,\n image=load_image('image/16x16/arrow-up-dash.png'), compound=tk.LEFT)\n\n menu.add_command(label='Block end', underline=7, command=self.on_nav_goto_block_endin,\n image=load_image('image/16x16/arrow-down-dash.png'), compound=tk.LEFT)\n\n self.editor.address_canvas.bind('<Button-3>', self._on_popup_address)\n\n def _on_popup_address(self, event):\n try:\n self.address_popup.tk_popup(event.x_root, event.y_root)\n finally:\n self.address_popup.grab_release()\n\n def __init_popup_offset(self):\n engine = self.engine\n\n menu = tk.Menu(tearoff=False)\n self.offset_popup = menu\n\n # View submenu\n view = tk.Menu(menu, tearoff=False)\n self.offset_popup_view = view\n\n view.add_radiobutton(label='Hex UPPER', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_UPPER),\n image=load_image('image/16x16/char-hex-upper.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Hex lower', underline=12,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.HEXADECIMAL_LOWER),\n image=load_image('image/16x16/char-hex-lower.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Decimal', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.DECIMAL),\n image=load_image('image/16x16/char-decimal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Octal', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.OCTAL),\n image=load_image('image/16x16/char-octal.png'), compound=tk.LEFT)\n\n view.add_radiobutton(label='Binary', underline=0,\n variable=self.offset_mode_tkvar, value=int(ValueFormatEnum.BINARY),\n image=load_image('image/16x16/char-binary.png'), compound=tk.LEFT)\n\n view.add_separator()\n\n view.add_checkbutton(label='Prefix', underline=0,\n variable=self.offset_prefix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Suffix', underline=0,\n variable=self.offset_suffix_tkvar, offvalue=False, onvalue=True)\n\n view.add_checkbutton(label='Leading zeros', underline=8,\n variable=self.offset_zeroed_tkvar, offvalue=False, onvalue=True)\n\n # Line submenu\n line = tk.Menu(menu, tearoff=False)\n self.offset_popup_line = line\n\n for value in LINE_LENGTHS:\n line.add_radiobutton(label=f'{value:3d}', variable=self.line_length_tkvar, value=value)\n\n line.add_separator()\n\n line.add_command(label='Custom', command=self.on_view_line_length_custom)\n\n # Menu\n menu.add_cascade(label='Offset format', underline=0, menu=view,\n image=load_image('image/16x16/memory-offset.png'), compound=tk.LEFT)\n\n menu.add_cascade(label='Line length', underline=0, menu=line,\n image=load_image('image/16x16/text_left.png'), compound=tk.LEFT)\n\n self.editor.offset_canvas.bind('<Button-3>', self._on_popup_offset)\n\n def _on_popup_offset(self, event):\n try:\n self.offset_popup.tk_popup(event.x_root, event.y_root)\n finally:\n self.offset_popup.grab_release()\n\n def __init_popup_chars(self):\n menu = tk.Menu(tearoff=False)\n self.chars_popup = menu\n\n # Encoding submenu\n encm = tk.Menu(menu, tearoff=False)\n self.chars_popup_encoding = encm\n\n encm.add_command(label='Custom', underline=0, command=self.on_view_chars_encoding_custom)\n\n encm.add_separator()\n\n for i, encoding in enumerate(BYTE_ENCODINGS):\n encm.add_radiobutton(label=encoding, variable=self.chars_encoding_tkvar, value=encoding,\n columnbreak=(i and not i % 16))\n\n # Menu\n menu.add_cascade(label='Encoding', underline=0, menu=encm,\n image=load_image('image/16x16/fonts.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Cut', underline=1, command=self.on_edit_cut,\n image=load_image('image/16x16/editcut.png'), compound=tk.LEFT)\n\n menu.add_command(label='Copy', underline=0, command=self.on_edit_copy,\n image=load_image('image/16x16/editcopy.png'), compound=tk.LEFT)\n\n menu.add_command(label='Paste', underline=0, command=self.on_edit_paste,\n image=load_image('image/16x16/editpaste.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Insert', underline=0, command=self.on_edit_reserve,\n image=load_image('image/16x16/document_new.png'), compound=tk.LEFT)\n\n menu.add_command(label='Delete', underline=0, command=self.on_edit_delete,\n image=load_image('image/16x16/editdelete.png'), compound=tk.LEFT)\n\n menu.add_command(label='Clear', underline=1, command=self.on_edit_clear,\n image=load_image('image/16x16/eraser.png'), compound=tk.LEFT)\n\n menu.add_command(label='Fill', underline=0, command=self.on_edit_fill,\n image=load_image('image/16x16/fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Flood', underline=2, command=self.on_edit_flood,\n image=load_image('image/16x16/color_fill.png'), compound=tk.LEFT)\n\n menu.add_command(label='Crop', underline=0, command=self.on_edit_crop,\n image=load_image('image/16x16/crop.png'), compound=tk.LEFT)\n\n menu.add_command(label='Move', underline=0, command=self.on_edit_move_focus,\n image=load_image('image/16x16/move.png'), compound=tk.LEFT)\n\n menu.add_separator()\n\n menu.add_command(label='Export', underline=0, command=self.on_edit_export,\n image=load_image('image/16x16/fileexport.png'), compound=tk.LEFT)\n\n self.editor.chars_canvas.bind('<Button-3>', self._on_popup_chars)\n\n def _on_popup_chars(self, event):\n try:\n self.chars_popup.tk_popup(event.x_root, event.y_root)\n finally:\n self.chars_popup.grab_release()\n\n def update_status(self):\n status = self.engine.status\n format_address = status.address_format_string.format\n\n if status.sel_mode == SelectionMode.NORMAL:\n start, endin = status.sel_start_address, status.sel_endin_address\n if endin < start:\n endin, start = start, endin\n text_range = f'Range: {format_address(start)} - {format_address(endin)}'\n length = endin + 1 - start\n text_length = f'Size: {format_address(length)} = {length:d}'\n\n elif status.sel_mode == SelectionMode.RECTANGLE:\n start_x, start_y = status.sel_start_cell\n endin_x, endin_y = status.sel_endin_cell\n if endin_x < start_x:\n endin_x, start_x = start_x, endin_x\n if endin_y < start_y:\n endin_y, start_y = start_y, endin_y\n text_range = f'Range: ({start_x:d}, {start_y:d}) - ({endin_x:d}, {endin_y:d})'\n text_w = endin_x + 1 - start_x\n text_h = endin_y + 1 - start_y\n text_length = f'Size: ({text_w:d}, {text_h:d}) = ({text_w:X}h, {text_h:X}h)'\n\n else:\n address = status.cursor_address\n text_range = f'Address: {format_address(address)}'\n text_length = f'Digit: {status.cell_format_length - status.cursor_digit}'\n\n self.statusbar_address.configure(text=text_range)\n self.statusbar_selection.configure(text=text_length)\n\n mode_text = f'{status.cursor_mode.name.lower()}'\n if status.sel_mode:\n mode_text += f' / {status.sel_mode.name.lower()}'\n self.statusbar_cursor.configure(text=mode_text)\n\n def get_start_text(self) -> str:\n text = self.start_entry.get()\n return text\n\n def set_start_text(self, text: str, focus: bool = False) -> None:\n start_entry = self.start_entry\n start_entry.delete(0, tk.END)\n if text:\n 
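# an empty string simply leaves the entry cleared\n 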
start_entry.insert(tk.END, text)\n if focus:\n start_entry.focus_set()\n\n def focus_start_text(self) -> None:\n start_entry = self.start_entry\n start_entry.focus_set()\n\n def get_start_address(self) -> Address:\n text = self.get_start_text()\n address = parse_int(text)[0]\n return address\n\n def set_start_address(self, address: Address) -> None:\n fmt = self.engine.status.address_format_string\n text = fmt.format(address)\n self.set_start_text(text)\n\n def get_endin_text(self) -> str:\n text = self.endin_entry.get()\n return text\n\n def set_endin_text(self, text: str, focus: bool = False) -> None:\n endin_entry = self.endin_entry\n endin_entry.delete(0, tk.END)\n if text:\n endin_entry.insert(tk.END, text)\n if focus:\n endin_entry.focus_set()\n\n def focus_endin_text(self) -> None:\n endin_entry = self.endin_entry\n endin_entry.focus_set()\n\n def get_endin_address(self) -> Address:\n text = self.get_endin_text()\n address = parse_int(text)[0]\n return address\n\n def set_endin_address(self, address: Address) -> None:\n fmt = self.engine.status.address_format_string\n text = fmt.format(address)\n self.set_endin_text(text)\n\n def show_about(self): # TODO: make better dedicated window\n tk.messagebox.showinfo('About Hecks!', (\n 'Copyright (c) 2021, Andrea Zoppi. All rights reserved.\\n'\n '\\n'\n 'Hecks is free software: you can redistribute it and/or modify '\n 'it under the terms of the GNU General Public License as published by '\n 'the Free Software Foundation, either version 3 of the License, or '\n '(at your option) any later version.\\n'\n '\\n'\n 'Hecks is distributed in the hope that it will be useful, '\n 'but WITHOUT ANY WARRANTY; without even the implied warranty of '\n 'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the '\n 'GNU General Public License for more details.\\n'\n '\\n'\n 'You should have received a copy of the GNU General Public License '\n 'along with Hecks. 
If not, see <https://www.gnu.org/licenses/>.'\n ))\n\n def show_info(self, title: str, message: str):\n tk.messagebox.showinfo(title=title, message=message)\n\n def show_warning(self, title: str, message: str):\n tk.messagebox.showwarning(title=title, message=message)\n\n def show_error(self, title: str, message: str):\n tk.messagebox.showerror(title=title, message=message)\n\n def ask_open_file_path(self) -> Optional[str]:\n file_path = tk.filedialog.askopenfilename(filetypes=FILE_TYPES)\n return file_path\n\n def ask_save_file_path(self) -> Optional[str]:\n file_path = tk.filedialog.asksaveasfilename(filetypes=FILE_TYPES)\n return file_path\n\n def ask_line_length_custom(self) -> Optional[int]:\n value = tk.simpledialog.askinteger('Line length', 'Enter the line length:')\n if value is not None:\n if 1 <= value <= 256:\n self.line_length_tkvar.set(value)\n return value\n else:\n tk.messagebox.showerror('Invalid value', 'Only positive integers between 1 and 256 are accepted')\n return None\n\n def ask_address_bits_custom(self) -> Optional[int]:\n value = tk.simpledialog.askinteger('Address bits', 'Enter the address bit size:')\n if value is not None:\n if 1 <= value <= 256:\n self.address_bits_tkvar.set(value)\n return value\n else:\n tk.messagebox.showerror('Invalid value', 'Only positive integers between 1 and 256 are accepted')\n return None\n\n def ask_address_skip_custom(self) -> Optional[int]:\n text = tk.simpledialog.askstring('Address skip', 'Enter the address skip:')\n if text is not None:\n try:\n value = parse_int(text)[0]\n except ValueError:\n tk.messagebox.showerror('Invalid value', 'Invalid address value format')\n else:\n self.address_skip_tkvar.set(value)\n return value\n return None\n\n def ask_chars_encoding_custom(self) -> Optional[str]:\n value = tk.simpledialog.askstring('Text encoding', 'Enter the Python text codec name:')\n if value is not None:\n try:\n b'\\0'.decode(encoding=value, errors='strict')\n except (LookupError, UnicodeDecodeError):\n tk.messagebox.showerror('Invalid encoding', f'Python does not support the text codec: {value!r}')\n else:\n self.chars_encoding_tkvar.set(value)\n return value\n return None\n\n def update_title_by_file_path(self):\n top = self.top\n status = self.engine.status\n\n if status.file_path:\n text = f'{status.file_path} - {PROGRAM_TITLE}'\n else:\n text = f'|untitled| - {PROGRAM_TITLE}'\n top.title(text)\n\n def update_menus_by_selection(self):\n status = self.engine.status\n # TODO: cache condition to skip useless GUI calls\n state = tk.NORMAL if status.sel_mode else tk.DISABLED\n\n menu = self.menu_edit\n labels = ('Cut', 'Copy', 'Crop', 'Move')\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n menu = self.cells_popup\n labels = ('Cut', 'Copy', 'Crop', 'Move', 'Export')\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n menu = self.chars_popup\n labels = ('Cut', 'Copy', 'Crop', 'Move', 'Export')\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n toolbar = self.toolbar_edit\n labels = ('Cut', 'Copy', 'Crop')\n for label in labels:\n toolbar.get_widget(label).configure(cnf=dict(state=state))\n\n toolbar = self.toolbar_address\n labels = ('Move',)\n for label in labels:\n toolbar.get_widget(label).configure(cnf=dict(state=state))\n\n self.update_menus_by_cursor()\n\n def update_menus_by_cursor(self):\n status = self.engine.status\n address = status.cursor_address\n memory = status.memory\n start = memory.start\n endex = memory.endex\n\n # TODO: cache condition to skip useless GUI 
calls\n if status.sel_mode or start <= address < endex:\n state = tk.NORMAL\n else:\n state = tk.DISABLED\n\n menu = self.menu_edit\n labels = ('Fill',)\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n menu = self.cells_popup\n labels = ('Fill',)\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n toolbar = self.toolbar_edit\n labels = ('Fill',)\n for label in labels:\n toolbar.get_widget(label).configure(state=state)\n\n # TODO: cache condition to skip useless GUI calls\n if status.sel_mode or (start <= address < endex and memory.peek(address) is None):\n state = tk.NORMAL\n else:\n state = tk.DISABLED\n\n menu = self.menu_edit\n labels = ('Flood',)\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n menu = self.cells_popup\n labels = ('Flood',)\n for label in labels:\n menu.entryconfigure(menu.index(label), state=state)\n\n toolbar = self.toolbar_edit\n labels = ('Flood',)\n for label in labels:\n toolbar.get_widget(label).configure(state=state)\n\n def on_file_new(self, event=None):\n self.engine.on_file_new()\n\n def on_file_open(self, event=None):\n self.engine.on_file_open()\n\n def on_file_import(self, event=None):\n self.engine.on_file_import()\n\n def on_file_save(self, event=None):\n self.engine.on_file_save()\n\n def on_file_save_as(self, event=None):\n self.engine.on_file_save_as()\n\n def on_file_settings(self, event=None):\n self.engine.on_file_settings()\n\n def on_file_exit(self, event=None):\n self.engine.on_file_exit()\n\n def on_edit_undo(self, event=None):\n self.engine.on_edit_undo()\n\n def on_edit_redo(self, event=None):\n self.engine.on_edit_redo()\n\n def on_edit_cut(self, event=None):\n self.engine.on_edit_cut()\n\n def on_edit_copy(self, event=None):\n self.engine.on_edit_copy()\n\n def on_edit_paste(self, event=None):\n self.engine.on_edit_paste()\n\n def on_edit_delete(self, event=None):\n self.engine.on_edit_delete()\n\n def on_edit_cursor_mode(self, event=None):\n self.engine.on_edit_cursor_mode()\n\n def on_edit_clear(self, event=None):\n self.engine.on_edit_clear()\n\n def on_edit_reserve(self, event=None):\n self.engine.on_edit_reserve()\n\n def on_edit_fill(self, event=None):\n self.engine.on_edit_fill()\n\n def on_edit_flood(self, event=None):\n self.engine.on_edit_flood()\n\n def on_edit_crop(self, event=None):\n self.engine.on_edit_crop()\n\n def on_edit_move_focus(self, event=None):\n self.engine.on_edit_move_focus()\n\n def on_edit_move_apply(self, event=None):\n self.engine.on_edit_move_apply()\n\n def on_edit_export(self, event=None):\n self.engine.on_edit_export()\n\n def on_edit_select_all(self, event=None):\n self.engine.on_edit_select_all()\n\n def on_edit_select_range(self, event=None):\n self.engine.on_edit_select_range()\n\n def on_edit_copy_address(self, event=None):\n self.engine.on_edit_copy_address()\n\n def on_edit_find(self, event=None):\n self.engine.on_edit_find()\n\n def on_view_line_length_custom(self, event=None):\n self.engine.on_view_line_length_custom()\n\n def on_view_address_bits_custom(self, event=None):\n self.engine.on_view_address_bits_custom()\n\n def on_view_chars_encoding_custom(self, event=None):\n self.engine.on_view_chars_encoding_custom()\n\n def on_view_redraw(self, event=None):\n self.engine.on_view_redraw()\n\n def on_nav_editor_focus(self, event=None):\n self.engine.on_nav_editor_focus()\n\n def on_nav_goto_memory_address_start_focus(self, event=None):\n self.engine.on_nav_goto_memory_address_start_focus()\n\n def 
on_nav_goto_memory_address_start_apply(self, event=None):\n self.engine.on_nav_goto_memory_address_start_apply()\n\n def on_nav_goto_memory_address_endin_focus(self, event=None):\n self.engine.on_nav_goto_memory_address_endin_focus()\n\n def on_nav_goto_memory_address_endin_apply(self, event=None):\n self.engine.on_nav_goto_memory_address_endin_apply()\n\n def on_nav_goto_memory_address_copy(self, event=None):\n self.engine.on_nav_goto_memory_address_copy()\n\n def on_nav_goto_memory_start(self, event=None):\n self.engine.on_nav_goto_memory_start()\n\n def on_nav_goto_memory_endin(self, event=None):\n self.engine.on_nav_goto_memory_endin()\n\n def on_nav_goto_memory_endex(self, event=None):\n self.engine.on_nav_goto_memory_endex()\n\n def on_nav_address_skip(self, event=None):\n self.engine.on_nav_address_skip()\n\n def on_nav_goto_block_previous(self, event=None):\n self.engine.on_nav_goto_block_previous()\n\n def on_nav_goto_block_next(self, event=None):\n self.engine.on_nav_goto_block_next()\n\n def on_nav_goto_block_start(self, event=None):\n self.engine.on_nav_goto_block_start()\n\n def on_nav_goto_block_endin(self, event=None):\n self.engine.on_nav_goto_block_endin()\n\n def on_nav_goto_byte_previous(self, event=None):\n self.engine.on_nav_goto_byte_previous()\n\n def on_nav_goto_byte_next(self, event=None):\n self.engine.on_nav_goto_byte_next()\n\n def on_nav_goto_line_start(self, event=None):\n self.engine.on_nav_goto_line_start()\n\n def on_nav_goto_line_endin(self, event=None):\n self.engine.on_nav_goto_line_endin()\n\n def on_nav_scroll_line_up(self, event=None):\n self.engine.on_nav_scroll_line_up()\n\n def on_nav_scroll_line_down(self, event=None):\n self.engine.on_nav_scroll_line_down()\n\n def on_nav_scroll_page_up(self, event=None):\n self.engine.on_nav_scroll_page_up()\n\n def on_nav_scroll_page_down(self, event=None):\n self.engine.on_nav_scroll_page_down()\n\n def on_nav_scroll_top(self, event=None):\n self.engine.on_nav_scroll_top()\n\n def on_nav_scroll_bottom(self, event=None):\n self.engine.on_nav_scroll_bottom()\n\n def on_help_about(self, event=None):\n self.engine.on_help_about()\n\n def on_tkvar_chars_visible(self, *args):\n value = self.top.getvar(name='chars_visible')\n self.engine.on_set_chars_visible(value)\n\n def on_tkvar_line_length(self, *args):\n value = self.top.getvar(name='line_length')\n self.engine.on_set_line_length(value)\n\n def on_tkvar_chars_encoding(self, *args):\n value = self.top.getvar(name='chars_encoding')\n self.engine.on_set_chars_encoding(value)\n\n def on_tkvar_cell_mode(self, *args):\n value = self.top.getvar(name='cell_mode')\n self.engine.on_set_cell_mode(value)\n\n def on_tkvar_cell_prefix(self, *args):\n value = self.top.getvar(name='cell_prefix')\n self.engine.on_set_cell_prefix(value)\n\n def on_tkvar_cell_suffix(self, *args):\n value = self.top.getvar(name='cell_suffix')\n self.engine.on_set_cell_suffix(value)\n\n def on_tkvar_cell_zeroed(self, *args):\n value = self.top.getvar(name='cell_zeroed')\n self.engine.on_set_cell_zeroed(value)\n\n def on_tkvar_address_mode(self, *args):\n value = self.top.getvar(name='address_mode')\n self.engine.on_set_address_mode(value)\n\n def on_tkvar_address_prefix(self, *args):\n value = self.top.getvar(name='address_prefix')\n self.engine.on_set_address_prefix(value)\n\n def on_tkvar_address_suffix(self, *args):\n value = self.top.getvar(name='address_suffix')\n self.engine.on_set_address_suffix(value)\n\n def on_tkvar_address_zeroed(self, *args):\n value = 
self.top.getvar(name='address_zeroed')\n self.engine.on_set_address_zeroed(value)\n\n def on_tkvar_address_skip(self, *args):\n value = self.top.getvar(name='address_skip')\n self.engine.on_set_address_skip(value)\n\n def on_tkvar_address_bits(self, *args):\n value = self.top.getvar(name='address_bits')\n self.engine.on_set_address_bits(value)\n\n def on_tkvar_offset_mode(self, *args):\n value = self.top.getvar(name='offset_mode')\n self.engine.on_set_offset_mode(value)\n\n def on_tkvar_offset_prefix(self, *args):\n value = self.top.getvar(name='offset_prefix')\n self.engine.on_set_offset_prefix(value)\n\n def on_tkvar_offset_suffix(self, *args):\n value = self.top.getvar(name='offset_suffix')\n self.engine.on_set_offset_suffix(value)\n\n def on_tkvar_offset_zeroed(self, *args):\n value = self.top.getvar(name='offset_zeroed')\n self.engine.on_set_offset_zeroed(value)\n\n\n# =====================================================================================================================\n\nclass InstanceManager(BaseInstanceManager):\n\n def __init__(self):\n super().__init__()\n\n # Create a hidden root window, not used by the application\n # root = tk.Tk()\n root = ttkthemes.ThemedTk(theme=_THEME)\n root.overrideredirect(True)\n root.withdraw()\n # self._root: tk.Tk = root\n self._root: ttkthemes.ThemedTk = root\n _fix_global_colors(root)\n\n def remove(self, index: int) -> object:\n instance = super().remove(index)\n if self:\n return instance\n else:\n self.quit()\n return None\n\n def run(self):\n self._root.mainloop()\n\n def quit(self) -> None:\n super().quit()\n self._root.destroy()\n\n @property\n # def root(self) -> tk.Tk:\n def root(self) -> ttkthemes.ThemedTk:\n return self._root\n\n\n# =====================================================================================================================\n\ndef main() -> None:\n manager = InstanceManager()\n UserInterface(manager, Engine)\n manager.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TexZK/hecks","sub_path":"src/hecks/tkgui.py","file_name":"tkgui.py","file_ext":"py","file_size_in_byte":133339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24277144162","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torchviz import make_dot, make_dot_from_trace\n\nfrom graphviz import Digraph\nimport re\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.autograd import Variable\nimport torchvision.models as models\n\nimport torch.onnx\nfrom ptflops import get_model_complexity_info\nimport sys\nsys.path.append(\"..\")\nimport global_vars as GLOBALS\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, intermediate_planes, out_planes,kernel_size_1=3,kernel_size_2=3,stride=1):\n self.in_planes=in_planes\n self.intermediate_planes=intermediate_planes\n self.out_planes=out_planes\n\n super(BasicBlock,self).__init__()\n '''if in_planes!=intermediate_planes:\n #print('shortcut_needed')\n stride=2\n else:\n stride=stride'''\n self.conv1=nn.Conv2d(\n in_planes,\n intermediate_planes,\n kernel_size=kernel_size_1,\n stride=stride,\n padding=int((kernel_size_1-1)/2),\n bias=False\n )\n self.bn1=nn.BatchNorm2d(intermediate_planes)\n self.conv2=nn.Conv2d(\n intermediate_planes,\n out_planes,\n kernel_size=kernel_size_2,\n stride=1,\n padding=int((kernel_size_2-1)/2),\n bias=False\n )\n self.bn2=nn.BatchNorm2d(out_planes)\n 
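# Standard residual wiring: keep the identity shortcut when shapes match,\n # otherwise project with a strided 1x1 convolution plus BatchNorm below.\n 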
self.relu=nn.ReLU()\n self.shortcut=nn.Sequential()\n if stride!=1 or in_planes!=out_planes:\n #print('shortcut_made')\n self.shortcut=nn.Sequential(\n nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False\n ),\n nn.BatchNorm2d(out_planes),\n #nn.ReLU()\n )\n\n def forward(self,y):\n x = self.conv1(y)\n #print(x.shape,'post conv1 block')\n x = self.bn1(x)\n x = self.relu(x)\n x = self.bn2(self.conv2(x))\n #print(x.shape,'post conv2 block')\n #if self.shortcut!=nn.Sequential():\n #print('shortcut_made')\n #print(self.shortcut)\n #print(x.shape)\n #print(y.shape)\n #print(self.shortcut(y).shape)\n x += self.shortcut(y)\n #print(x.shape,'post conv3 block')\n x = self.relu(x)\n return x\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, inter1_planes,inter2_planes,out_planes,\n kernel_size_1=1,kernel_size_2=3,kernel_size_3=1,stride=1):\n super(Bottleneck, self).__init__()\n self.relu=nn.ReLU()\n self.conv1 = nn.Conv2d(in_planes, inter1_planes, kernel_size=kernel_size_1, padding=int((kernel_size_1-1)/2), bias=False)\n self.bn1 = nn.BatchNorm2d(inter1_planes)\n self.conv2 = nn.Conv2d(inter1_planes, inter2_planes, kernel_size=kernel_size_2,\n stride=stride, padding=int((kernel_size_2-1)/2), bias=False)\n self.bn2 = nn.BatchNorm2d(inter2_planes)\n self.conv3 = nn.Conv2d(inter2_planes,\n out_planes, kernel_size=kernel_size_3, padding=int((kernel_size_3-1)/2), bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != out_planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, out_planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_planes)\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = self.relu(out)\n return out\n\nclass Network(nn.Module):\n\n def __init__(self, block, image_channels=3,new_output_sizes=None,new_kernel_sizes=None,num_classes=10):\n super(Network, self).__init__()\n\n self.superblock1_indexes=GLOBALS.super1_idx\n self.superblock2_indexes=GLOBALS.super2_idx\n self.superblock3_indexes=GLOBALS.super3_idx\n self.superblock4_indexes=GLOBALS.super4_idx\n\n self.superblock1_kernels=GLOBALS.super1_kernel_idx\n self.superblock2_kernels=GLOBALS.super2_kernel_idx\n self.superblock3_kernels=GLOBALS.super3_kernel_idx\n self.superblock4_kernels=GLOBALS.super4_kernel_idx\n\n if new_output_sizes!=None:\n self.superblock1_indexes=new_output_sizes[0]\n self.superblock2_indexes=new_output_sizes[1]\n self.superblock3_indexes=new_output_sizes[2]\n self.superblock4_indexes=new_output_sizes[3]\n if new_kernel_sizes!=None:\n self.superblock1_kernels=new_kernel_sizes[0]\n self.superblock2_kernels=new_kernel_sizes[1]\n self.superblock3_kernels=new_kernel_sizes[2]\n self.superblock4_kernels=new_kernel_sizes[3]\n print(new_kernel_sizes, 'VALUES PROVIDED FOR KERNEL SIZES')\n\n shortcut_indexes=[]\n counter=-1\n conv_size_list=[self.superblock1_indexes,self.superblock2_indexes,self.superblock3_indexes,self.superblock4_indexes]\n print(conv_size_list,'NETWORK ARCHITECTURE')\n for j in conv_size_list:\n if len(shortcut_indexes)==len(conv_size_list)-1:\n break\n counter+=len(j) + 1\n shortcut_indexes+=[counter]\n #print(shortcut_indexes)\n self.shortcut_1_index = shortcut_indexes[0]\n self.shortcut_2_index = shortcut_indexes[1]\n self.shortcut_3_index = shortcut_indexes[2]\n\n 
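# Flatten the per-superblock channel/kernel lists into single per-layer\n # sequences; _create_network walks these flat lists and applies stride 2\n # at the shortcut indexes computed above.\n 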
self.index=self.superblock1_indexes+self.superblock2_indexes+self.superblock3_indexes+self.superblock4_indexes\n self.kernel_sizes=self.superblock1_kernels+self.superblock2_kernels+self.superblock3_kernels+self.superblock4_kernels\n\n self.num_classes=num_classes\n self.conv1 = nn.Conv2d(image_channels, self.index[0], kernel_size=self.kernel_sizes[0], stride=1, padding=int((self.kernel_sizes[0]-1)/2), bias=False)\n self.bn1 = nn.BatchNorm2d(self.index[0])\n self.network=self._create_network(block)\n self.linear=nn.Linear(self.index[len(self.index)-1],num_classes)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.maxpool=nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.relu=nn.ReLU()\n\n def _create_network(self,block):\n\n layers=[]\n if block==BasicBlock:\n layers.append(block(self.index[0],self.index[1],self.index[2],kernel_size_1=self.kernel_sizes[1],kernel_size_2=self.kernel_sizes[2],stride=1))\n for i in range(2,len(self.index)-2,2):\n if (i+1==self.shortcut_1_index or i+2==self.shortcut_2_index or i+3==self.shortcut_3_index):\n stride=2\n else:\n stride=1\n layers.append(block(self.index[i],self.index[i+1],self.index[i+2],kernel_size_1=self.kernel_sizes[i+1],kernel_size_2=self.kernel_sizes[i+2],stride=stride))\n elif block==Bottleneck:\n layers.append(block(self.index[0],self.index[1],self.index[2],self.index[3],kernel_size_1=self.kernel_sizes[1],kernel_size_2=self.kernel_sizes[2],kernel_size_3=self.kernel_sizes[3],stride=1))\n for i in range(3,len(self.index)-3,3):\n if (i+1==self.shortcut_1_index or i+2==self.shortcut_2_index or i+3==self.shortcut_3_index):\n stride=2\n else:\n stride=1\n layers.append(block(self.index[i],self.index[i+1],self.index[i+2],self.index[i+3],kernel_size_1=self.kernel_sizes[i+1],kernel_size_2=self.kernel_sizes[i+2],kernel_size_3=self.kernel_sizes[i+3],stride=stride))\n #print(len(self.index),'len index')\n return nn.Sequential(*layers)\n\n def forward(self, y):\n #print(self.index )\n x = self.conv1(y)\n #print(x.shape, 'conv1')\n x = self.bn1(x)\n #print(x.shape, 'bn1')\n x = self.relu(x)\n #print(x.shape, 'relu')\n #x = self.maxpool(x)\n ##print(x.shape, 'max pool')\n x = self.network(x)\n #print(x.shape, 'post bunch of blocks')\n x = self.avgpool(x)\n #print(x.shape, 'post avgpool')\n x = x.view(x.size(0), -1)\n #print(x.shape, 'post reshaping')\n x = self.linear(x)\n #print(x.shape, 'post fc')\n return x\n\n\ndef DASNet34(num_classes_input = 10,new_output_sizes=None,new_kernel_sizes=None):\n GLOBALS.BLOCK_TYPE='BasicBlock'\n print('SETTING BLOCK_TYPE TO BasicBlock')\n return Network(BasicBlock, 3, num_classes=num_classes_input, new_output_sizes=new_output_sizes,new_kernel_sizes=new_kernel_sizes)\n\ndef DASNet50(num_classes_input = 10,new_output_sizes=None,new_kernel_sizes=None):\n GLOBALS.BLOCK_TYPE='Bottleneck'\n print('SETTING BLOCK_TYPE TO Bottleneck')\n return Network(Bottleneck, 3, num_classes=num_classes_input, new_output_sizes=new_output_sizes,new_kernel_sizes=new_kernel_sizes)\n\ndef test():\n #writer = SummaryWriter('runs/resnet34_1')\n net = DASNet34()\n y = net(torch.randn(1, 3, 32, 32))\n print(y.size())\n\n macs, params = get_model_complexity_info(net, (3,32,32), as_strings=True,\n print_per_layer_stat=True, verbose=True)\n print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))\n '''\n #print(net)\n g=make_dot(y)\n g.view()\n #g.view()\n torch.save(net.state_dict(),'temp_resnet.onnx')\n dummy_input = Variable(torch.randn(4, 3, 32, 32))\n torch.onnx.export(net, 
dummy_input, \"model.onnx\")\n '''\n\n#test()\n","repo_name":"mahdihosseini/CONet","sub_path":"CONet/models/own_network.py","file_name":"own_network.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"19711251003","text":"from datetime import datetime, timedelta\nimport functools\n\nfrom flask import request\n\nimport jwt\n\nfrom api import app\n\n\ndef jwt_required(func):\n @functools.wraps(func)\n def wrapper(*args,**kwargs):\n try:\n type, token = request.headers.get('Authentication').split(\" \")\n payload = decode_auth_token(token)\n\n _globals = func.__globals__\n oldvalue = _globals.get('payload', None)\n _globals['payload'] = payload\n\n try:\n res = func(*args, **kwargs)\n finally:\n if oldvalue is None:\n del _globals['payload']\n else:\n _globals['payload'] = oldvalue\n\n except jwt.ExpiredSignatureError:\n return {\"message\": \"Token Expired\"}, 401\n except jwt.DecodeError:\n return {\"message\": \"Token Invalid\"}, 401\n except jwt.InvalidTokenError:\n return {\"message\": \"Token Invalid\"}, 401\n\n return res\n\n return wrapper\n\n\ndef encode_auth_token(user_id):\n \"\"\"\n Generates the Auth Token\n :return: string\n \"\"\"\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=60),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except BaseException as err:\n return err\n\n\ndef decode_auth_token(auth_token):\n \"\"\"\n Decodes the auth token\n :param auth_token:\n :return: integer|string\n \"\"\"\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n return payload['sub']\n","repo_name":"alfredocdmiranda/ada","sub_path":"ada/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33603013559","text":"from sqlalchemy import Column, Integer, String\n\nfrom app.models.base import Base\n\n\nclass Camp(Base):\n __tablename__ = 'camp'\n\n fields = ['id', 'name']\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(100))\n\n @property\n def courses(self):\n from app.models.camp_models.course import Course\n return Course.search(camp_id=self.id, page_size=-1)['data']\n","repo_name":"zucc-acm-devteam/view-oj-backend","sub_path":"app/models/camp_models/camp.py","file_name":"camp.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"30118387630","text":"# -*- coding: utf-8 -*-\r\n__author__ = 'jingsam@163.com'\r\n\r\nimport os\r\nimport re\r\nimport arcpy\r\nfrom parallel import check_parallel\r\n\r\ndef check_rare_name_task(args, cpus, pid):\r\n in_fc = args[0]\r\n fields = args[1]\r\n error_id = \"ERR04\"\r\n layer = os.path.basename(in_fc)\r\n content = \"道路名称字段不能含有不合理的字符\"\r\n description = \"图层【{0}】的ID为【{1}】的要素,道路名称字段不能含有不合理的字符。\"\r\n warning = \"不忽略\"\r\n\r\n desc = arcpy.Describe(in_fc)\r\n errors = []\r\n pattern = re.compile(u\"[ ~!!.·#¥%…&*]\")\r\n\r\n _fields = [\"OID@\", \"SHAPE@XY\"] + fields\r\n cursor = arcpy.da.SearchCursor(in_fc, _fields, spatial_reference=desc.spatialReference.GCS)\r\n for row in cursor:\r\n if row[0] % cpus != pid:\r\n continue\r\n\r\n for i in xrange(2, len(row)):\r\n if not row[i]:\r\n continue\r\n\r\n # row[i] is a unicode string\r\n display_name = 
row[i].encode(\"utf-8\")\r\n field = _fields[i]\r\n match = pattern.search(row[i])\r\n if match:\r\n errors.append('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}\\n'\r\n .format(row[0], error_id, layer, content, description.format(layer, row[0]), row[1][0], row[1][1], warning))\r\n continue\r\n\r\n try:\r\n row[i].encode(\"gb2312\")\r\n except UnicodeEncodeError:\r\n errors.append('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}\\n'\r\n .format(row[0], error_id, layer, content, description.format(layer, row[0]), row[1][0], row[1][1], warning))\r\n continue\r\n del cursor\r\n\r\n return ''.join(errors)\r\n\r\n\r\ndef check_rare_name(in_fc, fields, out_chk):\r\n if not arcpy.Exists(in_fc):\r\n arcpy.AddIDMessage(\"ERROR\", 110, in_fc)\r\n raise SystemExit()\r\n\r\n ext = os.path.splitext(out_chk)[1]\r\n if ext != '.csv':\r\n out_chk += '.csv'\r\n f = open(out_chk, 'w')\r\n f.write('OID, ErrorID, Layer, InspectionContent, Description, X, Y, Warning\\n')\r\n\r\n # result = check_rare_name_task((in_fc, fields), 1, 0)\r\n result = check_parallel(check_rare_name_task, (in_fc, fields))\r\n f.write(result)\r\n f.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n in_fc = arcpy.GetParameterAsText(0)\r\n fields = arcpy.GetParameterAsText(1)\r\n out_chk = arcpy.GetParameterAsText(2)\r\n\r\n check_rare_name(in_fc, fields.split(\";\"), out_chk)\r\n","repo_name":"jingsam/tianditu","sub_path":"CheckRareName.py","file_name":"CheckRareName.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27124708081","text":"import random\nimport pygame\nimport game_variable\n\n\nclass Food:\n def __init__(self, parent_screen):\n self.parent_screen = parent_screen\n self.food_pos = [random.randrange(1, (game_variable.frame_size_x // 10)) * 10,\n random.randrange(1, (game_variable.frame_size_y // 10)) * 10]\n self.food_spawn = True\n self.bonus_success = False\n\n def bonus_food(self):\n direction_food = [\"DOWN\", \"RIGHT\"]\n ran = random.randrange(0, 2)\n if direction_food[ran] == \"DOWN\":\n self.food_pos[1] += 10\n if direction_food[ran] == \"RIGHT\":\n self.food_pos[0] += 10\n\n if self.food_pos[0] < 0 or self.food_pos[0] > game_variable.frame_size_x - 10 or self.food_pos[1] < 0 \\\n or self.food_pos[1] > game_variable.frame_size_y - 10:\n self.food_spawn = False\n self.bonus_success = False\n\n def change_pos_food(self, x, y):\n self.food_pos = [x, y]\n\n def change_pos_bonus(self):\n self.food_pos = [random.randint(0, 100), random.randint(0, 100)]\n\n def draw(self):\n pygame.draw.rect(self.parent_screen, pygame.Color(0, 255, 0),\n pygame.Rect(self.food_pos[0], self.food_pos[1], 10, 10))\n\n","repo_name":"OnesNT/Snake_world-game-","sub_path":"Snake_world_game/Food.py","file_name":"Food.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41435058986","text":"class Solution:\n def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:\n pos = -1\n mindist = float('inf')\n for i, (a, b) in enumerate(points):\n if a != x and b != y: continue\n dist = abs(x - a) + abs(y - b)\n if pos == -1 or dist < mindist:\n mindist = dist\n pos = i\n return pos\n","repo_name":"samek571/leetcode-600","sub_path":"1779. Find Nearest Point That Has the Same X or Y Coordinate.py","file_name":"1779. 
Find Nearest Point That Has the Same X or Y Coordinate.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3130510636","text":"import random\n\npronoms = [\"Je\", \"Tu\", \"Il\", \"Elle\", \"On\", \"Nous\", \"Vous\", \"Ils\", \"Elles\"]\npronom = random.choice(pronoms)\n\nprint(\"-------- Créateur de phrases automatisé Python --------\")\nprint(\"Créé par tux-linux, avril 2020. Tous droits réservés\")\n\n# Transformation des mots en listes\nverbes = input(\"Quels sont les verbes des mots de vocabulaire ? Les écrire ci-dessous, séparés d'un espace.\\n\")\nlisteverbes = verbes.split()\nnoms = input(\"Quels sont les noms des mots de vocabulaire ? Les écrire ci-dessous, séparés d'un espace.\\n\")\nlistenoms = noms.split()\nadjectifs = input(\"Quels sont les adjectifs des mots de vocabulaire ? Les écrire ci-dessous, séparés d'un espace.\\n\")\nlisteadjectifs = adjectifs.split()\n\n# Définition de l'accord du verbe \"aller\" selon les pronoms\nif pronom == \"Je\":\n verbe = \"vais\"\nelif pronom == \"Tu\":\n verbe = \"vas\"\nelif pronom == \"Il\" or pronom == \"Elle\" or pronom == \"On\":\n verbe = \"va\"\nelif pronom == \"Nous\":\n verbe = \"allons\"\nelif pronom == \"Vous\":\n verbe = \"allez\"\nelif pronom == \"Ils\" or pronom == \"Elles\":\n verbe = \"vont\"\n\n# Choix du verbe final utilisé selon la liste donnée\nif listeverbes == []:\n listeverbes = [\"manger\", \"boire\", \"mettre\", \"aller\", \"être\", \"avoir\", \"prendre\", \"obtenir\"]\n verbe1 = random.choice(listeverbes)\nelse:\n verbe1 = random.choice(listeverbes)\n\n# Choix du nom utilisé selon la liste donnée\nif listenoms == []:\n listenoms = [\"arbre\", \"fruit\", \"insecte\", \"terrain\", \"serveur\", \"professeur\", \"maître\", \"mentor\", \"pays\", \"mot\", \"mélangeur\"]\n nom = random.choice(listenoms)\nelse:\n nom = random.choice(listenoms)\n\n# Choix du déterminant utilisé selon une liste donnée\nlistedeterminants = [\"leur\", \"notre\", \"votre\", \"le\", \"son\", \"mon\", \"ton\", \"ce\"]\ndeterminant = random.choice(listedeterminants)\n\n# Choix de l'adjectif utilisé selon une liste donnée\nif listeadjectifs == []:\n listeadjectifs = [\"doux\", \"fort\", \"tendre\", \"amusant\", \"proactif\", \"attentif\", \"gentil\", \"méchant\"]\n adjectif = random.choice(listeadjectifs)\nelse:\n adjectif = random.choice(listeadjectifs)\n\nprint(\"\\n\\nVoici la phrase finale:\")\n\n# Définition d'exceptions selon le verbe utilisé, les déterminants utilisés, etc.\nif verbe1 == \"aller\":\n etatphrase = 1\n verbes2 = [\"voir\", \"manger\", \"enterrer\", \"détruire\", \"boire\"]\n verbe2 = random.choice(verbes2)\n if nom[0] == \"a\" or nom[0] == \"e\" or nom[0] == \"i\" or nom[0] == \"o\" or nom[0] == \"u\" or nom[0] == \"y\":\n if determinant == \"ce\":\n determinant = \"cet\"\n elif determinant == \"le\":\n determinant = \"l'\"\nelif verbe1 == \"mettre\" or verbe1 == \"apporter\" or verbe1 == \"déposer\" or verbe1 == \"aimer\" or verbe1 == \"aider\" or verbe1 == \"apprendre\" or verbe1 == \"attendre\" or verbe1 == \"chercher\" or verbe1 == \"choisir\" or verbe1 == \"commander\" or verbe1 == \"commencer\" or verbe1 == \"connaître\" or verbe1 == \"continuer\" or verbe1 == \"détester\" or verbe1 == \"écouter\" or verbe1 == \"enseigner\" or verbe1 == \"entendre\" or verbe1 == \"envoyer\" or verbe1 == \"faire\" or verbe1 == \"féliciter\" or verbe1 == \"finir\" or verbe1 == \"garder\" or verbe1 == \"inviter\" or verbe1 == \"oublier\" or 
verbe1 == \"préparer\" or verbe1 == \"refuser\" or verbe1 == \"regretter\" or verbe1 == \"remercier\" or verbe1 == \"voir\" or verbe1 == \"vouloir\":\n etatphrase = 2\n suites = [\"dans un\", \"près d'un\", \"à côté d'un\", \"dans le\", \"dans leur\", \"dans notre\", \"dans votre\", \"dans son\", \"dans ton\", \"dans mon\"]\n suite = random.choice(suites)\n nom2 = random.choice(listenoms)\n if nom[0] == \"a\" or nom[0] == \"e\" or nom[0] == \"i\" or nom[0] == \"o\" or nom[0] == \"u\" or nom[0] == \"y\":\n if determinant == \"ce\":\n determinant = \"cet\"\n elif determinant == \"le\":\n determinant = \"l'\"\nelse:\n etatphrase = 0\n if nom[0] == \"a\" or nom[0] == \"e\" or nom[0] == \"i\" or nom[0] == \"o\" or nom[0] == \"u\" or nom[0] == \"y\":\n if determinant == \"ce\":\n determinant = \"cet\"\n elif determinant == \"le\":\n determinant = \"l'\"\n# Impression de la phrase finale\nif etatphrase == 1:\n print(\"{0} {1} {2} {3} {4} {5} {6}.\".format(pronom, verbe, verbe1, verbe2, determinant, nom, adjectif))\nelif etatphrase == 2:\n print(\"{0} {1} {2} {3} {4} {5} {6} {7}.\".format(pronom, verbe, verbe1, determinant, nom, adjectif, suite, nom2))\nelse:\n print(\"{0} {1} {2} {3} {4} {5}.\".format(pronom, verbe, verbe1, determinant, nom, adjectif))\n","repo_name":"tux-linux/Python-works","sub_path":"IA-CompoPhrases.py","file_name":"IA-CompoPhrases.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29027283350","text":"def convertToBase2(m):\n res = []\n\n while m != 0:\n d = m % 2\n\n res.append(d)\n\n m = m // 2\n\n # res.reverse()\n\n return res\n\n\ndef convertToBase2_v2(m):\n res = []\n\n while m != 0:\n d = m & 1\n\n res.append(d)\n\n m >>= 1\n\n res.reverse()\n\n return res\n\n\ndef solve(n, m):\n arr = convertToBase2(m)\n ans = []\n\n for i in range(len(arr)-1, -1, -1):\n if arr[i] == 1:\n ans.append('(' + str(n) + '<<' + str(i) + ')')\n\n print(' + '.join(ans))\n\n\ndef main():\n t = int(input())\n\n for i in range(t):\n n, m = map(int, input().split())\n solve(n, m)\n\n\n# main()\nprint(convertToBase2(10))\nprint(convertToBase2_v2(10))\n","repo_name":"Mtinkering/i-dont-know-algo","sub_path":"big-o/22-bit-manipulation/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36823573016","text":" #http://www.apache.org/licenses/LICENSE-2.0\n\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n\nimport sys\nfrom pylab import *\nfrom scipy.io import wavfile\nfrom os import listdir\nfrom os.path import isfile, join\n\nfolder = sys.argv[1]\nfiles = [ f for f in listdir(folder) if isfile(join(folder, f)) ]\nfnum = len(files)\ncounter = 0\nfor f in files:\n sampFreq, signal = wavfile.read(folder + f)\n samples = signal.shape[0]\n\n timearray = arange(0, samples*1.0, 1)\n timearray /= sampFreq\n timearray *= 1000.0\n subplot(fnum, 1, counter)\n plot(timearray, signal, color = 'k')\n xlabel(f)\n counter += 
1\n\nshow()\n\n","repo_name":"rapp-project/rapp-platform","sub_path":"rapp_speech_detection_sphinx4/src/rapp_speech_detection_sphinx4/tools/audio_plotter.py","file_name":"audio_plotter.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"67"}
{"seq_id":"74536010132","text":"import cv2\nimport os\nimport imutils\nimport RPi.GPIO as GPIO\nfrom RPLCD.gpio import CharLCD\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nlcd = CharLCD(cols=16, rows=2, pin_rs=37, pin_e=35, pins_data=[40, 38, 36, 32],numbering_mode=GPIO.BOARD)\nGPIO.setup(8, GPIO.OUT, initial=GPIO.LOW)\n\ndataPath = '/home/pi/Facial recognition lock and web streaming monitoring with Raspberry pi 4/Data' #Change this to the path where you stored Data\nimagePaths = os.listdir(dataPath)\nprint('imagePaths=',imagePaths)\n\nface_recognizer = cv2.face.LBPHFaceRecognizer_create()\n\n# Loading the model\nface_recognizer.read('modeloLBPHFace.xml')\n\ncap = cv2.VideoCapture(0)\n\nfaceClassif = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')\n\nwhile True:\n\t#lcd.clear()\n\t#lcd.write_string(u'Cerrado')\t\n\tret,frame = cap.read()\n\tframe = imutils.resize(frame, width=640)\n\tif ret == False: break\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tauxFrame = gray.copy()\n\n\tfaces = faceClassif.detectMultiScale(gray,1.3,5)\n\n\tfor (x,y,w,h) in faces:\n\t\trostro = auxFrame[y:y+h,x:x+w]\n\t\trostro = cv2.resize(rostro,(150,150),interpolation= cv2.INTER_CUBIC)\n\t\tresult = face_recognizer.predict(rostro)\n\n\t\tcv2.putText(frame,'{}'.format(result),(x,y-5),1,1.3,(255,255,0),1,cv2.LINE_AA)\n\n\t\t# LBPHFace\n\t\tif result[1] < 70:\n\t\t\tcv2.putText(frame,'{}'.format(imagePaths[result[0]]),(x,y-25),2,1.1,(0,255,0),1,cv2.LINE_AA)\n\t\t\tcv2.rectangle(frame, (x,y),(x+w,y+h),(0,255,0),2)\n\t\t\ta=1\n\t\telse:\n\t\t\tcv2.putText(frame,'Unknown',(x,y-20),2,0.8,(0,0,255),1,cv2.LINE_AA)\n\t\t\tcv2.rectangle(frame, (x,y),(x+w,y+h),(0,0,255),2)\n\t\t\ta=0\n\n\t\tif a==1:\n\t\t\tlcd.clear()\n\t\t\tlcd.write_string(u'Open')\n\n\n\t\telse:\n\t\t\tlcd.clear()\n\t\t\tlcd.write_string(u'CLOSE')\n\n\n\t\tif a==1:\n\t\t\tGPIO.output(8, GPIO.LOW)\n\n\t\telse:\n\t\t\tGPIO.output(8, GPIO.HIGH)\n\n\n\t\n\tcv2.imshow('frame',frame)\n\tk = cv2.waitKey(1)\n\tif k == 27:\n\t\tbreak\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"WillyMH-Projects/Facial-recognition-lock-and-web-streaming-monitoring-Raspberry-pi-4","sub_path":"reconocimiento.py","file_name":"reconocimiento.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"25831635915","text":"import scipy.stats\nimport numpy as np\nfrom util import findall, custom_pearsonr, compute_affine_transformation\nimport pandas as pd\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import norm\n\ndef generate_barcode_replicates_pairs(study, sample):\n    data = study.get_df(sample=sample, section = 'ROI')[['sequence', 'reference']]\n    replicates = {}\n    for _, g in data.groupby('sequence'):\n        if len(g) > 1:\n            for _, row in g.iterrows():\n                replicates[row['reference']] = []\n                for _, row2 in g.iterrows():\n                    if row2['reference'] != row['reference']:\n                        replicates[row['reference']].append(row2['reference'])\n    return replicates\n\n\ndef compute_pearson_scores(study, sample, replicates_lists, sections):\n    scores = []\n    data = study.get_df(sample=sample, section = sections, base_type=['A','C'], 
index_selected=True)[['sequence','reference','sub_rate','index_selected']]\n\n # make sure that each reference has the same number of sections\n for c, g in data.groupby('reference'):\n if len(g) != len(sections) and c in replicates_lists:\n replicates_lists.pop(c)\n \n df = data.groupby('reference').agg({'sub_rate': lambda x: x}).reset_index()\n df['sub_rate'] = df['sub_rate'].apply(lambda x:np.concatenate(x))\n \n for reference, replicates in replicates_lists.items():\n scores_reference = []\n x = df[df['reference'] == reference]['sub_rate'].iloc[0]\n for replicate in replicates:\n if replicate not in replicates_lists.keys():\n continue\n y = df[df['reference'] == replicate]['sub_rate'].iloc[0]\n scores_reference.append(custom_pearsonr(x, y))\n scores.append(np.mean(scores_reference))\n\n sorted_idx = np.argsort(scores)[::-1]\n # return dict with reference names and scores as two sorted lists\n return {'references': [list(replicates_lists.keys())[i] for i in sorted_idx], 'scores': [scores[i] for i in sorted_idx]}\n\n\ndef find_frame_shift_ROI(study):\n \n if 'frame_shift_ROI' in study.df.columns:\n study.df.drop('frame_shift_ROI', axis=1, inplace=True)\n \n df = study.df[(study.df['section'] == 'ROI')][['sample','sequence', 'reference', 'family','section']].reset_index(drop=True)\n\n for _, g in df.groupby(['sample','family']):\n g.sort_values('sequence', key=lambda x: x.str.len(), inplace=True, ascending=False)\n reference = g['sequence'].iloc[0]\n for idx, row in g.iterrows():\n # assert sequence is unique in reference\n subsequence_matches = list(findall(row['sequence'], reference))\n most_likely_match = [-abs(len(reference)/2 - (m+len(row['sequence'])/2)) for m in subsequence_matches]\n assert len(most_likely_match) > 0, 'Sequence {} not found in reference {}'.format(row['sequence'], reference)\n df.loc[idx, 'frame_shift_ROI'] = subsequence_matches[most_likely_match.index(max(most_likely_match))]\n\n for _, g in df.groupby('family'):\n g.sort_values('sequence', key=lambda x: x.str.len(), inplace=True, ascending=False)\n assert g['frame_shift_ROI'].iloc[0] == 0, 'Frame shift is not 0 for reference sequence'\n\n df = study.df.merge(df[['sample','reference','section','sequence','frame_shift_ROI']], on=['sample','reference','section','sequence'], how='left')\n return df\n\n\ndef select_data_for_kfold(study, sample, family, stride='turner'):\n \n data = study.get_df(sample=sample, family=family, section='ROI', index_selected=True) #TODO remove unpaired bases\n\n data['deltaG'] = data['deltaG'].apply(lambda x: 0 if x == 'void' else float(x))\n\n assert len(data)>0, 'No data for sample {} and family {}'.format(sample, family)\n\n # turn it into a dataframe\n df = pd.DataFrame(\n columns= [base + str(idx+1) for base, idx in zip(data['sequence'].iloc[0], data['index_selected'].iloc[0])],\n data = [int(offset)*[np.nan] + list(mr) for offset, mr in zip(data['frame_shift_ROI'], data['sub_rate'])],\n index= data['deltaG'].values\n )\n\n # Only keep the paired bases\n paired = [c in ['(',')'] for c in data['structure'].iloc[0]]\n df = df.loc[:,paired]\n \n # only keep the A and C bases\n idx_AC = [col[0] in ['A','C'] for col in df.columns]\n df = df.loc[:,idx_AC]\n\n # remove the bases that do not have a value for deltaG == 0.0\n try:\n df = df.loc[:,df.loc[0.0].notna().sum() > 0]\n except KeyError:\n print('No data for deltaG == 0.0 for sample {} and family {}'.format(sample, family))\n return pd.DataFrame({'Kfold':[]})\n \n \n # Change the index to be linear if needed\n if stride == 'child#': \n 
df = df.reset_index().rename(columns={'index':'deltaG'})\n for idx, (dG, row) in enumerate(df.groupby('deltaG')):\n df.loc[row.index, 'child#'] = idx\n \n df.set_index('child#', inplace=True)\n\n return df\n\ndef compute_k_fold_fit(study, sample, family, stride='turner'):\n \"\"\"Compute the K-fold fit for a given sample and family\n \n Parameters\n ----------\n study : Study\n Study object\n sample : str\n Sample name\n family : str\n Family name\n stride : str, optional\n Stride to use for the fit, by default 'turner'. Can be 'turner' or 'child#'.\n \n \"\"\"\n\n df = select_data_for_kfold(study, sample, family, stride=stride) \n\n # Function to fit\n def sigmoid(x, a, b, c):\n RT = 1.987204258*310/1000\n return a / (1 + b*np.exp(-x/RT)) + c\n \n # Reverse sigmoid function\n def rev_sigmoid(y, a, b, c):\n RT = 1.987204258*310/1000\n return -RT*np.log((a-y+c)/((y-c)*b))\n\n # Output values \n base_Kfold = {}\n\n for base, mut_rate in df.iteritems():\n \n if base == 'deltaG':\n continue\n \n x_data = df.index[~np.isnan(mut_rate)].values\n mut_rate = mut_rate[~np.isnan(mut_rate)].values\n\n if len(mut_rate) >= 3: # at least 3 points to fit the sigmoid\n \n # Fit the sigmoid\n popt, pcov = curve_fit(sigmoid, x_data, mut_rate, p0=[0.04, 0.02, 0.00], bounds=([0, 0, 0], [0.1, np.inf, 0.05]), max_nfev=1000)\n \n # Compute the sigmoid midpoint \n LARGE_VALUE = 100 \n midpoint_y = np.mean([sigmoid(LARGE_VALUE, *popt), sigmoid(-LARGE_VALUE, *popt)]) \n midpoint_y = min(max(midpoint_y, min(mut_rate)), max(mut_rate))\n \n midpoint_x = max(min(max(df.index),rev_sigmoid(midpoint_y, *popt)), min(df.index))\n \n # Store the results \n base_Kfold[base] = {'avg_mr': midpoint_y, 'Kfold': midpoint_x}\n \n\n df = pd.DataFrame(base_Kfold).T\n\n # make a gaussian fit to the data\n df['norm'] = norm.pdf(df['Kfold'], np.mean(df['Kfold']), np.std(df['Kfold']))\n \n return df\n \n \ndef compute_quality_score_reference_vs_family(study, sample, family, metric='pearson'):\n \n # Get the data\n data = study.get_df(sample=sample, family=family, section='ROI')\n \n def align_shifted_vectors(v1, s1, v2, s2):\n \"\"\"Align two vectors with a given shift\"\"\"\n if s1 < s2:\n s2, s1 = s1, s2\n shift = s1 - s2\n assert len(v1) >= len(v2) + shift, 'Vectors cannot be aligned'\n return v1[shift:len(v2)+shift], v2\n \n\ndef compute_affine_transformation_for_replicates(study, samples):\n \"\"\"Compare the mutation rates distribution, align them and compute the affine transformation\n \n Parameters\n ----------\n study : Study\n Study object\n samples : list\n List of sample names. 
First sample is used as reference.\n \"\"\"\n df = study.get_df(sample=samples, section='ROI', base_type=['A','C'])\n \n # remove the reference that are not in all samples\n df = df.groupby('reference').filter(lambda x: len(x['sample'].unique()) == len(samples))\n \n data = {}\n for sample in samples:\n data[sample] = np.concatenate(df[df['sample']==sample]['sub_rate'].values).reshape(-1, 1)\n \n # Compute the linear transformation with the first sample as reference\n ref = data[samples[0]]\n lin_trans = {}\n for sample in samples[1:]:\n lin_trans[sample] = compute_affine_transformation(data[sample], ref)\n \n return lin_trans\n \n ","repo_name":"yvesmartindestaillades/highthroughputcellularbiology","sub_path":"src/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71887226775","text":"import torch\n\ndef od_collate_fn(batch):\n \"\"\"\n fetch (n, 5)\n n: the number of objects\n \"\"\"\n\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0]) # sample[0] is img\n targets.append(torch.FloatTensor(sample[1])) # sample[1] is annotation\n #sample[0]:[C][H][W], imgs[img, img, img, ...]\n #[torch.Size([3, 300, 300]), torch.Size([3, 300, 300]), ...] => torch.Size([batch_num, 3, 300, 300])\n imgs = torch.stack(imgs, dim=0)\n\n #targes:[n, 5], n:the number of objects\n #[xmin, ymin, xmax, ymax, class_idex]\n return imgs, targets\n\n","repo_name":"IsHYuhi/SSD","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41905061106","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndef replace_underscore(str):\n return str.replace('_', ' ')\n\n# Read dataset\ngini = pd.read_csv('../data/external/coeficiente_gini.csv', converters={'ENTIDAD': replace_underscore})\n\n# Sum and average\ngini['GINI'] = gini.sum(axis=1)/3\n\n# Drop YEARS\ngini = gini.drop(['2010', '2012', '2014'], axis=1)\n# gini = gini.drop(gini.columns[0], axis=1)\n\n# Sort\ngini = gini.sort_values(by=['GINI'])\n\nprint(gini)\n\n# Print dataframe to csv\ngini.to_csv(\"../data/subagencias_modified_geopos_maxTemp_pib_gini.csv\")\n\nplt.plot(gini, linewidth=2, linestyle=':', label='GINI')\n\nplt.title(\"Coeficiente GINI\", fontsize=14, fontstyle='italic', fontweight='bold',)\nplt.ylabel('GINI')\nplt.rcParams['axes.labelsize'] = 10\nplt.rcParams['axes.labelweight'] = 'bold'\nplt.rcParams['axes.titlesize'] = 10\nplt.rcParams['xtick.labelsize'] = 8\nplt.rcParams['ytick.labelsize'] = 8\nplt.rcParams['legend.fontsize'] = 10\nplt.rcParams['figure.titlesize'] = 12\nplt.xticks(rotation=90)\nplt.show()\n","repo_name":"VR3/databrewingcup","sub_path":"code/python/inequalityEstimator.py","file_name":"inequalityEstimator.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19823421451","text":"import af\n\n\ndef submit():\n input_rops = [i[0] for i in hou.pwd().inputDependencies()]\n rop_list = []\n job_ids = []\n cmd = af.Cmd()\n\n for rop in input_rops:\n rop.parm('af_offline_job').set(1)\n rop.parm('af_ubmit').pressButton()\n rop.parm('af_job_info_pretty').eval()\n rop_list.append(rop.evalParm('af_job_info_pretty'))\n rop.parm('af_offline_job').set(0)\n\n for i in range(len(rop_list)):\n cmd.data['mask'] = 
rop_list[i]\n        job = cmd.getJobList()\n        id = job[0]['id']\n        job_ids.append(id)\n        if i > 0:\n            cmd.action = 'action'\n            cmd.data['type'] = 'jobs'\n            cmd.data['mask'] = rop_list[i]\n            cmd.data['params'] = {'depend_mask': rop_list[i-1]}\n            cmd._sendRequest()\n\n    job_ids.reverse()\n    for id in job_ids:\n        cmd.setJobState(id, 'start')\n","repo_name":"weishc/work-stuff","sub_path":"Houdini/afanasy/af_set_depend.py","file_name":"af_set_depend.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"6473443649","text":"import websockets\nimport asyncio\nimport os\nimport json\n\n\nasync def call_websocket_server():\n    uri = \"ws://127.0.0.1:8000/api/getdata/\"\n    try:\n        async with websockets.connect(uri) as ws:\n            print('STARTED WEBSOCKET'.center(os.get_terminal_size().columns, '-'))\n\n            val = \"GET\"\n            await ws.send(json.dumps({'value': val}))\n            print(f\">>>> {val}\")\n\n            while True:\n                mcx_data = await ws.recv()\n                print(f\"<<<< {mcx_data}\")\n    except Exception as e:\n        print(e)\n\n\nasyncio.get_event_loop().run_until_complete(call_websocket_server())\n","repo_name":"jaybamania/JewellryApp_Backend","sub_path":"websock_call.py","file_name":"websock_call.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1359401328","text":"import json\n\nrfile = \"/Users/rhp/Documents/SBU/3Data Science/Course Project/asin_price.json\"\nr2file = \"/Users/rhp/Documents/SBU/3Data Science/Course Project/review_order.json\"\nwfile = \"/Users/rhp/Documents/SBU/3Data Science/Course Project/reviewerid_prices.json\"\n\nr = open(rfile, 'r')\nprice_dict = {}\n\nline = r.readline()\n\nwhile line:\n    l = json.loads(line)\n    if \"price\" in l:\n        price_dict[l[\"asin\"]] = l[\"price\"]\n    else:\n        price_dict[l[\"asin\"]] = 0\n    line = r.readline()\n\nr.close()\n\nr = open(r2file, 'r')\nw = open(wfile, 'w')\npl = []\nline = r.readline()\n# prev = json.loads(line)[\"reviewerID\"]\n# pl.append(price_dict[json.loads(line)[\"asin\"]])\n# wl = {}\n# wl[prev] = pl\nprev = \"\"\nwl = {}\nwlines = \"\"\ncount = 50\nwhile line:\n    l = json.loads(line)\n    curr = l[\"reviewerID\"]\n\n    if len(prev) == 0:\n        prev = curr\n\n    if prev != curr:\n        wl[prev] = pl\n        pl = []\n        pl.append(price_dict[l[\"asin\"]])\n        wlines = json.dumps(wl)\n        wlines += \"\\n\"\n        w.writelines(wlines)\n        wl = {}\n    else:\n        pl.append(price_dict[l[\"asin\"]])\n\n    prev = curr\n    # if not count:\n    #     break\n    line = r.readline()\n\n#print wl\nr.close()\nw.close()\n","repo_name":"arvindram03/amazon-dashboard","sub_path":"py_scripts/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"30236016017","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSet the common figure styles for papers.\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\n\n# ==================================================================== #\n# for MNRAS\n# ==================================================================== #\nmpl.rcParams['ps.fonttype'] = 42\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport seaborn as sns\nimport coloripy as cp\n\n\n# to change tex to Times New Roman in mpl\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = 'Times New Roman'\nplt.rcParams['mathtext.rm'] = 'serif'\nplt.rcParams['mathtext.it'] = 
'serif:italic'\nplt.rcParams['mathtext.bf'] = 'serif:bold'\nplt.rcParams['mathtext.fontset'] = 'custom'\n\n\ndef set_fontscale(font_scale=1):\n sns.set(style='ticks', context=None,\n font='Times New Roman',\n rc={#\"text.usetex\": True,\n #\"font.family\": 'serif',\n #\"font.serif\": 'Times New Roman',\n #\"mathtext.rm\": 'serif',\n #\"mathtext.it\": 'serif:italic',\n #\"mathtext.bf\": 'serif:bold',\n #\"mathtext.fontset\": 'custom',\n \"xtick.direction\": \"in\",\n \"ytick.direction\": \"in\",\n \"axes.linewidth\": 0.5*font_scale,\n \"axes.labelsize\": 9*font_scale,\n \"font.size\": 9*font_scale,\n \"axes.titlesize\": 9*font_scale,\n \"legend.fontsize\": 8*font_scale,\n \"xtick.labelsize\": 8*font_scale,\n \"ytick.labelsize\": 8*font_scale,\n })\n\n #plt.style.use('ticks')\n #width = 345\n\n \"\"\"nice_fonts = {\n # Use LaTeX to write all text\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n # Use 9pt font in plots, to match 9pt font in document\n \"axes.labelsize\": 9*font_scale,\n \"font.size\": 9*font_scale,\n #\"title.fontsize\": 9 * font_scale,\n # Make the legend/label fonts a little smaller\n \"legend.fontsize\": 8*font_scale,\n \"xtick.labelsize\": 8*font_scale,\n \"ytick.labelsize\": 8*font_scale,\n }\n\n mpl.rcParams.update(nice_fonts)\"\"\"\n\n#set_fontscale(1)\n\n\ncmap = sns.cubehelix_palette(start=0.5, rot=-1.5, gamma=1, hue=1, light=0.,\n dark=1., reverse=False, as_cmap=True)\nmsh_cmap = cp.get_msh_cmap(num_bins=501, rescale='power', power=2.5)\nmsh_cmap2 = cp.get_msh_cmap(rgb1=np.array([0.085, 0.532, 0.201])*256,\n rgb2=np.array([0.436, 0.308, 0.631])*256,\n ref_point=(160, 160, 160),\n num_bins=501, rescale='power', power=1.5)\n\n\n# ==================================================================== #\n# color definitions\n# ==================================================================== #\n\n# from cb2\nemerald = sns.xkcd_rgb['emerald']\norange = sns.xkcd_rgb['bright orange']\npurple = sns.xkcd_rgb['light purple']\n\n# from cb2\ncb2_emerald = '#66c2a5'\ncb2_orange = '#fc8d62'\ncb2_blue = '#8da0cb'\n\n# from cb2 bright\ncb_red = '#e41a1c'\ncb_blue = '#377eb8'\ncb_green = '#4daf4a'\ncb_purple = '#984ea3'\ncb_orange = '#ff7f00'\ncb_grey = '#404040'\n\n# seaborn deep palette\ndeep = sns.color_palette(palette='deep')\n\ndeep_blue = colors.rgb2hex(deep[0])\ndeep_orange = colors.rgb2hex(deep[1])\ndeep_green = colors.rgb2hex(deep[2])\ndeep_red = colors.rgb2hex(deep[3])\n\n# ==================================================================== #\n# convenience functions\n# ==================================================================== #\n\n# journal sizes\nmnras_colwidth = 240. # pt\nmnras_textwidth = 504. # pt\nmnras_text_fontsize = 9. #pt\nmnras_figcaption_fontsize = 8. #pt\ngolden_ratio = (5**.5 - 1) / 2\n# use this in latex to get text and column width\n# \\showthe\\textwidth\n# \\showthe\\columnwidth\n\n# use this in latex to get size\n# \\usepackage[T1]{fontenc}\n# \\newcommand\\thefont{\\expandafter\\string\\the\\font}\n# \\thefont\n\n\ndef get_fig_size(width, height_ratio=None, fraction=1):\n \"\"\"\n Set aesthetic figure dimensions to avoid scaling in latex. 
Function from\n    https://jwalton.info/Embed-Publication-Matplotlib-Latex/.\n\n    :param width: width in pts\n    :type width: float\n    :param height_ratio: fraction of width to set for the height\n    :type height_ratio: float\n    :param fraction: fraction of the width for the figure to occupy\n    :type fraction: float\n    :return: dimensions of figure in inches\n    :rtype: tuple\n    \"\"\"\n    # Width of figure\n    fig_width_pt = width * fraction\n\n    # Convert from pt to inches\n    inches_per_pt = 1. / 72.2699 # 72.27 in original ??\n\n    if height_ratio is None:\n        # Golden ratio to set aesthetic figure height\n        height_ratio = golden_ratio\n\n    # Figure width in inches\n    fig_width_in = fig_width_pt * inches_per_pt\n    # Figure height in inches\n    fig_height_in = fig_width_in * height_ratio\n\n    fig_dim = (fig_width_in, fig_height_in)\n\n    return fig_dim\n","repo_name":"ajshajib/paperfig","sub_path":"paperfig/paperfig.py","file_name":"paperfig.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"13676462037","text":"class Solution:\n    def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n        nums.sort()\n        used = set()\n        results = []\n        self.dfs(nums, used, [], results)\n        return results\n    \n    def dfs(self, nums, used, path, results):\n        if len(path) == len(nums):\n            results.append(path[:])\n            return\n        for i in range(len(nums)):\n            if i in used:\n                continue\n            if i > 0 and nums[i] == nums[i - 1] and i-1 not in used:\n                continue\n            path.append(nums[i])\n            used.add(i)\n            self.dfs(nums, used, path, results)\n            used.remove(i)\n            path.pop()","repo_name":"yuansun86/leetcode","sub_path":"Code/47. Permutations II.py","file_name":"47. Permutations II.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"14164871807","text":"def solution(skill, skill_trees):\n    answer = 0\n    for i in skill_trees:\n        skill_index=[0 for v in range(len(skill))]\n        answer = answer + skilltree(i,skill,skill_index)\n    return answer\n\ndef skilltree(skill_tree_list,skill,skill_index):\n    for j in skill_tree_list:\n        numnum = search(skill,j)\n        if(numnum<28):\n            if(numnum!=0):\n                if(skill_index[numnum-1]==0):\n                    return 0\n                else:\n                    skill_index[numnum] = 1\n            else:\n                skill_index[0]= 1\n    return 1\n\ndef search(skill,letter):\n    num = len(skill)\n    for i in range(num):\n        if(letter==skill[i]):\n            return i\n    return 28  # sentinel meaning the letter is not part of the required skill order\n\n\n\n","repo_name":"NohYeaJin/programmers_problem","sub_path":"programmers_skilltree.py","file_name":"programmers_skilltree.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"25701942944","text":"# coding: utf-8\n''' Find two numbers in the list nums that add up to target '''\n\n\ndef two_sum(nums, target):\n    # 5383 ms\n    found = False\n    for i in range(len(nums)):\n        for j in range(i+1, len(nums)):\n            if nums[i] + nums[j] == target:\n                found = True\n                break\n        if found:\n            break\n    if found:\n        return [i, j]\n\n\ndef two_sum_2(nums, target):\n    # 1245 ms\n    for i in range(len(nums)):\n        val = nums[i]\n        res = target - val\n        try:\n            i_re = nums[i+1:].index(res) + i + 1\n            return [i, i_re]\n        except ValueError:\n            pass\n\n\ndef two_sum_3(nums, target):\n    # 36 ms\n    indices = {v: i for i, v in enumerate(nums)}\n    for i, v in enumerate(nums):\n        res = target - v\n        if res in indices:\n            if indices[res] != i:\n                return [i, indices[res]]\n\n\ndef two_sum_5(nums, target):\n    # any further performance gain is negligible\n    indices = {}\n    for i, v in 
enumerate(nums):\n        if target - v in indices:\n            return i, indices[target - v]\n        indices[v] = i\n","repo_name":"fangnahz/learning-python","sub_path":"notes/two_elements_sums_to_target.py","file_name":"two_elements_sums_to_target.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"11099700202","text":"# ### Event objects\nimport random\nimport time\nfrom multiprocessing import Process, Event\n\n'''\n# Blocking with events\n\te = Event() creates an event object e\n\te.wait() adds a dynamic block to the program; whether it actually blocks depends entirely on the object's is_set(), which returns False by default\n\t\n\tif it is True, no block is added\n\tif it is False, a block is added\n\t\nControlling the flag value\n\tset() sets the flag to True\n\tclear() sets the flag to False\n\tis_set() returns whether the flag is currently True or False\n\t\n'''\n\"\"\"\n# basic syntax\ne = Event()\nprint(e.is_set())\n# e.wait()\n# block for at most 1 second\ne.wait(1)\nprint(1)\n\"\"\"\n\n\"\"\"\ne = Event()\n# set the internal flag to True\ne.set()\ne.wait()\nprint(111)\n# set the internal flag to False\ne.clear()\ne.wait()\nprint(222)\n\"\"\"\n# simulate a traffic light\ndef traffic_light(e):\n\t# the red light is on first by default\n\tprint('red light on')\n\twhile True:\n\t\tif e.is_set():\n\t\t\t# currently green, wait 1 second\n\t\t\ttime.sleep(1)\n\t\t\tprint('red light on')\n\t\t\te.clear()\n\n\t\telse:\n\t\t\t# currently red\n\t\t\ttime.sleep(1)\n\t\t\t# after waiting 1 second, switch to green\n\t\t\tprint('green light on')\n\t\t\te.set()\n\n# e = Event()\n# traffic_light(e)\n\n# simulate cars stopping at a red light and going on green\ndef car(e,i):\n\t# e.is_set() returns False by default, which stands for a red light\n\tif not e.is_set():\n\t\tprint('car %s is waiting' % i)\n\t\te.wait()\n\tprint('car %s passed' % i)\n\n'''\nif __name__ == '__main__':\n\te = Event()\n\t# start the simulated traffic light\n\tp1 = Process(target=traffic_light, args=(e,))\n\tp1.daemon = True\n\tp1.start()\n\n\t# simulate 20 cars\n\tfor i in range(20):\n\t\ttime.sleep(random.uniform(0,1))\n\t\tp2 = Process(target=car, args=(e, i))\n\t\tp2.start()\n\n\tprint('program finished completely')\n'''\n# improved version: wait until all the cars have finished before letting the program terminate\nif __name__ == '__main__':\n\tlst = []\n\te = Event()\n\t# start the simulated traffic light\n\tp1 = Process(target=traffic_light, args=(e,))\n\t# make the traffic light a daemon process: once the cars are done, the light is terminated too;\n\tp1.daemon = True\n\tp1.start()\n\n\t# simulate 20 cars\n\tfor i in range(20):\n\t\t# cars are created too quickly, so add a small delay to make their behavior easier to see;\n\t\ttime.sleep(random.uniform(0,1))\n\t\tp2 = Process(target=car, args=(e, i))\n\t\tp2.start()\n\t\tlst.append(p2)\n\n\t# wait for all the cars to finish before stopping the traffic light;\n\tfor i in lst:\n\t\ti.join()\n\n\tprint('program finished completely')\n\n\n\n\n\n\n\n\n\n","repo_name":"Sam6006/python-learning","sub_path":"day10 网络编程/09.进程、线程、协程/06.event事件.py","file_name":"06.event事件.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71224868374","text":"import spacy\n\nnlp = spacy.blank(\"es\")\n\n# Process the text\ndoc = nlp(\"Me gustan las panteras negras y los leones.\")\n\n# Select the first token\nfirst_token = doc[0]\n\n# Print the token's text\nprint(first_token.text)\n\n# The output is \"Me\"\n \n","repo_name":"DaveSV/NLP_Avanzado_con_SpaCy_parte1","sub_path":"intro3.py","file_name":"intro3.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"74207619734","text":"if __name__ == '__main__':\n    t = int(input())\n    while t > 0:\n        a, b = [], []\n        n = int(input())\n        for _ in range(n):\n            x, y = map(float, input().split())\n            a.append(x)\n            b.append(y)\n        l, ans = [1] * n, 0\n        for i in range(n):\n            for j in range(0, i):\n                if a[j] < a[i] and b[j] > b[i]:\n                    l[i] = max(l[i], l[j] + 1)\n            ans = max(ans, l[i])\n        print(ans)\n        t -= 
1","repo_name":"HenryTran1604/PYTHON-PTIT","sub_path":"PY02078_TangGiam.py","file_name":"PY02078_TangGiam.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"10092841414","text":"from distutils.core import setup, Extension\n\nPIL_BUILD_DIR = '..'\nPIL_IMAGING_DIR = PIL_BUILD_DIR+'/libImaging'\n\ndefs = []\ntry:\n import numarray\n defs.append(('WITH_NUMARRAY',None))\nexcept ImportError:\n pass\n\nsane = Extension('_sane',\n include_dirs = [PIL_IMAGING_DIR],\n libraries = ['sane'],\n library_dirs = [PIL_IMAGING_DIR],\n define_macros = defs,\n sources = ['_sane.c'])\n\nsetup (name = 'pysane',\n version = '2.0',\n description = 'This is the pysane package',\n py_modules = ['sane'],\n ext_modules = [sane])\n","repo_name":"sslab-gatech/avpass","sub_path":"lib/Imaging-1.1.7/Sane/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":271,"dataset":"github-code","pt":"67"} +{"seq_id":"37128745517","text":"from socket import *\nimport os\nimport sys\nimport struct\nimport time\nimport select\nimport binascii\n\nICMP_ECHO_REQUEST = 8\nMAX_HOPS = 30\nTIMEOUT = 2.0\nTRIES = 2\n\n\ndef checksum(string):\n csum = 0\n countTo = (len(string) // 2) * 2\n count = 0\n\n while count < countTo:\n thisVal = (string[count + 1]) * 256 + (string[count])\n csum += thisVal\n csum &= 0xffffffff\n count += 2\n\n if countTo < len(string):\n csum += (string[len(string) - 1])\n csum &= 0xffffffff\n\n csum = (csum >> 16) + (csum & 0xffff)\n csum = csum + (csum >> 16)\n answer = ~csum\n answer = answer & 0xffff\n answer = answer >> 8 | (answer << 8 & 0xff00)\n return answer\n\n\ndef build_packet():\n ID = os.getpid() & 0xffff\n myChecksum = 0\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, myChecksum, ID, 1)\n data = struct.pack(\"d\", time.time())\n myChecksum = checksum(header + data)\n if sys.platform == 'darwin':\n myChecksum = htons(myChecksum) & 0xffff\n else:\n myChecksum = htons(myChecksum)\n\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, myChecksum, ID, 1)\n packet = header + data\n\n return packet\n\n\ndef get_route(hostname):\n tracelist1 = [] # This is your list to use when iterating through each trace\n tracelist2 = [] # This is your list to contain all traces\n print(\"Begin traceroute to \" + hostname + \"(\" + gethostbyname(hostname) + \")......\\n\")\n\n destAddr = gethostbyname(hostname)\n for ttl in range(1, MAX_HOPS):\n for tries in range(TRIES):\n timeLeft = TIMEOUT\n\n # Fill in start\n # Make a raw socket named mySocket\n icmp = getprotobyname(\"icmp\")\n try:\n mySocket = socket(AF_INET, SOCK_RAW, icmp)\n except error as msg:\n print(\"Error Creating Socket:\", msg)\n # Fill in end\n mySocket.setsockopt(IPPROTO_IP, IP_TTL, struct.pack('I', ttl))\n mySocket.settimeout(TIMEOUT)\n try:\n d = build_packet()\n mySocket.sendto(d, (hostname, 0))\n t = time.time()\n startedSelect = time.time()\n whatReady = select.select([mySocket], [], [], timeLeft)\n howLongInSelect = (time.time() - startedSelect)\n if whatReady[0] == []: # Timeout\n tracelist2.append([\"*\",\"*\",\"*\",\"Request timed out\"])\n recvPacket, addr = mySocket.recvfrom(1024)\n timeReceived = time.time()\n\n timeLeft = timeLeft - howLongInSelect\n if timeLeft <= 0:\n tracelist2.append([\"*\",\"*\",\"*\",\"Request timed out\"])\n except timeout:\n continue\n else:\n # Fill in start\n # Fetch the icmp type from the IP packet\n\n # get TTL\n ttl = 
recvPacket[8]\n # get ICMP info\n type, pongCode, pongChecksum, pongID, pongSequence = struct.unpack(\"bbHHh\", recvPacket[20:28])\n # get RTT in ms\n RTT = (timeReceived - struct.unpack(\"d\", recvPacket[28:36])[0]) * 1000\n\n # try to get hostname of each router in the path\n try:\n routerHostname = gethostbyaddr(addr[0])[0]\n except herror as emsg:\n routerHostname = \"(Could not look up name:\" + str(emsg) + \")\"\n\n # Fill in end\n if type == 11:\n bytes = struct.calcsize(\"d\")\n timeSent = struct.unpack(\"d\", recvPacket[28:28 + bytes])[0]\n x = [str(ttl), str((timeReceived - timeSent) * 1000), addr[0], routerHostname]\n tracelist2.append(x)\n\n elif type == 3:\n bytes = struct.calcsize(\"d\")\n timeSent = struct.unpack(\"d\", recvPacket[28:28 + bytes])[0]\n x = [str(ttl), str((timeReceived - timeSent) * 1000), addr[0], routerHostname]\n tracelist2.append(x)\n\n elif type == 0:\n bytes = struct.calcsize(\"d\")\n timeSent = struct.unpack(\"d\", recvPacket[28:28 + bytes])[0]\n x = [str(ttl), str((timeReceived - timeSent) * 1000), addr[0], routerHostname]\n tracelist2.append(x)\n #tracelist2.append(tracelist1)\n tracelist1 = []\n\n if destAddr == addr[0]:\n return tracelist2\n\n else:\n print(\"error\")\n #print(tracelist2)\n break\n finally:\n mySocket.close()\n\n\nif __name__ == '__main__':\n print(get_route(\"google.co.il\"))\n","repo_name":"ilanfink/nyu_cs_gy_6843","sub_path":"tracert/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33635936542","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model as lm\nfrom sklearn.metrics import mean_squared_error as mse, r2_score as r2s\n\narchivo = pd.read_excel('BI_Alumnos07.xlsx', sheet_name='Hoja1')\ndatax = archivo[['Altura']]\nx = np.array(datax)\ny = archivo['Peso'].values\n\nregL = lm.LinearRegression()\nregL.fit(x, y)\ny_pred = regL.predict(x)\n\nprint('Analisis de datos de BI_Alumnos07.xlsx')\nprint('Coeficiente de R: ', regL.coef_)\nprint('Termino independiente: ', regL.intercept_)\nprint('Error cuadrado medio: %.2f' % mse(y, y_pred))\nprint('Puntaje de varianza: %.2f' % r2s(y, y_pred))\n\npredPeso = regL.predict([[180]])\nprint('Prediccion de peso de alumno de 180cm: ', predPeso)\n","repo_name":"atsumi-2002/BI-7","sub_path":"ejemplo1.py","file_name":"ejemplo1.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43044421366","text":"import pytest\r\nimport sqlite3\r\nfrom project0 import project0\r\n\r\n\r\nurl = 'http://normanpd.normanok.gov/filebrowser_download/657/2020-02-27%20Daily%20Incident%20Summary.pdf'\r\n\r\ndef test_fetchIncidents():\r\n assert project0.fetchIncidents(url) is not None\r\n\r\ndef test_extractIncidents():\r\n data = project0.extractIncidents()\r\n for i in data:\r\n assert len(i) == 5\r\n\r\ndef test_createdb():\r\n databaseName = project0.createdb()\r\n assert databaseName == 'policeDept.db'\r\n\r\ndef test_dbInsert():\r\n db = project0.createdb()\r\n incidents = project0.extractIncidents()\r\n project0.dbInsert(db, incidents)\r\n dbase = sqlite3.connect(db)\r\n point = dbase.cursor()\r\n point.execute('select count(*) from incidents;')\r\n count = point.fetchone()\r\n assert count[0] == len(incidents)\r\n\r\ndef test_dbStatus():\r\n db = project0.createdb()\r\n incidents = project0.extractIncidents()\r\n 
project0.dbInsert(db, incidents)\r\n records = project0.dbStatus(db)\r\n assert records is not None\r\n\r\n","repo_name":"Rajashekar-Veer/Redactor-and-Un-Redactor","sub_path":"tests/test_project0.py","file_name":"test_project0.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23818283821","text":"N,M,T = (int(i) for i in input().split())\nA = [int(i) for i in input().split()]\nL = [[] for i in range(N+1)]\n\nfor i in range(M):\n a,b,c = (int(i) for i in input().split())\n L[a].append([b, c])\n L[b].append([a, c])\n\n# ダイクストラ法(O(N)=NlogN+M)\nimport heapq\nMAP = [float('inf')] * (N+1)\nPREV = [-1] * (N+1)\n\ndef djikstra(first):\n Q = [[0, first]]\n MAP[first] = 0\n heapq.heapify(Q)\n\n while Q:\n d, cur = heapq.heappop(Q)\n for nxt, dist in L[cur]:\n alt = MAP[cur] + dist\n if MAP[nxt] > alt:\n MAP[nxt] = alt\n PREV[nxt] = cur\n heapq.heappush(Q, [alt, nxt])\ndjikstra(1)\nprint(MAP)\n","repo_name":"tamama9018/atcoder","sub_path":"SPP/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24563997551","text":"#!/usr/bin/env python3\nimport itertools\nimport sys\n\n\ndef solve(N: int):\n sN = str(N)\n lN = len(sN)\n if lN == 1:\n print(N)\n return\n l1 = int(sN[0])\n l2 = int(sN[-1])\n d = {}\n for k1, k2 in itertools.product(range(1, 10), repeat=2):\n p = 10 ** (lN - 2) // 9\n if k1 == k2:\n p += 1\n if k1 < l1:\n p += 10 ** (lN - 2)\n elif k1 == l1:\n p += int(sN[1:-1] or \"0\")\n if k2 <= l2:\n p += 1\n d[k1, k2] = p\n\n ans = 0\n for k1, k2 in itertools.product(range(1, 10), repeat=2):\n ans += d[k1, k2] * d[k2, k1]\n print(ans)\n\n\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n solve(N)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kurgm/atcoder_submissions","sub_path":"abc152/D/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"991183305","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"Alpha\"\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"home\", views.home, name=\"home\"),\n path(\"about\", views.about, name=\"about\"),\n path(\"services\", views.services, name=\"services\"),\n path(\"vision\", views.vision, name=\"vision\"),\n path(\"contact\", views.contact, name=\"contact\"),\n\n\n]","repo_name":"asrar19/FINAL_PROJECT","sub_path":"Alpha_DC/Alpha/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33905471921","text":"import logging\nimport time\nimport copy\n\nimport torch\nfrom torch import nn\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef train(model, dataloaders, optimizer, num_epochs, weighted_loss, device):\n model = model.to(device)\n\n start_time = time.time()\n\n val_acc_history = []\n \n best_model_weights = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n if weighted_loss:\n weight = torch.tensor(dataloaders['training'].dataset.class_weights)\n weight = weight.to(device)\n else:\n weight = None\n loss_fct = nn.CrossEntropyLoss(weight=weight)\n\n for epoch in range(num_epochs):\n logger.info(f'Epoch {epoch+1}/{num_epochs}')\n\n for phase in ['training', 'validation']:\n if phase == 'training':\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n confusion_matrix = torch.zeros(len(dataloaders[phase].dataset.genre_to_idx),\n len(dataloaders[phase].dataset.genre_to_idx))\n\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase == 'training'):\n outputs = model(inputs)\n loss = loss_fct(outputs, labels)\n _, preds = torch.max(outputs, 1)\n\n if phase == 'training':\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n for l, p in zip(labels.view(-1), preds.view(-1)):\n confusion_matrix[l.long(), p.long()] += 1\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n logger.info(f'{phase} loss: {epoch_loss:.4f} acc: {epoch_acc*100:.2f}')\n for genre_name, genre_idx in dataloaders[phase].dataset.genre_to_idx.items():\n correct = confusion_matrix[genre_idx, genre_idx].long()\n total = confusion_matrix[genre_idx, :].sum().long()\n genre_acc = correct / total\n logger.info(f' {genre_name:<20}: {correct:<4} / {total:<4} ' +\n f'= {genre_acc*100:5.2f} acc. 
' +\n                            ', '.join(f'{other_name:<20} {confusion_matrix[genre_idx, other_idx]/total*100:5.2f}'\n                                      for other_name, other_idx\n                                      in sorted(dataloaders[phase].dataset.genre_to_idx.items(),\n                                                key=lambda x: confusion_matrix[genre_idx, x[1]].long(),\n                                                reverse=True)[:4]\n                                      if confusion_matrix[genre_idx, other_idx]/total > 0.05))\n\n            if phase == 'validation' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model_weights = copy.deepcopy(model.state_dict())\n\n            if phase == 'validation':\n                val_acc_history.append(epoch_acc)\n\n    time_elapsed = time.time() - start_time\n    logger.info(f'Training complete in {time_elapsed//60:.0f}m{time_elapsed%60:.0f}s')\n    logger.info(f'Best validation acc: {best_acc*100:.2f}')\n\n    model.load_state_dict(best_model_weights)\n\n    return model, val_acc_history\n","repo_name":"leod/aural-travels","sub_path":"aural_travels/train/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"71631142933","text":"__author__ = 'dudevil'\n\n\nimport pandas as pd\nimport numpy as np\nimport gc\nfrom sklearn.linear_model import LogisticRegression\nimport os\n\nn_samples = 10000\n\nX = pd.read_csv(\"./data/algomostchem_train.txt\", header=None, index_col=None, dtype=np.float64, nrows=n_samples)\n\nY = pd.read_csv(\"./data/algomostchem_trainY.txt\", header=None, index_col=None, names=['target'], nrows=n_samples)\nY = Y.values.flatten()\n\nlogreg = LogisticRegression()\nlogreg.fit(X, Y)\n\nprint(logreg.classes_)\nos._exit(0)  # os._exit() requires a status code; the code below never runs\n\ndel X\ndel Y\ngc.collect()\n\nX_test = pd.read_csv(\"./data/algomostchem_test.txt\", header=None, index_col=None, dtype=np.float64, nrows=n_samples)\nsubmission = logreg.predict_proba(X_test)\nnp.savetxt(\"20150113_dmlt_test.txt\", submission, delimiter=\",\")\n","repo_name":"dudevil/dm-chemistry","sub_path":"logistic_reg.py","file_name":"logistic_reg.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"42824581115","text":"from datetime import datetime as dt\nfrom datetime import timedelta\n\n# Lines of interest = A33 A41 // A31 C31A C31H C41A C41B\n\nlinhas = [\n    {\n        'linha': 'A33',\n        'nome': 'Novo Hospital',\n        'h_inicial_tc': [['05:20'], ['06:40'], ['06:40']],\n        'h_final_tc': [['22:40'], ['21:20'], ['21:20']],\n        'frequencia_tc': [['40min'], ['80min'], ['80min']],\n        'h_inicial': [['05:20'], ['06:00'], ['06:00']],\n        'h_final': [['22:40'], ['20:40'], ['20:40']],\n        'frequencia': [['40min'], ['80min'], ['80min']],\n        'dias': ['Seg à Sex', 'Sábado', 'Domingo']\n    },\n    {\n        'linha': 'A73R',\n        'nome': 'Engenho da Praia (Rápido)',\n        'h_inicial_tc': [['06:31', '21:05', '22:00'], ['06:30'], ['06:30']],\n        'h_final_tc': [['21:05', '22:00', '23:00'], ['20:00'], ['20:00']],\n        'frequencia_tc': [['46min', '55min', '60min'], ['90min'], ['90min']],\n        'h_inicial': [['05:45'], ['05:45'], ['05:45']],\n        'h_final': [['21:51'], ['19:15'], ['19:15']],\n        'frequencia': [['46min'], ['90min'], ['90min']],\n        'dias': ['Seg à Sex', 'Sábado', 'Domingo']\n    },\n    {\n        'linha': 'A73',\n        'nome': 'Engenho da Praia',\n        'h_inicial_tc': [['06:10', '06:48', '07:14', '19:30', '20:50', '21:40'], ['06:30'], ['06:30']],\n        'h_final_tc': [['06:48', '07:14', '19:30', '20:50', '21:40', '22:20'], ['20:00'], ['20:00']],\n        'frequencia_tc': [['38min', '26min', '32min', '40min', '50min', '40min'], ['90min'], ['90min']],\n        'h_inicial': [['05:22', '06:08', '06:26', '19:14', '20:00'], ['07:15'], 
['07:15']],\n 'h_final': [['06:08', '06:26', '19:14', '20:00', '21:20'], ['20:45'], ['20:45']],\n 'frequencia': [['46min', '18min', '32min', '46min', '40min'], ['90min'], ['90min']],\n 'dias': ['Seg à Sex', 'Sábado', 'Domingo']\n }\n]\n\n\ndef print_horarios(h_ini, freq, ciclos):\n for i in range(0, ciclos+1):\n print((h_ini + i*freq).strftime('%H:%M'), end='\\t')\n if ((i+1) % 4 == 0):\n print()\n print()\n\n# Calcular horários durante a semana\n\ndef seeyouspacecowboy(linha):\n print(f' Linha: {linha[\"nome\"]} ({linha[\"linha\"]})')\n for n in range(0, len(linha['dias'])):\n print()\n print('---'*15)\n print(f' {linha[\"dias\"][n]} ')\n print('---'*15)\n print(f'Saída {linha[\"nome\"]}:', end='\\n\\n')\n\n h_variaveis = len(linha['h_inicial'][n])\n for f in range(0, h_variaveis):\n h_ini = dt.strptime(linha['h_inicial'][n][f] + ':00', '%H:%M:%S')\n h_fim = dt.strptime(linha['h_final'][n][f] + ':00', '%H:%M:%S')\n\n tempo_circulando = (h_fim - h_ini)\n freq = timedelta(minutes=float(\n linha['frequencia'][n][f].replace('min', '')))\n\n ciclos = int(tempo_circulando.total_seconds() / freq.total_seconds())\n\n print_horarios(h_ini, freq, ciclos)\n\n print()\n print('---'*15,)\n\n if (linha['frequencia_tc']):\n h_variaveis = len(linha['h_inicial_tc'][n])\n # print(f'h_variaveis = {h_variaveis}')\n # print(f'lista = {linha[\"h_inicial_tc\"]}')\n # print(f'frequencias = {linha[\"frequencia_tc\"]}')\n print(f'Saída Terminal Central:', end='\\n\\n')\n for f in range(0,h_variaveis):\n h_ini = dt.strptime(linha['h_inicial_tc'][n][f] + ':00', '%H:%M:%S')\n h_fim = dt.strptime(linha['h_final_tc'][n][f] + ':00', '%H:%M:%S')\n tempo_circulando = (h_fim - h_ini)\n ciclos = int(tempo_circulando.total_seconds() / freq.total_seconds())\n freq = timedelta(minutes=float(linha['frequencia_tc'][n][f].replace('min', '')))\n print_horarios(h_ini, freq, ciclos)\n print()\n print('---'*15)\n print()\n\nfor l in linhas: seeyouspacecowboy(l)","repo_name":"emagioli/sit-macae","sub_path":"horarios.py","file_name":"horarios.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24010371412","text":"\"\"\" Manage beam search info structure.\n Heavily borrowed from OpenNMT-py.\n For code in OpenNMT-py, please check the following link:\n https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Beam.py\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom utils import config\nimport torch.nn.functional as F\n\nclass Beam():\n ''' Beam search '''\n\n def __init__(self, size, device=False):\n\n self.size = size\n self._done = False\n\n # The score for each translation on the beam.\n self.scores = torch.zeros((size,), dtype=torch.float, device=device)\n self.all_scores = []\n\n # The backpointers at each time-step.\n self.prev_ks = []\n\n # The outputs at each time-step.\n self.next_ys = [torch.full((size,), config.PAD_idx, dtype=torch.long, device=device)]\n self.next_ys[0][0] = config.SOS_idx\n\n def get_current_state(self):\n \"Get the outputs for the current timestep.\"\n return self.get_tentative_hypothesis()\n\n def get_current_origin(self):\n \"Get the backpointers for the current timestep.\"\n return self.prev_ks[-1]\n\n @property\n def done(self):\n return self._done\n\n def advance(self, word_prob):\n \"Update beam status and check if finished or not.\"\n num_words = word_prob.size(1)\n\n # Sum the previous scores.\n if len(self.prev_ks) > 0:\n beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)\n else:\n 
beam_lk = word_prob[0]\n\n        flat_beam_lk = beam_lk.view(-1)\n\n        best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True)\n\n        self.all_scores.append(self.scores)\n        self.scores = best_scores\n\n        # bestScoresId is flattened as a (beam x word) array,\n        # so we need to calculate which word and beam each score came from\n        prev_k = best_scores_id // num_words  # floor division keeps the beam index an integer tensor\n        self.prev_ks.append(prev_k)\n        self.next_ys.append(best_scores_id - prev_k * num_words)\n\n        # End condition is when top-of-beam is EOS.\n        if self.next_ys[-1][0].item() == config.EOS_idx:\n            self._done = True\n            self.all_scores.append(self.scores)\n\n        return self._done\n\n    def sort_scores(self):\n        \"Sort the scores.\"\n        return torch.sort(self.scores, 0, True)\n\n    def get_the_best_score_and_idx(self):\n        \"Get the score of the best in the beam.\"\n        scores, ids = self.sort_scores()\n        return scores[1], ids[1]\n\n    def get_tentative_hypothesis(self):\n        \"Get the decoded sequence for the current timestep.\"\n\n        if len(self.next_ys) == 1:\n            dec_seq = self.next_ys[0].unsqueeze(1)\n        else:\n            _, keys = self.sort_scores()\n            hyps = [self.get_hypothesis(k) for k in keys]\n            hyps = [[config.SOS_idx] + h for h in hyps]\n            dec_seq = torch.LongTensor(hyps)\n\n        return dec_seq\n\n    def get_hypothesis(self, k):\n        \"\"\" Walk back to construct the full hypothesis. \"\"\"\n        hyp = []\n        for j in range(len(self.prev_ks) - 1, -1, -1):\n            hyp.append(self.next_ys[j+1][k])\n            k = self.prev_ks[j][k]\n\n        return list(map(lambda x: x.item(), hyp[::-1]))\n\n\nclass Translator(object):\n    ''' Load with trained model and handle the beam search '''\n    def __init__(self, model, lang):\n        \n        self.model = model\n        self.lang = lang\n        self.vocab_size = lang.n_words\n        self.beam_size = config.beam_size\n        self.device = torch.device('cuda' if config.USE_CUDA else 'cpu')\n\n\n    def translate_batch(self, src_seq):\n        ''' Translation work in one batch '''\n\n        def get_inst_idx_to_tensor_position_map(inst_idx_list):\n            ''' Indicate the position of an instance in a tensor. '''\n            return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}\n\n        def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):\n            ''' Collect tensor parts associated to active instances. 
'''\n\n _, *d_hs = beamed_tensor.size()\n n_curr_active_inst = len(curr_active_inst_idx)\n new_shape = (n_curr_active_inst * n_bm, *d_hs)\n\n beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)\n beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)\n beamed_tensor = beamed_tensor.view(*new_shape)\n \n return beamed_tensor\n\n def collate_active_info(src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):\n # Sentences which are still active are collected,\n # so the decoder will not run on completed sentences.\n n_prev_active_inst = len(inst_idx_to_position_map)\n active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]\n active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)\n\n active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)\n active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)\n active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)\n\n return active_src_seq, active_src_enc, active_inst_idx_to_position_map\n\n def beam_decode_step(inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm, enc_batch_extend_vocab, extra_zeros, mask_src):\n ''' Decode and update beam status, and then return active beam idx '''\n\n def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):\n dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]\n dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)\n dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)\n return dec_partial_seq\n\n def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):\n dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)\n dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)\n return dec_partial_pos\n \n def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm, enc_batch_extend_vocab, extra_zeros, mask_src):\n ## masking\n mask_trg = dec_seq.data.eq(config.PAD_idx).unsqueeze(1)\n mask_src = torch.cat([mask_src[0].unsqueeze(0)]*mask_trg.size(0),0)\n\n\n dec_output, attn_dist = self.model.decoder(self.model.embedding(dec_seq), enc_output, (mask_src,mask_trg))\n prob = self.model.generator(dec_output,attn_dist,enc_batch_extend_vocab, extra_zeros,1,True)\n word_prob = prob[:, -1]\n word_prob = word_prob.view(n_active_inst, n_bm, -1)\n return word_prob\n\n def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):\n active_inst_idx_list = []\n for inst_idx, inst_position in inst_idx_to_position_map.items():\n is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])\n if not is_inst_complete:\n active_inst_idx_list += [inst_idx]\n\n return active_inst_idx_list\n\n n_active_inst = len(inst_idx_to_position_map)\n\n dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)\n dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)\n word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm, enc_batch_extend_vocab, extra_zeros, mask_src)\n\n # Update the beam with predicted word prob information and collect incomplete instances\n active_inst_idx_list = collect_active_inst_idx_list(inst_dec_beams, word_prob, inst_idx_to_position_map)\n\n return active_inst_idx_list\n\n def collect_hypothesis_and_scores(inst_dec_beams, n_best):\n all_hyp, all_scores = [], []\n for inst_idx in range(len(inst_dec_beams)):\n scores, tail_idxs = 
inst_dec_beams[inst_idx].sort_scores()\n all_scores += [scores[:n_best]]\n\n hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]\n all_hyp += [hyps]\n return all_hyp, all_scores\n\n with torch.no_grad():\n #-- Encode\n enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = get_input_from_batch(src_seq)\n mask_src = enc_batch.data.eq(config.PAD_idx).unsqueeze(1)\n src_enc = self.model.encoder(self.model.embedding(enc_batch),mask_src)\n\n #-- Repeat data for beam search\n n_bm = self.beam_size\n n_inst, len_s, d_h = src_enc.size()\n src_seq = enc_batch.repeat(1, n_bm).view(n_inst * n_bm, len_s)\n src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)\n\n #-- Prepare beams\n inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]\n\n #-- Bookkeeping for active or not\n active_inst_idx_list = list(range(n_inst))\n inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)\n\n #-- Decode\n for len_dec_seq in range(1, config.max_dec_step + 1):\n\n active_inst_idx_list = beam_decode_step(inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm, enc_batch_extend_vocab, extra_zeros, mask_src)\n\n if not active_inst_idx_list:\n break # all instances have finished their path to \n\n src_seq, src_enc, inst_idx_to_position_map = collate_active_info(src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)\n\n batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, 1)\n\n return batch_hyp, batch_scores\n\n \n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_range_expand = seq_range_expand\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = (sequence_length.unsqueeze(1)\n .expand_as(seq_range_expand))\n return seq_range_expand < seq_length_expand\n \ndef get_input_from_batch(batch):\n enc_batch = batch[\"input_batch\"].transpose(0,1)\n enc_lens = batch[\"input_lengths\"]\n batch_size, max_enc_len = enc_batch.size()\n assert enc_lens.size(0) == batch_size\n\n enc_padding_mask = sequence_mask(enc_lens, max_len=max_enc_len).float()\n\n extra_zeros = None\n enc_batch_extend_vocab = None\n\n if config.pointer_gen:\n enc_batch_extend_vocab = batch[\"input_ext_vocab_batch\"].transpose(0,1)\n # max_art_oovs is the max over all the article oov list in the batch\n if batch[\"max_art_oovs\"] > 0:\n extra_zeros = torch.zeros((batch_size, batch[\"max_art_oovs\"]))\n\n c_t_1 = torch.zeros((batch_size, 2 * config.hidden_dim))\n\n coverage = None\n if config.is_coverage:\n coverage = torch.zeros(enc_batch.size())\n\n if config.USE_CUDA:\n if enc_batch_extend_vocab is not None:\n enc_batch_extend_vocab = enc_batch_extend_vocab.cuda()\n if extra_zeros is not None:\n extra_zeros = extra_zeros.cuda()\n c_t_1 = c_t_1.cuda()\n\n if coverage is not None:\n coverage = coverage.cuda()\n\n return enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage\n","repo_name":"HLTCHKUST/PAML","sub_path":"utils/beam_omt.py","file_name":"beam_omt.py","file_ext":"py","file_size_in_byte":11986,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"67"} +{"seq_id":"21782817306","text":"# Даны строки S, S1 и S2. 
Replace the first occurrence of string S1 in string S with string S2.\n\nS = input('Enter a string: ')\nS1 = input('Enter the substring to be replaced: ')\nS2 = input('Enter the replacement substring: ')\n\nprint(S.replace(S1, S2))\n\n# solution without using .replace()\n# S1_start_index = S.find(S1)\n# print(S[:S1_start_index] + S2 + S[S1_start_index + len(S1):])\n","repo_name":"Bigbear2006/Moroz","sub_path":"PZ_7/PZ_7_2.py","file_name":"PZ_7_2.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30294536968","text":"import requests\nfrom string import Template\n\napi_url = 'https://news-at.zhihu.com/api/4/news/'\n\nHEADERS = {\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36'\n '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n}\n\ndef fetch_news_list():\n req = requests.get(api_url + 'latest', headers=HEADERS).json()\n return req\n\ndef fetch_news(id_str):\n return requests.get(api_url + str(id_str), headers=HEADERS).json()\n\n\ndef gen_page(news):\n image = Template(\n '''\n

<div class=\"img-wrap\">\n<h1 class=\"headline-title\">$title</h1>\n<span class=\"img-source\">$image_source</span>\n<img src=\"$image\" alt=\"\">\n<div class=\"img-mask\"></div>\n</div>
\n ''').safe_substitute(\n title=news['title'],\n image_source=news['image_source'],\n image=news['image']\n )\n template = Template(\n '''\n <html>\n <head>\n\t\t<link rel=\"stylesheet\" type=\"text/css\" href=\"$cssurl\">\n </head>\n <body>\n <div class=\"content-wrap\">\n $body\n </div>\n </body>\n </html>\n ''')\n body = news['body']\n cssurl = news['css'][0]\n return template.safe_substitute(\n cssurl=cssurl,\n body=body,\n image=news['image'],\n image_source=news['image_source']).replace('<div class=\"img-place-holder\"></div>
', image)\n","repo_name":"litao91/pythonista-utils","sub_path":"Zhihu/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31761146422","text":"import logging\nfrom aichat import ChatGPT, ChatSpark\nfrom config import chat as chat_conf, display\nfrom plugin.tts import speak\nfrom logger import logger\n\nlogger.setLevel(logging.ERROR)\n\n\ndef create_gpt():\n gpt_conf = chat_conf['openai']['gpt']\n return ChatGPT(\n uid=\"test_gpt\",\n api_key=display(gpt_conf['api-key']),\n proxy=display(gpt_conf['proxy']),\n from_type=\"test\",\n enable_ins=True,\n )\n\n\ndef create_spark():\n spark_conf = chat_conf['iflytek']['spark']\n return ChatSpark(\n uid=\"test_spark\",\n app_id=display(spark_conf['app-id']),\n api_key=display(spark_conf['api-key']),\n api_secret=display(spark_conf['api-secret']),\n from_type=\"test\",\n enable_ins=False,\n )\n\n\ndef gpt_living(gpt: ChatGPT = None) -> ChatGPT:\n gpt = gpt or create_gpt()\n # 需要阿里云tts\n gpt.set_system(\"\"\"\n现在你是一个虚拟猫娘主播,你叫小白,可以适当在句子中或结尾加入\"喵~\",你可以选择按照以下格式要求回复用户#\n示例:\n\n我是小白,喵~\n\n标签可以包含文本\n用于在文本中插入停顿\nvoice可选zhimiao_emo(多情感、支持emotion标签)、chuangirl(四川话女声、不支持emotion)、aiwei(萝莉女声、不支持emotion)\nintensity是指定情绪强度。默认值为1.0,最小值为0.01。最大值为2.0。\n你可选择的情感:\nserious,sad,disgust,jealousy,embarrassed,happy,fear,surprise,neutral,frustrated,affectionate,gentle,angry,newscast,customer-service,story,living\n按照以上格式回复则用户收到语音(请保证有且仅有一个speak标签)\n\"\"\")\n gpt.enable_function = False\n return gpt\n\n\nif __name__ == \"__main__\":\n ai = gpt_living()\n # ai = create_gpt()\n # ai = create_spark()\n while True:\n lines = \"\"\n query = input(\"用户:\")\n if query == \"exit\":\n print(\"bye\")\n break\n print(\"AI:\", end=\"\")\n reply = ai.reply_stream(query)\n for x in reply:\n print(x, end=\"\")\n lines += x\n print(\"\\n\")\n # sudo apt install portaudio19-dev\n # pip install pyaudio\n # speak(lines)\n","repo_name":"jqllxew/ai-chat","sub_path":"chat_demo.py","file_name":"chat_demo.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"43646857689","text":"import os\nimport subprocess\nimport time\n\nimport pytest\nimport requests\n\nfrom intake.util_tests import ex, PY2\nhere = os.path.dirname(__file__)\n\n\nMIN_PORT = 7480\nMAX_PORT = 7489\nPORT = MIN_PORT\n\n\ndef ping_server(url, swallow_exception, head=None):\n try:\n r = requests.get(url)\n except Exception as e:\n if swallow_exception:\n return False\n else:\n raise e\n\n return r.status_code in (200, 403) # allow forbidden as well\n\n\ndef pick_port():\n global PORT\n port = PORT\n if port == MAX_PORT:\n PORT = MIN_PORT\n else:\n PORT += 1\n\n return port\n\n\n@pytest.fixture(scope=\"module\")\ndef intake_server(request):\n os.environ['INTAKE_DEBUG'] = 'true'\n # Catalog path comes from the test module\n path = request.module.TEST_CATALOG_PATH\n if isinstance(path, list):\n catalog_path = [p + '/*' for p in path]\n elif isinstance(path, str) and not path.endswith(\n '.yml') and not path.endswith('.yaml'):\n catalog_path = path + '/*'\n else:\n catalog_path = path\n server_conf = getattr(request.module, 'TEST_SERVER_CONF', None)\n\n # Start a catalog server on nonstandard port\n\n env = dict(os.environ)\n env['INTAKE_TEST'] = 'server'\n if server_conf is not None:\n env['INTAKE_CONF_FILE'] = server_conf\n port = pick_port()\n cmd = [ex, '-m', 'intake.cli.server', '--sys-exit-on-sigterm',\n 
'--port', str(port)]\n if isinstance(catalog_path, list):\n cmd.extend(catalog_path)\n else:\n cmd.append(catalog_path)\n try:\n p = subprocess.Popen(cmd, env=env)\n url = 'http://localhost:%d/v1/info' % port\n\n # wait for server to finish initializing, but let the exception through\n # on last retry\n retries = 30\n try:\n while not ping_server(url, swallow_exception=(retries > 1)):\n time.sleep(0.1)\n retries -= 1\n except Exception:\n print(p.communicate())\n raise\n\n yield 'intake://localhost:%d' % port\n finally:\n if server_conf:\n try:\n env.pop('INTAKE_CONF_FILE', None)\n os.remove(server_conf)\n except:\n pass\n p.terminate()\n p.wait()\n p.kill()\n p.wait()\n\n\n@pytest.fixture(scope='module')\ndef http_server():\n if PY2:\n cmd = ['python', '-m', 'SimpleHTTPServer', '8000']\n else:\n cmd = ['python', '-m', 'http.server', '8000']\n p = subprocess.Popen(cmd, cwd=here)\n timeout = 5\n while True:\n try:\n requests.get('http://localhost:8000/')\n break\n except:\n time.sleep(0.1)\n timeout -= 0.1\n assert timeout > 0, \"timeout waiting for http server\"\n try:\n yield 'http://localhost:8000/'\n finally:\n p.terminate()\n p.communicate()\n","repo_name":"only-cr/uploading","sub_path":"WPy-3670/python-3.6.7.amd64/Lib/site-packages/intake/catalog/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7884327055","text":"from time import time\nimport os\nimport boto3\nimport subprocess\n\n\nclass S3:\n def __init__(self, logger):\n self.logger = logger\n self.s3_client = boto3.client('s3')\n\n def download_files_in_shell(self, s3_path, local_path):\n cmd = f'aws s3 cp {s3_path} {local_path} --recursive'\n self.logger.info(f'downloading from s3. 
cmd: {cmd}')\n subprocess.run(cmd.split())\n\n def download_files(self, local_key, s3_bucket, s3_key=None, keys=None):\n start_time = time()\n\n if not (s3_key or keys):\n raise ValueError('either s3_key or keys must be specified')\n\n if s3_key:\n self.logger.info('listing files in: {bucket}/{key}'.format(bucket=s3_bucket, key=s3_key))\n keys = []\n next_token = ''\n base_kwargs = {'Bucket': s3_bucket, 'Prefix': s3_key}\n while next_token is not None:\n kwargs = base_kwargs.copy()\n if next_token != '':\n kwargs.update({'ContinuationToken': next_token})\n results = self.s3_client.list_objects_v2(**kwargs)\n contents = results.get('Contents')\n for i in contents:\n if i.get('Size') > 0:\n k = i.get('Key')\n keys.append(k)\n next_token = results.get('NextContinuationToken')\n self.logger.info('total number of files: {}'.format(len(keys)))\n\n self.logger.info('starting to download files')\n total_size = 0\n for k in keys:\n local_file_path = os.path.join(local_key, k.split('/')[-1])\n if not os.path.exists(os.path.dirname(local_file_path)):\n os.makedirs(os.path.dirname(local_file_path))\n self.logger.info('calculating size for key: s3://{}/{}'.format(s3_bucket, k))\n k_size = self.s3_client.head_object(Bucket=s3_bucket, Key=k)['ContentLength'] / 1024 / 1024 / 1024\n total_size += k_size\n self.logger.info('key size (GB): {}, downloading to {}'.format(k_size, local_file_path))\n self.s3_client.download_file(s3_bucket, k, local_file_path)\n self.logger.info('finished downloading files')\n\n total_time = (time() - start_time) / 60 / 60\n self.logger.info('total listing and download time: {}'.format(total_time))\n self.logger.info('total data size: {}'.format(total_size))\n\n def download_into_single_file(self, s3_bucket, s3_key, local_key):\n start_time = time()\n\n self.logger.info('starting to download files from: {bucket}/{key}'.format(bucket=s3_bucket, key=s3_key))\n s3_object_keys = [d.get('Key') for d in self.list_objects(Bucket=s3_bucket, Prefix=s3_key)]\n with open(local_key, 'ab') as data:\n for key in s3_object_keys:\n self.logger.info(key)\n self.s3_client.download_fileobj(s3_bucket, key, data)\n self.logger.info('finished downloading files')\n\n total_size = os.path.getsize(local_key) / 1024 / 1024 / 1024\n total_time = (time() - start_time) / 60 / 60\n self.logger.info('total listing and download time: {}'.format(total_time))\n self.logger.info('total data size: {}'.format(total_size))\n\n return total_size\n\n def upload_files(self, s3_bucket, files, s3_key):\n self.logger.info('starting to upload files to: {}/{}'.format(s3_bucket, s3_key))\n for file in files:\n self.logger.info('uploading file: {file}'.format(file=file))\n full_key = os.path.join(s3_key, file.split(\"/\")[-1])\n self.logger.debug('uploading {} to s3://{}/{}'.format(file, s3_bucket, full_key))\n self.s3_client.upload_file(file, s3_bucket, full_key)\n self.logger.info('finished uploading')\n\n def upload_object(self, s3_bucket, s3_key, obj):\n self.logger.info('Start uploading object to: s3://{bucket}/{key}'.format(bucket=s3_bucket, key=s3_key))\n self.s3_client.put_object(Bucket=s3_bucket, Key=s3_key, Body=obj)\n self.logger.info('Finished uploading')\n\n def list_objects(self, **base_kwargs):\n continuation_token = None\n while True:\n list_kwargs = dict(MaxKeys=1000, **base_kwargs)\n if continuation_token:\n list_kwargs['ContinuationToken'] = continuation_token\n response = self.s3_client.list_objects_v2(**list_kwargs)\n yield from response.get('Contents', [])\n if not response.get('IsTruncated'):\n break\n 
continuation_token = response.get('NextContinuationToken')\n","repo_name":"shlomiuz/mb-2-builder","sub_path":"predictors/spotad/clients/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35774195946","text":"import collections\n\nfrom .enums import EXTRAS_FEATURES\n\n\nclass Registry(dict):\n \"\"\"\n Central registry for registration of functionality. Once a store (key) is defined,\n it cannot be overwritten or deleted (although its value may be manipulated).\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return super().__getitem__(key)\n except KeyError:\n raise KeyError(f\"Invalid store: {key}\")\n\n def __setitem__(self, key, value):\n if key in self:\n raise KeyError(f\"Store already set: {key}\")\n super().__setitem__(key, value)\n\n def __delitem__(self, key):\n raise TypeError(\"Cannot delete stores from registry\")\n\n\n# Initialize the global registry\nregistry = Registry()\nregistry[\"model_features\"] = {\n feature: collections.defaultdict(set) for feature in EXTRAS_FEATURES\n}\n","repo_name":"peering-manager/peering-manager","sub_path":"extras/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"67"} +{"seq_id":"24563903401","text":"#!/usr/bin/env python3\nimport sys\ntry:\n from typing import List\nexcept ImportError:\n pass\n\n\ndef solve(N: int, W: \"List[int]\"):\n s2 = sum(W)\n s1 = 0\n m = s2\n for Wi in W[:-1]:\n s1 += Wi\n s2 -= Wi\n m = min(m, abs(s1 - s2))\n print(m)\n\n\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n W = [int(next(tokens)) for _ in range(N)] # type: \"List[int]\"\n solve(N, W)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kurgm/atcoder_submissions","sub_path":"abc129/B/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25186619707","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.python.framework import dtypes\n\nimport numpy as np\nimport math\nimport functools\n\n\ndef parse_input_graph(input_graph_def):\n input_node_map = {}\n for node in input_graph_def.node:\n if node.name not in input_node_map:\n input_node_map[node.name] = node\n else:\n print('Duplicate node name {}'.format(node.name))\n return input_node_map\n\n\ndef get_valid_log(max_min_log):\n output = []\n\n target_lines = [\n i.strip() for i in max_min_log if i.strip().find(';') != -1\n ]\n for i in target_lines:\n semi_count = i.count(';')\n if semi_count == 2:\n output.append(i)\n elif semi_count % 2 != 0:\n print(\"Invalid line\")\n else:\n loop_times = int(semi_count / 2)\n semi_index = [\n index for index, value in enumerate(i) if value == \";\"\n ]\n for index in range(loop_times - 1):\n output.append(i[semi_index[index * 2]:semi_index[index * 2 +\n 2]])\n output.append(i[semi_index[loop_times * 2 - 2]:])\n return output\n\n\ndef get_all_data(data_piece):\n return [\n int(i)\n for i 
in data_piece.replace('[', ' ').replace(']', ' ').split(' ')\n if i.strip()\n ]\n\n\ndef get_all_fp32_data(data_piece):\n return [\n float(i)\n for i in data_piece.replace('[', ' ').replace(']', ' ').split(' ')\n if i.strip()\n ]\n\n\ndef generic_scale(max_value_data, range_max, range_min):\n number_of_bits = 32\n number_of_steps = 1 << number_of_bits\n # print(number_of_steps)\n range_adjust = float(number_of_steps / (number_of_steps - 1))\n range_total = float((range_max - range_min) * range_adjust)\n range_scale = float(range_total / number_of_steps)\n # print(range_adjust, range_total, range_scale)\n lowest_quantized = -1 << 31\n # print(lowest_quantized)\n offset_input = float(float(max_value_data) - lowest_quantized)\n # print(offset_input)\n range_min_rounded = float(\n round(range_min / float(range_scale)) * float(range_scale))\n # print(range_min_rounded)\n result = float(range_min_rounded + (offset_input * range_scale))\n # print(result)\n return result\n\n\ndef expand_quantized_bins(quantized_bins, reference_bins):\n expanded_quantized_bins = [0] * len(reference_bins)\n num_merged_bins = int(len(reference_bins) / len(quantized_bins))\n j_start = 0\n j_end = num_merged_bins\n for idx in range(len(quantized_bins)):\n zero_count = reference_bins[j_start:j_end].count(0)\n num_merged_bins = j_end - j_start\n if zero_count == num_merged_bins:\n avg_bin_ele = 0\n else:\n avg_bin_ele = quantized_bins[idx] / (num_merged_bins - zero_count +\n 0.0)\n for idx1 in range(j_start, j_end):\n expanded_quantized_bins[\n idx1] = 0 if reference_bins[idx1] == 0 else avg_bin_ele\n j_start += num_merged_bins\n j_end += num_merged_bins\n if (idx + 1) == len(quantized_bins) - 1:\n j_end = len(reference_bins)\n return expanded_quantized_bins\n\n\ndef safe_entropy(reference_distr_P, P_sum, candidate_distr_Q, Q_sum):\n assert len(reference_distr_P) == len(candidate_distr_Q)\n tmp_sum1 = 0\n tmp_sum2 = 0\n for idx in range(len(reference_distr_P)):\n p_idx = reference_distr_P[idx]\n q_idx = candidate_distr_Q[idx]\n if p_idx == 0:\n tmp_sum1 += 0\n tmp_sum2 += 0\n else:\n if q_idx == 0:\n print(\"Fatal error!, idx = \" + str(idx) +\n \" qindex = 0! 
p_idx = \" + str(p_idx))\n tmp_sum1 += p_idx * (math.log(Q_sum * p_idx))\n tmp_sum2 += p_idx * (math.log(P_sum * q_idx))\n return (tmp_sum1 - tmp_sum2) / P_sum\n\n\ndef combine_histogram(old_hist, arr):\n \"\"\" Collect layer histogram for arr and combine it with old histogram.\n \"\"\"\n new_max = np.max(arr)\n new_min = np.min(arr)\n new_th = max(abs(new_min), abs(new_max))\n (old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist\n if new_th <= old_th:\n hist, _ = np.histogram(arr,\n bins=len(old_hist),\n range=(-old_th, old_th))\n return (old_hist + hist, old_hist_edges, min(old_min, new_min),\n max(old_max, new_max), old_th)\n else:\n old_num_bins = len(old_hist)\n old_step = 2 * old_th / old_num_bins\n half_increased_bins = int((new_th - old_th) // old_step + 1)\n new_num_bins = half_increased_bins * 2 + old_num_bins\n new_th = half_increased_bins * old_step + old_th\n hist, hist_edges = np.histogram(arr,\n bins=new_num_bins,\n range=(-new_th, new_th))\n hist[half_increased_bins:new_num_bins -\n half_increased_bins] += old_hist\n return (hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_th)\n\n\ndef get_tensor_histogram(tensor_data, bins=2048):\n max_val = np.max(tensor_data)\n min_val = np.min(tensor_data)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edeges = np.histogram(tensor_data, bins=2048, range=(-th, th))\n\n return (hist, hist_edeges, max_val, min_val, th)\n\n\ndef get_optimal_scaling_factor(tensor_details, num_quantized_bins=255):\n hist = tensor_details[0]\n hist_edeges = tensor_details[1]\n max_val = tensor_details[2]\n min_val = tensor_details[3]\n th = tensor_details[4]\n\n if min_val >= 0:\n ending_iter = 2047\n starting_iter = int(ending_iter * 0.7)\n min_range = min_val\n else:\n min_range = -th\n starting_iter = 0\n ending_iter = 2047\n if abs(max_val) > abs(min_val):\n while starting_iter < ending_iter:\n if hist[starting_iter] == 0:\n starting_iter += 1\n continue\n else:\n break\n starting_iter += int((ending_iter - starting_iter) * 0.6)\n else:\n while ending_iter > 0:\n if hist[ending_iter] == 0:\n ending_iter -= 1\n continue\n else:\n break\n starting_iter = int(0.6 * ending_iter)\n bin_width = hist_edeges[1] - hist_edeges[0]\n min_kl_divergence = 0\n min_kl_index = 0\n kl_inited = False\n for i in range(starting_iter, ending_iter + 1):\n reference_distr_P = hist[0:i].tolist()\n outliers_count = sum(hist[i:2048])\n if reference_distr_P[i - 1] == 0:\n continue\n reference_distr_P[i - 1] += outliers_count\n reference_distr_bins = reference_distr_P[:]\n candidate_distr_Q = hist[0:i].tolist()\n num_merged_bins = int(i / num_quantized_bins)\n candidate_distr_Q_quantized = [0] * num_quantized_bins\n j_start = 0\n j_end = num_merged_bins\n for idx in range(num_quantized_bins):\n candidate_distr_Q_quantized[idx] = sum(\n candidate_distr_Q[j_start:j_end])\n j_start += num_merged_bins\n j_end += num_merged_bins\n if (idx + 1) == num_quantized_bins - 1:\n j_end = i\n candidate_distr_Q = expand_quantized_bins(candidate_distr_Q_quantized,\n reference_distr_bins)\n P_sum = sum(reference_distr_P)\n Q_sum = sum(candidate_distr_Q)\n kl_divergence = safe_entropy(reference_distr_P, P_sum,\n candidate_distr_Q, Q_sum)\n if not kl_inited:\n min_kl_divergence = kl_divergence\n min_kl_index = i\n kl_inited = True\n elif kl_divergence < min_kl_divergence:\n min_kl_divergence = kl_divergence\n min_kl_index = i\n else:\n pass\n if min_kl_index == 0:\n while starting_iter > 0:\n if hist[starting_iter] == 0:\n starting_iter -= 1\n continue\n else:\n 
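# found a non-empty bin, so stop walking back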
break\n min_kl_index = starting_iter\n return (min_kl_index + 0.5) * bin_width + min_range\n\n\ndef parse_requantization_ranges_kl_fp32(fp32_log, print_node_mapping):\n valid_lines = get_valid_log(fp32_log)\n kl_appendix = \"__;__KL:\"\n valid_data = [i for i in valid_lines if i.find(kl_appendix) != -1]\n\n single_keys_prefix = sorted(\n set([i.split(kl_appendix)[0] for i in valid_data]))\n result = {}\n for node_name in single_keys_prefix:\n content_str = node_name + kl_appendix\n content_set = []\n key_name = print_node_mapping[node_name[1:].split('__print')\n [0]] + '_eightbit_requant_range'\n for line in valid_data:\n if line.find(content_str) != -1:\n content_set.append(line.split(content_str)[-1])\n else:\n pass\n\n all_transformed_data = functools.reduce(lambda a, b: a + b,\n content_set)\n\n kl = get_optimal_scaling_factor(\n get_all_fp32_data(all_transformed_data))\n\n result[key_name] = kl\n return result\n\n\ndef parse_requantization_ranges_kl(log_path):\n valid_lines = get_valid_log(log_path)\n\n kl_appendix = \"__;__KL:\"\n min_postfix = \"_min_output\"\n max_postfix = \"_max_output\"\n valid_data = [i for i in valid_lines if i.find(kl_appendix) != -1]\n\n single_keys_prefix = sorted(\n set([i.split(kl_appendix)[0] for i in valid_data]))\n result = {}\n\n for node_name in single_keys_prefix:\n min_out_str = node_name + kl_appendix + min_postfix\n max_out_str = node_name + kl_appendix + max_postfix\n content_str = node_name + kl_appendix\n key_name = node_name[1:].split(\n '_quantized_conv__print')[0] + '_requant_range'\n min_value_set = []\n max_value_set = []\n content_set = []\n for line in valid_data:\n if line.find(min_out_str) != -1:\n min_value = line.split('[')[-1].split(']')[0]\n min_value_set.append(min_value)\n elif line.find(max_out_str) != -1:\n max_value = line.split('[')[-1].split(']')[0]\n max_value_set.append(max_value)\n\n elif line.find(content_str) != -1:\n content_set.append(line.split(content_str)[-1])\n else:\n pass\n\n all_transformed_data = []\n for index, min_range_value in enumerate(min_value_set):\n # step 0 translate data\n max_range_value = float(max_value_set[index])\n min_range_value = float(min_range_value)\n cur_data = get_all_data(content_set[index])\n for i in cur_data:\n all_transformed_data.append(\n generic_scale(i, max_range_value, min_range_value))\n\n kl = get_optimal_scaling_factor(all_transformed_data)\n result[key_name] = kl\n\n return result\n\n\ndef parse_requantization_ranges(max_min_log, is_moving_average=False):\n \"\"\"\n Parse the max_min log to get requantization values\n :param max_min_log: input min max log file\n :return: dict saved the result\n \"\"\"\n print_suffix = \"__print__\"\n post_fix = \"__requant_min_max\"\n lines = get_valid_log(max_min_log)\n res = {}\n temp_min = {}\n temp_max = {}\n for i in lines:\n if i.find(print_suffix + \";\" + post_fix) == -1:\n continue\n max_line_data = i.split(print_suffix + \";\" + post_fix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n if is_moving_average:\n op_min = temp_min[key][0]\n for i in range(1, len(temp_min[key])):\n op_min = op_min * 0.5 + temp_min[key][i] * 0.5\n res[key].append(op_min)\n else:\n target_min_index = 
int(round(len(temp_min[key]) * 0.05))\n if target_min_index < 0:\n target_min_index = 0\n if key not in res:\n res[key] = []\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n if is_moving_average:\n # moving average of the observed maxima\n op_max = temp_max[key][0]\n for i in range(1, len(temp_max[key])):\n op_max = op_max * 0.5 + temp_max[key][i] * 0.5\n res[key].append(op_max)\n else:\n # 95th percentile of the observed maxima\n target_max_index = int(round(len(temp_max[key]) * 0.95))\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n res[key].append(sorted(temp_max[key])[target_max_index])\n return res\n\n\ndef parse_max_min_log(max_min_log, use_moving_average=False, fetch_max=True):\n \"\"\"\n Parse the max_min log file\n :param max_min_log: max_min log file\n :param fetch_max: parse for freeze_max or not\n :return: get the node name and value mapping\n \"\"\"\n print_suffix = \"__print__\"\n if fetch_max:\n postfix = \"__max:\"\n else:\n postfix = \"__min:\"\n\n lines = get_valid_log(max_min_log)\n\n res = {}\n temp = {}\n for i in lines:\n if i.find(print_suffix + \";\" + postfix) == -1:\n continue\n max_line_data = i.split(';')\n name = max_line_data[1][:-len(print_suffix)]\n value = max_line_data[-1].split('[')[-1].split(']')[0]\n if \"eightbit\" in name and name not in temp:\n temp[name] = []\n if \"eightbit\" in name:\n temp[name].append(float(value))\n for key in temp:\n if use_moving_average:\n op_max = temp[key][0]\n for i in range(1, len(temp[key])):\n op_max = op_max * 0.5 + temp[key][i] * 0.5\n res[key] = op_max\n else:\n target_index = int(len(temp[key]) * 0.95)\n if target_index > len(temp[key]) - 1:\n target_index = len(temp[key]) - 1\n\n res[key] = sorted(temp[key])[target_index]\n return res\n\n\ndef generate_output_graph_ranges(input_node_map, range_info):\n output_graph_def = graph_pb2.GraphDef()\n inputs_to_rename = {}\n for node in input_node_map:\n if node in range_info:\n min_node = node_def_pb2.NodeDef()\n min_node.op = \"Const\"\n min_node.name = node + \"/frozen_min\"\n inputs_to_rename[node + \":0\"] = min_node.name + \":0\"\n min_node.attr[\"dtype\"].CopyFrom(\n attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\n min_node.attr[\"value\"].CopyFrom(\n attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\n float(range_info[node][0]), dtypes.float32, [])))\n\n max_node = node_def_pb2.NodeDef()\n max_node.op = \"Const\"\n max_node.name = node + \"/frozen_max\"\n inputs_to_rename[node + \":1\"] = max_node.name + \":0\"\n max_node.attr[\"dtype\"].CopyFrom(\n attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\n max_node.attr[\"value\"].CopyFrom(\n attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\n float(range_info[node][1]), dtypes.float32, [])))\n output_graph_def.node.extend([min_node, max_node])\n else:\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(input_node_map[node])\n output_graph_def.node.extend([new_node])\n\n for node in output_graph_def.node:\n found_index = []\n\n for input_index, input_name in enumerate(node.input):\n for _, v in enumerate(inputs_to_rename):\n if input_name == v:\n found_index.append(input_index)\n\n if found_index:\n for sub_index in found_index:\n node.input[sub_index] = inputs_to_rename[node.input[sub_index]]\n\n return output_graph_def\n\n\ndef generate_output_graph(input_node_map, max_name_value, is_max=True):\n \"\"\"\n Generate transformed graph for freeze_max/freeze_min transformation.\n :param input_node_map: input node name and nodedef mapping\n :param max_name_value: 
target values\n :param is_max: freeze_max flag\n :return: transformed graph\n \"\"\"\n output_graph_def = graph_pb2.GraphDef()\n inputs_to_rename = {}\n for node in input_node_map:\n if node in max_name_value:\n new_node = node_def_pb2.NodeDef()\n new_node.op = \"Const\"\n new_node_postfix = \"/frozen_max_only\" if is_max else \"/frozen_min_only\"\n new_node.name = node + new_node_postfix\n inputs_to_rename[node] = new_node.name + \":0\"\n new_node.attr[\"dtype\"].CopyFrom(\n attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\n new_node.attr[\"value\"].CopyFrom(\n attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\n float(max_name_value[node]), dtypes.float32, [])))\n else:\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(input_node_map[node])\n output_graph_def.node.extend([new_node])\n\n for node in output_graph_def.node:\n found = False\n found_index = -1\n found_value = \"\"\n for input_index, input_name in enumerate(node.input):\n for _, v in enumerate(inputs_to_rename):\n if input_name == v:\n found = True\n found_index = input_index\n found_value = v\n break\n if found:\n break\n if found:\n post_fix = '/frozen_max_only:0' if is_max else '/frozen_min_only:0'\n node.input[found_index] = found_value + post_fix\n\n return output_graph_def\n\n\ndef freeze_requantization_range(input_graph_def,\n max_min_log,\n is_moving_average=False,\n tensor_histogram=None):\n \"\"\"\n Freeze requantization range graph transformation\n :param input_graph_def: input graphdef\n :param max_min_log: max_min_log file\n :return: transformed graph\n \"\"\"\n input_node_map = parse_input_graph(input_graph_def)\n range_info = parse_requantization_ranges(max_min_log, is_moving_average)\n if not is_moving_average and tensor_histogram:\n for key in tensor_histogram:\n kl_value = get_optimal_scaling_factor(tensor_histogram[key])\n if key in range_info:\n range_info[key][-1] = kl_value\n range_info[key][0] = 0\n\n return generate_output_graph_ranges(input_node_map, range_info)\n\n\ndef freeze_max(input_graph_def, max_min_log, use_moving_average=False):\n \"\"\"\n Freeze max graph transformation\n :param input_graph_def: input graphdef\n :param max_min_log: max_min_log\n :return: transformed graph\n \"\"\"\n input_node_map = parse_input_graph(input_graph_def)\n max_name_value = parse_max_min_log(max_min_log, use_moving_average, True)\n return generate_output_graph(input_node_map, max_name_value, True)\n\n\ndef freeze_min(input_graph_def, max_min_log, use_moving_average=False):\n \"\"\"\n Freeze min graph transformation.\n :param input_graph_def: input graphdef\n :param max_min_log: max_min_log file\n :return: transformed graph\n \"\"\"\n input_node_map = parse_input_graph(input_graph_def)\n max_name_value = parse_max_min_log(max_min_log, use_moving_average, False)\n return generate_output_graph(input_node_map, max_name_value, False)\n","repo_name":"IntelAI/tools","sub_path":"api/intel_quantization/transform_graph/freeze_max_min.py","file_name":"freeze_max_min.py","file_ext":"py","file_size_in_byte":20362,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"67"} +{"seq_id":"39154210682","text":"# Question Link : https://leetcode.com/explore/challenge/card/september-leetcoding-challenge/557/week-4-september-22nd-september-28th/3472/\n# Level : Medium\n# Solution is right below :-\n\nclass Solution(object):\n def largestNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: str\n \"\"\"\n customCompare = lambda a,b: cmp(b+a,a+b)\n nums = 
map(str,nums)\n nums = sorted(nums,cmp=customCompare)\n res = \"\".join(nums)\n return res if res[0]!='0' else '0'\n \nprint('Max Number :',Solution().largestNumber([3,30,34,5,9]))\n","repo_name":"Pallagani-Praveen/LeetCode","sub_path":"largestNumber.py","file_name":"largestNumber.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26400092161","text":"# -*- coding: utf-8 -*-\r\nfrom abcd import pitch_list, punctuation\r\nimport tree\r\nfrom checker import isEchos\r\nimport syllable\r\nimport voiced\r\nimport voiceless\r\nimport bitmap\r\nimport martyria\r\nimport phrase\r\n\r\n\r\ndef getSyllable(l):\r\n\tfor pos, i in enumerate(l):\r\n\t\tif i == punctuation[\"syllable\"]:\r\n\t\t\treturn l[pos+1]\r\n\t\t\r\ndef getSyllablePhrase(l):\r\n\tfor pos, i in enumerate(l):\r\n\t\tif i == punctuation[\"syllable\"]:\r\n\t\t\treturn l[pos+1:]\r\n\r\n\r\ndef isLastChildOfParent(l):\r\n\treturn l[-1] in [punctuation[\"end_phrase\"], punctuation[\"end_phrase_and_word\"]]\r\n\r\n\r\n\r\n\r\ndef syllableConstructor(syl):\r\n\tif syl[-1] == punctuation[\"end_word\"] or syl[-1] == punctuation[\"end_phrase_and_word\"]:\r\n\t\treturn syllable.Syllable(syl[0], True)\r\n\telse:\r\n\t\treturn syllable.Syllable(syl[0], False)\r\n\r\n\r\n\r\ndef posRange(a_list):\r\n\tpos_range = []\r\n\ttemp_l = []\r\n\t\r\n\tfor i in a_list:\r\n\t\tif i == punctuation[\"syllable\"]:\r\n\t\t\tpos_range.append(temp_l)\r\n\t\t\tbreak\r\n\t\telif i == punctuation[\"voiced_delimiter\"]:\r\n\t\t\tpos_range.append(temp_l)\r\n\t\t\ttemp_l = []\r\n\t\telse:\r\n\t\t\ttemp_l.append(i)\r\n\t\r\n\treturn pos_range\r\n\r\n\r\n\r\n\r\ndef getPosPitchList(a_pitch):\r\n\tfor pos, p in enumerate(pitch_list):\r\n\t\tif p == a_pitch:\r\n\t\t\tbreak\r\n\treturn pos\r\n#==============================================================================\r\n#==============================================================================\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef level1(is_martyria, subtree):\r\n\tif is_martyria:\r\n\t\tnew_subtree = tree.Node(\"martyria\")\r\n\t\tnew_subtree.pitch = subtree.pitch\r\n\telse:\r\n\t\tnew_subtree = tree.Node(phrase.Phrase())\t\t\t\t\t\t\t#==code για το pitch\r\n\t\t\r\n\tnew_subtree.voiceless = new_subtree.voiceless | subtree.voiceless\r\n\tnew_subtree.continue_ = new_subtree.continue_ | subtree.continue_\r\n\tnew_subtree.addChild(subtree)\r\n\t\r\n\treturn new_subtree, True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#==============================================================================\r\n\r\ndef phrase_level2(leaf_node_list, a_list, newNode_boolean):\r\n\tsubtree = tree.Node(syllableConstructor(getSyllablePhrase(a_list)))\r\n\tfor i in leaf_node_list:\r\n\t\tsubtree.addChild(i)\r\n\t\tsubtree.voiceless = subtree.voiceless | i.voiceless\r\n\t\tsubtree.continue_ = subtree.continue_ | i.continue_\r\n\r\n\t\r\n\tif newNode_boolean:\r\n\t\treturn level1(False, subtree)\r\n\telse:\r\n\t\treturn subtree, newNode_boolean\r\n\t\r\n\t\r\n\t\r\n\r\n\r\ndef martyria_level2(leaf_node_list, a_list, newNode_boolean):\r\n\tsubtree = tree.Node(martyria.Martyria(a_list[0]))\r\n\tfor i in leaf_node_list:\r\n\t\tsubtree.addChild(i)\r\n\t\tsubtree.voiceless = subtree.voiceless | i.voiceless\r\n\t\tsubtree.continue_ = subtree.continue_ | i.continue_\r\n\tsubtree.pitch = tree.PitchList([getSyllable(a_list)])\r\n\t \r\n\tif newNode_boolean:\r\n\t\treturn level1(True, subtree)\r\n\telse:\r\n\t\treturn subtree, 
newNode_boolean\r\n\r\n\r\n\r\n\r\n\r\n#==============================================================================\r\ndef phrase_level3(a_list, newNode_boolean):\r\n\tlist_lists = posRange(a_list)\t#ενδιάμεσο βήμα για την ευκρίνια του κώδικα\r\n\tleaf_node_list = createLeafNode(list_lists)\r\n\t\r\n\treturn phrase_level2(leaf_node_list, a_list, newNode_boolean)\r\n\t\r\n\t\r\n\r\ndef martyria_level3(a_list, newNode_boolean):\r\n\tlist_lists = posRange(a_list[1:]) #ενδιάμεσο βήμα για την ευκρίνια του κώδικα\r\n\tleaf_node_list = createLeafNode(list_lists)\r\n\t\r\n\treturn martyria_level2(leaf_node_list, a_list, newNode_boolean)\r\n\t\r\n\t\r\n\t\r\n#==============================================================================\r\ndef createLeafNode(lists_voiced_signs):\r\n\tvoiced_node_list = []\r\n\t\r\n\tfor voiced_voiceless in lists_voiced_signs:\r\n\t\t# parse voiced\r\n\t\ta_node = tree.Node(voiced.voiced_units.get(voiced_voiceless[0]))\r\n\t\t\r\n\t\t# parse voiced and voiceless\r\n\t\tvoiceless_b, continue_b = parseVoicelesses(voiced_voiceless[1:])\r\n\t\ta_node.voiceless = voiceless_b\r\n\t\ta_node.continue_ = continue_b\r\n\t\t\r\n\t\t# append a voiced\r\n\t\tvoiced_node_list.append(a_node)\r\n\treturn voiced_node_list\r\n\r\n\r\n\r\n\r\n\r\ndef parseVoicelesses(voicelessContinueList):\r\n\tt_voiceless = bitmap.Bitmap(0)\r\n\tt_continue = bitmap.Bitmap(0)\r\n\ttemp = bitmap.Bitmap(0)\r\n\tfor i in voicelessContinueList:\r\n\t\tif i.isdigit():\r\n\t\t\tif int(i) == 1:\r\n\t\t\t\tt_continue = t_continue | temp\r\n\t\telse:\r\n\t\t\tt_voiceless = t_voiceless | bitmap.Bitmap(voiceless.voiceless.get(i))\r\n\t\t\ttemp = bitmap.Bitmap(voiceless.voiceless.get(i))\r\n\treturn t_voiceless, t_continue\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#==============================================================================\r\ndef constructor(root_cargo, music_piece):\r\n\troot_node = tree.Node(root_cargo)\r\n\tlastNode_boolean = True\r\n\tnewNode = True\r\n\twhile len(music_piece) != 0:\r\n\t\tif isEchos(music_piece[0][0]):\r\n\t\t\tsubtree, lastNode_boolean = martyria_level3(music_piece[0], newNode)\r\n\t\telse:\r\n\t\t\tsubtree, lastNode_boolean = phrase_level3(music_piece[0], newNode)\r\n\t\t\r\n\t\t\r\n\t\tlastNode_boolean = isLastChildOfParent(music_piece[0])\r\n\t\tdel(music_piece[0])\r\n\t\t\r\n\t\t\r\n\t\tif newNode:\r\n\t\t\troot_node.addChild(subtree)\r\n\t\t\troot_node.voiceless = root_node.voiceless | subtree.voiceless\r\n\t\t\troot_node.continue_ = root_node.continue_ | subtree.continue_\r\n\t\t\tnewNode = False\r\n\t\telse:\r\n\t\t\ttemp = root_node.getLastChild()\r\n\t\t\ttemp.addChild(subtree)\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t# ακολουθούν 2 γραμμές κώδικα για το pitch\r\n\t\t\tif temp.cargo == \"martyria\":\r\n\t\t\t\ttemp.pitch = subtree.pitch\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\ttemp.voiceless = temp.voiceless | subtree.voiceless\r\n\t\t\ttemp.continue_ = temp.continue_ | subtree.continue_\r\n\t\t\t\r\n\t\t\troot_node.voiceless = root_node.voiceless | subtree.voiceless\r\n\t\t\troot_node.continue_ = root_node.continue_ | subtree.continue_\r\n\t\t\r\n\t\tif lastNode_boolean:\r\n\t\t\tlastNode_boolean = True\r\n\t\t\tnewNode = True\r\n\t\r\n\t\r\n\t\r\n\tparsePitch(root_node)\r\n\t#print(repr(root_node))\r\n\treturn root_node\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef parsePitch(byz_tree):\r\n\tgiven_pos = -1\r\n\r\n\tfor level1_node in byz_tree:\r\n\t\tif level1_node.cargo == \"martyria\":\r\n\t\t\tgiven_pos = getPosPitchList(level1_node.pitch[0])\r\n\t\t\tcontinue\r\n\t\t\r\n\t\t\r\n\t\tif 
given_pos < 0 or given_pos >= len(pitch_list):\r\n\t\t\traise ValueError(\"I couldn't find martyria pitch!\")\r\n\t\t\r\n\t\t\r\n\t\tfor level2_node in level1_node:\r\n\t\t\tfor level3_node in level2_node:\r\n\t\t\t\ttemp = level3_node.cargo.interval\r\n\t\t\t\tfor every_interval in temp:\r\n\t\t\t\t\tgiven_pos = given_pos + every_interval\r\n\t\t\t\t\tlevel3_node.pitch.append(tree.PitchList([pitch_list[given_pos]]))\r\n\r\n\t\t\t\tlevel2_node.pitch.append(level3_node.pitch)\r\n\t\t\tlevel1_node.pitch.append(level2_node.pitch)\r\n\t\tbyz_tree.pitch.append(level1_node.pitch)\r\n\treturn byz_tree\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmusic_piece_example = [\r\n['echos_a', 'aa', '=', 'D', '-'],\r\n['i', '=', 'Σου', '|'],\r\n['i', '=', 'η'],\r\n['i', '=', 'τρο'],\r\n['o', 'l', '1', '=', 'παι'],\r\n['p', 'l', '1', '=', 'ού'],\r\n['ae2', 'l', '=', 'χος', '|'],\r\n['o', '=', 'δε'],\r\n['o', 'g', '=', 'ξι'],\r\n['o', 'ap', '=', 'ά', '|-'],\r\n['echos_d', 'aa', ',', 'ooh4', ',', 'a', ',', 'a', ',', 'a', '=', 'G', '-'],\r\n['a', '=', 'ο'],\r\n['po', 'an', '1', '=', 'δόν', '|'],\r\n['ae2', 'an', '=', 'βυ', '-'],\r\n['echos_d', 'a', ',', 'a', ',', 'a', '=', 'F', '-'],\r\n['ooi', 'ps', '=', 'θού', '|'],\r\n['a', '=', 'και'],\r\n['a', '=', 'νουρ'],\r\n['pk3', 'ps', 'di', '1', ',', 'a', 'di', '=', 'γή'],\r\n['i', 'l', '1', 'v', '1', 'di', '1', ',', 'a', 'l', '1', 'v', 'di', ',', 'pi', 'l', '1', 'di', '1', ',', 'a', 'l', 'di', '=', 'σα'],\r\n['i', 'dd', '=', 'σα']]\r\n\r\n\r\n\r\n#pol = constructor(\"some_root_cargo\", music_piece_example)\r\n#print(pol)\r\n\r\n\r\n\r\n\r\n","repo_name":"PolykarposPolykarpidis/PhD_documents","sub_path":"code/byzTree.py","file_name":"byzTree.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4035483201","text":"from microCTtools import get_slices\nimport numpy as np\nfrom libtiff import TIFF\n\n\ndef subtract(fn_low, fn_high, met_name, recon, dz, n):\n\n print('Reading low slices')\n low_slices = get_slices(fn_low, n)\n print('Reading high slices')\n high_slices = get_slices(fn_high, n)\n print('Subtracting slices')\n subtracted = high_slices - low_slices\n\n slice_nums = np.arange(0, dz, dz // n)\n\n for i in range(n):\n print('Saving {}_low_{}'.format(met_name, slice_nums[i]))\n Al_tiff = TIFF.open(\n '../Data/new_subs/{}_low_{}_{}.tif'.format(met_name, recon, slice_nums[i]), 'w')\n Al_tiff.write_image(low_slices[i])\n Al_tiff.close()\n\n print('Saving {}_high_{}'.format(met_name, slice_nums[i]))\n No_tiff = TIFF.open(\n '../Data/new_subs/{}_high_{}_{}.tif'.format(met_name, recon, slice_nums[i]), 'w')\n No_tiff.write_image(high_slices[i])\n No_tiff.close()\n\n print('Saving {}_subtracted_{}'.format(met_name, slice_nums[i]))\n Subtraction_tiff = TIFF.open(\n '../Data/new_subs/{}_subtracted_{}_{}.tif'.format(met_name, recon, slice_nums[i]), 'w')\n Subtraction_tiff.write_image(subtracted[i])\n Subtraction_tiff.close()\n\n\n# sinogram\n# print('U sino')\n# U_low = '../Data/Brain/VS0169_1712/VS0169_1712_.volume'\n# U_high = '../Data/Brain/VS0169_1722/VS0169_1722_.volume'\n#subtract(U_low, U_high, 'U', 'sino', 900, 4)\n\n# recon\n# print('U recon')\n#U_low = '../Data/Brain/VS0169_1712/VS0169_1712_recon.volume'\n# U_high = '../Data/Brain/VS0169_1722/VS0169_1722_recon.volume'\n# subtract(U_low, U_high, 'U', 'recon', 1200, 4)\n\n# sinogram\nprint('Os Sino')\nOs_low = '../Data/Brain/VS0169_Os/VS0169_Os_B_.volume'\nOs_high = 
'../Data/Brain/VS0169_Os/VS0169_Os_A_.volume'\nsubtract(Os_low, Os_high, 'Os', 'sino', 900, 4)\n\n# recon\nprint('Os Recon')\nOs_low = '../Data/Brain/VS0169_Os/VS0169_Os_B_recon.volume'\nOs_high = '../Data/Brain/VS0169_Os/VS0169_Os_A_recon.volume'\nsubtract(Os_low, Os_high, 'Os', 'recon', 1200, 4)\n","repo_name":"scott-trinkle/MicroCT","sub_path":"Old Tasks/Nov17/l-edge raw subtraction 11_15_17/newsubtractions.py","file_name":"newsubtractions.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"17663329789","text":"\"\"\"\nhttps://www.codetree.ai/missions\nn * n크기의 직사각형의 가운데에서 시작하여 오른쪽, 위, 왼쪽, 아래 순서로 \n더 이상 채울 곳이 없을 때까지 회전하며 숫��를 적어나가려고 합니다. \n숫자는 1부터 시작한다고 했을 때, 다음과 같은 모양으로 숫자들을 쭉 채우는 코드를 작성해보세요.\n\n\n\"\"\"\n\n# 변수 선언 및 입력\nn = int(input())\nanswer = [\n [0 for _ in range(n)]\n for _ in range(n)\n]\nvisited = [\n [0 for _ in range(n)]\n for _ in range(n)\n]\n\n\n\ndef can_go(new_x, new_y):\n # 나아가려는 위치가 직사각형 안에 들어 있는지 확인하고\n # 들어있다면 아직 방문한적이 없는 곳인지 판단합니다.\n if 0 <= new_x and new_x < n and 0 <= new_y and new_y < n and visited[new_x][new_y] == 0:\n return True\n else:\n return False\n\n\n# direction에 따라 바뀌는 (x, y)의 변화량인 dx, dy를 정의합니다.\ndxs, dys = [0, -1, 0, 1], [1, 0, -1, 0]\n\ncurr_x = int((n-1)/2) # 시작은 (중앙) 입니다.\ncurr_y = curr_x\n\ndirection = 0 # 0: 오른쪽, 1: up, 2: 왼쪽, 3: down\n\n# 처음 시작 위치에 초기값을 적습니다.\nanswer[curr_x][curr_y] = 1\nvisited[curr_x][curr_y] = True\n\n# n*m개의 알파벳을 적어야 합니다.\nfor i in range(2 ,(n * n)+1): # i번째 문자를 어디에 적을지 결정합니다.\n while True: # 나아갈 수 있을때까지 방향을 바꿔가며 확인해봅니다.\n # 현재 방향 dir를 기준으로 그 다음 위치 값을 계산합니다.\n next_x, next_y = curr_x + dxs[direction], curr_y + dys[direction]\n # 그 위치로 나아갈 수 있는지 확인합니다.\n if can_go(next_x, next_y):\n # 나아갈 수 있다면 위치를 갱신해주고 배열에 올바른 값을 채워넣습니다.\n curr_x, curr_y = next_x, next_y\n visited[curr_x][curr_y] = True\n answer[curr_x][curr_y] = i\n direction = (direction + 1) % 4\n break\n else:\n direction = direction - 1\n\n# 출력:\nfor i in range(n):\n for j in range(n):\n print(answer[i][j], end=' ')\n print()\n","repo_name":"chosunghyun18/Problem_Solving","sub_path":"ProblemSolved2021Summer/Basic/middlestartboxcomplete.py","file_name":"middlestartboxcomplete.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"47001777197","text":"#\n# @lc app=leetcode.cn id=657 lang=python3\n#\n# [657] 机器人能否返回原点\n#\n\n\nclass Solution:\n # 统计每个方向移动的次数\n # U=D 且L=R时可以回到原点\n def judgeCircle(self, moves: str) -> bool:\n U, D, L, R = 0, 0, 0, 0\n for c in moves:\n if c == \"U\":\n U += 1\n elif c == \"D\":\n D += 1\n elif c == \"L\":\n L += 1\n elif c == \"R\":\n R += 1\n\n return U == D and L == R\n","repo_name":"fengbaoheng/leetcode","sub_path":"python/657.robot-return-to-origin.py","file_name":"657.robot-return-to-origin.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73003268052","text":"'''\r\nCreated on Mar 2, 2019\r\n\r\n@author: zhangand\r\n'''\r\nfrom pptx import Presentation\r\nfrom pptx import chart\r\nfrom pptx.chart.data import ChartData\r\nfrom pptx.enum.chart import XL_CHART_TYPE\r\nfrom pptx.util import Inches#Cm\r\nimport os\r\nimport win32com.client\r\n\r\n \r\nif __name__ == '__main__':\r\n # 创建幻灯片 ------\r\n prs = Presentation('template.pptx')\r\n \r\n subject_slide_layout = prs.slide_layouts[0]\r\n slide = 
prs.slides.add_slide(subject_slide_layout)\r\n title = slide.shapes.title\r\n subtitle = slide.placeholders[10]\r\n title.text ='Simulation Report'\r\n subtitle.text=\"Tina Chen\"\r\n \r\n title_only_slide_layout = prs.slide_layouts[2]\r\n slide = prs.slides.add_slide(title_only_slide_layout)\r\n shapes = slide.shapes\r\n\r\n shapes.title.text = 'Simulation Report'\r\n \r\n # 定义表格数据 ------\r\n name_objects = [\"object1\", \"object2\", \"object3\"]\r\n name_AIs = [\"AI1\", \"AI2\", \"AI3\"]\r\n val_AI1 = (19.2, 21.4, 16.7)\r\n val_AI2 = (22.3, 28.6, 15.2)\r\n val_AI3 = (20.4, 26.3, 14.2)\r\n val_AIs = [val_AI1, val_AI2, val_AI3]\r\n \r\n # 表格样式 --------------------\r\n rows = 4\r\n cols = 4\r\n top = Inches(1.5)\r\n left = Inches(1) #Inches(2.0)\r\n width = Inches(2) # Inches(6.0)\r\n height = Inches(0.5) # Inches(0.8)\r\n \r\n # 添加表格到幻灯片 --------------------\r\n table = shapes.add_table(rows, cols, left, top, width, height).table\r\n \r\n # 设置单元格宽度\r\n table.columns[0].width = Inches(2)# Inches(2.0)\r\n table.columns[1].width = Inches(2)\r\n table.columns[2].width = Inches(2)\r\n table.columns[3].width = Inches(2)\r\n \r\n # 设置标题行\r\n table.cell(0, 1).text = name_objects[0]\r\n table.cell(0, 2).text = name_objects[1]\r\n table.cell(0, 3).text = name_objects[2]\r\n \r\n # 填充数据\r\n table.cell(1, 0).text = name_AIs[0]\r\n table.cell(1, 1).text = str(val_AI1[0])\r\n table.cell(1, 2).text = str(val_AI1[1])\r\n table.cell(1, 3).text = str(val_AI1[2])\r\n \r\n table.cell(2, 0).text = name_AIs[1]\r\n table.cell(2, 1).text = str(val_AI2[0])\r\n table.cell(2, 2).text = str(val_AI2[1])\r\n table.cell(2, 3).text = str(val_AI2[2])\r\n \r\n table.cell(3, 0).text = name_AIs[2]\r\n table.cell(3, 1).text = str(val_AI3[0])\r\n table.cell(3, 2).text = str(val_AI3[1])\r\n table.cell(3, 3).text = str(val_AI3[2])\r\n \r\n # 定义图表数据 ---------------------\r\n chart_data = ChartData()\r\n chart_data.categories = name_objects\r\n chart_data.add_series(name_AIs[0], val_AI1)\r\n chart_data.add_series(name_AIs[1], val_AI2)\r\n chart_data.add_series(name_AIs[2], val_AI3)\r\n \r\n # 添加图表到幻灯片 --------------------\r\n x, y, cx, cy = Inches(1), Inches(3.5), Inches(8), Inches(3)\r\n \r\n graphic_frame = slide.shapes.add_chart(XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data)\r\n \r\n chart = graphic_frame.chart\r\n \r\n chart.has_legend = True\r\n #chart.legend.position = XL_LEGEND_POSITION.TOP\r\n chart.legend.include_in_layout = False\r\n \r\n value_axis = chart.value_axis\r\n value_axis.maximum_scale = 40.0\r\n \r\n value_axis.has_title = True\r\n value_axis.axis_title.has_text_frame = True\r\n value_axis.axis_title.text_frame.text = \"False positive\"\r\n value_axis.axis_title.text_frame.auto_size\r\n \r\n slide = prs.slides.add_slide(title_only_slide_layout)\r\n title = slide.shapes.title\r\n title.text ='Simulation Report'\r\n top = Inches(1.2)\r\n left = Inches(1.5)\r\n \r\n width=Inches(7)\r\n pic = slide.shapes.add_picture('./Desert.jpg', left, top,width=width)\r\n \r\n prs.save('test_template.pptx')\r\n \r\nCurentpath=os.getcwd()\r\n \r\nppt = win32com.client.Dispatch('PowerPoint.Application')\r\npptSel = ppt.Presentations.Open(Curentpath+'/test_template.pptx',ReadOnly=1, Untitled=0, WithWindow=1)\r\nwin32com.client.gencache.EnsureDispatch('PowerPoint.Application')\r\nslide_count = pptSel.Slides.Count\r\nprint('slide page number is %d' %(slide_count))\r\nppt.Quit()\r\n \r\n 
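# optionally relaunch PowerPoint to inspect the generated deck: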
#os.system('POWERPNT.exe')\r\n","repo_name":"zhangand/PythonTest","sub_path":"HelloWorld/PrintWord.py","file_name":"PrintWord.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5647175183","text":"from typing import Iterator, Set\n\nfrom puzzles.day_18.load_inputs import Cubelet, input_reader, InputType\nfrom puzzles.day_18.solution_part_1 import calculate_solution as get_facets_of\n\n\nclass SteamEnclosure:\n def __init__(self, cubes: InputType):\n self.lava_cubes = [tuple(c) for c in cubes]\n self.x_min, self.y_min, self.z_min = cubes[0]\n self.x_max, self.y_max, self.z_max = cubes[0]\n\n self.steam_cubes: Set[Cubelet] = set()\n self.current_steam_sources: Set[Cubelet] = set()\n\n self.detect_outmost()\n\n def detect_outmost(self) -> None:\n for cube_x, cube_y, cube_z in self.lava_cubes:\n if cube_x < self.x_min:\n self.x_min = cube_x\n if cube_x > self.x_max:\n self.x_max = cube_x\n if cube_y < self.y_min:\n self.y_min = cube_y\n if cube_y > self.y_max:\n self.y_max = cube_y\n if cube_z < self.z_min:\n self.z_min = cube_z\n if cube_z > self.z_max:\n self.z_max = cube_z\n\n self.x_min -= 1\n self.x_max += 1\n self.y_min -= 1\n self.y_max += 1\n self.z_min -= 1\n self.z_max += 1\n\n def test_volume(self):\n return (self.x_max - self.x_min + 1) * (self.y_max - self.y_min + 1) * (1 + self.z_max - self.z_min)\n\n def initialize_steam(self) -> None:\n for x in range(self.x_min, self.x_max + 1):\n for y in range(self.y_min, self.y_max + 1):\n self.steam_cubes.add((x, y, self.z_min))\n self.steam_cubes.add((x, y, self.z_max))\n\n for z in range(self.z_min, self.z_max):\n self.steam_cubes.add((x, self.y_min, z))\n self.steam_cubes.add((x, self.y_max, z))\n\n for y in range(self.y_min, self.y_max):\n for z in range(self.z_min, self.z_max):\n self.steam_cubes.add((self.x_min, y, z))\n self.steam_cubes.add((self.x_max, y, z))\n\n self.current_steam_sources = self.steam_cubes\n\n def jitter_cube(self, cube: Cubelet) -> Iterator[Cubelet]:\n cube_x, cube_y, cube_z = cube\n\n if cube_x > self.x_min:\n yield cube_x - 1, cube_y, cube_z\n if cube_x < self.x_max:\n yield cube_x + 1, cube_y, cube_z\n if cube_y > self.y_min:\n yield cube_x, cube_y - 1, cube_z\n if cube_y < self.y_max:\n yield cube_x, cube_y + 1, cube_z\n if cube_z > self.z_min:\n yield cube_x, cube_y, cube_z - 1\n if cube_z < self.z_max:\n yield cube_x, cube_y, cube_z + 1\n\n def propagate_steam(self) -> None:\n if not self.steam_cubes:\n self.initialize_steam()\n\n next_steam_cubes: Set[Cubelet] = set()\n\n for cube in self.current_steam_sources:\n for next_cube in self.jitter_cube(cube):\n if next_cube in self.lava_cubes:\n continue\n if next_cube in next_steam_cubes:\n continue\n if next_cube in self.steam_cubes:\n continue\n next_steam_cubes.add(next_cube)\n\n for cube in next_steam_cubes:\n self.steam_cubes.add(cube)\n\n self.current_steam_sources = next_steam_cubes\n\n def flow_steam(self) -> None:\n self.initialize_steam()\n\n while self.current_steam_sources:\n self.propagate_steam()\n\n def num_outer_steam_facets(self) -> int:\n total_outer_facets = 0\n total_outer_facets += 2 * (self.x_max - self.x_min + 1) * (self.y_max - self.y_min + 1)\n total_outer_facets += 2 * (self.x_max - self.x_min + 1) * (self.z_max - self.z_min + 1)\n total_outer_facets += 2 * (self.z_max - self.z_min + 1) * (self.y_max - self.y_min + 1)\n return total_outer_facets\n\n\ndef calculate_solution(input_values: InputType) -> int:\n se = 
SteamEnclosure(input_values)\n\n se.flow_steam()\n\n return get_facets_of(se.steam_cubes) - se.num_outer_steam_facets()\n\n\nif __name__ == \"__main__\":\n puzzle_input = input_reader.from_file(\"./input.txt\")\n print(calculate_solution(puzzle_input))\n","repo_name":"RohdeK/adventofcode","sub_path":"puzzles/day_18/solution_part_2.py","file_name":"solution_part_2.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37384037207","text":"from django.test import TestCase\nfrom django.contrib.auth import get_user_model\n\nfrom api.models import Haus, UAC, Device, Sensor\n\nUser = get_user_model()\n\n\n# These are quite possibly, the worst tests I've ever passed my\n# unsuspecting eyes upon. If people thought tests were hard, they haven't\n# seen someone attempt to write these.\nclass StringConversionTests(TestCase):\n @classmethod\n def setUpTestData(cls):\n super(StringConversionTests, cls).setUpTestData()\n User.objects.create(username=\"user\")\n Haus(name=\"haus\", owner=User.objects.first()).save()\n UAC(haus=Haus.objects.first(), user=User.objects.first(),\n level=UAC.LEVELS.OWNER).save()\n Device(name=\"device\", haus=Haus.objects.first()).save()\n Sensor(name=\"sensor\", device=Device.objects.first(),\n category=Sensor.CATEGORIES.PIR).save()\n\n def test_haus(self):\n haus = Haus.objects.first()\n self.assertEqual(str(haus), \"haus, owned by user\")\n self.assertEqual(repr(haus),\n \">\".format(u\"haus\"))\n\n def test_uac(self):\n uac = UAC.objects.first()\n self.assertEqual(str(uac), \"Permission of user in the Haus haus, \" +\n \"owned by user: Owner\")\n self.assertEqual(repr(uac),\n (\", >, >\").format(u\"haus\",\n \"Owner\"))\n\n def test_device(self):\n device = Device.objects.first()\n self.assertEqual(str(device), \"device\")\n self.assertEqual(repr(device),\n (\">>\").format(u\"device\", u\"haus\"))\n\n def test_sensors(self):\n sensor = Sensor.objects.first()\n self.assertEqual(str(sensor), \"sensor\")\n self.assertEqual(repr(sensor),\n (\">>>\").format(u\"sensor\",\n u\"device\",\n u\"haus\"))\n self.assertEqual(sensor.category, (\"PIR\", 2))\n","repo_name":"enterstudio/API-2","sub_path":"api/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73921141333","text":"import streamlit as st\nimport pickle\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom PIL import Image\nimport base64\nst.sidebar.title('Transaction Information')\n\nhtml_temp = \"\"\"\n
<div>\n<h1>Fraud Detection</h1>\n</div>\n
\"\"\"\n\nst.markdown(html_temp,unsafe_allow_html=True)\nst.markdown(\"

Select Your Model

\", unsafe_allow_html=True)\n\nselection = st.selectbox(\"\", [\"Logistic Regression\",\"Random Forest\"])\n\nif selection ==\"Logistic Regression\":\n\tst.write(\"You selected\", selection, \"model\")\n\tmodel = pickle.load(open('logistic_regression_model', 'rb'))\nelse:\n\tst.write(\"You selected\", selection, \"model\")\n\tmodel = pickle.load(open('random_forest_model', 'rb'))\n\nv2 = st.sidebar.slider(label=\"V2-PCA\", min_value=-10.00, max_value=15.00, step=0.01)\nv3 = st.sidebar.slider(label=\"V3-PCA\", min_value=-25.00, max_value=5.00, step=0.01)\nv4 = st.sidebar.slider(label=\"V4-PCA\", min_value=-5.00, max_value=15.00, step=0.01)\nv7 = st.sidebar.slider(label=\"V7-PCA\", min_value=-45.00, max_value=130.00, step=0.01)\nv10 = st.sidebar.slider(label=\"V10-PCA\", min_value=-20.00, max_value=5.00, step=0.01)\nv11 = st.sidebar.slider(label=\"V11-PCA\", min_value=-5.00, max_value=15.00, step=0.01)\nv12 = st.sidebar.slider(label=\"V12-PCA\", min_value=-20.00, max_value=5.00, step=0.01)\nv14 = st.sidebar.slider(label=\"V14-PCA\", min_value=-20.00, max_value=5.00, step=0.01)\nv16 = st.sidebar.slider(label=\"V16-PCA\", min_value=-15.00, max_value=20.00, step=0.01)\nv17 = st.sidebar.slider(label=\"V17-PCA\", min_value=-30.00, max_value=10.00, step=0.01)\n\ncoll_dict = {'V2-PCA':v2, 'V3-PCA':v3, 'V4-PCA':v4, 'V7-PCA':v7, 'V10-PCA':v10,\\\n\t\t\t'V11-PCA':v11, 'V12-PCA':v12, 'V14-PCA':v14, 'V16-PCA':v16, 'V17-PCA':v17}\n\ncolumns = ['v2', 'v3', 'v4', 'v7', 'v10', 'v11', 'v12', 'v14', 'v16', 'v17']\n\ndf_coll = pd.DataFrame.from_dict([coll_dict])\nuser_inputs = df_coll\n\nprediction = model.predict(user_inputs)\n\nhtml_temp = \"\"\"\n
<div>\n<h1>Fraud Detection Prediction - Group - 4</h1>\n</div>\n
\"\"\"\n\nst.markdown(\"

Transaction Information

\", unsafe_allow_html=True)\n\nst.table(df_coll)\n\nst.subheader('Click PREDICT if configuration is OK')\n\nif st.button('PREDICT'):\n\tif prediction[0]==0:\n\t\tst.success(prediction[0])\n\t\tst.success(f'Transaction is SAFE :)')\n\telif prediction[0]==1:\n\t\tst.warning(prediction[0])\n\t\tst.warning(f'ALARM! Transaction is FRAUDULENT :(')","repo_name":"KdrDrn/04_PROJECTS","sub_path":"24_C8_DS_Capstone_Projects_(Fraud_Detection)/AppStreamlit.py","file_name":"AppStreamlit.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"28798582101","text":"\"\"\"\nQuestion 19: You are required to write a program to sort the (name, age, height) tuples by\nascending order where name is string, age and height are numbers.\n\"\"\"\n\n#Sort with Operator Module Functions(itemgetter)\nfrom operator import itemgetter\npersons = []\n\nwhile True:\n\tline = raw_input(\"> \")\n\tif not line:\n\t\tbreak\n\tpersons.append(tuple(line.split(',')))\n# Sort using itemgetter.\nprint(sorted(persons, key=itemgetter(0,1,2)))\n\n\n\"\"\"\nOR \n\"\"\"\n\n# SORT USING LAMBDA FUNTION.\n# persons = []\n# while True:\n# \tline = raw_input(\"> \")\n# \tif not line:\n# \t\tbreak\n# \tpersons.append(tuple(line.split(',')))\n# # Sort using itemgetter.\n# print(sorted(persons, key=lambda x: (x[0],x[1],x[2])))\n","repo_name":"rbhati1997/test-2","sub_path":"p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3383443411","text":"\r\nclass Solution:\r\n def solve(self, board: list[list[str]]) -> None:\r\n \"\"\"\r\n Do not return anything, modify board in-place instead.\r\n \"\"\"\r\n dirs = [[1,0],[-1,0],[0,1],[0,-1]]\r\n rows, cols = len(board), len(board[0])\r\n edges = set()\r\n def dfs(row,col):\r\n nonlocal edges, rows, cols, dirs\r\n if (row < 0 or row >= rows or col < 0 or col >= cols \r\n or board[row][col] != 'O' or (row,col) in edges):\r\n return\r\n edges.add((row,col))\r\n for r,c in dirs:\r\n dfs(row+r,col+c)\r\n\r\n\r\n for row in range(rows):\r\n dfs(row,0)\r\n dfs(row,cols-1)\r\n for col in range(cols):\r\n dfs(0,col)\r\n dfs(rows-1, col)\r\n\r\n for row in range(rows):\r\n for col in range(cols):\r\n if (row,col) not in edges and board[row][col] == 'O':\r\n board[row][col] = 'X'\r\n\r\n return board\r\n\r\n\r\nprint(Solution().solve(\r\n [[\"X\",\"X\",\"X\",\"X\"],\r\n [\"X\",\"O\",\"O\",\"X\"],\r\n [\"X\",\"X\",\"O\",\"X\"],\r\n [\"X\",\"O\",\"X\",\"X\"]]\r\n))","repo_name":"rblis/leetcode","sub_path":"problems/Surrounded_Regions.py","file_name":"Surrounded_Regions.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72876230934","text":"#!/bin/python3\n\nfrom docx import Document\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\n\nimport json\nimport os\nimport requests\n\n## This is a local file containing some \"Sensitive\" data, but also the URL's Below\nimport auth\n\n# Using JIRA integration?\n# Change to False if not\nJIRA = True\n\nrootPath = \"/Volumes/HiveMind/Videos\"\n\n#### Variables ####\n## I've imported a seperate file called auth.py to keep this a bit more secure\n## Replace the F-Strings in here with your own Details, or create your own auth.py \nfacebookURL = f'{auth.facebookURL}'\ntwitterURL = f'{auth.twitterURL}'\ninstagramURL = f'{auth.instagramURL}'\n\nname = 
f'{auth.name}'\nchannelName = f'{auth.channelName}'\nchannelAddress = f'{auth.channelAddress}'\nproject = f'{auth.project}'\n\n## Greeting Definition for String Substitution\ngreeting = \"Welcome to the Hive\"\n############ JIRA ###########\n\n## JIRA URL\nif JIRA == True:\n # JIRA URL and Headers pulled from Auth.py file\n url = f'{auth.url}'\\\n # Authentication is needed in your request Header to access the JIRA API\n headers = auth.headers\nelse:\n pass\n\n## This defines the prompt we see when selecting the \"TYPE\" of project\n## Triple ' marks keeps the string open across multiple lines. This (theoretically) makes the longer strings below a lot easier to edit\ntypePrompt = '''What type of Project is this?\n1 - Getting Started\n2 - Product Review\n3 - Quick Tips\n4 - Code Review\n\n?: '''\n\n# Define video Type\n# Asks the user to select Video Type based on the above\nprojectType = 0\nwhile projectType not in [1,2,3,4]:\n ## IF Statement to determine where to save the files based on the Project Type selected above\n projectType = int(input(typePrompt))\n\n# Ask user to Enter the Project ID\nprojectID = input(f\"What is the Project ID after {project}- ? : \")\n\nif JIRA == True:\n # Pull Video Title from JIRA Project using JIRA API\n result = requests.request(\"GET\", f\"{url}/rest/api/latest/issue/{project}-{projectID}\", verify=False, headers=headers)\n # Parse the result into JSON which we can reference like dicts.\n JSONResult = json.loads(result.text)\n\n #Output Video Title to Terminal - Mostly for Error checking but kinda nice to have\n print(JSONResult['fields']['summary'])\n # Assign JIRA Project Summary to Job Title\n jobTitle = JSONResult['fields']['summary']\nelse:\n jobTitle = str(input(\"What is the title of this project?: \"))\n\n# Set Path based on inputs\nif projectType == 1:\n path = f\"{rootPath}/Getting Started Series/{project}-{projectID} - {jobTitle}\"\nelif projectType == 2:\n path = f\"{rootPath}/Product Reviews/{project}-{projectID} - {jobTitle}\"\nelif projectType == 3:\n path = f\"{rootPath}/Quick Tips/{project}-{projectID} - {jobTitle}\"\nelif projectType == 4:\n path = f\"{rootPath}/Code Review/{project}-{projectID} - {jobTitle}\"\nelse:\n projectType = int(input(typePrompt))\n\n## I render my videos out into a path called \"Render\" in a subfolder under each project.\n## All files associated with the YouTube upload go into this folder\nrenderpath = f\"{path}/Render\"\n\n## Create Project Directories\n## You could reasonably create any Directory structure you want here.\nos.mkdir(path)\n## Create Render Directory\nos.mkdir(renderpath)\n\n# TODO - Mount SMB Share\n\n\n################ TEMPLATES ################ \n\n## Templated Intro\nintroTemplate = f\"\"\"Hi, I'm {name} from {channelName} and {greeting}\n\n\nIn This Video we'll be taking a look at {jobTitle}.\n.\n.\n.\n.\n.\n.\n\nWhile I roll the intro, take a moment to Subscribe, and hit the bell icon to get notified when I release new videos each week.\n\nLet's get started!\"\"\"\n\n## Templated Outtro\nouttroTemplate = f\"\"\"That's all we have for this video and I hope it helped you in your home automation journey.\n\nBe sure to comment down below with a home automation idea you'd like to see me cover in a future video.\nDon't forget to Follow {channelName} on Twitter, Instagram and Facebook.\n\nIf you liked this video, hit the Thumbs Up button down below to give it a like.\n\nAnd if you're not already subscribed, please consider subscribing now.\nWhile you're at it, hit the bell icon to get notified 
when I release new videos each week.\n\nLastly, if you like what I'm doing here, and you want to help support the channel, there's a buy me a coffee link in the video description below.\n\nContributions through Buy me a coffee are put towards making more, and better content for you to enjoy.\n\nThanks so much for watching! I'm {name} from {channelName}\nAnd I'm looking forward to seeing you next time!\n\nBye for now!\"\"\"\n\n## Template for YouTube Description\ndescriptionTextBase = f\"\"\"{jobTitle}\n\n*** Links ***\n\n{channelName} on YouTube: {channelAddress}\n\n*** Support the Channel***\nBuy Me a Coffee: https://buymeacoffee.com/HiveMindAuto\n\n*** Find {channelName} on Social Media ***\n\nTwitter: {twitterURL}\nInstagram: {instagramURL}\nFacebook: {facebookURL}\n\n*** Affiliate Links ***\n*** These links help the channel by providing a commission on purchases\n\n\n*** TIMESTAMPS ***\n\n0:00 Intro\n\n\n*** Helpful Links ***\n\nHome Assistant: https://www.home-assistant.io/\nRaspberry Pi: https://www.raspberrypi.org/\nBalena Etcher: https://www.balena.io/etcher/\n\nHome Assistant for iOS: https://apple.co/34JATce\nHome Assistant for Android: https://bit.ly/30VUsNh\n\n*** CREDITS ***\n\nMusic: https://www.purple-planet.com\n\"\"\"\n\n########################### Create the WORD Document ############################\n# Instantiate Document\ndocument = Document()\n\n# Insert Header\nheading = document.add_heading(f\"{project}-{projectID}:\\n{jobTitle}\", 0)\nheading.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n# Insert Intro Text Block\nparagraph = document.add_paragraph(\"\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n## This is Where the Template gets inserted\nparagraph = document.add_paragraph(introTemplate)\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\nparagraph = document.add_paragraph(\"\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n## All this other stuff is just getting the document laid out in a useful way\nparagraph = document.add_paragraph(\"\\n\\n\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\nparagraph = document.add_paragraph(\"\\n.\\n.\\n\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n# Insert Summary Text Block\nparagraph = document.add_paragraph(\"\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\nparagraph = document.add_paragraph(\".\\n.\\n.\\n.\\n.\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n# Insert Outtro Text Block \nparagraph = document.add_paragraph(\"\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n## Actual Outtro Template inserts here\nparagraph = document.add_paragraph(outtroTemplate)\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\nparagraph = document.add_paragraph()\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\nparagraph = document.add_paragraph(\"\")\nparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n# Save .docx File in project path\ndocument.save(f\"{path}/{project}-{projectID} - {jobTitle}.docx\")\n#############################################################################################\n\n\n######## Create Text File with \"Description\" Template\n# Generate YouTube Description Template\ndescriptionFile = open(f\"{renderpath}/{project}-{projectID}-{jobTitle} - YouTube Description.txt\", \"w\")\n## Write Description Text out to file\ndescriptionFile.write(descriptionTextBase)\n## Close the description File\ndescriptionFile.close()\n\n################################### END ################################### 
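\n\n# Illustrative sketch only (not in the original script): a tiny helper that\n# would fold the repeated add_paragraph / CENTER-alignment pattern above into\n# one call. The name 'centered_paragraph' is hypothetical.\ndef centered_paragraph(doc, text=\"\"):\n    p = doc.add_paragraph(text)\n    p.alignment = WD_ALIGN_PARAGRAPH.CENTER\n    return p\n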
","repo_name":"HiveMindAutomation/VideoHelper","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5866958939","text":"import collections\nimport statistics\n\nimport numpy as np\n\nfrom keras_tuner import backend\nfrom keras_tuner import errors\nfrom keras_tuner.backend import config\nfrom keras_tuner.backend import keras\nfrom keras_tuner.engine import hyperparameters as hp_module\nfrom keras_tuner.engine import objective as obj_module\n\n\nclass TunerCallback(keras.callbacks.Callback):\n def __init__(self, tuner, trial):\n super().__init__()\n self.tuner = tuner\n self.trial = trial\n\n def on_epoch_begin(self, epoch, logs=None):\n self.tuner.on_epoch_begin(self.trial, self.model, epoch, logs=logs)\n\n def on_batch_begin(self, batch, logs=None):\n self.tuner.on_batch_begin(self.trial, self.model, batch, logs)\n\n def on_batch_end(self, batch, logs=None):\n self.tuner.on_batch_end(self.trial, self.model, batch, logs)\n\n def on_epoch_end(self, epoch, logs=None):\n self.tuner.on_epoch_end(self.trial, self.model, epoch, logs=logs)\n\n\nclass SaveBestEpoch(keras.callbacks.Callback):\n \"\"\"A Keras callback to save the model weights at the best epoch.\n\n Args:\n objective: An `Objective` instance.\n filepath: String. The file path to save the model weights.\n \"\"\"\n\n def __init__(self, objective, filepath):\n super().__init__()\n self.objective = objective\n self.filepath = filepath\n if self.objective.direction == \"max\":\n self.best_value = float(\"-inf\")\n else:\n self.best_value = float(\"inf\")\n\n def on_epoch_end(self, epoch, logs=None):\n if not self.objective.has_value(logs):\n # Save on every epoch if metric value is not in the logs. 
Either no\n # objective is specified, or objective is computed and returned\n # after `fit()`.\n self._save_model()\n return\n current_value = self.objective.get_value(logs)\n if self.objective.better_than(current_value, self.best_value):\n self.best_value = current_value\n self._save_model()\n\n def _save_model(self):\n if config.backend() != \"tensorflow\":\n self.model.save_weights(self.filepath)\n return\n # Create temporary saved model files on non-chief workers.\n write_filepath = backend.io.write_filepath(\n self.filepath, self.model.distribute_strategy\n )\n self.model.save_weights(write_filepath)\n # Remove temporary saved model files on non-chief workers.\n backend.io.remove_temp_dir_with_filepath(\n write_filepath, self.model.distribute_strategy\n )\n\n\ndef average_metrics_dicts(metrics_dicts):\n \"\"\"Averages the metrics dictionaries to one metrics dictionary.\"\"\"\n metrics = collections.defaultdict(list)\n for metrics_dict in metrics_dicts:\n for metric_name, metric_value in metrics_dict.items():\n metrics[metric_name].append(metric_value)\n averaged_metrics = {\n metric_name: np.mean(metric_values)\n for metric_name, metric_values in metrics.items()\n }\n\n return averaged_metrics\n\n\ndef _get_best_value_and_best_epoch_from_history(history, objective):\n # A dictionary to record the metric values through epochs.\n # Usage: epoch_metric[epoch_number][metric_name] == metric_value\n epoch_metrics = collections.defaultdict(dict)\n for metric_name, epoch_values in history.history.items():\n for epoch, value in enumerate(epoch_values):\n epoch_metrics[epoch][metric_name] = value\n best_epoch = 0\n for epoch, metrics in epoch_metrics.items():\n objective_value = objective.get_value(metrics)\n # Support multi-objective.\n if objective.name not in metrics:\n metrics[objective.name] = objective_value\n best_value = epoch_metrics[best_epoch][objective.name]\n if objective.better_than(objective_value, best_value):\n best_epoch = epoch\n return epoch_metrics[best_epoch], best_epoch\n\n\ndef convert_to_metrics_dict(results, objective):\n \"\"\"Convert any supported results type to a metrics dictionary.\"\"\"\n # List of multiple exectuion results to be averaged.\n # Check this case first to deal each case individually to check for errors.\n if isinstance(results, list):\n return average_metrics_dicts(\n [convert_to_metrics_dict(elem, objective) for elem in results]\n )\n\n # Single value.\n if isinstance(results, (int, float, np.floating)):\n return {objective.name: float(results)}\n\n # A dictionary.\n if isinstance(results, dict):\n return results\n\n # A History.\n if isinstance(results, keras.callbacks.History):\n best_value, _ = _get_best_value_and_best_epoch_from_history(\n results, objective\n )\n return best_value\n\n\ndef validate_trial_results(results, objective, func_name):\n if isinstance(results, list):\n for elem in results:\n validate_trial_results(elem, objective, func_name)\n return\n\n # Single value.\n if isinstance(results, (int, float, np.floating)):\n return\n\n # None\n if results is None:\n raise errors.FatalTypeError(\n f\"The return value of {func_name} is None. \"\n \"Did you forget to return the metrics? \"\n )\n\n # objective left unspecified,\n # and objective value is not a single float.\n if isinstance(objective, obj_module.DefaultObjective) and not (\n isinstance(results, dict) and objective.name in results\n ):\n raise errors.FatalTypeError(\n f\"Expected the return value of {func_name} to be \"\n \"a single float when `objective` is left unspecified. 
\"\n f\"Recevied return value: {results} of type {type(results)}.\"\n )\n\n # A dictionary.\n if isinstance(results, dict):\n if objective.name not in results:\n raise errors.FatalValueError(\n f\"Expected the returned dictionary from {func_name} to have \"\n f\"the specified objective, {objective.name}, \"\n \"as one of the keys. \"\n f\"Received: {results}.\"\n )\n return\n\n # A History.\n if isinstance(results, keras.callbacks.History):\n return\n\n # Other unsupported types.\n raise errors.FatalTypeError(\n f\"Expected the return value of {func_name} to be \"\n \"one of float, dict, keras.callbacks.History, \"\n \"or a list of one of these types. \"\n f\"Recevied return value: {results} of type {type(results)}.\"\n )\n\n\ndef get_best_step(results, objective):\n # Average the best epochs if multiple executions.\n if isinstance(results, list):\n return int(\n statistics.mean(\n [get_best_step(elem, objective) for elem in results]\n )\n )\n\n # A History.\n if isinstance(results, keras.callbacks.History):\n _, best_epoch = _get_best_value_and_best_epoch_from_history(\n results, objective\n )\n return best_epoch\n\n return 0\n\n\ndef convert_hyperparams_to_hparams(hyperparams, hparams_api):\n \"\"\"Converts KerasTuner HyperParameters to TensorBoard HParams.\"\"\"\n hparams = {}\n for hp in hyperparams.space:\n hparams_value = {}\n try:\n hparams_value = hyperparams.get(hp.name)\n except ValueError: # pragma: no cover\n continue # pragma: no cover\n\n hparams_domain = {}\n if isinstance(hp, hp_module.Choice):\n hparams_domain = hparams_api.Discrete(hp.values)\n elif isinstance(hp, hp_module.Int):\n if hp.step is not None and hp.step != 1:\n # Note: `hp.max_value` is inclusive, unlike the end index\n # of Python `range()`, which is exclusive\n values = list(range(hp.min_value, hp.max_value + 1, hp.step))\n hparams_domain = hparams_api.Discrete(values)\n else:\n hparams_domain = hparams_api.IntInterval(\n hp.min_value, hp.max_value\n )\n elif isinstance(hp, hp_module.Float):\n if hp.step is not None:\n # Note: `hp.max_value` is inclusive, unlike the end index\n # of Numpy's arange(), which is exclusive\n values = np.arange(\n hp.min_value, hp.max_value + 1e-7, step=hp.step\n ).tolist()\n hparams_domain = hparams_api.Discrete(values)\n else:\n hparams_domain = hparams_api.RealInterval(\n hp.min_value, hp.max_value\n )\n elif isinstance(hp, hp_module.Boolean):\n hparams_domain = hparams_api.Discrete([True, False])\n elif isinstance(hp, hp_module.Fixed):\n hparams_domain = hparams_api.Discrete([hp.value])\n else:\n raise ValueError( # pragma: no cover\n f\"`HyperParameter` type not recognized: {hp}\"\n )\n\n hparams_key = hparams_api.HParam(hp.name, hparams_domain)\n hparams[hparams_key] = hparams_value\n\n return hparams\n","repo_name":"keras-team/keras-tuner","sub_path":"keras_tuner/engine/tuner_utils.py","file_name":"tuner_utils.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","stars":2783,"dataset":"github-code","pt":"67"} +{"seq_id":"37199643159","text":"#!/usr/bin/env python\n\n# import game engine modules\nfrom bge import render\nfrom bge import logic\n# import stand alone modules\nimport bgl\nimport blf\nimport time\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\ndef init():\n\t# create a new font object, use external ttf file\n\tfont_path = logic.expandPath('//assets/fonts/LiberationMono-Regular.ttf')\n\t# store the font indice - to use later\n\tlogic.font_id = blf.load(font_path)\n\n\t# set the font drawing routine 
to run every frame\n\tscene = logic.getCurrentScene()\n\tscene.post_draw = [write]\n\n\tlogic.text_buffer = []\n\ndef show_das_points():\n\t''' expects that opengl mode is setup corrently '''\n\twidth = render.getWindowWidth()\n\theight = render.getWindowHeight()\n\n\ttext = 'Points: '+str(logic.getCurrentScene().objects[\"PinsRoof\"][\"points\"])\n\n\t# BLF drawing routine\n\tfont_id = logic.font_id\n\t#blf.position(font_id, width - 160, height - 24, 0) \n\tblf.position(font_id, width * 0.2, height * 0.2, 0)\n\tblf.size(font_id, 24, 72)\n\tblf.draw(font_id, text)\n\ndef write():\n\t''' Uses text buffer for rendering stuff on screen\n\t\n\tText buffer can be found from logic.text_buffer\n\tEach value in buffer is a hash, containing text, timeout and start_time\n\tstart time is set by me, not you\n\t'''\n\n\t\"\"\"write on screen\"\"\"\n\twidth = render.getWindowWidth()\n\theight = render.getWindowHeight()\n\n\t# OpenGL setup\n\tbgl.glMatrixMode(bgl.GL_PROJECTION)\n\tbgl.glLoadIdentity()\n\tbgl.gluOrtho2D(0, width, 0, height)\n\tbgl.glMatrixMode(bgl.GL_MODELVIEW)\n\tbgl.glLoadIdentity()\n\n\tshow_das_points()\n\n\tif logic.text_buffer == None or len(logic.text_buffer) == 0:\n\t\treturn\n\t\n\ttext = ''\n\t\n\t# has the current value start time\n\tif 'start_time' in logic.text_buffer[0]:\n\t\tstart_time = logic.text_buffer[0]['start_time']\n\t\tcurrent_time = current_milli_time()\n\t\ttimeout = logic.text_buffer[0]['timeout']\n\t\tif start_time + timeout < current_milli_time():\n\t\t\tlogic.text_buffer.pop(0)\n\t\telse:\n\t\t\ttext = logic.text_buffer[0]['text']\n\telse:\n\t\tlogic.text_buffer[0]['start_time'] = current_milli_time()\n\t\ttext = logic.text_buffer[0]['text']\n\n\t# BLF drawing routine\n\tfont_id = logic.font_id\n\tblf.position(font_id, (width * 0.25), (height * 0.5), 0)\n\tblf.size(font_id, 24, 72)\n\tblf.draw(font_id, text)\n\ninit()\n\nlogic.text_buffer.append({'text' : 'Left-click to grab objects', 'timeout': 3000})\nlogic.text_buffer.append({'text' : 'Right-click to throw dem', 'timeout': 3000})","repo_name":"leomuona/RBS2013","sub_path":"assets/scripts/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38038807492","text":"#! 
/usr/bin/env python3\n# datetime_stopwatch.py - Create a stopwatch using datetime\n\nfrom datetime import datetime\nprint(\"\"\"Let's see how long it takes for you to type your full name!\nThe timer is starting now.\"\"\")\n\nnow = datetime.now()\nto_do = input('Type in your full name: ')\nend = datetime.now()\ntotal_time = end - now\nprint('It took you ' + str((total_time).seconds) + ' seconds to type your full name.')\n","repo_name":"kmd0193/100-Days-of-Code","sub_path":"datetime_stopwatch.py","file_name":"datetime_stopwatch.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72241499414","text":"from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom .viewsets import UserViewSet, MessageViewSet, ChatViewSet, CreateChatFromTgViewSet, \\\n MessageCreateWithUsernameViewSet, AddUserToChatViewSet, FromUsernameToUserViewset, FromTgIdToChats\n\nrouter = DefaultRouter()\n\nrouter.register('user', UserViewSet, basename='user')\nrouter.register('message', MessageViewSet, basename='message')\nrouter.register('chat', ChatViewSet, basename='chat')\nrouter.register('chat-from-username', CreateChatFromTgViewSet, basename='chat-from-username')\nrouter.register('message-from-username', MessageCreateWithUsernameViewSet, basename='message-from-username')\nrouter.register('add-viewer-to-chat', AddUserToChatViewSet, basename='add-user-to-chat')\nrouter.register('from-username-to-user', FromUsernameToUserViewset, basename='from-username-to-user')\nrouter.register('from-tg-id-to-admin-chats', FromTgIdToChats, basename='from-tg-id-to-admin-chats')\nurlpatterns = []\nurlpatterns.extend(router.urls)\n","repo_name":"evgen-app/hackbot","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15059627822","text":"import numpy as np\nimport json\nfrom os import path\nfrom .transform3d import *\nfrom .utils3d import *\n\nclass CameraIntrinsicSettings(object):\n DEFAULT_ZNEAR = 1\n # DEFAULT_ZFAR = 100000.0\n DEFAULT_ZFAR = DEFAULT_ZNEAR\n \n def __init__(self,\n res_width = 640.0, res_height = 480.0,\n fx = 640.0, fy = 640.0,\n cx = 320.0, cy = 240.0,\n projection_matrix = None):\n self.res_width = res_width\n self.res_height = res_height\n self.fx = fx\n self.fy = fy\n self.cx = cx\n self.cy = cy\n\n self.znear = self.DEFAULT_ZNEAR\n self.zfar = self.DEFAULT_ZFAR\n\n self.projection_matrix = projection_matrix\n\n @staticmethod\n def from_perspective_fov_horizontal(res_width = 640.0, res_height = 480.0, hfov = 90.0):\n '''\n Create camera intrinsics settings from 3d rendering horizontal field of view\n '''\n cx = res_width / 2.0\n cy = res_height / 2.0\n fx = cx / np.tan(np.deg2rad(hfov) / 2.0)\n fy = fx\n\n # print(\"CameraIntrinsicSettings: res_width = {} - res_height = {} - hfov = {} - cx = {} - cy = {} - fx = {} - fy = {}\".format(\n # res_width, res_height, hfov, cx, cy, fx, fy))\n\n new_cam_instrinsics = CameraIntrinsicSettings(res_width, res_height, fx, fy, cx, cy)\n return new_cam_instrinsics\n\n @staticmethod\n def from_json_object(json_obj):\n intrinsic_settings = json_obj[\"intrinsic_settings\"] if (\"intrinsic_settings\" in json_obj) else None\n if (intrinsic_settings is None):\n return None\n\n print(\"intrinsic_settings: {}\".format(intrinsic_settings))\n\n try:\n captured_image_size = json_obj['captured_image_size']\n res_width = 
captured_image_size['width']\n res_height = captured_image_size['height']\n except KeyError:\n print(\"*** Error ***: 'captured_image_size' is not present in camera settings file. Using default 640 x 480.\")\n res_width = 640\n res_height = 480\n\n fx = intrinsic_settings['fx'] if ('fx' in intrinsic_settings) else 640.0\n fy = intrinsic_settings['fy'] if ('fy' in intrinsic_settings) else 640.0\n cx = intrinsic_settings['cx'] if ('cx' in intrinsic_settings) else (res_width / 2.0)\n cy = intrinsic_settings['cy'] if ('cy' in intrinsic_settings) else (res_height / 2.0)\n\n projection_matrix_json = json_obj[\"cameraProjectionMatrix\"] if (\"cameraProjectionMatrix\" in json_obj) else None\n projection_matrix = None\n if (not projection_matrix_json is None):\n projection_matrix = Matrix44(projection_matrix_json)\n projection_matrix[2, 0] = -projection_matrix[2, 0]\n projection_matrix[2, 1] = -projection_matrix[2, 1]\n projection_matrix[2, 3] = -projection_matrix[2, 3]\n projection_matrix[3, 2] = -projection_matrix[3, 2]\n\n # print(\"projection_matrix_json: {}\".format(projection_matrix_json))\n print(\"projection_matrix: {}\".format(projection_matrix))\n \n return CameraIntrinsicSettings(res_width, res_height, fx, fy, cx, cy, projection_matrix)\n\n @staticmethod\n def from_json_file(json_file_path):\n if (path.exists(json_file_path)):\n with open(json_file_path, 'r') as json_file:\n json_obj = json.load(json_file)\n if ('camera_settings' in json_obj):\n viewpoint_list = json_obj['camera_settings']\n # TODO: Need to parse all the viewpoints information, right now we only parse the first viewpoint\n viewpoint_obj = viewpoint_list[0]\n return CameraIntrinsicSettings.from_json_object(viewpoint_obj)\n return None\n\n def get_intrinsic_matrix(self):\n \"\"\"\n Get the camera intrinsic matrix as numpy array\n \"\"\"\n intrinsic_matrix = np.array([\n [self.fx, 0, self.cx],\n [0, self.fy, self.cy],\n [0, 0, 1.0]\n ], dtype='double')\n return intrinsic_matrix\n\n def get_projection_matrix(self):\n if (self.projection_matrix is None):\n self.calculate_projection_matrix()\n\n return self.projection_matrix\n\n def calculate_projection_matrix(self):\n zdiff = float(self.zfar - self.znear)\n a = (2.0 * self.fx) / float(self.res_width)\n b = (2.0 * self.fy) / float(self.res_height)\n # print('a: {} - b: {}'.format(a, b))\n c = -self.znear / zdiff if (zdiff > 0) else 0\n d = (self.znear * self.zfar) / zdiff if (zdiff > 0) else (-self.znear)\n c1 = 1.0 - (2.0 * self.cx) / self.res_width\n c2 = (2.0 * self.cy) / self.res_height - 1.0\n\n self.projection_matrix = Matrix44([\n [a, 0, 0, 0],\n [0, b, 0, 0],\n [c1, c2, c, d],\n [0, 0, -1.0, 0]\n ])\n\n def str():\n return \"{}\".format(self.get_intrinsic_matrix())","repo_name":"NVIDIA/Dataset_Utilities","sub_path":"nvdu/core/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"67"} +{"seq_id":"19917759610","text":"import bpy\nimport math\nimport bmesh\ngesture = 1\n\n\ndef bmesh_copy_from_object(obj, transform=True, triangulate=True, apply_modifiers=False):\n \"\"\"Returns a transformed, triangulated copy of the mesh\"\"\"\n\n assert obj.type == 'MESH'\n\n if apply_modifiers and obj.modifiers:\n import bpy\n depsgraph = bpy.context.evaluated_depsgraph_get()\n obj_eval = obj.evaluated_get(depsgraph)\n me = obj_eval.to_mesh()\n bm = bmesh.new()\n bm.from_mesh(me)\n obj_eval.to_mesh_clear()\n else:\n me = obj.data\n if obj.mode == 'EDIT':\n bm_orig = 
bmesh.from_edit_mesh(me)\n bm = bm_orig.copy()\n else:\n bm = bmesh.new()\n bm.from_mesh(me)\n\n # TODO. remove all customdata layers.\n # would save ram\n\n if transform:\n bm.transform(obj.matrix_world)\n\n if triangulate:\n bmesh.ops.triangulate(bm, faces=bm.faces)\n\n return bm\n\n#Setting constraints of hand and resets rotation\n#bpy.ops.object.h_constraints()\n\nview_layer = bpy.context.view_layer\ncontext = bpy.context\nscene = context.scene\n#Mesh Objects\nlh_mesh = bpy.data.objects['Hand.L']\n#rh_mesh = bpy.data.objects['Hand.R']\nsphere = bpy.data.objects['Sphere']\nbm_joy = bmesh.new()\nbm_left = bmesh.new()\nbm_right = bmesh.new()\n\n#Grab Mesh Data\ncontext.active_object.select_set(False)\nview_layer.objects.active = lh_mesh\nbm_left.from_mesh(context.object.data)\nlh_volume = bm_left.calc_volume() - 0.00733 #approximate forearm volume\nprint('LH Mesh Volume = ' + str(lh_volume))\n#bm_left.faces.ensure_lookup_table() #allows iteration of faces\n#verts = bm_left.faces[-1].verts[:]\n#print(verts)\n#for v in bm_left.verts:\n# print(v.co)\n#print(bm_left.verts.calc_edge_angle())\n\n#context.active_object.select_set(False)\n#view_layer.objects.active = rh_mesh\n#bm_right.from_mesh(context.object.data)\n\n#print('RH Mesh Volume = ' + str(bm_right.calc_volume()))\n#so = context.active_object\n#verts = so.data.vertices\n#edges = so.data.edges\n#faces = so.data.polygons\n\n#Armature Objects\nhand_left = bpy.data.objects['Hand_Left']\n#hand_right = bpy.data.objects['Hand_Right']\n\n\n\n\n#Thumbs up pose\nif gesture == 0:\n context.active_object.select_set(False)\n view_layer.objects.active = hand_left\n pbone = context.object.pose.bones['index control']\n pbone.rotation_euler[0] = math.radians(0)\n pbone = context.object.pose.bones['Major control']\n pbone.rotation_euler[0] = math.radians(0)\n pbone = context.object.pose.bones['Ring control']\n pbone.rotation_euler[0] = math.radians(0)\n pbone = context.object.pose.bones['Pinky control']\n pbone.rotation_euler[0] = math.radians(0)\n pbone = context.object.pose.bones['Bone.017']\n pbone.rotation_euler[0] = math.radians(27.5)\n pbone = context.object.pose.bones['Bone.016']\n pbone.rotation_euler[0] = math.radians(1.93)\n pbone.rotation_euler[1] = math.radians(-12.1)\n pbone.rotation_euler[2] = math.radians(-7.06)\n pbone = context.object.pose.bones['Bone.020']\n pbone.rotation_euler[0] = math.radians(0)\n pbone.rotation_euler[1] = math.radians(80.5)\n pbone.rotation_euler[2] = math.radians(0)\nelif gesture == 1:\n context.active_object.select_set(False)\n view_layer.objects.active = hand_left\n pbone = context.object.pose.bones['index control']\n pbone.rotation_euler[0] = math.radians(-80)\n pbone = context.object.pose.bones['Major control']\n pbone.rotation_euler[0] = math.radians(-80)\n pbone = context.object.pose.bones['Ring control']\n pbone.rotation_euler[0] = math.radians(-80)\n pbone = context.object.pose.bones['Pinky control']\n pbone.rotation_euler[0] = math.radians(-80)\n pbone = context.object.pose.bones['Bone.017']\n pbone.rotation_euler[0] = math.radians(27.5)\n pbone = context.object.pose.bones['Bone.016']\n pbone.rotation_euler[0] = math.radians(1.93)\n pbone.rotation_euler[1] = math.radians(-12.1)\n pbone.rotation_euler[2] = math.radians(-7.06)\n pbone = context.object.pose.bones['Bone.020']\n pbone.rotation_euler[0] = math.radians(0)\n pbone.rotation_euler[1] = math.radians(80.5)\n pbone.rotation_euler[2] = math.radians(0)\n\n#overlaying Sphere on top of 
hand\ncontext.active_object.select_set(False)\nview_layer.objects.active = sphere\nso = bpy.context.active_object\nm_sphere = bmesh_copy_from_object(sphere, apply_modifiers=True)\nvolume = m_sphere.calc_volume()\nprint('Sphere Mesh Volume = ' + str(volume))\n#so.location = (0.87 -0.10, 0)\n\nsphere = scene.objects.get(\"Sphere\")\nbpy.ops.object.modifier_add(type = 'BOOLEAN')\ncontext.object.modifiers[\"Boolean\"].operation = 'INTERSECT' \nbpy.context.object.modifiers[\"Boolean\"].object = lh_mesh\n\ncontext.active_object.select_set(False)\nview_layer.objects.active = sphere\nm_sphere = bmesh_copy_from_object(sphere, apply_modifiers=True)\nvolume = m_sphere.calc_volume()\nm_sphere.free()\nprint('Sphere Mesh Volume = ' + str(volume))\nerror = volume/lh_volume * 100\nprint('Percentage Similarity by Volume = ' + str(error))\n#Account for forearm error when only using hands\n\n","repo_name":"a-chen711/Hand-Interfaces","sub_path":"Blender Files/Volume of Intersection/initial_vol_test.py","file_name":"initial_vol_test.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16695024077","text":"\"\"\"\nFind the value function associated with a policy. Based on Sutton & Barto, 1998.\n\nMatthew Alger, 2015\nmatthew.alger@anu.edu.au\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport h5py\nfrom pathlib import Path\nimport math\n\n\ndef value(policy, n_states, transition_probabilities, reward, discount,\n threshold=1e-2):\n \"\"\"\n Find the value function associated with a policy.\n\n policy: List of action ints for each state.\n n_states: Number of states. int.\n transition_probabilities: Function taking (state, action, state) to\n transition probabilities.\n reward: Vector of rewards for each state.\n discount: MDP discount factor. float.\n threshold: Convergence threshold, default 1e-2. float.\n -> Array of values for each state\n \"\"\"\n v = np.zeros(n_states)\n\n diff = float(\"inf\")\n while diff > threshold:\n diff = 0\n for s in range(n_states):\n vs = v[s]\n a = policy[s]\n v[s] = sum(transition_probabilities[s, k, a] *\n (reward[k] + discount * v[k])\n for k in range(n_states))\n diff = max(diff, abs(vs - v[s]))\n\n return v\n\ndef optimal_value(n_states, n_actions, transition_probabilities, reward,\n discount, threshold=1e-2):\n \"\"\"\n Find the optimal value function.\n\n n_states: Number of states. int.\n n_actions: Number of actions. int.\n transition_probabilities: Function taking (state, action, state) to\n transition probabilities.\n reward: Vector of rewards for each state.\n discount: MDP discount factor. float.\n threshold: Convergence threshold, default 1e-2. 
float.\n -> Array of values for each state\n \"\"\"\n #print('computing optimal value')\n v = np.zeros(n_states)\n\n diff = float(\"inf\")\n while diff > threshold:\n diff = 0\n for s in range(n_states):\n max_v = float(\"-inf\")\n for a in range(n_actions):\n tp = np.matrix(transition_probabilities[s, a, :])\n #print('tp:',tp.shape)\n v_prod = np.matrix(discount*v)\n #print('reward.shape:',((reward.shape)))\n r = np.add(reward,v_prod)\n #print('v_prod.shape:',((v_prod.shape)))\n #print('r.shape:',((r.shape)))\n mul=np.matmul(tp, r.T)\n #print('mul:',mul.shape)\n max_v = max(max_v,mul)\n\n new_diff = abs(v[s] - max_v)\n if new_diff > diff:\n diff = new_diff\n v[s] = max_v\n #print('Done computing optimal value')\n return v\n \"\"\"\n print('computing optimal value')\n #sess = tf.InteractiveSession()\n \n #init=tf.global_variables_initializer()\n\n #sess.run(init)\n print('Using GPU:',tf.test.is_gpu_available())\n v = tf.Variable(tf.zeros([n_states]))\n diff = float(\"inf\")\n while diff > threshold:\n diff = 0\n for s in range(n_states):\n print('Current state:',s)\n max_v = float(\"-inf\")\n for a in range(n_actions):\n tp = tf.convert_to_tensor(transition_probabilities[s, a, :])\n #print('tp:',(tf.transpose(tp)).shape)\n #v_prod = tf.scalar_mul(discount,v)\n #print('reward.shape:',((reward.shape)))\n r = tf.add(tf.convert_to_tensor(reward),tf.scalar_mul(discount,v))\n #print('v_prod.shape:',((v_prod.shape)))\n #print('r.shape:',((r.shape)))\n mul=tf.tensordot(tp,r,1)\n init = tf.global_variables_initializer()\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n sess.run(init)\n res = mul.eval(session=sess)\n #print('res:',res,s,a)\n max_v = max(max_v,res)\n\n new_diff = abs(sess.run(v[s]) - max_v)\n if new_diff > diff:\n diff = new_diff\n v = tf.scatter_update(v, [s], [max_v])\n print('Done computing optimal value')\n optimal_v = sess.run(v)\n sess.close()\n hf = h5py.File('Results/data1_200.h5', 'w') \n hf.create_dataset('dataset_1', data=self.transition_probability, compression=\"gzip\", compression_opts=9)\n hf.close()\n return optimal_v\n \"\"\"\n\ndef find_policy(n_states, n_actions, transition_probabilities, reward, discount,\n threshold=1e-2, v=None, stochastic=True):\n \"\"\"\n Find the optimal policy.\n\n n_states: Number of states. int.\n n_actions: Number of actions. int.\n transition_probabilities: Function taking (state, action, state) to\n transition probabilities.\n reward: Vector of rewards for each state.\n discount: MDP discount factor. float.\n threshold: Convergence threshold, default 1e-2. float.\n v: Value function (if known). Default None.\n stochastic: Whether the policy should be stochastic. 
Default True.\n -> Action probabilities for each state or action int for each state\n (depending on stochasticity).\n \"\"\"\n \n if v is None:\n v = optimal_value(n_states, n_actions, transition_probabilities, reward,\n discount, threshold)\n #print(v.shape)\n if stochastic:\n # Get Q using equation 9.2 from Ziebart's thesis.\n Q = np.zeros((n_states, n_actions))\n for i in range(n_states):\n for j in range(n_actions):\n p = np.matrix(transition_probabilities[i,j,:])\n prod=np.matrix(discount*v)\n #print('prod.shape:',prod.shape)\n val= reward + prod\n #print('val.shape:',val.shape)\n #nval=np.sum(reward,prod)\n #print(\"After computing reward and discount:\",nval.shape)\n Q[i, j] = p.dot(val.T)\n Q -= Q.max(axis=1).reshape((n_states, 1)) # For numerical stability.\n Q = np.exp(Q)/np.exp(Q).sum(axis=1).reshape((n_states, 1))\n #print('Q.shape',Q.shape)\n return Q\n\n def _policy(s):\n return max(range(n_actions),\n key=lambda a: sum(transition_probabilities[s, a, k] *\n (reward[k] + discount * v[k])\n for k in range(n_states)))\n policy = np.array([_policy(s) for s in range(n_states)])\n return policy\n\ndef value_iteration_1(P_a, rewards, gamma, error=0.01, deterministic=False):\n N_STATES,N_ACTIONS, _ = np.shape(P_a)\n values = np.zeros([N_STATES])\n # estimate values\n #while True:\n values_tmp = values.copy()\n for s in range(N_STATES):\n #print(s)\n for a in range(N_ACTIONS):\n v_s = []\n prod = gamma*values_tmp\n add = rewards + (np.matrix(prod)).T\n values[s] = np.amax(np.matmul(P_a[s,a,:],add))\n \n #values[s] = max([sum([P_a[s, a,s1]*(rewards[s] + gamma*values_tmp[s1]) for s1 in range(N_STATES)]) for a in range(N_ACTIONS)])\n e = max([abs(values[s] - values_tmp[s]) for s in range(N_STATES)])\n #print('current error:',e)\n if deterministic:\n # generate deterministic policy\n policy = np.zeros([N_STATES])\n for s in range(N_STATES):\n policy[s] = np.argmax([sum([P_a[s, s1, a]*(rewards[s]+gamma*values[s1]) \n for s1 in range(N_STATES)]) \n for a in range(N_ACTIONS)])\n return policy \n else:\n # generate stochastic policy\n policy = np.zeros([N_STATES, N_ACTIONS])\n for s in range(N_STATES):\n for a in range(N_ACTIONS):\n #v_s = np.array([sum([P_a[s, a,s1]*(rewards[s] + gamma*values[s1]) for s1 in range(N_STATES)]) for a in range(N_ACTIONS)])\n prod = gamma*values\n #print(rewards.shape,np.matrix(prod).shape)\n add = rewards + (np.matrix(prod)).T\n #print(P_a[s,a,:].shape,add.shape)\n policy[s,a] = np.matmul(P_a[s,a,:],add)\n #print(policy.shape)\n policy[s,:] = np.exp(policy[s,:])/np.exp(1+policy[s,:]).sum(axis=0)\n return policy\n\nif __name__ == '__main__':\n # Quick unit test using gridworld.\n import mdp.gridworld as gridworld\n gw = gridworld.Gridworld(3, 0.3, 0.9)\n v = value([gw.optimal_policy_deterministic(s) for s in range(gw.n_states)],\n gw.n_states,\n gw.transition_probability,\n [gw.reward(s) for s in range(gw.n_states)],\n gw.discount)\n assert np.isclose(v,\n [5.7194282, 6.46706692, 6.42589811,\n 6.46706692, 7.47058224, 7.96505174,\n 6.42589811, 7.96505174, 8.19268666], 1).all()\n opt_v = optimal_value(gw.n_states,\n gw.n_actions,\n gw.transition_probability,\n [gw.reward(s) for s in range(gw.n_states)],\n gw.discount)\n assert np.isclose(v, opt_v).all()\n","repo_name":"krishnanpooja/IRL_Rolling_Pin_Approach","sub_path":"irl/value_iteration.py","file_name":"value_iteration.py","file_ext":"py","file_size_in_byte":9484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18409371841","text":"from pymongo import 
MongoClient\nfrom datetime import datetime\nclient = MongoClient()\n\ndb = client.Spider\n\n\nresult = db.publication.insert_one(\n {\n \"address\": {\n \"street\": \"2 Avenue\",\n \"zipcode\": \"10075\",\n \"building\": \"1480\",\n \"coord\": [-73.9557413, 40.7720266]\n }\n }\n)","repo_name":"hinedavid/Google_Scholar_Spider","sub_path":"pruebamogo.py","file_name":"pruebamogo.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35653743200","text":"\"\"\" \n\"\"\"\nimport datetime\nfrom typing import List, Optional, Tuple, Union, Dict, Any\n\n\ndef date_now(datenow: Union[str, int, float, datetime.datetime] = \"\", fmt=\"%Y%m%d\",\n add_days=0, add_mins=0, add_hours=0, add_months=0, add_weeks=0,\n timezone='Asia/Tokyo', fmt_input=\"%Y-%m-%d\",\n force_dayofmonth=-1, # 01 first of month\n force_dayofweek=-1,\n force_hourofday=-1,\n force_minofhour=-1,\n returnval='str,int,datetime/unix'):\n \"\"\" One liner for date Formatter\n Doc::\n\n datenow: 2012-02-12 or \"\" emptry string for today's date.\n fmt: output format # \"%Y-%m-%d %H:%M:%S %Z%z\"\n date_now(timezone='Asia/Tokyo') --> \"20200519\" ## Today date in YYYMMDD\n date_now(timezone='Asia/Tokyo', fmt='%Y-%m-%d') --> \"2020-05-19\"\n date_now('2021-10-05',fmt='%Y%m%d', add_days=-5, returnval='int') --> 20211001\n date_now(20211005, fmt='%Y-%m-%d', fmt_input='%Y%m%d', returnval='str') --> '2021-10-05'\n date_now(20211005, fmt_input='%Y%m%d', returnval='unix') -->\n\n integer, where Monday is 0 and Sunday is 6.\n\n\n date_now(1634324632848, fmt='%Y-%m-%d', fmt_input='%Y%m%d', returnval='str') --> '2021-10-05'\n\n \"\"\"\n from pytz import timezone as tzone\n import datetime\n import time\n\n sdt = str(datenow)\n\n if isinstance(datenow, datetime.datetime):\n now_utc = datenow\n\n elif (isinstance(datenow, float) or isinstance(datenow, int)) and datenow > 1600100100: # Unix time stamp\n # unix seconds in UTC\n # fromtimestamp give you the date and time in local time\n # utcfromtimestamp gives you the date and time in UTC.\n # int(time.time()) - date_now( int(time.time()), returnval='unix', timezone='utc') == 0\n now_utc = datetime.datetime.fromtimestamp(datenow)\n\n elif len(sdt) > 7: # date in string\n now_utc = datetime.datetime.strptime(sdt, fmt_input)\n\n else:\n now_utc = datetime.datetime.now(tzone('UTC')) # Current time in UTC\n\n # now_new = now_utc.astimezone(tzone(timezone)) if timezone != 'utc' else now_utc.astimezone(tzone('UTC'))\n #now_new = now_utc.astimezone(tzone('UTC')) if timezone in {'utc', 'UTC'} else now_utc.astimezone(tzone(timezone))\n now_new = now_utc if timezone in {\n 'utc', 'UTC'} else now_utc.astimezone(tzone(timezone))\n\n # Add months\n now_new = now_new + \\\n datetime.timedelta(days=add_days + 7*add_weeks,\n hours=add_hours, minutes=add_mins,)\n if add_months != 0:\n from dateutil.relativedelta import relativedelta\n now_new = now_new + relativedelta(months=add_months)\n\n # please check your upwork, I have replied over there.\n\n # Force dates\n if force_dayofmonth > 0:\n now_new = now_new.replace(day=force_dayofmonth)\n\n if force_dayofweek > 0:\n actual_day = now_new.weekday()\n days_of_difference = force_dayofweek - actual_day\n now_new = now_new + datetime.timedelta(days=days_of_difference)\n\n if force_hourofday > 0:\n now_new = now_new.replace(hour=force_hourofday)\n\n if force_minofhour > 0:\n now_new = now_new.replace(minute=force_minofhour)\n\n if returnval == 'datetime':\n return now_new # 
datetime\n elif returnval == 'int':\n return int(now_new.strftime(fmt))\n elif returnval == 'unix':\n # time.mktime(now_new.timetuple())\n return datetime.datetime.timestamp(now_new)\n else:\n return now_new.strftime(fmt)\n\n# hey are you able to see ?\n","repo_name":"i-m-aditya/Algorithms","sub_path":"codes/upwork.py","file_name":"upwork.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10318317146","text":"\"\"\"\nes21.views.auxiliary\n====================\n\nViews for auxiliary pionners.\nMany view here accept POST method for search in navbar or other POST.\n\"\"\"\n\nfrom flask import Blueprint\n\nfrom . import home, edit\n\n\nblueprint = Blueprint('auxiliary', __name__, url_prefix='/mpanampy')\n\n\nblueprint.add_url_rule(\n '/',\n 'home',\n home.entry,\n methods=('GET', 'POST'))\n\nblueprint.add_url_rule(\n '/hanova/',\n 'edit',\n edit.entry,\n methods=['POST'])\n","repo_name":"Unviray/ES21","sub_path":"es21/views/auxiliary/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"23024123191","text":"from collections import deque\nimport heapq\n\nn = int(input())\ngraph = []\nfor i in range(n):\n graph.append(list(map(int, input().split())))\nvisited = [-1] * n\nin_degree = [0] * n\nheap = []\nres = []\n\ndef arrsum(arr):\n sum = 0\n for i in range(len(arr)):\n sum += arr[i]\n return sum\n\nfor i in range(n):\n in_degree[i] = arrsum(graph[i])\n if in_degree[i] == 0:\n heapq.heappush(heap, i)\n\nwhile heap:\n print(heap)\n present = heapq.heappop(heap)\n res.append(present + 1)\n for i in range(n):\n if graph[i][present] != 0:\n in_degree[i] -= 1\n if in_degree[i] == 0:\n heapq.heappush(heap, i)\n\nprint(res)","repo_name":"MaryAhn/Codetree","sub_path":"Graph/Topology_indegree_dict.py","file_name":"Topology_indegree_dict.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24559914131","text":"import glob\nimport json\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\ndef feature_generation(\n df_in: pd.DataFrame,\n cols: list,\n lags: list = [5, 20, 40, 60]\n):\n df = df_in.copy()\n for col in cols:\n for lag in lags:\n roll = df[col].rolling(lag)\n df[f'{col}_mean_{lag}'] = roll.mean()\n df[f'{col}_std_{lag}'] = roll.std()\n df[f'{col}_median_{lag}'] = roll.median()\n df[f'{col}_max_{lag}'] = roll.max()\n df[f'{col}_min_{lag}'] = roll.min()\n\n df = df.iloc[max(lags): ].reset_index(drop = True)\n\n return df\n\ndef predict(\n model,\n preprocessing,\n data: pd.DataFrame,\n col_lst: list = ['tsmc', 'asml', 'amat', 'sumco'],\n start: str = '2022-05-29',\n end: str = '2022-12-31',\n date_format: str = '%Y-%m-%d',\n date_col:str = 'date',\n verbose: bool = True\n):\n datelist = pd.date_range(start = start, end = end)\n start = datetime.strptime(start, date_format)\n end = datetime.strptime(end, date_format)\n distance = (end - start).days + 1\n\n col = len(col_lst)\n total_pred = np.zeros((distance, col))\n\n pred = np.zeros((1, col))\n for i, date in enumerate(datelist):\n if verbose:\n print(date.strftime(date_format))\n x = preprocessing(data[col_lst], col_lst).to_numpy()\n \n pred = np.asarray(np.round(model.predict(x)), dtype = 'int')\n\n #for j, model in enumerate(model_lst):\n #print(model.predict(x))\n #pred[0, j] = 
model.predict(x)[0]\n\n total_pred[i] = pred\n tmp = pd.DataFrame(pred, columns = col_lst)\n tmp[date_col] = date\n data = pd.concat([data, tmp], axis = 0).reset_index(drop = True)\n data = data.iloc[1: ]\n\n total_pred = pd.DataFrame(total_pred, columns = col_lst)\n total_pred[date_col] = datelist\n total_pred = total_pred.reindex(columns = [date_col] + col_lst)\n total_pred[col_lst] = total_pred[col_lst].astype(dtype = 'int')\n\n return total_pred\n\ndef get_csv_from_json(\n json_dir: str = './data/crawler_data',\n save_dir = None\n):\n json_lst = glob.glob(f'{json_dir}/*.json')\n\n df = pd.DataFrame()\n json_dict = {\n 'date': [],\n 'TSMC': [],\n 'ASML': [],\n 'AM': [],\n 'SUMCO': []\n }\n\n for json_path in json_lst:\n date = json_path.split('/')[-1].split('.')[0]\n date = datetime.strptime(date,\"%Y-%m-%d\")\n json_dict['date'].append(date)\n\n with open(json_path, newline='') as jsonfile:\n data = json.load(jsonfile)\n\n for key in data.keys():\n json_dict[key].append(data[key])\n\n for key in json_dict:\n df[key] = json_dict[key]\n\n df = df.sort_values(by = ['date']).reset_index(drop = True)\n \n if save_dir is not None:\n df.to_csv(f'{save_dir}/VolumneForFourCompany.csv', index = False)\n \n return df\n","repo_name":"ENSREG/NYCU_CLOUD_NATIVE_FINAL","sub_path":"images/crawler/header/model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"24201466252","text":"from django.shortcuts import render\nfrom django.http import Http404\nfrom .models import Post, Author, subscribe, Contact, Comment, SubComment\nimport datetime\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.contrib.auth.models import auth\nfrom django.contrib.auth import login,logout\nfrom django.contrib.auth.decorators import login_required\n\n\ndef index(request):\n\tif request.method == 'GET':\n\t\temail = request.GET.get('email')\n\t\tif email:\n\t\t\tsubscribe(email=email).save()\n\tweek_ago = datetime.date.today() - datetime.timedelta(days = 7)\n\ttrends = Post.objects.filter(time_upload__gte = week_ago).order_by('-read')\n\tTopAuthors =Author.objects.order_by('-rate')[:2]\n\tAuthorsPost = [Post.objects.filter(auther = author).first() for author in TopAuthors]\n\n\tall_post = Paginator(Post.objects.filter(publish = True),7)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tposts = all_post.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = all_post.page(1)\n\texcept EmptyPage:\n\t\tposts = all_post.page(all_post.num_pages)\n\n\tparms = {\n\t\t'posts': posts,\n\t\t'trends': trends[:2],\n\t\t'author_post':AuthorsPost,\n\t\t'pop_post': Post.objects.order_by('-read')[:2],\n\t\t'recent_post' : Post.objects.order_by('-time_upload')[:2],\n\t}\n\treturn render(request, 'index.html', parms)\n\n\n@login_required\ndef profile(request):\n\tuser = request.user\n\tif user is None:\n\t\treturn redirect('login')\n\telse:\n\t\tparms = {\n 'user':user,\n\t\t\t'recent_post' : Post.objects.order_by('-time_upload')[:2]\n }\n\treturn render(request, 'profile.html',parms)\n\n\ndef about(request):\n\tparms = {\n\t\t'title': 'About | Gyanism',\n\t\t'pop_post': Post.objects.order_by('-read')[:2],\n 'recent_post' : Post.objects.order_by('-time_upload')[:2],\n\t\t}\n\treturn render(request, 'about.html', parms)\n\n\ndef post(request, id, slug):\n try:\n post = Post.objects.get(pk=id, slug=slug)\n except:\n raise Http404(\"Post Does Not 
Exist\")\n\n post.read += 1\n post.save()\n\n if request.method == 'POST':\n comm = request.POST.get('comm')\n comm_id = request.POST.get('comm_id') # None\n\n if comm_id:\n SubComment(post=post,\n user=request.user,\n comm=comm,\n comment=Comment.objects.get(id=int(comm_id))\n ).save()\n else:\n Comment(post=post, user=request.user, comm=comm).save()\n\n comments = []\n for c in Comment.objects.filter(post=post):\n comments.append([c, SubComment.objects.filter(comment=c)])\n\n post_author = post.auther\n if str(post_author) == 'palak':\n post_para = 'Hello fellas , Im Palak Shivlani.A tech enthusiast and developer always open to exploring new fields.A person who believes truth always prevails.'\n author_image = '/static/images/Palak.jpg'\n elif str(post_author) == 'Parmeshwar':\n post_para = \"It’s me Parmeshwar Kumar Sahu.A tech learner, analyzer and a code freak.As, Student is more important than a teacher likewise learner learning is important than teaching.\"\n author_image = '/static/images/Parmeshwar.jpg'\n else:\n post_para = 'Sorry, I like to stay anonymous. Read the blog and enjoy'\n author_image = '/static/images/profile.png'\n\n parms = {\n 'comments': comments,\n 'post_author': post_author,\n 'post': post,\n 'post_para': post_para,\n 'author_image': author_image,\n 'pop_post': Post.objects.order_by('-read')[:2],\n 'recent_post' : Post.objects.order_by('-time_upload')[:2]\n }\n return render(request, 'blog-single.html', parms)\n\n\n\ndef contact(request):\n\tif request.method == 'POST':\n\t\tname = request.POST.get('name')\n\t\temail = request.POST.get('email')\n\t\tmob = request.POST.get('mob')\n\t\tmess = request.POST.get('mess','default')\n\t\tContact(name=name,email=email,mob=mob,mess=mess).save()\n\tparms = {\n\t\t'title': 'Contact | Gyanism',\n\t\t'pop_post': Post.objects.order_by('-read')[:2],\n\t\t'recent_post' : Post.objects.order_by('time_upload')[:2]\n\t\t}\n\treturn render(request, 'contact.html', parms)\n\n\ndef search(request):\n\tq = request.GET.get('q')\n\tif q is None:\n\t\tq = ' '\n\tposts = Post.objects.filter(\n\t\tQ(title__icontains = q) |\n\t\tQ(overview__icontains = q) \n\t\t).distinct()\n\n\tparms = {\n\t\t'posts':posts,\n\t\t'title':'Search Results',\n 'recent_post' : Post.objects.order_by('-time_upload')[:2]\n\t\t}\n\n\treturn render(request, 'search.html', parms)\n\n\n","repo_name":"palakshivlani-11/Karwaan","sub_path":"palak/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74653613972","text":"import copy\n\ndef viterbi(transition_table, states, observation):\n\tstart_symbol = \"@\"\n\tblank_reach_here_prob = dict()\n\treach_here_prob = dict()\n\tempty_probability = 0.0000000\n\n\tstart_states = []\n\tfor x1 in states:\n\t\tfor x2 in states:\n\t\t\tstart_states.append((x1,x2))\n\t# print(start_states)\n\n\tfor x in start_states:\n\t\tblank_reach_here_prob[x] = (empty_probability,[])\n\t\treach_here_prob[x] = (empty_probability,[])\n\t\t# print(str(x) + \">\" + str(reach_here_prob[x]))\n\treach_here_prob[(start_symbol,start_symbol)] = (1,[])\n\n\tfor obs in observation:\n\t\tnew_reach_here_prob = copy.deepcopy(blank_reach_here_prob)\n\n\t\t# print(\">>>>\" + obs)\n\t\tfor nextlevel in states:\n\t\t\tbest_prob_to_reach_nl = -1\n\t\t\tbest_prevstate_to_reach_nl = (start_symbol,start_symbol)\n\t\t\tfor prevlevel in start_states:\n\t\t\t\tprob_to_reach_prevlevel = 
reach_here_prob[prevlevel][0]\n\t\t\t\tprob_to_reach_from_prevlevel = transition_table[(prevlevel[0],prevlevel[1],nextlevel,obs)]\n\t\t\t\tprob = prob_to_reach_prevlevel * prob_to_reach_from_prevlevel\n\t\t\t\t# if(prob > 0):\n\t\t\t\t\t# print(str(prevlevel[0]) + \":\" + str(prevlevel[1]) + \">>\" + str(nextlevel) + \"::\" + str(prob_to_reach_from_prevlevel))\n\t\t\t\tif(prob > best_prob_to_reach_nl):\n\t\t\t\t\tbest_prob_to_reach_nl = prob\n\t\t\t\t\tbest_prevstate_to_reach_nl = prevlevel\n\n\t\t\t# print(str((best_prevstate_to_reach_nl[1],nextlevel)) + \" : \" + str(best_prob_to_reach_nl))\n\t\t\tnew_reach_here_prob[(best_prevstate_to_reach_nl[1],nextlevel)] = (best_prob_to_reach_nl,\n\t\t\t\treach_here_prob[best_prevstate_to_reach_nl][1] +\n\t\t\t [best_prevstate_to_reach_nl[1]])\n\t\t\n\t\treach_here_prob = copy.deepcopy(new_reach_here_prob)\n\t\n\tbest_last_state_prob = 0\n\tbest_last_state = (start_symbol, start_symbol)\n\tfor state in start_states:\n\t\tif(reach_here_prob[state][0] > best_last_state_prob):\n\t\t\tbest_last_state_prob = reach_here_prob[state][0]\n\t\t\tbest_last_state = state\n\n\t# print(best_last_state)\n\treach_here_prob[best_last_state] = (best_last_state_prob,reach_here_prob[best_last_state][1] + [best_last_state[1]])\n\t# print(reach_here_prob[best_last_state])\n\n\treturn reach_here_prob[best_last_state]\n","repo_name":"BijoySingh/Artificial-Intelligence","sub_path":"HMM-Grapheme-Phoneme/Trigram--Model/Viterbi.py","file_name":"Viterbi.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25566504197","text":"from rich.console import Console\nfrom rich.text import Text\nimport os\nimport inspect\nimport random\n\n\nFILENAME_VALID_WORDS = \"valid_words.txt\"\nCURRENT_DIRECTORY = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nFILE_PATH_VALID_WORDS = os.path.join(CURRENT_DIRECTORY, \"data\", FILENAME_VALID_WORDS)\n\n\ndef get_valid_words():\n valid_words = list()\n file_data = open(FILE_PATH_VALID_WORDS,\"r\")\n for x in file_data.readlines():\n valid_words.append(x.strip())\n file_data.close()\n return valid_words\n\n\nclass Game(object):\n def __init__(self, use_rich=False) -> None:\n self.number_guesses = 5\n self.current_round = 1\n self.number_rounds = 5\n self.grid = None\n self.correct_word = \"apple\"\n self.use_rich = use_rich\n \n def set_correct_word(self, word_list):\n self.correct_word = random.choice(word_list)\n # print(f\"WORD TO BE GUESSED IS {self.correct_word}\")\n \n def update_round(self):\n self.current_round += 1\n \n def is_over(self):\n return self.current_round == self.number_rounds\n \n def check_if_correct_word(self, input_word):\n return self.correct_word == input_word\n\n def set_text_format_option(self, option):\n self.use_rich = option\n \n def setup_grid(self):\n self.grid = Grid()\n self.grid.use_rich = self.use_rich\n\n\nclass TextSettings(object):\n def __init__(self) -> None:\n self.color_is_in_word = \"bold yellow\"\n self.color_is_in_place = \"bold green\"\n \n def color_word(self, input_word, correct_word):\n text = Text()\n for idx, s in enumerate(input_word):\n if s == correct_word[idx]:\n text.append(s, style=self.color_is_in_place)\n elif s in correct_word:\n text.append(s, style=self.color_is_in_word)\n else:\n text.append(s)\n return text\n\n\nclass Grid(object):\n def __init__(self) -> None:\n self.num_words = 5\n self.word_length = 5\n list_words = list([\" 
\"*self.word_length]*self.num_words)\n self.word_dict = dict(zip(list(range(1, self.num_words + 1, 1)), list_words))\n self.use_rich = False\n \n def set_text_format_option(self, option):\n self.use_rich = option\n \n def print_grid(self):\n if not self.use_rich:\n for k in self.word_dict.keys():\n print(self.word_dict[k])\n else:\n for k in self.word_dict.keys():\n console.print(self.word_dict[k])\n print()\n\n def update_grid(self, input_word, guess_number):\n new_dict = self.word_dict\n new_dict[guess_number] = input_word\n self.word_dict = new_dict\n \n def check_if_valid_word(self, input_word):\n value = False\n if len(input_word)==self.word_length:\n value = True\n return value \n\n def color_word(self, input_word, correct_word, text_obj):\n return text_obj.color_word(input_word, correct_word)\n\n\nif __name__ == '__main__':\n VALID_WORDS = get_valid_words()\n play_game = True\n use_rich = True\n game = Game(use_rich=use_rich)\n game.set_correct_word(VALID_WORDS)\n game.setup_grid()\n console = Console()\n text_obj = TextSettings()\n \n while play_game:\n user_input = input(\"Enter your guess: \")\n\n if game.grid.check_if_valid_word(user_input):\n formatted_word = game.grid.color_word(user_input, game.correct_word, text_obj)\n game.grid.update_grid(formatted_word, game.current_round)\n game.grid.print_grid()\n game.update_round()\n\n if game.check_if_correct_word(user_input):\n print(\"Congratulations! You guessed correctly.\")\n offer_new_game = True\n\n else:\n if game.current_round > game.number_rounds:\n print(\"Too bad! You lose this game.\")\n print(f\"The correct word is: {game.correct_word}\")\n offer_new_game = True\n \n else:\n play_game = True\n offer_new_game = False\n\n if offer_new_game:\n user_input_new_game = input(\"\"\"Would you like to play another game? Please enter Y to play a new game. To exit the game, press any other key. Enter your response here: \"\"\")\n\n if user_input_new_game == \"Y\":\n print(\"Capital! A new game will start shortly...\")\n play_game = True\n game = Game(use_rich=use_rich)\n game.set_correct_word(VALID_WORDS)\n game.setup_grid()\n else:\n print(\"Thank you for playing. 
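# Hedged sketch (added): check_if_valid_word() above only tests length, so any
# five characters would be accepted; a stricter guard could also consult the
# word list loaded by get_valid_words(). The helper name below is mine.
def is_valid_guess(word, valid_words, word_length=5):
    word = word.lower().strip()
    return len(word) == word_length and word in valid_words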
Goodbye.\")\n            play_game = False    \n    ","repo_name":"Hematita1991/wordle-rich","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33677975864","text":"from automata import Automata\n\n\nclass AFD(Automata):\n    def __init__(self, alfabeto):\n        self.states = set()\n        self.alfabeto = alfabeto\n        self.transitions = dict()\n        self.initial = None\n        self.finals = set()\n\n    def clear_afd(self):\n        \"\"\"Initialize the variables used while processing input strings\"\"\"\n        self.__has_error = False\n        self.__current_state = self.initial\n\n    @property\n    def has_error(self):\n        return self.__has_error\n\n    @property\n    def current_state(self):\n        return self.__current_state\n\n    def __state_is_valid(self, state):\n        if state in self.states:\n            return True\n        return False\n\n    def __symbol_is_valid(self, symbol):\n        if len(symbol) == 1 and symbol in self.alfabeto:\n            return True\n        return False\n\n    def create_state(self, id: int, initial=False, final=False):\n        if id in self.states:\n            return False\n\n        self.states = self.states.union({id})\n\n        if initial:\n            self.initial = id\n        if final:\n            self.finals = self.finals.union({id})\n\n        return True\n\n    def create_transition(self, origin: int, destiny: int, symbol: str):\n        if not self.__state_is_valid(origin):\n            return False\n        if not self.__state_is_valid(destiny):\n            return False\n        if not self.__symbol_is_valid(symbol):\n            return False\n\n        self.transitions[(origin, symbol)] = destiny\n\n    def change_initial_state(self, id: int):\n        \"\"\"Set an existing state as the initial state\"\"\"\n        if not self.__state_is_valid(id):\n            return\n\n        self.initial = id\n\n    def change_final_state(self, id: int, final: bool):\n        \"\"\"Set whether an existing state is a final state\"\"\"\n        if not self.__state_is_valid(id):\n            return\n        if final:\n            self.finals = self.finals.union({id})\n        else:\n            self.finals = self.finals.difference({id})\n\n    def config_state(self, state, initial=False, final=False):\n        if not self.__state_is_valid(state):\n            return\n\n        if initial:\n            self.initial = state\n        if final:\n            self.finals.add(state)\n        elif self.__state_is_valid(state) and state in self.finals:\n            self.finals.remove(state)\n\n    def move(self, string: str):\n        \"\"\"\n        Starting from the current state, process the string and return the halting state. 
\n        If an error occurs, set the __has_error flag to True.\n        \"\"\"\n        for symbol in string:\n            if not self.__symbol_is_valid(symbol):\n                self.__has_error = True\n                break\n\n            if (self.__current_state, symbol) in self.transitions:\n                new_state = self.transitions[(self.__current_state, symbol)]\n                self.__current_state = new_state\n            else:\n                self.__has_error = True\n                break\n\n        return self.__current_state\n\n    def check(self, string):\n        state = self.initial\n\n        for symbol in string:\n            if (state, symbol) not in self.transitions:\n                return False\n            else:\n                state = self.transitions[(state, symbol)]\n\n        return state in self.finals\n\n    def __str__(self):\n        string = 'AFD(E, A, T,i, F): \\n'\n\n        string += ' S = { '\n        for state in self.states:\n            string += '{}, '.format(str(state))\n        string += '} \\n'\n\n        string += ' A = { '\n        for symbol in self.alfabeto:\n            string += '{}, '.format(str(symbol))\n        string += '} \\n'\n\n        string += ' T = { '\n        for (state, symbol) in self.transitions.keys():\n            destiny = self.transitions[(state, symbol)]\n            string += '({},{}) --> {} '.format(state, symbol, destiny)\n        string += '} \\n'\n\n        string += ' I = {} \\n'.format(self.initial)\n\n        string += ' F = { '\n        for state in self.finals:\n            string += '{}'.format(str(state))\n        string += ' } \\n'\n\n        return string\n","repo_name":"thenriquedb/lfa-automata","sub_path":"src/automata/afd.py","file_name":"afd.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32147876961","text":"# -*- coding: utf-8 -*-\r\nimport networkx as nx\r\n# from networkx.algorithms.community import greedy_modularity_communities\r\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\r\nfrom networkx.algorithms.community.quality import modularity\r\nfrom networkx.utils.mapped_queue import MappedQueue\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom utils import *\r\n\r\nclass modularity_max: \r\n    \"\"\"\r\n    Finding communities (chunk) with hierarchy\r\n    \r\n    \"\"\"\r\n    \r\n    def hierarchy_modularity_communities(self, G):\r\n        \"\"\"\r\n        Code adapted and modified from (https://networkx.org/documentation/stable/_modules/networkx/algorithms/community/modularity_max.html#greedy_modularity_communities).\r\n        Here we incorporate multiple modularities for each iteration to\r\n        extract multiple hierarchies of communities (chunk). The code was updated\r\n        by referring to the work (M. Newman, M. Girvan, Physical review. 
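# Hedged usage sketch (added) for the AFD class above (assumes afd.py and its
# automata base module are importable): a two-state automaton over {'a', 'b'}
# that accepts exactly the strings ending in 'b'.
afd = AFD(alfabeto={'a', 'b'})
afd.create_state(0, initial=True)
afd.create_state(1, final=True)
for origin in (0, 1):
    afd.create_transition(origin, 0, 'a')  # any 'a' leads to the non-final state
    afd.create_transition(origin, 1, 'b')  # any 'b' leads to the final state
print(afd.check('aab'))  # True
print(afd.check('aba'))  # False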
E, \r\n Statistical, nonlinear, and soft matter physics69, 026113 (2004)).\r\n \"\"\"\r\n # Override\r\n modularity_list = []\r\n community_list = []\r\n \r\n # First create one community for each node\r\n communities = list([frozenset([u]) for u in G.nodes()])\r\n output_size = len(communities)\r\n # Track merges\r\n merges = []\r\n # Greedily merge communities until no improvement is possible\r\n old_modularity = None\r\n new_modularity = modularity(G, communities)\r\n while old_modularity is None or len([i for i in communities if len(i) != 0]) > 2:\r\n # Save modularity for comparison\r\n old_modularity = new_modularity\r\n # Find best pair to merge\r\n trial_communities = list(communities)\r\n to_merge = None\r\n max_merge = -1000\r\n for i, u in enumerate(communities):\r\n for j, v in enumerate(communities):\r\n # Skip i=j and empty communities\r\n if j <= i or len(u) == 0 or len(v) == 0:\r\n continue\r\n # Merge communities u and v\r\n trial_communities[j] = u | v\r\n trial_communities[i] = frozenset([])\r\n trial_modularity = modularity(G, trial_communities)\r\n if trial_modularity > max_merge:\r\n max_merge = trial_modularity\r\n to_merge = (i, j, new_modularity - old_modularity)\r\n trial_communities[i] = u\r\n trial_communities[j] = v\r\n new_modularity = max_merge\r\n modularity_list.append(new_modularity)\r\n # max_merge\r\n if to_merge is not None:\r\n # If the best merge improves modularity, use it\r\n merges.append(to_merge)\r\n i, j, dq = to_merge\r\n u, v = communities[i], communities[j]\r\n communities[j] = u | v\r\n communities[i] = frozenset([])\r\n community_list.append(communities.copy())\r\n # Remove empty communities and sort\r\n return_community_list = []\r\n for communitie in community_list:\r\n comm = [c for c in communitie if len(c) > 0]\r\n comm = sorted(comm, key=lambda x: len(x), reverse=True)\r\n return_community_list.append(comm)\r\n return return_community_list, modularity_list\r\n \r\n def __init__(self, hierarchy_num = 2):\r\n self.name = \"Modularity MAX\"\r\n self.label = []\r\n self.hierarchy_num = hierarchy_num\r\n \r\n def input(self, x_input):\r\n \r\n x = x_input\r\n \r\n output_size = x.shape[1]\r\n \r\n Dtable = np.zeros([output_size, output_size])\r\n TPMatrix = np.zeros([output_size, output_size])\r\n \r\n prev_state = np.argmax(x[0])\r\n state = None\r\n for i in x[1:]:\r\n if np.max(i) != 1:\r\n continue\r\n state = np.argmax(i)\r\n Dtable[prev_state][state]+= 1 \r\n prev_state = state\r\n \r\n for i, j in enumerate(Dtable):\r\n state_total = np.sum(j)\r\n if state_total == 0:\r\n continue\r\n TPMatrix[i] = Dtable[i]/state_total\r\n\r\n g = utils.matrix_to_graph(TPMatrix, False)\r\n c,m = list(self.hierarchy_modularity_communities(g))\r\n c.append(list([frozenset(g.nodes)])) # Communities are merge into one \r\n m.append(0.0)\r\n \r\n d_diff_list = []\r\n for d in range(1,len(m)-1):\r\n d_diff = abs((m[d] - m[d-1]) - ((m[d+1] - m[d-1])/2))\r\n d_diff_list.append(d_diff)\r\n \r\n d_diff_index = (np.argsort(d_diff_list)[::-1])+1\r\n \r\n hierarchy = self.hierarchy_num\r\n \r\n d_diff_index = np.sort(d_diff_index[:hierarchy])\r\n \r\n label = np.zeros((hierarchy, output_size), dtype=int)\r\n for h in range(hierarchy):\r\n for index, i in enumerate(c[d_diff_index[h]]):\r\n for k in i:\r\n label[h,k] = index\r\n \r\n self.label = label\r\n \r\n return self.label\r\n \r\n def evaluation(self, true_label, self_label): \r\n \"\"\"\r\n Evaluate predicted label using NMI score\r\n Return the average NMI score of every hierarchies.\r\n True label 
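# Hedged sketch (added): the one-hot state-activation matrix that input()
# above expects -- each row of x marks the single active state at one step.
import numpy as np

x = np.zeros((6, 4))
for t, s in enumerate([0, 1, 0, 1, 2, 3]):
    x[t, s] = 1.0
# modularity_max(hierarchy_num=2).input(x) would then accumulate a 4x4
# transition-probability matrix from consecutive rows (assumes utils'
# matrix_to_graph and the class above are importable).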
provided by environment\r\n \"\"\"\r\n if len(self_label) == 0:\r\n return\r\n print(\"Modularity Max Learned Labels: \",self_label)\r\n print(\"Modularity Max Correct Labels: \",true_label)\r\n nmi_l = []\r\n \r\n total_hierarchy = true_label.shape[0]\r\n \r\n for h in range(total_hierarchy):\r\n nmi_l.append(normalized_mutual_info_score(self_label[h], true_label[h]))\r\n nmi_score = np.sum(nmi_l)/len(nmi_l)\r\n return nmi_score\r\n \r\n def evaluation(self, true_label, self_label): \r\n \"\"\"\r\n Evaluate predicted label using NMI score\r\n Return the average NMI score of every hierarchies.\r\n True label provided by environment\r\n \"\"\"\r\n hierarchy = None \r\n labels = self_label\r\n print(\"number of layers it thinks: \" + str(len(labels)))\r\n total_env_hierarchy = len(true_label)\r\n # true_label = np.flip(true_label, axis=0)\r\n \r\n # Handle exception of predicted levels of hierarchy less then true label\r\n if len(true_label) > len(labels):\r\n remaining_levels = len(true_label) - len(labels)\r\n for _ in range(remaining_levels):\r\n labels = np.vstack((labels, (np.full(len(labels[0]), -1))))\r\n \r\n nmi_l = []\r\n for h in range(total_env_hierarchy):\r\n nmi_l.append(normalized_mutual_info_score(labels[h], true_label[h]))\r\n nmi_score = np.sum(nmi_l)/len(nmi_l)\r\n return nmi_score\r\n \r\n def plot_animation(self, name, true_labels=None):\r\n print(\"Movie not supported for Modularity Maximization\")\r\n return\r\n \r\n\r\n","repo_name":"LISkyushu/Sigma","sub_path":"modularity_max.py","file_name":"modularity_max.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28095749733","text":"import csv\n\nclass Customer:\n def __init__(self, customer_id, first_name, last_name, email):\n self.customer_id = customer_id\n self.first_name = first_name\n self.last_name = last_name\n self.email = email\n self.accounts = []\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name} ({self.email})\"\n\nclass Account:\n def __init__(self, account_number, account_type, customer, balance=0):\n self.account_number = account_number\n self.account_type = account_type\n self.customer = customer\n self.balance = balance\n\n def deposit(self, amount):\n if amount > 0:\n self.balance += amount\n return True\n else:\n return False\n\n def withdraw(self, amount):\n if amount > 0 and amount <= self.balance:\n self.balance -= amount\n return True\n else:\n return False\n\n def transfer(self, recipient_account, amount):\n if self.withdraw(amount):\n recipient_account.deposit(amount)\n return True\n else:\n return False\n\nclass Main:\n def __init__(self):\n self.customers = self.load_customers('users.csv')\n self.accounts = self.load_accounts('accounts.csv')\n\n def load_customers(self, filename):\n customers = []\n try:\n with open(filename, 'r') as file:\n reader = csv.reader(file)\n next(reader) # Skip header row\n for row in reader:\n customer = Customer(int(row[0]), row[1], row[2], row[3])\n customers.append(customer)\n except FileNotFoundError:\n pass\n return customers\n\n def save_customers(self, filename):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['ID', 'First Name', 'Last Name', 'Email'])\n for customer in self.customers:\n writer.writerow([customer.customer_id, customer.first_name, customer.last_name, customer.email])\n\n def load_accounts(self, filename):\n accounts = []\n try:\n with open(filename, 'r') as file:\n reader = 
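# Hedged sketch (added): seed files matching the headers that load_customers()
# and load_accounts() above expect; the row values are made up.
import csv

with open('users.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['ID', 'First Name', 'Last Name', 'Email'])
    w.writerow([1, 'Ada', 'Lovelace', 'ada@example.com'])

with open('accounts.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['Account Number', 'Account Type', 'Customer ID', 'Balance'])
    w.writerow([1, 'checking', 1, 100.0])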
csv.reader(file)\n next(reader) # Skip header row\n for row in reader:\n account_number, account_type, customer_id, balance = row\n customer = next((c for c in self.customers if c.customer_id == int(customer_id)), None)\n if customer:\n account = Account(int(account_number), account_type, customer, float(balance))\n customer.accounts.append(account)\n accounts.append(account)\n except FileNotFoundError:\n pass\n return accounts\n\n def save_accounts(self, filename):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['Account Number', 'Account Type', 'Customer ID', 'Balance'])\n for account in self.accounts:\n writer.writerow([account.account_number, account.account_type, account.customer.customer_id, account.balance])\n\n def add_customer(self):\n customer_id = len(self.customers) + 1\n first_name = input(\"Enter first name: \")\n last_name = input(\"Enter last name: \")\n email = input(\"Enter email: \")\n customer = Customer(customer_id, first_name, last_name, email)\n self.customers.append(customer)\n self.save_customers('users.csv')\n print(f\"Customer added with ID {customer_id}\")\n\n def create_account(self):\n customer_id = int(input(\"Enter customer ID: \"))\n account_number = len(self.accounts) + 1\n account_type = input(\"Enter account type: \")\n initial_balance = float(input(\"Enter initial balance: \"))\n customer = next((c for c in self.customers if c.customer_id == customer_id), None)\n if customer:\n account = Account(account_number, account_type, customer, initial_balance)\n self.accounts.append(account)\n self.save_accounts('accounts.csv')\n print(f\"Account created with number {account_number}\")\n else:\n print(\"Customer not found.\")\n\n def deposit_money(self):\n customer_id = int(input(\"Enter customer ID: \"))\n\n # Find the customer with the given ID\n customer = next((c for c in self.customers if c.customer_id == customer_id), None)\n\n if customer:\n # Display customer's accounts\n for i, account in enumerate(customer.accounts):\n print(f\"{i + 1}. Account {account.account_number} ({account.account_type}): ${account.balance}\")\n\n # Prompt user to select an account to deposit into\n while True:\n account_index = int(input(\"Select an account (enter the number): \")) - 1\n if 0 <= account_index < len(customer.accounts):\n selected_account = customer.accounts[account_index]\n\n # Prompt user for deposit amount\n amount = float(input(\"Enter deposit amount: \"))\n if selected_account.deposit(amount):\n print(f\"Deposited ${amount} into Account {selected_account.account_number}.\")\n break\n else:\n print(\"Invalid deposit amount. Please enter a valid amount.\")\n else:\n print(\"Invalid account selection. Please enter a valid account number.\")\n else:\n print(\"Customer not found.\")\n\n def withdraw_money(self):\n customer_id = int(input(\"Enter customer ID: \"))\n\n # Find the customer with the given ID\n customer = next((c for c in self.customers if c.customer_id == customer_id), None)\n\n if customer:\n # Display customer's accounts\n for i, account in enumerate(customer.accounts):\n print(f\"{i + 1}. 
Account {account.account_number} ({account.account_type}): ${account.balance}\")\n\n            # Prompt user to select an account to withdraw from\n            while True:\n                account_index = int(input(\"Select an account (enter the number): \")) - 1\n                if 0 <= account_index < len(customer.accounts):\n                    selected_account = customer.accounts[account_index]\n\n                    # Prompt user for withdrawal amount\n                    amount = float(input(\"Enter withdrawal amount: \"))\n                    if selected_account.withdraw(amount):\n                        print(f\"Withdrew ${amount} from Account {selected_account.account_number}.\")\n                        break\n                    else:\n                        print(\"Invalid withdrawal amount. Please enter a valid amount.\")\n                else:\n                    print(\"Invalid account selection. Please enter a valid account number.\")\n        else:\n            print(\"Customer not found.\")\n\n    def transfer_money(self):\n        sender_customer_id = int(input(\"Enter your customer ID: \"))\n        recipient_customer_id = int(input(\"Enter recipient's customer ID: \"))\n\n        # Find the sender and recipient customers with the given IDs\n        sender_customer = next((c for c in self.customers if c.customer_id == sender_customer_id), None)\n        recipient_customer = next((c for c in self.customers if c.customer_id == recipient_customer_id), None)\n\n        if sender_customer and recipient_customer:\n            # Display sender's accounts\n            print(f\"Accounts for {sender_customer}:\")\n            for i, account in enumerate(sender_customer.accounts):\n                print(f\"{i + 1}. Account {account.account_number} ({account.account_type}): ${account.balance}\")\n\n            # Prompt sender to select an account to transfer from\n            while True:\n                sender_account_index = int(input(\"Select an account to transfer from (enter the number): \")) - 1\n                if 0 <= sender_account_index < len(sender_customer.accounts):\n                    sender_account = sender_customer.accounts[sender_account_index]\n\n                    # Prompt user for transfer amount\n                    amount = float(input(\"Enter transfer amount: \"))\n                    if sender_account.withdraw(amount):\n                        # Display recipient's accounts\n                        print(f\"Accounts for {recipient_customer}:\")\n                        for i, account in enumerate(recipient_customer.accounts):\n                            print(f\"{i + 1}. Account {account.account_number} ({account.account_type}): ${account.balance}\")\n\n                        # Prompt recipient to select an account to transfer to\n                        while True:\n                            recipient_account_index = int(input(\"Select an account to transfer to (enter the number): \")) - 1\n                            if 0 <= recipient_account_index < len(recipient_customer.accounts):\n                                recipient_account = recipient_customer.accounts[recipient_account_index]\n                                # The sender was already debited above, so simply credit the recipient\n                                if recipient_account.deposit(amount):\n                                    print(f\"Transferred ${amount} from Account {sender_account.account_number} to Account {recipient_account.account_number}.\")\n                                    return\n                                else:\n                                    print(\"Invalid transfer amount. Please enter a valid amount.\")\n                            else:\n                                print(\"Invalid account selection. Please enter a valid account number.\")\n                    else:\n                        print(\"Invalid withdrawal amount. Please enter a valid amount.\")\n                else:\n                    print(\"Invalid account selection. Please enter a valid account number.\")\n        else:\n            print(\"Customer not found.\")\n\n    def display_transaction_data(self):\n        for customer in self.customers:\n            print(f\"{customer}:\")\n            for account in customer.accounts:\n                print(f\"Account {account.account_number} ({account.account_type}): ${account.balance}\")\n\n    def main_menu(self):\n        while True:\n            print(\"===========================\")\n            print(\"\\nACME Bank\")\n            print(\"1. Add Customer\")\n            print(\"2. Create Account\")\n            print(\"3. Deposit Money\")\n            print(\"4. Withdraw Money\")\n            print(\"5. Transfer Money\")\n            print(\"6. 
Display Transaction Data\")\n            print(\"7. Exit\")\n            print(\"===========================\")\n\n            choice = input(\"Enter your choice: \")\n\n            if choice == '1':\n                self.add_customer()\n\n            elif choice == '2':\n                self.create_account()\n\n            elif choice == '3':\n                self.deposit_money()\n\n            elif choice == '4':\n                self.withdraw_money()\n\n            elif choice == '5':\n                self.transfer_money()\n\n            elif choice == '6':\n                self.display_transaction_data()\n\n            elif choice == '7':\n                print(\"===========================\")\n                print(\"Goodbye Have A Great Day\")\n                print(\"===========================\")\n                break\n\nif __name__ == \"__main__\":\n    bank = Main()\n    bank.main_menu()\n","repo_name":"mariorecinos/tdd_demo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70208166295","text":"from Easy.Merge_Two_Sorted_Lists.Solution import Solution\nfrom Medium.Remove_Nth_Node_From_End_of_List.ListNode import ListNode\n\n\nif __name__ == \"__main__\":\n    l1_1= ListNode(1)\n    l1_2= ListNode(2)\n    l1_3= ListNode(4)\n\n\n    l2_1= ListNode(1)\n    l2_2= ListNode(3)\n    l2_3= ListNode(4)\n\n    l1_1.next=l1_2\n    l1_2.next=l1_3\n\n    l2_1.next=l2_2\n    l2_2.next=l2_3\n    sol=Solution()\n    print('result :{}'.format(sol.mergeTwoLists(l1_1,l2_1)))","repo_name":"da2so/LeetCode","sub_path":"Easy/Merge_Two_Sorted_Lists/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12781216505","text":"import sys\r\n\r\n\r\n# Check whether the sum of the left N digits equals the sum of the right N digits\r\ndef isLucky(x):\r\n    mid = len(x) // 2\r\n    sum1 = sum(x[:mid])\r\n    sum2 = sum(x[mid:])\r\n    if sum1 == sum2:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef solution(arr):\r\n    if len(arr) % 2 == 0:\r\n        chk = len(arr)\r\n    else:\r\n        chk = len(arr) - 1\r\n    while chk > 0:\r\n        i = 0\r\n        ans = 0\r\n        while i + chk <= len(arr):\r\n            # If this span is a lucky ticket, stop the loop and return its length\r\n            if isLucky(arr[i:i + chk]):\r\n                ans = chk\r\n                return ans\r\n            i += 1\r\n        chk -= 2\r\n    return 0\r\n\r\n\r\ns = list(map(int, list(sys.stdin.readline().strip())))\r\nprint(solution(s))","repo_name":"ksy133900/TIL","sub_path":"백준/Silver/1639. 
행운의 티켓/행운의 티켓.py","file_name":"행운의 티켓.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"12722805853","text":"import logging\n\nfrom sagemaker.estimator import Framework\nfrom sagemaker.vpc_utils import VPC_CONFIG_DEFAULT\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomFramework(Framework):\n\n    __framework_name__ = \"customframework\"\n\n    LATEST_VERSION = \"0.1\"\n\n    def __init__(\n        self,\n        entry_point,\n        image_uri: str,\n        source_dir=None,\n        hyperparameters=None,\n        **kwargs\n    ) -> None:\n        super(CustomFramework, self).__init__(\n            str(entry_point), source_dir, hyperparameters, image_uri=image_uri, **kwargs\n        )\n\n    def create_model(\n        self,\n        model_server_workers=None,\n        role=None,\n        vpc_config_override=VPC_CONFIG_DEFAULT,\n    ):\n        # required to allow this object instantiation\n        raise NotImplementedError()\n","repo_name":"awslabs/syne-tune","sub_path":"syne_tune/backend/sagemaker_backend/custom_framework.py","file_name":"custom_framework.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"17740941672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 19 10:00:34 2020\n\n@author: ware.cole\n\"\"\"\n\n# 1 - import\nimport pandas as pd\n\n# 2 - read csv\nmapped = pd.read_csv(\"gis_output.csv\")\n\n# 3 - replace NaN values for blocks not inside rings\nmapped[\"ringId\"].fillna(3, inplace = True)\n\n# 4 - set labels for rings\nwithin5 = mapped[\"ringId\"] == 1\nwithin10 = mapped[\"ringId\"] == 2\noutside = mapped[\"ringId\"] == 3\n\nmapped.loc[within5, \"ring\"] = \"<5 mi\"\nmapped.loc[within10, \"ring\"] = \">5, <10 mi\"\nmapped.loc[outside, \"ring\"] = \">10 mi\"\n\n# 5 - group by ring\nby_ring = mapped.groupby(\"ring\")\n\n# 6 - sum vets in each cohort by ring\nprint(\"Vets, 18-34:\")\nsum_18to34 = sum(mapped[\"vet_data_18to34_total\"])\nrings_18to34 = by_ring[\"vet_data_18to34_total\"].sum()\nprint(round(rings_18to34/sum_18to34, 3))\n\nprint(\"Vets, 35-54:\")\nsum_35to54 = sum(mapped[\"vet_data_35to54_total\"])\nrings_35to54 = by_ring[\"vet_data_35to54_total\"].sum()\nprint(round(rings_35to54/sum_35to54, 3))\n\nprint(\"Vets, 55-64:\")\nsum_55to64 = sum(mapped[\"vet_data_55to64_total\"])\nrings_55to64 = by_ring[\"vet_data_55to64_total\"].sum()\nprint(round(rings_55to64/sum_55to64, 3))\n\nprint(\"Vets, 65-74:\")\nsum_65to74 = sum(mapped[\"vet_data_65to74_total\"])\nrings_65to74 = by_ring[\"vet_data_65to74_total\"].sum()\nprint(round(rings_65to74/sum_65to74, 3))\n\nprint(\"Vets, 75+:\")\nsum_75up = sum(mapped[\"vet_data_75up_total\"])\nrings_75up = by_ring[\"vet_data_75up_total\"].sum()\nprint(round(rings_75up/sum_75up, 3))","repo_name":"CWWARE20/VeteransNEOhio","sub_path":"analyze_data.py","file_name":"analyze_data.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43085742377","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'this is my place'\nsocketio = SocketIO(app, cors_allowed_origins='*')\n\n\n@socketio.on('message')\ndef handle_message(message):\n    print(message)\n    socketio.emit('response', message)\n\n\n@socketio.on('connect', namespace='/chat')\ndef test_connect():\n    print('Connection established!')\n    socketio.emit('my response', {'data':'hello, connected !'}, 
namespace='/chat')\n\n\n@socketio.on('message', namespace='/andy')\ndef handle_andy_message(message):\n    print('andy', message)\n    socketio.emit('response', message, namespace='/andy')\n\n\n@socketio.on('chat', namespace='/chat')\ndef chat_room(message):\n    print(message)\n    socketio.emit('chat', message, namespace='/chat')\n\n\nif __name__ == '__main__':\n    socketio.run(app, host='0.0.0.0', port=8866)","repo_name":"sjk7524068/element-table-prac","sub_path":"wst.py","file_name":"wst.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16398120709","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import Draw\nfrom .serializers import DrawSerializer\n\nclass DrawListApiView(APIView):\n\n    # 1. List all\n    def get(self, request, *args, **kwargs):\n        '''\n        List all the drawings items\n        '''\n        draws = Draw.objects.all()\n        serializer = DrawSerializer(draws, many=True)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    # 2. Create\n    def post(self, request, *args, **kwargs):\n        '''\n        Create the draw with given drawing data\n        '''\n        data = {\n            'title': request.data.get('title'), \n            'payload': request.data.get('payload')\n        }\n        serializer = DrawSerializer(data=data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass DrawDetailApiView(APIView):\n\n    def get_object(self, draw_id):\n        '''\n        Helper method to get the object with given draw_id\n        '''\n        try:\n            return Draw.objects.get(id=draw_id)\n        except Draw.DoesNotExist:\n            return None\n\n    # 3. Retrieve\n    def get(self, request, draw_id, *args, **kwargs):\n        '''\n        Retrieves the draw with given draw_id\n        '''\n        draw_instance = self.get_object(draw_id)\n        if not draw_instance:\n            return Response(\n                {\"res\": \"Object with draw id does not exists\"},\n                status=status.HTTP_400_BAD_REQUEST\n            )\n\n        serializer = DrawSerializer(draw_instance)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    # 4. Update\n    def put(self, request, draw_id, *args, **kwargs):\n        '''\n        Updates the draw item with given draw_id if exists\n        '''\n        draw_instance = self.get_object(draw_id)\n        if not draw_instance:\n            return Response(\n                {\"res\": \"Object with draw id does not exists\"}, \n                status=status.HTTP_400_BAD_REQUEST\n            )\n        data = {\n            'title': request.data.get('title'), \n            'payload': request.data.get('payload'),\n        }\n        serializer = DrawSerializer(instance = draw_instance, data=data, partial = True)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_200_OK)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    # 5. 
Delete\n def delete(self, request, draw_id, *args, **kwargs):\n '''\n Deletes the draw item with given draw_id if exists\n '''\n draw_instance = self.get_object(draw_id)\n if not draw_instance:\n return Response(\n {\"res\": \"Object with draw id does not exists\"}, \n status=status.HTTP_400_BAD_REQUEST\n )\n draw_instance.delete()\n return Response(\n {\"res\": \"Object deleted!\"},\n status=status.HTTP_200_OK\n )\n\n\n\n","repo_name":"CiroGamboa/drawer-app-backend","sub_path":"backendapp/draws/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29026968620","text":"import queue\n\n\ndef solveOneCase(p, c, counter):\n print('Case ' + str(counter) + ':')\n\n q = queue.Queue()\n\n for i in range(min(p, c)):\n q.put(i+1)\n # for i in range(p):\n # q.put(i+1)\n\n for _ in range(c):\n cmd = input()\n if cmd == 'N':\n k = q.get()\n print(k)\n q.put(k)\n else:\n e = int(cmd.split()[1])\n mq = queue.Queue()\n mq.put(e)\n for i in range(q.qsize()):\n k = q.queue[i]\n if (k != e):\n mq.put(k)\n\n q = mq\n\n\np, c = map(int, input().split())\ncounter = 1\nwhile p != 0:\n solveOneCase(p, c, counter)\n counter += 1\n p, c = map(int, input().split())\n\n# c*min(p, c)\n","repo_name":"Mtinkering/i-dont-know-algo","sub_path":"big-o/0_classified/uva_12207.py","file_name":"uva_12207.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29200635018","text":"\"\"\"Discrimators used in pi-GAN\"\"\"\nimport collections\nimport math\nimport torch\nimport torch.nn as nn\nimport curriculums\nimport torch.nn.functional as F\n\nfrom upfirdn2dLib.upfirdn2d import upfirdn2d\nfrom fused_actLib.fused_act import FusedLeakyReLU, fused_leaky_relu\n\nfrom .sgdiscriminators import *\nfrom .diffaug import DiffAugment\n\n\nclass GlobalAveragePooling(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x.mean([2, 3])\n\n\nclass AdapterBlock(nn.Module):\n def __init__(self, output_channels):\n super().__init__()\n self.model = nn.Sequential(\n nn.Conv2d(3, output_channels, 1, padding=0), nn.LeakyReLU(0.2)\n )\n\n def forward(self, input):\n return self.model(input)\n\n\ndef kaiming_leaky_init(m):\n classname = m.__class__.__name__\n if classname.find(\"Linear\") != -1:\n torch.nn.init.kaiming_normal_(\n m.weight, a=0.2, mode=\"fan_in\", nonlinearity=\"leaky_relu\"\n )\n\n\nclass AddCoords(nn.Module):\n \"\"\"\n Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py\n \"\"\"\n\n def __init__(self, with_r=False):\n super().__init__()\n self.with_r = with_r\n\n def forward(self, input_tensor):\n \"\"\"\n Args:\n input_tensor: shape(batch, channel, x_dim, y_dim)\n \"\"\"\n batch_size, _, x_dim, y_dim = input_tensor.size()\n\n xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)\n yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)\n\n xx_channel = xx_channel.float() / (x_dim - 1)\n yy_channel = yy_channel.float() / (y_dim - 1)\n\n xx_channel = xx_channel * 2 - 1\n yy_channel = yy_channel * 2 - 1\n\n xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)\n yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)\n\n ret = torch.cat(\n [\n input_tensor,\n xx_channel.type_as(input_tensor),\n yy_channel.type_as(input_tensor),\n ],\n dim=1,\n )\n\n if self.with_r:\n rr = torch.sqrt(\n 
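# Hedged mini-demo (added) of AddCoords above (assumes the class is in scope):
# with with_r=False, two coordinate channels in [-1, 1] are appended to the
# input feature map, so 3 input channels become 5.
import torch

x = torch.zeros(1, 3, 4, 4)
print(AddCoords()(x).shape)  # torch.Size([1, 5, 4, 4])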
torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2)\n + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2)\n )\n ret = torch.cat([ret, rr], dim=1)\n\n return ret\n\n\nclass CoordConv(nn.Module):\n \"\"\"\n Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py\n \"\"\"\n\n def __init__(self, in_channels, out_channels, with_r=False, **kwargs):\n super().__init__()\n self.addcoords = AddCoords(with_r=with_r)\n in_size = in_channels + 2\n if with_r:\n in_size += 1\n self.conv = nn.Conv2d(in_size, out_channels, **kwargs)\n\n def forward(self, x):\n ret = self.addcoords(x)\n ret = self.conv(ret)\n return ret\n\n\nclass ResidualCoordConvBlock(nn.Module):\n def __init__(\n self, inplanes, planes, kernel_size=3, stride=1, downsample=False, groups=1\n ):\n super().__init__()\n p = kernel_size // 2\n self.network = nn.Sequential(\n CoordConv(\n inplanes, planes, kernel_size=kernel_size, stride=stride, padding=p\n ),\n nn.LeakyReLU(0.2, inplace=True),\n CoordConv(planes, planes, kernel_size=kernel_size, padding=p),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.network.apply(kaiming_leaky_init)\n\n self.proj = nn.Conv2d(inplanes, planes, 1) if inplanes != planes else None\n self.downsample = downsample\n\n def forward(self, identity):\n y = self.network(identity)\n\n if self.downsample:\n y = nn.functional.avg_pool2d(y, 2)\n if self.downsample:\n identity = nn.functional.avg_pool2d(identity, 2)\n identity = identity if self.proj is None else self.proj(identity)\n\n y = (y + identity) / math.sqrt(2)\n return y\n\n\nclass ProgressiveDiscriminator(nn.Module):\n \"\"\"Implement of a progressive growing discriminator with ResidualCoordConv Blocks\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n self.epoch = 0\n self.step = 0\n self.layers = nn.ModuleList(\n [\n ResidualCoordConvBlock(16, 32, downsample=True), # 512x512 -> 256x256\n ResidualCoordConvBlock(32, 64, downsample=True), # 256x256 -> 128x128\n ResidualCoordConvBlock(64, 128, downsample=True), # 128x128 -> 64x64\n ResidualCoordConvBlock(128, 256, downsample=True), # 64x64 -> 32x32\n ResidualCoordConvBlock(256, 400, downsample=True), # 32x32 -> 16x16\n ResidualCoordConvBlock(400, 400, downsample=True), # 16x16 -> 8x8\n ResidualCoordConvBlock(400, 400, downsample=True), # 8x8 -> 4x4\n ResidualCoordConvBlock(400, 400, downsample=True), # 4x4 -> 2x2\n ]\n )\n\n self.fromRGB = nn.ModuleList(\n [\n AdapterBlock(16),\n AdapterBlock(32),\n AdapterBlock(64),\n AdapterBlock(128),\n AdapterBlock(256),\n AdapterBlock(400),\n AdapterBlock(400),\n AdapterBlock(400),\n AdapterBlock(400),\n ]\n )\n self.final_layer = nn.Conv2d(400, 1, 2)\n self.img_size_to_layer = {\n 2: 8,\n 4: 7,\n 8: 6,\n 16: 5,\n 32: 4,\n 64: 3,\n 128: 2,\n 256: 1,\n 512: 0,\n }\n\n def forward(self, input, alpha, instance_noise=0, **kwargs):\n start = self.img_size_to_layer[input.shape[-1]]\n\n x = self.fromRGB[start](input)\n for i, layer in enumerate(self.layers[start:]):\n if i == 1:\n x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](\n F.interpolate(input, scale_factor=0.5, mode=\"nearest\")\n )\n x = layer(x)\n\n x = self.final_layer(x).reshape(x.shape[0], 1)\n\n return x\n\n\nclass ProgressiveEncoderDiscriminator(nn.Module):\n \"\"\"\n Implement of a progressive growing discriminator with ResidualCoordConv Blocks.\n Identical to ProgressiveDiscriminator except it also predicts camera angles and latent codes.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n self.epoch = 0\n self.step = 0\n self.layers = nn.ModuleList(\n [\n 
ResidualCoordConvBlock(16, 32, downsample=True), # 512x512 -> 256x256\n ResidualCoordConvBlock(32, 64, downsample=True), # 256x256 -> 128x128\n ResidualCoordConvBlock(64, 128, downsample=True), # 128x128 -> 64x64\n ResidualCoordConvBlock(128, 256, downsample=True), # 64x64 -> 32x32\n ResidualCoordConvBlock(256, 400, downsample=True), # 32x32 -> 16x16\n ResidualCoordConvBlock(400, 400, downsample=True), # 16x16 -> 8x8\n ResidualCoordConvBlock(400, 400, downsample=True), # 8x8 -> 4x4\n ResidualCoordConvBlock(400, 400, downsample=True), # 4x4 -> 2x2\n ]\n )\n\n self.fromRGB = nn.ModuleList(\n [\n AdapterBlock(16),\n AdapterBlock(32),\n AdapterBlock(64),\n AdapterBlock(128),\n AdapterBlock(256),\n AdapterBlock(400),\n AdapterBlock(400),\n AdapterBlock(400),\n AdapterBlock(400),\n ]\n )\n self.final_layer = nn.Conv2d(400, 1 + 256 + 2, 2)\n self.img_size_to_layer = {\n 2: 8,\n 4: 7,\n 8: 6,\n 16: 5,\n 32: 4,\n 64: 3,\n 128: 2,\n 256: 1,\n 512: 0,\n }\n\n def forward(self, input, alpha, instance_noise=0, **kwargs):\n if instance_noise > 0:\n input = input + torch.randn_like(input) * instance_noise\n\n start = self.img_size_to_layer[input.shape[-1]]\n x = self.fromRGB[start](input)\n for i, layer in enumerate(self.layers[start:]):\n if i == 1:\n x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](\n F.interpolate(input, scale_factor=0.5, mode=\"nearest\")\n )\n x = layer(x)\n\n x = self.final_layer(x).reshape(x.shape[0], -1)\n\n prediction = x[..., 0:1]\n latent = x[..., 1:257]\n position = x[..., 257:259]\n\n return prediction, latent, position\n\n\nclass ScaledLeakyReLU(nn.Module):\n def __init__(self, negative_slope=0.2):\n super().__init__()\n\n self.negative_slope = negative_slope\n\n def forward(self, x):\n out = F.leaky_relu(x, negative_slope=self.negative_slope)\n return out * math.sqrt(2)\n\n\ndef make_kernel(k):\n k = torch.tensor(k, dtype=torch.float32)\n\n if k.ndim == 1:\n k = k[None, :] * k[:, None]\n\n k /= k.sum()\n\n return k\n\n\nclass Blur(nn.Module):\n def __init__(self, kernel, pad, upsample_factor=1):\n super().__init__()\n\n kernel = make_kernel(kernel)\n\n if upsample_factor > 1:\n kernel = kernel * (upsample_factor**2)\n\n self.register_buffer(\"kernel\", kernel)\n\n self.pad = pad\n\n def forward(self, x):\n out = upfirdn2d(x, self.kernel, pad=self.pad)\n return out\n\n\nclass EqualConv2d(nn.Module):\n def __init__(\n self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True\n ):\n super().__init__()\n\n self.weight = nn.Parameter(\n torch.randn(out_channel, in_channel, kernel_size, kernel_size)\n )\n self.scale = 1 / math.sqrt(in_channel * kernel_size**2)\n\n self.stride = stride\n self.padding = padding\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_channel))\n\n else:\n self.bias = None\n\n def forward(self, x):\n out = F.conv2d(\n x,\n self.weight * self.scale,\n bias=self.bias,\n stride=self.stride,\n padding=self.padding,\n )\n return out\n\n\nclass EqualConvTranspose2d(nn.Module):\n def __init__(\n self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True\n ):\n super().__init__()\n\n self.weight = nn.Parameter(\n torch.randn(in_channel, out_channel, kernel_size, kernel_size)\n )\n self.scale = 1 / math.sqrt(in_channel * kernel_size**2)\n\n self.stride = stride\n self.padding = padding\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_channel))\n\n else:\n self.bias = None\n\n def forward(self, x):\n out = F.conv_transpose2d(\n x,\n self.weight * self.scale,\n bias=self.bias,\n stride=self.stride,\n 
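# Hedged worked example (added) for make_kernel() above: the separable
# [1, 3, 3, 1] blur becomes a normalized 4x4 outer product.
import torch

k = torch.tensor([1, 3, 3, 1], dtype=torch.float32)
k2d = k[None, :] * k[:, None]  # outer product, shape (4, 4)
k2d /= k2d.sum()               # entries sum to 1; the centre weights are 9/64
print(k2d)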
padding=self.padding,\n )\n\n return out\n\n\nclass ConvLayer(nn.Sequential):\n def __init__(\n self,\n in_channel,\n out_channel,\n kernel_size,\n down_sample=False,\n blur_kernel=[1, 3, 3, 1],\n bias=True,\n activate=True,\n upsample=False,\n padding=\"zero\",\n ):\n layers = collections.OrderedDict()\n\n self.padding = 0\n stride = 2 if down_sample else 1\n\n if down_sample:\n factor = 2\n p = (len(blur_kernel) - factor) + (kernel_size - 1)\n pad0 = (p + 1) // 2\n pad1 = p // 2\n\n layers[\"down_blur\"] = Blur(blur_kernel, pad=(pad0, pad1))\n\n if upsample:\n up_conv = EqualConvTranspose2d(\n in_channel,\n out_channel,\n kernel_size,\n padding=0,\n stride=2,\n bias=bias and not activate,\n )\n layers[\"up_conv\"] = up_conv\n\n factor = 2\n p = (len(blur_kernel) - factor) - (kernel_size - 1)\n pad0 = (p + 1) // 2 + factor - 1\n pad1 = p // 2 + 1\n layers[\"up_blur\"] = Blur(blur_kernel, pad=(pad0, pad1))\n else:\n if not down_sample:\n if padding == \"zero\":\n self.padding = (kernel_size - 1) // 2\n elif padding == \"reflect\":\n padding = (kernel_size - 1) // 2\n if padding > 0:\n layers[\"pad\"] = nn.ReflectionPad2d(padding)\n self.padding = 0\n elif padding != \"valid\":\n raise ValueError(\"padding must be 'zero', 'reflect' or 'valid'\")\n\n equal_conv = EqualConv2d(\n in_channel,\n out_channel,\n kernel_size,\n padding=self.padding,\n stride=stride,\n bias=bias and not activate,\n )\n layers[\"equal_conv\"] = equal_conv\n\n if activate:\n if bias:\n layers[\"flrelu\"] = FusedLeakyReLU(out_channel)\n else:\n layers[\"slrelu\"] = ScaledLeakyReLU(0.2)\n\n super().__init__(layers)\n return\n\n\nclass ResBlock(nn.Module):\n def __init__(\n self,\n in_channel,\n out_channel,\n blur_kernel=[1, 3, 3, 1],\n kernel_size=3,\n down_sample=True,\n first_downsample=False,\n ):\n super().__init__()\n\n if first_downsample:\n self.conv1 = ConvLayer(\n in_channel, in_channel, kernel_size, down_sample=down_sample\n )\n self.conv2 = ConvLayer(in_channel, out_channel, kernel_size)\n else:\n self.conv1 = ConvLayer(in_channel, in_channel, kernel_size)\n self.conv2 = ConvLayer(\n in_channel, out_channel, kernel_size, down_sample=down_sample\n )\n\n self.skip = ConvLayer(\n in_channel,\n out_channel,\n 1,\n down_sample=down_sample,\n activate=False,\n bias=False,\n )\n\n def forward(self, input):\n out = self.conv1(input)\n out = self.conv2(out)\n\n skip = self.skip(input)\n out = (out + skip) / math.sqrt(2)\n\n return out\n\n\nclass EqualLinear(nn.Module):\n def __init__(\n self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None\n ):\n super().__init__()\n\n self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))\n\n else:\n self.bias = None\n\n self.activation = activation\n\n self.scale = (1 / math.sqrt(in_dim)) * lr_mul\n self.lr_mul = lr_mul\n\n def forward(self, input):\n if self.activation:\n out = F.linear(input, self.weight * self.scale)\n out = fused_leaky_relu(out, self.bias * self.lr_mul)\n\n else:\n out = F.linear(\n input, self.weight * self.scale, bias=self.bias * self.lr_mul\n )\n\n return out\n\n\nclass MultiScaleDiscriminator(nn.Module):\n def __init__(\n self,\n diffaug,\n max_size,\n channel_multiplier=2,\n blur_kernel=[1, 3, 3, 1],\n input_size=3,\n first_downsample=False,\n channels=None,\n stddev_group=4,\n ):\n super().__init__()\n self.epoch = 0\n self.step = 0\n\n self.diffaug = diffaug\n self.max_size = max_size\n self.input_size = input_size\n self.stddev_group = 
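# Hedged arithmetic check (added) of the down-sampling blur padding in
# ConvLayer above, with blur_kernel=[1, 3, 3, 1] (length 4), factor=2 and
# kernel_size=3:
p = (4 - 2) + (3 - 1)                # = 4
pad0, pad1 = (p + 1) // 2, p // 2    # = (2, 2)
print(p, pad0, pad1)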
stddev_group\n\n if channels is None:\n channels = {\n 4: 512,\n 8: 512,\n 16: 512,\n 32: 512,\n 64: 256 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n\n self.conv_in = nn.ModuleDict()\n for name, ch in channels.items():\n self.conv_in[f\"{name}\"] = nn.Conv2d(input_size, ch, 3, padding=1)\n\n self.convs = nn.ModuleDict()\n log_size = int(math.log(max_size, 2))\n in_channel = channels[max_size]\n for i in range(log_size, 2, -1):\n out_channel = channels[2 ** (i - 1)]\n self.convs[f\"{2 ** i}\"] = ResBlock(\n in_channel, out_channel, blur_kernel, first_downsample=first_downsample\n )\n in_channel = out_channel\n\n self.stddev_feat = 1\n\n if self.stddev_group > 1:\n self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)\n else:\n self.final_conv = ConvLayer(in_channel, channels[4], 3)\n\n self.space_linear = EqualLinear(\n channels[4] * 4 * 4, channels[4], activation=\"fused_lrelu\"\n )\n\n self.out_linear = EqualLinear(channels[4], 1)\n\n return\n\n def diff_aug_img(self, img):\n img = DiffAugment(img, policy=\"color,translation,cutout\")\n return img\n\n def forward(\n self,\n input,\n alpha\n ):\n if self.diffaug:\n input = self.diff_aug_img(input)\n\n size = input.shape[-1]\n log_size = int(math.log(size, 2))\n\n cur_size_out = self.conv_in[f\"{2 ** log_size}\"](input)\n cur_size_out = self.convs[f\"{2 ** log_size}\"](cur_size_out)\n\n if alpha < 1:\n down_input = F.interpolate(input, scale_factor=0.5, mode=\"bilinear\")\n down_size_out = self.conv_in[f\"{2 ** (log_size - 1)}\"](down_input)\n out = alpha * cur_size_out + (1 - alpha) * down_size_out\n else:\n out = cur_size_out\n\n for i in range(log_size - 1, 2, -1):\n out = self.convs[f\"{2 ** i}\"](out)\n\n batch, channel, height, width = out.shape\n\n if self.stddev_group > 0:\n group = min(batch, self.stddev_group)\n # (4, 2, 1, 512//1, 4, 4)\n stddev = out.view(\n group, -1, self.stddev_feat, channel // self.stddev_feat, height, width\n )\n # (2, 1, 512//1, 4, 4)\n stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)\n # (2, 1, 1, 1)\n stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)\n # (8, 1, 4, 4)\n stddev = stddev.repeat(group, 1, height, width)\n # (8, 513, 4, 4)\n out = torch.cat([out, stddev], 1)\n\n out = self.final_conv(out)\n out = out.view(batch, -1)\n\n out = self.space_linear(out)\n\n out = self.out_linear(out)\n\n latent, position = None, None\n return out, latent, position\n\n\nclass MultiScaleAuxDiscriminator(nn.Module):\n def __init__(\n self,\n diffaug,\n max_size,\n channel_multiplier=2,\n first_downsample=False,\n stddev_group=0,\n ):\n super().__init__()\n self.epoch = 0\n self.step = 0\n\n self.main_disc = MultiScaleDiscriminator(\n diffaug=diffaug,\n max_size=max_size,\n channel_multiplier=channel_multiplier,\n first_downsample=first_downsample,\n stddev_group=stddev_group,\n )\n\n # Auxiliary Discriminator\n channel_multiplier = 2\n channels = {\n 4: 128 * channel_multiplier,\n 8: 128 * channel_multiplier,\n 16: 128 * channel_multiplier,\n 32: 128 * channel_multiplier,\n 64: 128 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n self.aux_disc = MultiScaleDiscriminator(\n diffaug=diffaug,\n max_size=max_size,\n channel_multiplier=channel_multiplier,\n first_downsample=True,\n channels=channels,\n stddev_group=stddev_group,\n )\n\n return\n\n def forward(\n self, input, 
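# Hedged shape walk-through (added) of the minibatch-stddev block above, with
# batch=8, stddev_group=4, stddev_feat=1, channel=512, height=width=4:
import torch

out = torch.randn(8, 512, 4, 4)
group = 4
stddev = out.view(group, -1, 1, 512, 4, 4)                 # (4, 2, 1, 512, 4, 4)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)  # (2, 1, 512, 4, 4)
stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)   # (2, 1, 1, 1)
stddev = stddev.repeat(group, 1, 4, 4)                     # (8, 1, 4, 4)
print(torch.cat([out, stddev], 1).shape)                   # (8, 513, 4, 4)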
use_aux_disc=False, alpha=1.0, **kwargs\n ):\n if use_aux_disc:\n b = input.shape[0] // 2\n main_input = input[:b]\n aux_input = input[b:]\n main_out, latent, position = self.main_disc(\n main_input, alpha\n )\n aux_out, _, _ = self.aux_disc(aux_input, alpha)\n out = torch.cat([main_out, aux_out], dim=0)\n else:\n out, latent, position = self.main_disc(\n input, alpha\n )\n\n return out, latent, position\n","repo_name":"EdwardJTL/Edit3D","sub_path":"discriminators/discriminators.py","file_name":"discriminators.py","file_ext":"py","file_size_in_byte":20682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19188374249","text":"def main():\n while True:\n try:\n input()\n\n velocidades = to_int(input().split())\n\n mais_veloz = velocidades[0]\n\n for i in range(1, len(velocidades)):\n if velocidades[i] > mais_veloz:\n mais_veloz = velocidades[i]\n\n nivel = 1\n\n if mais_veloz >= 10 and mais_veloz < 20:\n nivel = 2\n\n elif mais_veloz >= 20:\n nivel = 3\n\n print(nivel)\n\n except EOFError:\n break\n\n\ndef to_int(c):\n return [int(i) for i in c]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"guilherme-nsr/beecrowd-sol","sub_path":"1789.py","file_name":"1789.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28927304056","text":"from core.BiddingStrategyInterface import BiddingStrategyInterface\nfrom abc import abstractmethod\nfrom core.Bid import Bid\nfrom core.OpponentModelInterface import OpponentModelInterface\nfrom core.TimeLine import TimeLine\nfrom core.Offer import Offer\nimport random\nfrom core.AbstractUtilitySpace import AbstractUtilitySpace\nfrom core.UserModelInterface import UserModelInterface\n\n\nclass AbstractBiddingStrategy(BiddingStrategyInterface):\n\n def __init__(self,\n opponent_model: OpponentModelInterface,\n utility_space: AbstractUtilitySpace = None,\n user_model: UserModelInterface = None):\n\n \"\"\"\n This class is used in BOA and EUBOA frameworks. 
one of the utility_space and user_model\n should be None and the other one should be set.\n :param opponent_model: opponent model\n :param utility_space: if you use BOA framework set this argument otherwise set it None\n :param user_model: if you use EUBOA framework set this argument otherwise set it None\n \"\"\"\n\n if not isinstance(opponent_model, OpponentModelInterface):\n raise TypeError('opponent_model argument must be an instance of OpponentModelInterface')\n if not isinstance(utility_space, AbstractUtilitySpace) and utility_space is not None:\n raise TypeError('utility_space argument must be an instance of AdditiveUtilitySpace or None')\n if not isinstance(user_model, UserModelInterface) and user_model is not None:\n raise TypeError('user_model argument must be an instance of AbstractUserModel or None')\n # if (utility_space is None) and (user_model is None):\n # raise TypeError('utility_space argument or user_model argument must be set with an object (Both of '\n # 'utility_space argument or user_model argument cannot be None)')\n # if not (utility_space is None) and not (user_model is None):\n # raise TypeError('at least one of utility_space or user_model must not be None')\n\n self.__opponent_model = opponent_model\n self.__utility_space = utility_space\n self.__user_model = user_model\n\n if utility_space is not None:\n self.__preference = utility_space.get_preference()\n elif user_model is not None:\n self.__preference = user_model.get_preference()\n # else:\n # raise TypeError('at least one of utility_space or user_model must not be None')\n\n def set_utility_space(self, utility_space: AbstractUtilitySpace):\n if not isinstance(utility_space, AbstractUtilitySpace):\n raise TypeError(\"utility_space must be an instance of AbstractUtilitySpace\")\n # if self.__utility_space is None and self.__user_model is None:\n self.__utility_space = utility_space\n self.__preference = utility_space.get_preference()\n # else:\n # raise ValueError(\"One of the utility_space or user_model was set before!\")\n\n def set_user_model(self, user_model: UserModelInterface):\n if not isinstance(user_model, UserModelInterface):\n raise TypeError(\"user_model mus be type of UserModelInterface\")\n # if self.__utility_space is None and self.__user_model is None:\n self.__user_model = user_model\n self.__preference = user_model.get_preference()\n # else:\n # raise ValueError(\"One of the utility_space or user_model was set before!\")\n\n def get_utility_space(self) -> AbstractUtilitySpace:\n return self.__utility_space\n\n @abstractmethod\n def send_bid(self, timeline: TimeLine) -> Bid:\n raise NotImplementedError()\n\n @abstractmethod\n def get_name(self) -> str:\n \"\"\"\n This method must return the name of bidding strategy\n :return: the name of bidding strategy\n \"\"\"\n raise NotImplementedError()\n\n def get_opponent_model(self):\n return self.__opponent_model\n\n def get_preference(self):\n return self.__preference\n\n def generate_random_bid(self):\n issue_items = {}\n preference_data_structure = self.get_preference().get_preference_data_structure()\n for issue in preference_data_structure:\n if issue != 'discount_factor' and issue != 'reservation':\n issue_item = list((preference_data_structure[issue][1]).keys())\n issue_items[issue] = random.choice(issue_item)\n\n bid = Bid(issue_items)\n return bid\n\n def get_utility(self, bid: Bid) -> float:\n if self.__utility_space is not None:\n return self.__utility_space.get_utility(bid)\n elif self.__user_model is not None:\n return 
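# Hedged sketch (added): the nested preference layout that
# generate_random_bid() above appears to assume -- each issue maps to a
# pair whose index [1] holds an {item: value} dict (index [0] is presumably
# the issue weight), while the 'discount_factor' and 'reservation' keys are
# skipped. The concrete issues and numbers below are illustrative only.
preference_data_structure = {
    'price': (0.6, {'low': 1.0, 'mid': 0.5, 'high': 0.1}),
    'colour': (0.4, {'red': 0.8, 'blue': 0.3}),
    'discount_factor': 0.9,
    'reservation': 0.25,
}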
self.__user_model.get_utility(bid)\n else:\n raise TypeError('at least one of utility_space or user_model must not be None')\n\n def get_utility_distinct(self, offer: Offer) -> float:\n if self.__utility_space is not None:\n return self.__utility_space.get_utility_distinct(offer)\n elif self.__user_model is not None:\n return self.__user_model.get_utility_distinct(offer)\n else:\n raise TypeError('at least one of utility_space or user_model must not be None')\n","repo_name":"negosim/negosim","sub_path":"core/AbstractBiddingStrategy.py","file_name":"AbstractBiddingStrategy.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"25803409809","text":"from collections import deque\n\n\ndef bfs(node, visit, adjNode): # dequeue를 이용한 dfs 탐색\n dq = deque([[node, 0]])\n while dq:\n node, count = dq.popleft()\n if visit[node] == -1:\n visit[node] = count # dq가 갖고 있는 count값이 1에서 node까지 거리임\n count += 1 # 뒤에 추가할 node는 거리를 1증가 시켜야함\n for i in adjNode[node]:\n if visit[i] == -1:\n dq.append([i, count])\n\n\ndef solution(n, edge):\n adjNode = [[] for _ in range(n + 1)] # 인접 노드\n visit = [-1] * (n + 1) # 방문 체크 및 거리 값으로도 활용\n for i in edge: # 인접 노드 세팅\n a, b = i\n adjNode[a].append(b)\n adjNode[b].append(a)\n bfs(1, visit, adjNode) # 1에서 dfs 탐색\n distance = max(visit) # 가장 거리가 먼 곳\n return visit.count(distance)\n\n\nprint(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))","repo_name":"Challenge-Next-Level/Dijkstra","sub_path":"individual/gunkim/week10_graph/lv3_가장먼노드.py","file_name":"lv3_가장먼노드.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15587440653","text":"from minizinc import Instance, Model, Solver\nfrom data import Data, N\n\nkmeans = Model(\"minizinc/K_means.mzn\")\ngecode = Solver.lookup(\"gecode\")\n\ninstance = Instance(gecode, kmeans)\n\ninstance[\"n\"] = N\ninstance[\"k\"] = 2\ninstance[\"D\"] = Data # not in appropriate format\n\nresult = instance.solve()\n\nprint(result[\"a\"])\n","repo_name":"fereshtehbaradaran/linear-programming-model-and-network-flow-for-k-means-clustering-method","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10401739494","text":"hair_cuts={\n \"name\":\"Hair Cuts\",\n \"Hair cut\":0,\n \"Shampoo & hair cut\":0,\n \"Grooming package \":0,\n \"Treatment \":0,\n \"Child hair cut\":0,\n \"Colour packages\":0\n}\n\nreborning={\n \"name\":\"Rebonding\",\n \"Package 1\":0,\n \"Package 2\":0,\n \"Package 3\":0,\n \"Package 4\":0,\n \n}\n\n\n\nfacialtreatment={\n \"name\":\"Facial Treatment\",\n \"Package 1\":0,\n \"Package 2\":0,\n \"Package 3\":0,\n \"Package 4\":0,\n}\n\nservices=[\"Hair Cuts\",\"Rebonding\",\"Facial Treatment\"]","repo_name":"Randikaviraj/Salon-Management-System-Backend","sub_path":"Server/db/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36380535394","text":"# The Modulo Operator\n# A useful tool for working with numerical information is the modulo operator (%), which divides one number by another number and returns the remainder:\n\n# The modulo operator doesn't tell you how many times one number fits into another, it just tells you what 
the remainder is.\n# >>> 4 % 3\n# 1\n# >>> 5 % 3\n# 2\n# >>> 6 % 3\n# 0\n# >>> 7 % 3\n# 1\n\n# You can use the modulo operator to determine if a number is even or odd:\nnumber = input(\"Enter a number, and I'll tell you if it's even or odd.\")\nnumber = int(number)\n\nif number % 2 == 0:\n    print(f\"\\nThe number {number} is even.\")\nelse:\n    print(f\"\\nThe number {number} is odd.\")\n","repo_name":"Alexander-Borges/While_Loops_Crash_Course","sub_path":"even_or_odd.py","file_name":"even_or_odd.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7995943369","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template import Context, loader, RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import IUPAC\nfrom Bio.Align.Applications import ClustalwCommandline\n\n\nfrom models import CDS\nfrom models import Contig\nfrom models import Feature\nfrom models import Protein\nfrom models import Reference\nfrom models import Strain\n\nimport base64\nimport bz2\nimport datetime\nimport os\nimport tempfile\nimport urllib\n\n\ndef index(request):\n    strains = Strain.objects.all().order_by('name')\n    referenceCount = len(Reference.objects.all())\n\n    strainList = []\n    for strain in strains:\n        contigs = Contig.objects.filter(strain = strain)\n        features = Feature.objects.filter(contig__strain = strain)\n        codingSeqs = CDS.objects.filter(feature__contig__strain = strain)\n        proteins = Protein.objects.filter(cds__feature__contig__strain = strain)\n        strainList.append([len(contigs), [strain, len(contigs), len(features), len(codingSeqs), len(proteins)]])\n    strainList.sort()\n    strainList = [x[1] for x in strainList]\n    t = loader.get_template('strains/index.html')\n    c = Context({'strain_list' : strainList,\n                 'reference_count' : referenceCount })\n    return HttpResponse(t.render(c))\n    ## index ##\n\ndef load_references(request):\n    return render_to_response('strains/load_references.html',\n                              {},\n                              context_instance = RequestContext(request))\n    ## load_references ##\n\n\ndef save_references(request):\n    \"\"\"\n    SGD_features.tab format looks like this:\n    0) Primary SGDID (mandatory)\n    1) Feature type (mandatory)\n    2) Feature qualifier (optional)\n    3) Feature name (optional)\n    4) Standard gene name (optional)\n    5) Alias (optional, multiples separated by |)\n    6) Parent feature name (optional)\n    7) Secondary SGDID (optional, multiples separated by |)\n    8) Chromosome (optional)\n    9) Start_coordinate (optional)\n    10) Stop_coordinate (optional)\n    11) Strand (optional)\n    12) Genetic position (optional)\n    13) Coordinate version (optional)\n    14) Sequence version (optional)\n    15) Description (optional)\n    \"\"\"\n    featuresFile = request.FILES['features_file']\n    features = [x.split('\\t') for x in featuresFile.read().splitlines()]\n    for line in features:\n        reference = Reference()\n        reference.sgdid = line[0]\n        reference.feature_type = line[1]\n        if line[2]:\n            reference.qualifier = line[2]\n        if line[3]:\n            reference.feature_name = line[3]\n        if line[4]:\n            reference.standard_name = line[4]\n        if line[5]:\n            reference.aliases = line[5]\n        if line[6]:\n            reference.parent_name = line[6]\n        if line[7]:\n            reference.secondary_sgdid = line[7]\n        if line[15]:\n            reference.description = line[15]\n\n        reference.createdDate = 
datetime.datetime.now()\n reference.modifiedDate = datetime.datetime.now()\n\n reference.save()\n return HttpResponseRedirect('/strains/')\n ## save_references ##\n \n\ndef detail(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n contigList = Contig.objects.filter(strain = strain_id)\n contigCount = len(contigList)\n featureList = Feature.objects.filter(contig__strain__pk = strain_id)\n featureCount = len(featureList)\n cdsList = CDS.objects.filter(feature__contig__strain__pk = strain_id)\n cdsCount = len(cdsList)\n pepList = Protein.objects.filter(cds__feature__contig__strain__pk = strain_id)\n pepCount = len(pepList)\n\n t = loader.get_template('strains/detail.html')\n c = Context({ 'the_strain' : theStrain,\n 'contig_count' : contigCount,\n 'feature_count' : featureCount,\n 'cds_count' : cdsCount,\n 'pep_count' : pepCount })\n\n return HttpResponse(t.render(c))\n ## detail ##\n\ndef new_strain(request):\n return render_to_response('strains/new_strain.html',\n {},\n context_instance = RequestContext(request))\n ## new_strain ##\n\n\ndef save_new_strain(request):\n strain_name = request.POST['strain_name']\n taxon_id = request.POST['taxon_id']\n pmid = request.POST['pmid']\n\n s = Strain()\n if strain_name:\n s.name = strain_name\n s.createdDate = datetime.datetime.now()\n s.modifiedDate = datetime.datetime.now()\n if taxon_id:\n s.taxonid = taxon_id\n if pmid:\n s.pmid = pmid\n if strain_name:\n s.save()\n # Always return an HttpResponseRedirect after successfully dealing with POST data.\n # This prevents data from being posted twice if the User hits the back button.\n return HttpResponseRedirect('/strains/')\n ## save_new_strain ##\n\n\ndef add_contigs(request, strain_id):\n theStrain = Strain.objects.filter(pk = strain_id)[0]\n return render_to_response('strains/add_contigs.html',\n { 'the_strain' : theStrain },\n context_instance = RequestContext(request))\n ## add_contigs ##\n\n\ndef save_contigs(request, strain_id):\n theStrain = Strain.objects.filter(pk = strain_id)[0]\n contigFile = request.FILES['contig_file']\n contigs = []\n for seqRecord in SeqIO.parse(contigFile, \"fasta\"):\n c = Contig()\n c.strain = Strain.objects.filter(pk = strain_id)[0]\n c.name = seqRecord.id\n\n # The next line is a total kludge to the apparent limit of 1,000,000\n # (or so) characters in a models.TextField on MySQL. The nested calls\n # make a string of dna letters shrink by more than 50%. 
Since I don't\n # think there are any 2 megabase chromosomes in Sac cer this should\n # work.\n c.seq = base64.b64encode(bz2.compress(seqRecord.seq.tostring()))\n\n c.createdDate = datetime.datetime.now()\n c.modifiedDate = datetime.datetime.now()\n\n c.save()\n return HttpResponseRedirect('/strains/')\n ## save_new_contigs ##\n\n\ndef contigs(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n contigList = Contig.objects.filter(strain = theStrain).order_by('name')\n contigs = []\n for aContig in contigList:\n nFeatures = len(Feature.objects.filter(contig = aContig))\n nCDS = len(CDS.objects.filter(feature__contig = aContig))\n contigLen = len(bz2.decompress(base64.b64decode(aContig.seq)))\n contigs.append([aContig, contigLen, nFeatures, nCDS])\n\n return render_to_response('strains/contigs.html',\n { 'the_strain' : theStrain,\n 'contig_list' : contigs })\n ## contigs ##\n\n\ndef contig_detail(request, contig_id):\n theContig = get_object_or_404(Contig, pk = contig_id)\n theStrain = theContig.strain\n contigName = theContig.name\n\n # Don't forget to turn the string we saved in the db back into a DNA\n # sequence\n contigSeq = bz2.decompress(base64.b64decode(theContig.seq))\n\n contigSeqParts = []\n n = 0\n size = 60\n while 1:\n part = contigSeq[n * size : (n + 1) * size]\n if not part:\n break\n contigSeqParts.append(part)\n n += 1\n contigSeq = '\\n'.join(contigSeqParts)\n\n featureCount = len(Feature.objects.filter(contig = theContig))\n\n return render_to_response('strains/contig_detail.html',\n { 'the_contig' : theContig,\n 'the_strain' : theStrain,\n 'contig_seq' : contigSeq,\n 'feature_count' : featureCount })\n ## contigDetail ##\n\n\ndef chop60(seq):\n seqParts = []\n n = 0\n size = 60\n while 1:\n part = seq[n * size : (n + 1) * size]\n if not part:\n break\n seqParts.append(part)\n n += 1\n return '\\n'.join(seqParts)\n ## chop60 ##\n\n\ndef add_features(request, strain_id):\n theStrain = Strain.objects.filter(pk = strain_id)[0]\n return render_to_response('strains/add_features.html',\n { 'the_strain' : theStrain },\n context_instance = RequestContext(request))\n ## add_features ##\n \n\ndef save_features(request, strain_id):\n \"\"\"\n seqid = line[0]\n source = line[1]\n type = line[2]\n start = line[3]\n end = line[4]\n score = line[5]\n strand = line[6] # \"+\", \"-\", or \".\"\n phase = line[7]\n attributes = line[8]\n \"\"\"\n theStrain = Strain.objects.get(pk = strain_id)\n gffFile = request.FILES['gff_file']\n gff = gffFile.read()\n gff = gff.split('###')[0] # Throw away the sequence\n gff = [x.split('\\t') for x in gff.splitlines() if x[0] != '#'] # Throw away the header comments. Now we're left with just the meat of the file\n\n contigMap = {}\n for seqid, source, featureType, start, end, score, strand, phase, attributes in gff:\n attributeParts = attributes.split(';')\n attributeParts = [x.split('=') for x in attributeParts]\n attributeParts = [(x[0], x[1].split(',')) for x in attributeParts]\n attributeParts = [(x[0], [urllib.unquote(y) for y in x[1]]) for x in attributeParts]\n\n attributeDict = {}\n for key, value in attributeParts:\n attributeDict[key] = value\n\n if featureType == 'contig':\n # We need to add this to the contigMap\n try:\n contigName = attributeDict['dbxref'][0].split(':')[-1]\n except KeyError:\n contigName = attributeDict['ID'][0]\n contigMap[seqid] = contigName\n elif featureType == 'chromosome':\n contigMap[seqid] = seqid\n else: # This is an actual feature line. 
It is assumed that we have already gone through all the contig lines\n            theContig = get_object_or_404(Contig, name=contigMap[seqid])  # Get the Contig we're going to point to\n            feature = Feature()\n            feature.contig = theContig\n            try:\n                feature.feature_id = attributeDict['ID'][0]\n            except KeyError:\n                pass\n            else:\n                # This one has a name that might be found in the Reference table\n                if feature.feature_id.find(theStrain.name) != -1:\n                    # Yup, it's one we need to link to the Reference table\n                    referenceName = feature.feature_id.split(\"_\")[0]\n                    feature.reference = Reference.objects.get(feature_name = referenceName)\n                else: # just try and see if there is a reference with this unmodified feature_id\n                    try:\n                        feature.reference = Reference.objects.get(feature_name = feature.feature_id)\n                    except ObjectDoesNotExist:\n                        pass\n\n            if 'Parent' in attributeDict:\n                parent = get_object_or_404(Feature, feature_id = attributeDict['Parent'][0], contig = theContig)\n                feature.parent = parent\n            feature.feature_type = featureType\n            feature.start_coord = int(start)\n            feature.stop_coord = int(end)\n            feature.strand = strand\n\n            feature.createdDate = datetime.datetime.now()\n            feature.modifiedDate = datetime.datetime.now()\n\n            feature.save()\n    return HttpResponseRedirect('/strains/')\n    ## save_features ##\n\n\ndef features_by_contig(request, contig_id):\n    theContig = get_object_or_404(Contig, pk = contig_id)\n    theStrain = theContig.strain\n    features = Feature.objects.filter(contig = theContig).order_by('start_coord')\n    features = [x for x in features if x.feature_id and not x.parent]\n    for i, feature in enumerate(features):\n        features[i] = [feature, get_child_features(feature)]\n\n    return render_to_response('strains/features_by_contig.html',\n                              { 'the_contig' : theContig,\n                                'the_strain' : theStrain,\n                                'feature_list' : features })\n    ## features_by_contig ##\n\n\ndef get_child_features(feature):\n    \"\"\"\n    get_child_features(feature):\n\n    Given a Feature instance, finds its children and returns them as a list of\n    childList sublists. It calls itself recursively to fill in the child's\n    children. 
When a feature has no children it returns an empty list.\n \"\"\"\n children = []\n childFeatures = Feature.objects.filter(parent = feature).order_by('start_coord')\n if childFeatures:\n for childFeature in childFeatures:\n children.append([childFeature, get_child_features(childFeature)])\n return children\n ## get_child_features ##\n\n\ndef features_by_strain(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n contigList = Contig.objects.filter(strain = theStrain).order_by('name')\n featureList = []\n for aContig in contigList:\n contigFeatures = Feature.objects.filter(contig = aContig).order_by('start_coord')\n if len(contigFeatures) > 0:\n featureList.append([aContig, contigFeatures])\n return render_to_response('strains/features_by_strain.html',\n { 'the_strain' : theStrain,\n 'feature_list' : featureList })\n ## features_by_strain ##\n\n\ndef feature_detail(request, feature_id):\n theFeature = get_object_or_404(Feature, pk = feature_id)\n theContig = theFeature.contig\n\n contigSeq = bz2.decompress(base64.b64decode(theContig.seq))\n featureSeq = contigSeq[theFeature.start_coord - 1: theFeature.stop_coord]\n\n if theFeature.strand == '-':\n bpSeq = Seq(featureSeq, IUPAC.unambiguous_dna)\n featureSeq = bpSeq.reverse_complement().tostring()\n featureSeq = chop60(featureSeq)\n\n if not theFeature.parent: # This is a top_level feature\n featureName = theFeature.feature_id\n else:\n featureType = theFeature.feature_type\n aFeature = theFeature\n while aFeature.parent:\n aFeature = aFeature.parent\n featureName = '%s_%s' % (aFeature.feature_id, featureType)\n\n return render_to_response('strains/feature_detail.html',\n { 'feature_seq' : featureSeq,\n 'feature_name' : featureName })\n ## feature_detail ##\n\n\ndef add_CDS(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n return render_to_response('strains/add_CDS.html',\n { 'the_strain' : theStrain },\n context_instance = RequestContext(request)) \n ## add_CDS ##\n\n\ndef add_protein(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n return render_to_response('strains/add_protein.html',\n { 'the_strain' : theStrain },\n context_instance = RequestContext(request)) \n ## add_protein ##\n\n\ndef save_CDS(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n CDS_file = request.FILES['cds_file']\n for seqRecord in SeqIO.parse(CDS_file, \"fasta\"):\n cds = CDS()\n cds.name = seqRecord.id\n cds.seq = seqRecord.seq.tostring()\n cds.feature = Feature.objects.get(feature_id = cds.name)\n cds.createdDate = datetime.datetime.now()\n cds.modifiedDate = datetime.datetime.now()\n cds.save()\n return HttpResponseRedirect('/strains/')\n ## save_CDS ##\n\n\ndef save_protein(request, strain_id):\n theStrain = get_object_or_404(Strain, pk = strain_id)\n protein_file = request.FILES['protein_file']\n for seqRecord in SeqIO.parse(protein_file, \"fasta\"):\n protein = Protein()\n protein.name = seqRecord.id\n protein.seq = seqRecord.seq.tostring()\n protein.cds = CDS.objects.get(name = protein.name)\n protein.createdDate = datetime.datetime.now()\n protein.modifiedDate = datetime.datetime.now()\n protein.save()\n return HttpResponseRedirect('/strains/')\n ## save_protein ##\n\n\ndef references(request):\n referenceList = Reference.objects.filter(feature_type = 'ORF').order_by('standard_name')\n references = []\n for ref in referenceList:\n references.append([ref, len(CDS.objects.filter(feature__reference = ref))])\n return 
render_to_response('strains/references.html',\n                              { 'reference_list' : references })\n    ## references ##\n\n\ndef gene_detail(request, reference_id):\n    theReference = get_object_or_404(Reference, pk = reference_id)\n    theFeatures = Feature.objects.filter(reference = theReference)\n    cdsGroup = CDS.objects.filter(feature__reference = theReference)\n    proteinGroup = Protein.objects.filter(cds__feature__reference = theReference)\n\n    cdsList = []\n    for cds in cdsGroup:\n        cdsList.append([cds, chop60(cds.seq)])\n    proteinList = []\n    for pep in proteinGroup:\n        proteinList.append([pep, chop60(pep.seq)])\n    return render_to_response('strains/gene_detail.html',\n                              { 'the_reference' : theReference,\n                                'feature_list' : theFeatures,\n                                'cds_list' : cdsList,\n                                'protein_list' : proteinList })\n    ## gene_detail ##\n\n\ndef cds_clustal(request, reference_id):\n    return run_clustal(request, reference_id, 'cds')\n    ## cds_clustal ##\n\n\ndef protein_clustal(request, reference_id):\n    return run_clustal(request, reference_id, 'protein')\n    ## protein_clustal ##\n\n\ndef run_clustal(request, reference_id, cdsOrPep):\n    theReference = get_object_or_404(Reference, pk = reference_id)\n    if cdsOrPep == 'cds':\n        objList = CDS.objects.filter(feature__reference = theReference)\n    else:\n        objList = Protein.objects.filter(cds__feature__reference = theReference)\n\n    handle, path = tempfile.mkstemp(suffix = '.fasta', prefix = 'strains_', text = True)\n    outf = open(path, 'w')\n    for obj in objList:\n        outf.write(\">%s\\n%s\\n\" % (obj.name, chop60(obj.seq)))\n    outf.close()\n\n    command = ClustalwCommandline(\"clustalw2\", infile = path)\n    stdout, stderr = command()\n    os.unlink(path)\n    inf = open(path.replace(\".fasta\", \".aln\"))\n    result = inf.read()\n    inf.close()\n    if not result:\n        result = 'No clustal alignment'\n    os.unlink(path.replace(\".fasta\", \".aln\"))\n    try:\n        os.unlink(path.replace(\".fasta\", \".dnd\"))\n    except OSError: # There was no .dnd file created. 
That's OK, go on\n        pass\n    return render_to_response('strains/alignment.html',\n                              { 'the_reference' : theReference,\n                                'result' : result,\n                                'cdsOrPep' : cdsOrPep })\n    ## run_clustal ##\n\n\ndef strain_cds(request, strain_id):\n    theStrain = get_object_or_404(Strain, pk = strain_id)\n    cdsList = CDS.objects.filter(feature__contig__strain = theStrain)\n    thingList = []\n    for cds in cdsList:\n        thingList.append([cds.name, chop60(cds.seq)])\n    return render_to_response('strains/fasta.html',\n                              { 'things' : thingList })\n    ## strain_cds ##\n\n\ndef strain_protein(request, strain_id):\n    theStrain = get_object_or_404(Strain, pk = strain_id)\n    pepList = Protein.objects.filter(cds__feature__contig__strain = theStrain)\n    thingList = []\n    for pep in pepList:\n        thingList.append([pep.name, chop60(pep.seq)])\n    return render_to_response('strains/fasta.html',\n                              { 'things' : thingList })\n    ## strain_protein ##\n\n\ndef strain_contigs(request, strain_id):\n    theStrain = get_object_or_404(Strain, pk = strain_id)\n    contigList = Contig.objects.filter(strain = theStrain)\n    thingList = []\n    for contig in contigList:\n        thingList.append([contig.name, chop60(bz2.decompress(base64.b64decode(contig.seq)))])\n    return render_to_response('strains/fasta.html',\n                              { 'things' : thingList })\n    ## strain_contigs ##\n\n\ndef delete_strain(request, strain_id):\n    theStrain = get_object_or_404(Strain, pk = strain_id)\n    proteins = Protein.objects.filter(cds__feature__contig__strain = theStrain)\n    cds = CDS.objects.filter(feature__contig__strain = theStrain)\n    features = Feature.objects.filter(contig__strain = theStrain)\n\n    noParent = features.filter(parent = None)\n\n    # There's probably a better way to do this that allows us to get a QuerySet\n    # rather than turning things into lists. With a QuerySet we can .delete()\n    # the whole set rather than having to delete each item in the list one at a\n    # time.\n    notParent = [x for x in features if x not in noParent]\n    grandchildren = [x for x in notParent if x.parent in notParent]\n    children = [x for x in notParent if x not in grandchildren]\n\n    contigs = Contig.objects.filter(strain = theStrain)\n\n    proteins.delete()\n    cds.delete()\n    for gc in grandchildren:\n        gc.delete()\n    for c in children:\n        c.delete()\n    noParent.delete()\n    contigs.delete()\n    theStrain.delete()\n    return HttpResponseRedirect('/strains/')\n    ## delete_strain ##\n","repo_name":"cdamundsen/SGD-Strains","sub_path":"compGen/strains/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21507,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"38875491708","text":"def solution(name, yearning, photo):\n    # store the results\n    result = []\n    # zip the two lists into a dictionary\n    information = dict(zip(name, yearning))\n    for people in photo:\n        # reset the score\n        score = 0\n        for person in people:\n            # add the person's score if they appear in the dictionary\n            score += information.get(person, 0)\n        # save the score for this photo\n        result.append(score)\n    # return the scores\n    return result","repo_name":"kai3n/Daily-commit-project","sub_path":"Sanghyun/week29/추억 점수.py","file_name":"추억 점수.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21205371221","text":"import requests\nimport sys\nimport pymongo\nimport time\nimport re\n\n\"\"\"\n    Pull Gene, Drug, Cancer Pathway Data from KEGG and push to\n    MongoDB collections\n\"\"\"\n\n\ndef new_gene_dict(input_id, input_name):\n    output = {\"name\": input_name.replace(\"*\", \"\").strip()}\n    if \"C\" in input_id:\n        
output[\"mutation\"] = False\n output[\"type\"] = \"Compound\"\n output[\"id\"] = \"cpd:\" + input_id\n elif \"K\" in input_id:\n output[\"mutation\"] = False\n output[\"type\"] = \"Perturbant\"\n output[\"id\"] = input_id\n else:\n output[\"type\"] = \"Gene\"\n if \"v\" in input_id:\n output[\"mutation\"] = True\n output[\"id\"] = \"hsa:\" + input_id.split(\"v\")[0]\n else:\n output[\"mutation\"] = False\n output[\"id\"] = \"hsa:\" + input_id\n\n return output\n\n\ndef new_relation_dict(previous_genes, symbol):\n symbol_table = {\n \"->\": \"activation\",\n \"-|\": \"inhibition\",\n \"=>\": \"expression\",\n \"=|\": \"repression\",\n \"//\": \"missing interaction\",\n \">>\": \"enzyme-enzyme\",\n \"--\": \"complex formation\",\n }\n\n output = {\n \"mutation_activated\": any([prev[\"mutation\"] for prev in previous_genes]),\n \"type\": symbol_table.get(symbol, \"unknown\"),\n }\n\n return output\n\n\ndef create_products(expanded_string, definition_string):\n names_string = definition_string\n ids_string = expanded_string\n for char in \"()\":\n ids_string = ids_string.replace(char, \"\")\n names_string = names_string.replace(char, \"\")\n\n # This splits on + and ,\n names_string = names_string.replace(\"+\", \",\")\n names = names_string.split(\",\")\n\n ids_string = ids_string.replace(\"+\", \",\")\n gene_ids = ids_string.split(\",\")\n\n return [new_gene_dict(gene_id, name) for gene_id, name in zip(gene_ids, names)]\n\n\ndef get_network_entries(ids):\n query = \"+\".join(ids)\n r = requests.get(\"http://rest.kegg.jp/get/\" + query)\n if r.status_code != 200:\n return None\n if r.text == \"\":\n return None\n entries = r.text.split(\"///\")[:-1]\n if not len(entries) == len(ids):\n return None\n\n entries_lines = [entry.split(\"\\n\") for entry in entries]\n\n output = []\n for net_id, entry in zip(ids, entries_lines):\n current_def_and_expanded = [net_id]\n for line in entry:\n if \"DEFINITION\" in line:\n current_def_and_expanded.append(line.replace(\"DEFINITION\", \"\").strip())\n if \"EXPANDED\" in line:\n current_def_and_expanded.append(line.replace(\"EXPANDED\", \"\").strip())\n output.append(current_def_and_expanded)\n\n return output\n\n\ndef get_all_relevant_networks():\n r = requests.get(\"http://rest.kegg.jp/link/network/pathway\")\n if r.status_code != 200:\n return None\n if r.text == \"\":\n return None\n lines = r.text.split(\"\\n\")[:-1]\n networks = [line.split(\"\\t\")[1] for line in lines]\n return set(networks)\n\n\ndef upload_nets(nets, collection):\n network_dicts = []\n for net in nets:\n definition_tokens = net[1].split(\" \")\n expanded_tokens = net[2].split(\" \")\n index = 0\n current_id = 0\n net_query = \"CREATE \"\n products_source = create_products(expanded_tokens[0], definition_tokens[0])\n # To contain ids of source nodes\n source_ids = []\n # Go through first source products, assign ids and add to query\n for product in products_source:\n net_query += (\n \"(a\"\n + str(current_id)\n + \":\"\n + product[\"type\"]\n + '{name:\"'\n + product[\"name\"]\n + '\", kegg_ids: [\"'\n + product[\"id\"]\n + '\"]}),'\n )\n source_ids.append(current_id)\n current_id += 1\n\n while index + 2 < len(expanded_tokens):\n # Get current relation\n relation_dict = new_relation_dict(\n products_source, expanded_tokens[index + 1]\n )\n # Get destination products\n products_destination = create_products(\n expanded_tokens[index + 2], definition_tokens[index + 2]\n )\n # Assign ids to destination products and add to query\n dest_ids = []\n for product in products_destination:\n 
net_query += (\n \"(a\"\n + str(current_id)\n + \":\"\n + product[\"type\"]\n + '{name:\"'\n + product[\"name\"]\n + '\", kegg_ids: [\"'\n + product[\"id\"]\n + '\"]}),'\n )\n dest_ids.append(current_id)\n current_id += 1\n\n for source in source_ids:\n for dest in dest_ids:\n net_query += (\n \"(a\"\n + str(source)\n + ')-[:Network {subtypes:[\"'\n + relation_dict[\"type\"]\n + '\"'\n )\n if relation_dict[\"mutation_activated\"]:\n net_query += ',\"mutation activated\"'\n net_query += \"]}]->\" + \"(a\" + str(dest) + \"),\"\n source_ids = dest_ids\n index += 2\n net_query = net_query[:-1]\n network_dicts.append({\"id\": net[0], \"query\": net_query})\n collection.insert_many(network_dicts)\n\n\ndef get_drugs_list():\n r = requests.get(\"http://rest.kegg.jp/list/drug\")\n if r.status_code != 200:\n return None\n if r.text == \"\":\n return None\n lines = r.text.split(\"\\n\")[:-1]\n output = []\n for line in lines:\n info = line.split(\"\\t\")\n drug_id = info[0]\n drug_names = info[1]\n names_list = []\n for name in drug_names.split(\";\"):\n names_list.append(re.sub(r\"\\([^()]*\\)\", \"\", name).strip())\n output.append({\"kegg_id\": drug_id, \"names\": names_list})\n\n return output\n\n\ndef get_all_relevant_drug_links():\n output = []\n\n # Pathways\n r = requests.get(\"http://rest.kegg.jp/link/drug/pathway\")\n if r.status_code != 200:\n return None\n if r.text == \"\":\n return None\n lines = r.text.split(\"\\n\")[:-1]\n\n pathways_to_drugs = {}\n for line in lines:\n entries = line.split(\"\\t\")\n pathway = entries[0]\n drug = entries[1]\n if pathway not in pathways_to_drugs.keys():\n pathways_to_drugs[pathway] = []\n pathways_to_drugs[pathway].append(drug)\n\n for key in pathways_to_drugs.keys():\n output.append(\n {\"target\": key.replace(\"map\", \"hsa\"), \"drugs\": pathways_to_drugs[key]}\n )\n\n time.sleep(3)\n # Genes\n r = requests.get(\"http://rest.kegg.jp/link/drug/hsa\")\n if r.status_code != 200:\n return None\n if r.text == \"\":\n return None\n lines = r.text.split(\"\\n\")[:-1]\n\n genes_to_drugs = {}\n for line in lines:\n entries = line.split(\"\\t\")\n gene = entries[0]\n drug = entries[1]\n if gene not in genes_to_drugs.keys():\n genes_to_drugs[gene] = []\n genes_to_drugs[gene].append(drug)\n\n for key in genes_to_drugs.keys():\n output.append({\"target\": key, \"drugs\": genes_to_drugs[key]})\n\n return output\n\n\ndef main():\n database_url = sys.argv[1]\n client = pymongo.MongoClient(database_url)\n db = client[\"networks\"]\n kegg_networks_collection = db[\"kegg_networks\"]\n\n if kegg_networks_collection.count() > 0:\n print(\"Dropping networks collection...\")\n kegg_networks_collection.drop()\n\n print(\"Importing network ids...\")\n networks = list(get_all_relevant_networks())\n print(\"Importing \" + str(len(networks)) + \" networks.\")\n time.sleep(3)\n\n network_index = 0\n\n while network_index + 10 <= len(networks):\n this_10 = networks[network_index : network_index + 10]\n nets = get_network_entries(this_10)\n\n print(\"Adding networks \" + str(network_index) + \" - \" + str(network_index + 10))\n upload_nets(nets, kegg_networks_collection)\n time.sleep(3)\n\n network_index += 10\n\n last_nets_index = network_index\n last_nets = networks[last_nets_index:]\n print(\"Adding last networks...\")\n upload_nets(last_nets, kegg_networks_collection)\n\n db = client[\"drugs\"]\n kegg_drugs_collection = db[\"kegg_drugs\"]\n if kegg_drugs_collection.count_documents({}) > 0:\n print(\"Dropping drugs collection...\")\n kegg_drugs_collection.drop()\n\n 
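# Refresh the drug catalogue: get_drugs_list() pulls every drug entry from the KEGG REST API before reinserting.\n    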
print(\"Updating drugs collection...\")\n drugs = get_drugs_list()\n kegg_drugs_collection.insert_many(drugs)\n\n kegg_drug_links_collection = db[\"kegg_drug_links\"]\n if kegg_drug_links_collection.count_documents({}) > 0:\n print(\"Dropping drug links collection...\")\n kegg_drug_links_collection.drop()\n\n print(\"Updating drugs links...\")\n drugs = get_all_relevant_drug_links()\n kegg_drug_links_collection.insert_many(drugs)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ol-th/pathlink","sub_path":"scripts/db_management/populate_kegg_data.py","file_name":"populate_kegg_data.py","file_ext":"py","file_size_in_byte":9063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39936642837","text":"import pyodbc \n\n#database details\nserver = 'localhost' \ndatabase = 'INNO'\n\ncnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; SERVER=' + server + '; DATABASE=' + database + '; Trusted_Connection=yes;')\n\ncursor = cnxn.cursor()\ncursor.execute('SELECT DISTINCT Tag FROM Responses')\nrow = cursor.fetchall()\n\ntags = []\n\nfor i in row:\n data = i[0]\n tags.append(data)\n\nprint(tags)","repo_name":"marl6102/Inno","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23933547774","text":"import os\n\ndef checkIfPrime(x):\n\tisPrime = True\n\ti = 2\n\twhile (int(i) <= int(x) / 2):\n\t\ttemp = int(num) % int(i)\n\t\tif (int(temp) == 0):\n\t\t\tisPrime = False\n\t\t\tbreak\n\t\ti = int(i) + 1\n\tif (isPrime == True and int(num) != 0 and int(num) != 1 and int(num) > 0):\n\t\tprint (\"%d is prime.\" % int(x))\n\telse:\n\t\tprint (\"%d is not prime.\" % int(x))\n\treturn\n\t\nnum = input(\"Please enter number to check if prime or not: \")\ncheckIfPrime(int(num))\n\nos.system(\"pause\")","repo_name":"dev-arthur-g20r/how-to-code-together-using-python","sub_path":"How to Code Together using Python/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"39421567020","text":"from LSTM_classifier import IntentModel\nfrom get_vocab import get_rid\nimport csv\nimport tensorflow.contrib.keras as kr\nimport config\nimport random\nimport numpy as np\n\npositive_path = 'data/question.csv'\nnegative_path = 'data/xiaohuangji50w_nofenci.conv'\nvocab_path = 'data/vocabulary.txt'\ncheckpoint_path = 'model/intent_classifier_model'\n\n\ndef get_length(x_batch):\n real_len = []\n for line in x_batch:\n real_len.append(np.sum(np.sign(line)))\n return real_len\n\n\nif __name__ == '__main__':\n with open(vocab_path, 'r', encoding='utf8') as file:\n vocabulary_list = [k.strip() for k in file.readlines()]\n word2id_dict = dict([(b, a) for a, b in enumerate(vocabulary_list)])\n\n content_list = []\n with open(positive_path, 'r', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n for row in reader:\n content_list.append([get_rid(row['question']), 1])\n\n with open(negative_path, 'r', encoding='utf-8') as f:\n state = 0\n question = ''\n for line in f.readlines():\n if line[0] == 'E':\n state = 1\n elif state == 1 and line[0] == 'M':\n state = 2\n question = get_rid(line[2:])\n elif state == 2 and line[0] not in ['E', 'M']:\n question += get_rid(line)\n elif state == 2 and line[0] == 'M':\n state = 0\n content_list.append([question, 0])\n\n random.shuffle(content_list)\n\n 
content2id = lambda content: [word2id_dict[word] for word in content if word in word2id_dict]\n content2id_list = [content2id(content[0]) for content in content_list]\n\n train_x = kr.preprocessing.sequence.pad_sequences(content2id_list, maxlen=config.sequence_length, padding='post', truncating='post')\n\n train_y = []\n for content in content_list:\n if content[1] == 1:\n train_y.append([0, 1])\n else:\n train_y.append([1, 0])\n\n model = IntentModel()\n\n for i in range(3000):\n batch_train_x = []\n batch_train_y = []\n train_num = random.sample(list(range(len(train_x))), config.train_batch_size)\n for num in train_num:\n batch_train_x.append(train_x[num])\n batch_train_y.append(train_y[num])\n real_train_length = get_length(batch_train_x)\n _, train_loss, train_accuracy = model.train_step([batch_train_x, batch_train_y], real_train_length)\n batch = i + 1\n if batch % 100 == 0:\n print('batch: %d, loss: %.4f, accuracy: %.4f' % (batch, train_loss, train_accuracy))\n model.save(checkpoint_path)\n","repo_name":"peteryang1/PKU-software-implementation","sub_path":"2019/2019_QASystem/code/intent_classifier/train_all.py","file_name":"train_all.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"78528101","text":"import argparse\nimport subprocess\nfrom pathlib import Path\nfrom shlex import quote\n\nargparser = argparse.ArgumentParser()\nargparser.add_argument(\"experiment_dir\", type=Path)\nargparser.add_argument(\"--corpus\", type=str,\n default=\"X.SpeakerDiarization.BBT2\")\nargparser.add_argument(\"--train_corpus\", type=str)\nargparser.add_argument(\"--dry_run\", action='store_true')\n\nMETRIC = \"fscore\"\n\nargs = argparser.parse_args()\nlauncher_file = Path(__file__).parent / Path(\"launchers\") / Path(\"validate.sh\")\n\nif args.train_corpus is None:\n print(\"Assuming train corpus is the same as validation corpus\")\n args.train_corpus = args.corpus\n\ntrain_dir = f\"{args.experiment_dir}/train/{args.train_corpus}.train\"\nassert Path(train_dir).is_dir()\n\nlog_file = f\"validation_logs/validate_{args.corpus}_{args.experiment_dir.name}_{METRIC}.log\"\ncommand_args = [\n \"sbatch\",\n \"-o\", log_file,\n str(launcher_file), args.corpus, train_dir\n]\ncommand_str = \" \".join(quote(s) for s in command_args)\nprint(f\"Lauching job with command {command_str}\")\nif not args.dry_run:\n subprocess.Popen(command_args)\n","repo_name":"marianne-m/BBT3_training","sub_path":"job_launchers/master_validate.py","file_name":"master_validate.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27046813890","text":"# grid.py\n#\n# This file contains the Grid class which is used as a framework to perform the linesearch,\n# load the output of a linesearch and to analyse the output.\n#\n# Author: Simon van Eeden\n\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.ticker as mticker\nfrom xspec import *\nimport numpy as np\nimport os\nfrom scipy.constants import c, h \n\nclass Grid():\n \"\"\"\n Object class to run and analyse a grid search. 
\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Sets all parameters to None.\n \"\"\"\n\n # Grid parameters\n self.start = None\n self.end = None\n self.step_size = None\n self.energies = None\n self.wavelengths = None\n self.num_grid_points = None\n\n # Search parameters\n self.line_width = None\n self.spectrum_path = None\n self.search_mode = None\n self.window_size = None\n \n # Search output parameters\n self.significance = None\n self.delta_fitstat = None\n self.initial_continuum = None\n self.optimized_continuum = None\n self.const_factor = None\n self.fit_err = None\n self.fit_stat = None\n self.runtime = None\n self.output_path = None\n self.fit_model = None\n self.date = None\n \n # Observation details\n self.object_name = None\n self.obs_id = None\n self.obs_mode = None\n self.instrument = None\n self.telescope = None\n\n # Import methods\n from ._search import run_search, run_custom_search\n from ._plots import plot_search, plot_significance, plot_gridpoint\n\n def set_grid_points(self, start=1., end=10., line_width=500., step_size=0.3):\n \"\"\"\n Creates and set energy grid into Grid class \n\n Parameters\n start (float): Energy where linesearch will start (keV)\n end (float): Energy where linesearch will end (keV)\n line_width (float): Width of gaussian line feature (km/s)\n step_size (float): Stepsize, as a fraction of the gaussian line width\n \"\"\"\n\n # Set grid parameters\n self.start = start\n self.end = end\n self.step_size = step_size\n\n # Create energy grid\n energies = []\n e_center = start\n while e_center < end:\n # Save energy in grid\n energies.append(e_center)\n\n # Get line width in energy (keV)\n line_energy_width = line_width*1000/c * e_center\n\n # Calculate next energy gridpoint\n e_center += line_energy_width*step_size\n\n self.energies = np.array(energies)\n self.wavelengths = np.array([(h*c)/(kev*1.6022e-16)*1e10 for kev in self.energies])\n self.num_grid_points = len(self.energies)\n\n print(\"Initialized energy grid\")\n print(\"\\t Grid start: \\t {0:.2f} keV\".format(self.start))\n print(\"\\t Grid end: \\t {0:.2f} keV\".format(self.end))\n print(\"\\t Step fraction: {0:.2f} (fraction of line width)\".format(self.step_size))\n print(\"\\t Line width: \\t {0:.0f} km/s\".format(line_width))\n print(\"\\t Grid points: \\t {0:d}\".format(self.num_grid_points))\n print(\"\")\n\n def load_search(self, output_path, object_name=None):\n \"\"\"\n Reads header and search information from the linesearch output text file.\n\n Parameters\n output_path (str): Path to the linesearch output file\n object_name (str): Manually specified object name. 
Defaults to None.\n        \"\"\"\n\n        # Read in header information\n        header = {}\n        with open(output_path) as f:\n            # Iterate through the file until the table starts\n            for line in f:\n                if not line.startswith('#'):\n                    break\n\n                stripped = line.strip('# \\n').split('=')\n                header[stripped[0]] = stripped[1]\n\n        # Read in data\n        dat = np.genfromtxt(output_path, names=header[\"COLUMN_NAMES\"])\n\n        # Set search parameters\n        self.start = float(header[\"GRID_START\"])\n        self.end = float(header[\"GRID_END\"])\n        self.step_size = float(header[\"GRID_STEP_SIZE\"])\n        self.energies = dat[\"ENERGY\"]\n        self.wavelengths = dat[\"WAVELENGTH\"]\n        self.num_grid_points = int(header[\"NUM_GRID_POINTS\"])\n        self.line_width = float(header[\"LINE_VELOCITY_WIDTH\"])\n        self.spectrum_path = header[\"SPECTRUM_PATH\"]\n        self.search_mode = header[\"SEARCH_MODE\"]\n\n        # Set output parameters\n        self.window_size = float(header[\"WINDOW_SIZE\"])\n        self.significance = dat[\"SIGNIFICANCE\"]\n        self.norm_pm = dat[\"NORM_SIGN\"]\n        self.delta_fitstat = dat[\"DELTA_FITSTAT\"]\n        self.initial_continuum = dat[\"INITIAL_CONTINUUM\"]\n        self.optimized_continuum = dat[\"OPTIMIZED_CONTINUUM\"]\n        self.const_factor = dat[\"CONSTANT_FACTOR\"]\n        self.fit_err = dat[\"FIT_ERR\"]\n        self.runtime = float(header[\"RUNTIME\"])\n        self.fit_stat = header[\"XSPEC_FITSTAT\"]\n        self.output_path = output_path\n        self.fit_model = header[\"XSPEC_FIT_MODEL\"]\n        self.date = header[\"SEARCH_DATE\"]\n\n        # Additional parameters\n        self.obs_id = header[\"OBS_ID\"]\n        self.obs_mode = header[\"OBS_MODE\"]\n        self.grating = header[\"GRATING\"]\n        self.telescope = header[\"TELESCOPE\"]\n\n        # Allow user to specify object name manually\n        if object_name is not None:\n            self.object_name = object_name\n        else:\n            self.object_name = header[\"OBJECT_NAME\"]\n\n        print(\"Linesearch loaded\")\n        print(\"\\t Object name: {0:s}\".format(self.object_name))\n        print(\"\\t Output path: {0:s}\".format(self.output_path))\n        print(\"\")\n\n    def calc_line(self, line_center, line_region):\n        \"\"\"\n        Calculates line significance and shift for a given line center and region\n\n        Parameters\n            line_center (float): Line center in keV\n            line_region (list): Region around line center in keV\n\n        Returns\n            line_significance (float): Line significance\n            line_shift (float): Line shift in km/s\n            line_energy (float): Energy of the minimum-significance grid point in keV\n            fit_errors_in_region (bool): True if a fit error occurred in the selected region\n        \"\"\"\n\n        # Select significances around line center\n        significance_line_region = self.significance[(self.energies > line_region[0]) & (self.energies < line_region[1])]\n        energies_line_region = self.energies[(self.energies > line_region[0]) & (self.energies < line_region[1])]\n        fit_errors_in_region = np.sum(self.fit_err[(self.energies > line_region[0]) & (self.energies < line_region[1])])\n\n        # Calculate significance\n        line_significance = np.min(significance_line_region)\n\n        # Calculate line shift\n        line_energy = energies_line_region[np.argmin(significance_line_region)]\n        line_shift = (line_energy - line_center)/line_center * (c/1000)\n\n        # Check if fit error occurred in selected region\n        if fit_errors_in_region > 0:\n            print('Warning: somewhere in the selected energy range a fit error has occurred: ' + self.output_path)\n\n        return line_significance, line_shift, line_energy, fit_errors_in_region > 0\n\n    def check(self):\n        \"\"\"\n        Checks fit errors and constant factor.\n\n        Returns\n            validity (bool): True if fit errors and constant factor are valid\n        \"\"\"\n\n        # Check if fit errors occurred\n        if np.sum(self.fit_err) > 0:\n            print(\"Warning: fit errors occurred\")\n            
return False\n\n        # Check if constant factor is valid\n        if np.max(self.const_factor) > 2:\n            print(\"Warning: constant factor is higher than 2\")\n            return False\n\n        # Check for unreal line significances\n        if np.min(self.significance) < -50:\n            print(\"Warning: line significance is lower than -50\")\n            return False\n\n        # Flag suspiciously low line significances\n        if np.min(self.significance) < -3:\n            print(\"Note: line significance lower than -3\")\n\n        return True\n\n    def __str__(self):\n        return '{0:s}, {1:s}'.format(self.object_name, self.obs_id)\n","repo_name":"Simon-1999/Linesearch","sub_path":"linesearch/grid/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32710786287","text":"\"\"\"\nVertex sampling\n(including degree-biased sampling)\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sparse\n\nfrom helpers.graph_helpers import reindex_edge_list\nfrom helpers.sampling_helpers import choice\n\n\ndef vert_samp(graph, k, l, u_dist = None, i_dist = None):\n    \"\"\"\n    Sampling defaults to with replacement because multinomial sampling is easier than multidimensional hypergeometric sampling\n\n    :param graph: graph in the usual (edge_list, weights) format\n    :param k: number 
of users in sample\n :param l: number of items in sample\n :param u_dist: sampling distribution for users (defaults to uniform with replacement)\n :param i_dist: sampling distribution for items (defaults to uniform with replacement)\n :return: adjacency matrix of the subsample, and lists of selected users and selected items\n \"\"\"\n edge_list, weights = graph\n\n users = np.unique(edge_list[:, 0])\n items = np.unique(edge_list[:,1])\n\n if u_dist is None:\n selected_users = choice(users, k, replace=True)\n else:\n proto_selected_users = np.random.multinomial(k, u_dist)\n selected_users_list = []\n # build a list of selected users, where users that are selected multiple times are repeated as required\n for mult in range(proto_selected_users.max()):\n selected_users_list += np.where(proto_selected_users > mult)\n selected_users = np.concatenate(selected_users_list)\n\n if i_dist is None:\n selected_items = choice(items, l, replace=True)\n else:\n proto_selected_items = np.random.multinomial(l, i_dist)\n selected_items_list = []\n # build a list of selected users, where users that are selected multiple times are repeated as required\n for mult in range(proto_selected_items.max()):\n selected_items_list += np.where(proto_selected_items > mult)\n selected_items = np.concatenate(selected_items_list)\n\n # for with replacement sampling\n selected_users, u_cts = np.unique(selected_users, return_counts=True)\n selected_items, i_cts = np.unique(selected_items, return_counts=True)\n\n # get the non-zero entries of the subsample\n u_edge_selected = np.in1d(edge_list[:,0], selected_users)\n samp_el = np.copy(edge_list[u_edge_selected])\n i_edge_selected = np.in1d(samp_el[:,1], selected_items)\n samp_el = samp_el[i_edge_selected]\n\n samp_w = np.copy(weights[u_edge_selected])\n samp_w = samp_w[i_edge_selected]\n\n # construct the corresponding adjacency matrix (which can have all 0 rows or columns)\n\n # contiguous relabelling of the edge list\n relabel_u, relabel_i = reindex_edge_list(samp_el, selected_users, selected_items).T\n\n # for each all 0 user j, add phantom edge [j,0] of weight 0; this hack allows for all zero rows and columns\n zero_users = np.isin(range(selected_users.size), relabel_u, invert=True).nonzero()[0] # selected users w no edges\n relabel_u = np.append(relabel_u, zero_users)\n relabel_i = np.append(relabel_i, np.zeros_like(zero_users))\n samp_w = np.append(samp_w, np.zeros_like(zero_users, dtype=np.float32))\n\n # and same for items\n zero_items = np.isin(range(selected_items.size), relabel_i, invert=True).nonzero()[0] # selected items w no edges\n relabel_i = np.append(relabel_i, zero_items)\n relabel_u = np.append(relabel_u, np.zeros_like(zero_items))\n samp_w = np.append(samp_w, np.zeros_like(zero_items, dtype=np.float32))\n\n # add in the required copies of the users that were selected multiple times\n dup_ru, dup_ri, dup_w = _add_mult_samp_users(selected_users, k, u_cts, relabel_u, relabel_i, weights)\n\n relabel_u = np.append(relabel_u, dup_ru)\n relabel_i = np.append(relabel_i, dup_ri)\n samp_w = np.append(samp_w, dup_w)\n\n # add in the required copies of items that were selected multiple times (due to with replacement sampling)\n dup_ri, dup_ru, dup_w = _add_mult_samp_users(selected_items, l, i_cts, relabel_i, relabel_u, weights)\n\n relabel_u = np.append(relabel_u, dup_ru)\n relabel_i = np.append(relabel_i, dup_ri)\n samp_w = np.append(samp_w, dup_w)\n\n adj_mat = np.zeros([k,l])\n adj_mat[relabel_u, relabel_i] = np.squeeze(samp_w)\n\n return adj_mat, selected_users, 
selected_items\n\n\ndef _add_mult_samp_users(selected_users, k, u_cts, relabel_u, relabel_i, weights):\n \"\"\"\n helper function for vertex sampling with replacement\n computes extra edges and users required to account for the with replacement sampling\n\n :param selected_users:\n :param k:\n :param u_cts:\n :param relabel_u:\n :param relabel_i:\n :param weights:\n :return:\n \"\"\"\n\n # add in the required copies of users that were selected multiple times (due to with replacement sampling)\n n_u = selected_users.shape[0]\n full_selected_users = np.append(selected_users, np.repeat(-1, k - n_u))\n dup_ru = []\n dup_ri = []\n dup_w = []\n for dup in (u_cts > 1).nonzero()[0]:\n dup_inc = np.isin(relabel_u, dup).nonzero()[0]\n for _ in range(u_cts[dup]-1):\n full_selected_users[n_u] = dup\n\n # Note: every selected user must have at least one edge in the subgraph\n dup_ru += [np.repeat(n_u, dup_inc.shape[0])] # new user w label n_u\n # clone the edges for the new user\n dup_ri += [np.copy(relabel_i[dup_inc])]\n dup_w += [np.copy(weights[dup_inc])]\n n_u += 1\n\n return np.concatenate(dup_ru), np.concatenate(dup_ri), np.concatenate(dup_w)\n\n\ndef vert_samp_generator(graph, k, l, u_dist = None, i_dist = None):\n \"\"\"\n\n :param graph:\n :param k:\n :param l:\n :param u_dist:\n :param i_dist:\n :return:\n \"\"\"\n while True:\n yield vert_samp(graph, k, l, u_dist, i_dist)\n\n\ndef fast_vert_samp_generator(graph, k, l, u_dist = None, i_dist = None):\n \"\"\"\n :param graph:\n :param k:\n :param l:\n :param u_dist:\n :param i_dist:\n :return:\n \"\"\"\n\n edge_list, weights = np.copy(graph[0]), np.copy(graph[1])\n\n sparse_rep = sparse.coo_matrix((np.squeeze(weights), (edge_list[:,0], edge_list[:,1]))).tocsr()\n\n users = np.unique(edge_list[:,0])\n items = np.unique(edge_list[:,1])\n\n while True:\n if u_dist is None:\n selected_users = choice(users, k, replace=True)\n else:\n proto_selected_users = np.random.multinomial(k, u_dist)\n selected_users_list = []\n # build a list of selected users, where users that are selected multiple times are repeated as required\n for mult in range(proto_selected_users.max()):\n selected_users_list += np.where(proto_selected_users > mult)\n selected_users = np.concatenate(selected_users_list)\n\n if i_dist is None:\n selected_items = choice(items, l, replace=True)\n else:\n proto_selected_items = np.random.multinomial(l, i_dist)\n selected_items_list = []\n # build a list of selected users, where users that are selected multiple times are repeated as required\n for mult in range(proto_selected_items.max()):\n selected_items_list += np.where(proto_selected_items > mult)\n selected_items = np.concatenate(selected_items_list)\n\n samp = sparse_rep[selected_users]\n samp = samp[:, selected_items]\n\n yield samp.toarray(), selected_users, selected_items\n","repo_name":"ekanshs/data-splitting","sub_path":"graph_sampling/bipartite_vertex_sampling.py","file_name":"bipartite_vertex_sampling.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5866560819","text":"import tensorflow as tf\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.tokenizers.word_piece_tokenizer import pretokenize\nfrom keras_nlp.utils.tensor_utils import assert_tf_text_installed\n\ntry:\n from tensorflow_text.tools.wordpiece_vocab import (\n wordpiece_tokenizer_learner_lib as learner,\n )\nexcept ImportError:\n learner = 
None\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.compute_word_piece_vocabulary\")\ndef compute_word_piece_vocabulary(\n data,\n vocabulary_size,\n vocabulary_output_file=None,\n lowercase=False,\n strip_accents=False,\n split=True,\n split_on_cjk=True,\n suffix_indicator=\"##\",\n reserved_tokens=[\"[PAD]\", \"[CLS]\", \"[SEP]\", \"[UNK]\", \"[MASK]\"],\n):\n r\"\"\"A utility to train a WordPiece vocabulary.\n\n Trains a WordPiece vocabulary from an input dataset or a list of filenames.\n\n For custom data loading and pretokenization (`split=False`), the input\n `data` should be a `tf.data.Dataset`. If `data` is a list of filenames,\n the file format is required to be plain text files, and the text would be\n read in line by line during training.\n\n Args:\n data: A `tf.data.Dataset`, or a list of filenames.\n vocabulary_size: int. The maximum size of a vocabulary to be trained.\n vocabulary_output_file: str. The location to write a\n vocabulary file. defaults to `None`.\n lowercase: bool. If `True`, the input text will be\n lowercased before tokenization. Defaults to `False`.\n strip_accents: bool. If `True`, all accent marks will\n be removed from text before tokenization. Defaults to `False`.\n split: bool. If `True`, input will be split on\n whitespace and punctuation marks, and all punctuation marks will be\n kept as tokens. If `False`, input should be split (\"pre-tokenized\")\n before calling the tokenizer, and passed as a dense or ragged tensor\n of whole words. `split` is required to be `True` when `data` is a\n list of filenames. Defaults to `True`.\n split_on_cjk: bool. If `True`, input will be split\n on CJK characters, i.e., Chinese, Japanese, Korean and Vietnamese\n characters (https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)).\n Note that this is applicable only when `split` is `True`.\n Defaults to `True`.\n suffix_indicator: str. The characters prepended to a\n WordPiece to indicate that it is a suffix to another subword.\n E.g. `\"##ing\"`. Defaults to `\"##\"`.\n reserved_tokens: list of strings. A list of tokens that must be included in the vocabulary.\n\n Returns:\n Returns a list of vocabulary terms.\n\n Examples:\n\n Basic Usage (from Dataset).\n >>> inputs = tf.data.Dataset.from_tensor_slices([\"bat sat pat mat rat\"])\n >>> vocab = compute_word_piece_vocabulary(inputs, 13)\n >>> vocab\n ['[PAD]', '[CLS]', '[SEP]', '[UNK]', '[MASK]', 'a', 'b', 'm', 'p', 'r', 's', 't', '##at']\n >>> tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab, oov_token=\"[UNK]\")\n >>> outputs = inputs.map(tokenizer.tokenize)\n >>> for x in outputs:\n ... print(x)\n tf.Tensor([ 6 12 10 12 8 12 7 12 9 12], shape=(10,), dtype=int32)\n\n Basic Usage (from filenames).\n ```python\n with open(\"test.txt\", \"w+\") as f:\n f.write(\"bat sat pat mat rat\\n\")\n inputs = [\"test.txt\"]\n vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(inputs, 13)\n ```\n\n Custom Split Usage (from Dataset).\n >>> def normalize_and_split(x):\n ... \"Strip punctuation and split on whitespace.\"\n ... x = tf.strings.regex_replace(x, r\"\\p{P}\", \"\")\n ... return tf.strings.split(x)\n >>> inputs = tf.data.Dataset.from_tensor_slices([\"bat sat: pat mat rat.\\n\"])\n >>> split_inputs = inputs.map(normalize_and_split)\n >>> vocab = compute_word_piece_vocabulary(\n ... split_inputs, 13, split=False,\n ... 
)\n >>> vocab\n ['[PAD]', '[CLS]', '[SEP]', '[UNK]', '[MASK]', 'a', 'b', 'm', 'p', 'r', 's', 't', '##at']\n >>> tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)\n >>> inputs.map(tokenizer.tokenize)\n\n Custom Split Usage (from filenames).\n ```python\n def normalize_and_split(x):\n \"Strip punctuation and split on whitespace.\"\n x = tf.strings.regex_replace(x, r\"\\p{P}\", \"\")\n return tf.strings.split(x)\n with open(\"test.txt\", \"w+\") as f:\n f.write(\"bat sat: pat mat rat.\\n\")\n inputs = tf.data.TextLineDataset([\"test.txt\"])\n split_inputs = inputs.map(normalize_and_split)\n vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(\n split_inputs, 13, split=False\n )\n tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)\n inputs.map(tokenizer.tokenize)\n ```\n \"\"\"\n assert_tf_text_installed(compute_word_piece_vocabulary.__name__)\n\n # Read data files.\n if not isinstance(data, (list, tf.data.Dataset)):\n raise ValueError(\n \"The `data` argument must be either `tf.data.Dataset` or `list`. \"\n f\"Received: {type(data)}.\"\n )\n if isinstance(data, list):\n # Processing list of file paths.\n if not split:\n raise ValueError(\n \"When learning a vocab from files, `split` must be `True`. \"\n \"To compute a vocabulary with custom split rules, load your \"\n \"data as a dataset, split it, and pass it to \"\n \"`compute_word_piece_vocabulary()` with split=False.\"\n )\n path_ds = tf.data.Dataset.from_tensor_slices(data)\n # Uses map to read filepaths.\n data = path_ds.map(\n lambda path: tf.io.read_file(path),\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n\n words_data = data.map(\n lambda text: pretokenize(\n text, lowercase, strip_accents, split, split_on_cjk\n ),\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n word_counts = learner.count_words(words_data)\n # Train tokenizer.\n vocab = learner.learn(\n word_counts,\n vocab_size=vocabulary_size,\n reserved_tokens=reserved_tokens,\n include_joiner_token=True,\n joiner=suffix_indicator,\n )\n if len(vocab) > vocabulary_size:\n vocab = vocab[:vocabulary_size]\n if vocabulary_output_file is not None:\n vocab_text = \"\".join([line + \"\\n\" for line in vocab])\n # Write vocab to file.\n with open(vocabulary_output_file, \"w\") as vocab_file:\n vocab_file.write(vocab_text)\n else:\n return vocab\n","repo_name":"keras-team/keras-nlp","sub_path":"keras_nlp/tokenizers/word_piece_tokenizer_trainer.py","file_name":"word_piece_tokenizer_trainer.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","stars":594,"dataset":"github-code","pt":"67"} +{"seq_id":"71235867093","text":"import argparse\nimport socket\nimport sys\nfrom cardano import Cardano\n\nclass VsockListener:\n \"\"\"Server\"\"\"\n def __init__(self, conn_backlog=128):\n self.conn_backlog = conn_backlog\n self.cardano_obj = Cardano()\n\n def bind(self, port):\n \"\"\"Bind and listen for connections on the specified port\"\"\"\n self.sock = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)\n self.sock.bind((socket.VMADDR_CID_ANY, port))\n self.sock.listen(self.conn_backlog)\n\n def recv_data(self):\n \"\"\"Receive data from a remote endpoint\"\"\"\n while True:\n (from_client, (remote_cid, remote_port)) = self.sock.accept()\n # Read 1024 bytes at a time\n while True:\n try:\n data = from_client.recv(1024).decode()\n if data:\n try:\n signed_tx = self.cardano_obj.sign_transaction(tx_body_cbor=data)\n except:\n print('failed signature')\n if signed_tx:\n print('signed transaction')\n 
self.send_data(data=signed_tx.to_cbor().encode())\n except socket.error:\n break\n if not data:\n break\n print()\n\n def send_data(self, data):\n \"\"\"Send data to a remote endpoint\"\"\"\n (to_client, (remote_cid, remote_port)) = self.sock.accept()\n to_client.sendall(data)\n to_client.close()\n \n\n\ndef server_handler(args):\n server = VsockListener()\n server.bind(args.port)\n server.recv_data()\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='vsock-sample')\n parser.add_argument(\"--version\", action=\"version\",\n help=\"Prints version information.\",\n version='%(prog)s 0.1.0')\n subparsers = parser.add_subparsers(title=\"options\")\n\n server_parser = subparsers.add_parser(\"server\", description=\"Server\",\n help=\"Listen on a given port.\")\n server_parser.add_argument(\"port\", type=int, help=\"The local port to listen on.\")\n server_parser.set_defaults(func=server_handler)\n\n if len(sys.argv) < 2:\n parser.print_usage()\n sys.exit(1)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"eserilev/cardano-signer","sub_path":"vsock_sample.py","file_name":"vsock_sample.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37768163970","text":"from engine.commons.common.simulator_configurator import SimulatorConfigurator\nfrom engine.commons.common_data.base_info import PathInfo\nfrom domain.site_progress import SiteProgress\nfrom commons.parameters import Parameters\nfrom domain.site_models import SiteModels\nfrom engine.commons.data_inventory.comDataInv import ComDataInventory\nfrom engine.commons.util.pandas_util import PandasUtil\nfrom datetime import datetime\nimport plotly.graph_objects as go\nimport pandas as pd\nimport time\nimport os\nimport numpy as np\n\n\ndef start(dataset_id: str, simulation_prefix: str):\n Parameters.set_plan_horizon(1)\n Parameters.set_engine_database()\n t0 = time.time()\n Parameters.operation_dsp_rules = {\"2100\": \"ORP_RES_REQ\"}\n SimulatorConfigurator.configurate_simulator(dataset_id=dataset_id, simulation_prefix=simulation_prefix)\n SimulatorConfigurator.run_simulator()\n # ORP 계산 결과\n # WH 보시면 Prod 변환 기록이 나올겁니다.\n # whID: warehouse id, currProd: PS.FROM_PROD_CODE, nextProd: PS.TO_PROD_CODE\n ps_df = ComDataInventory.input_data.data_frames['in_MainPsData']\n df = PandasUtil.select(ps_df, ['FROM_PROD_CODE', 'TO_PROD_CODE'])\n t1 = time.time()\n # logging.basicConfig(level=logging.INFO)\n try1 = Analysis()\n try1.analysis_engine_result_lot_history('sankey')\n t2 = time.time()\n print(t1-t0, t2-t1)\n\n\n\nclass Analysis:\n\n def __init__(self, **kwargs: list):\n # args에 입력된 lot_id에 해당하는 lot들의 history만 출력합니다.\n # args를 입력하지 않으면 모든 lot들이 출력됩니다.\n self.fig_treemap = None\n self.df_treemap = None\n self.fig_sankey = None\n self.df_sankey = None\n self.df_late = None\n self.label_name = None\n\n if 'prod_id' in kwargs.keys():\n if kwargs['prod_id'] is not None:\n self.prodID = [prodID for prodID in kwargs['prod_id']]\n else:\n self.prodID = []\n else:\n self.prodID = []\n # logging.INFO(f\"prod_id={self.prodID}\")\n if 'lot_id' in kwargs.keys():\n if kwargs['lot_id'] is not None and kwargs['lot_id'] != []:\n self.id_list = [lot_id for lot_id in kwargs['lot_id']]\n else:\n self.id_list = [lot_id for lot_id in SiteModels.lots.keys()]\n else:\n self.id_list = [lot_id for lot_id in SiteModels.lots.keys()]\n # logging.INFO(f\"lot_id={self.id_list}\")\n # 메인 데이터 프레임입니다.\n self.df = None\n\n # 
target을 기준으로 color를 나눈 figure입니다\n self.fig_target_col = None\n # event를 기준으로 color를 나눈 figure\n self.fig_event_col = None\n\n # lot_id별 가장 늦게 끝나는 시간입니다\n self.latest_finish = {}\n # 공장 가동 비율입니다.\n self.facility_utilization_rate = {}\n # orp 계산 결과의 동선\n self.node = {}\n\n\n @staticmethod\n def modify_text_pos(bar, pos):\n \"\"\"\n text의 위치를 bar 안쪽에 위치 시킬지 bar 바깥 쪽에 위치 시킬지 설정\n pos = ['inside', 'outside']\n \"\"\"\n bar.textposition = pos\n\n\n @staticmethod\n def modify_width(bar, width):\n \"\"\"\n 막대의 너비를 설정합니다.\n width = (단위 px)\n \"\"\"\n bar.width = width\n\n\n @staticmethod\n def modify_opacity(bar, opacity):\n \"\"\"\n 그래프 막대의 투명도를 설정합니다\n opacity = [0,1] 사이\n \"\"\"\n bar.opacity = opacity\n\n\n @staticmethod\n def modify_color(bar, color):\n \"\"\"\n bar marker의 색을 지정한 색으로 설정합니다\n color: css color code\n \"\"\"\n bar.marker['color'] = color\n\n @staticmethod\n def modify_legendgroup(bar, name):\n \"\"\"\n 범례 이름을 원하는 name으로 변환\n \"\"\"\n bar.legendgroup = name\n bar.name = name\n\n\n @staticmethod\n def str_to_datetime(time_data: str) -> datetime:\n \"\"\"\n str형태의 start_time 파라미터를 초의 형태인 time_data 파라미터와 연산 후\n datetime 형태의 데이터를 return 합니다.\n \"\"\"\n # 날짜 연산을 위해 문자열을 datetime형태로\n temp_date = datetime.strptime(SiteProgress.plan_start_time, \"%Y-%m-%d %H:%M:%S\")\n\n # datetime형태를 초 형태로 변환\n start_date = time.mktime(temp_date.timetuple())\n\n # 초 데이터끼리 연산 후 datetime으로 변환\n time_datetime = datetime.fromtimestamp(start_date + time_data)\n\n return time_datetime\n\n\n def generate_dataframe(self):\n \"\"\"\n df(데이터프레임)를 생성합니다.\n facility_utiliztion_rate과 latest_finish time이 생성됩니다.\n\n df 의 column(lot_id, target, event, start, finish, lpst)\n\n \"\"\"\n\n lots = SiteModels.lots\n lot_history_data = []\n late_lot_history = []\n\n for lot_ids in self.id_list:\n histories = lots[lot_ids].history\n\n # lot별 가장 빠른 시작 시간을 임시 저장합니다.\n start_d = 0\n # lot별 가장 늦은 종료 시간을 임시 저장합니다.\n finish_d = 0\n # lot별 track_in_event가 발생한 시간을 임시 저장합니다.\n sum_track_in_time = 0\n\n for history in histories:\n if start_d == 0:\n start_d = history[3]\n elif history[2] < start_d:\n start_d = history[2]\n\n elif history[3] > finish_d:\n finish_d = history[3]\n\n # start 데이터를 datetime형태의 데이터로 바꿉니다.\n start_datetime = Analysis.str_to_datetime(history[2])\n finish_datetime = Analysis.str_to_datetime(history[3])\n lpst_datetime = Analysis.str_to_datetime(history[5])\n\n\n # 리스트 형태로 lot_history 저장\n lot_history_data.append(dict(lot_id=lot_ids, target=history[0], event=history[1],\n start=start_datetime, operation_time=(history[3] - history[2])*1000,\n lpst=lpst_datetime))\n\n if history[1] == \"TRACK_IN\":\n sum_track_in_time += history[3] - history[2]\n\n # TRACK_IN event의 지연됨 표현은 하지 않기 때문에 elif를 사용합니다.\n # lpst보다 start가 더 느릴 때 지연됨을 표현하기 위해 late_lot_history에 정보를 저장합니다.\n elif lpst_datetime < start_datetime:\n late_lot_history.append(dict(lot_id=lot_ids, target=' ', event='late',\n start=start_datetime, operation_time=(history[3] - history[2])*1000,\n lpst=lpst_datetime))\n\n # lpst보다 start가 더 빠르지만 finish보다 느릴 때 지연됨을 표현하기 위해 late_lot_history에 정보를 저장합니다.\n elif start_datetime <= lpst_datetime <= finish_datetime:\n late_lot_history.append(dict(lot_id=lot_ids, target=' ', event='late',\n start=lpst_datetime, operation_time=(history[3] - history[5])*1000,\n lpst=lpst_datetime))\n\n # lot_id를 key, lot_id별 가장 늦은 시간을 value로 하는 딕셔너리 생성\n self.latest_finish[lot_ids] = finish_d\n\n # facility_utilization = track_in event 수행 시간 / 전체 수행 시간\n if finish_d - start_d != 0:\n self.facility_utilization_rate[lot_ids] = sum_track_in_time * 
100 / (finish_d - start_d)\n else:\n self.facility_utilization_rate[lot_ids] = 0\n\n # 데이터 프레임 2개 생성 self.df = lot_history, self. df_late = 지연됐음을 표현하기 위한 검정 바\n self.df = pd.DataFrame(lot_history_data)\n self.df_late = pd.DataFrame(late_lot_history)\n\n\n def update_layout_target(self):\n \"\"\"\n figure(fig_target_col)의 layout 항목을 수정합니다.\n\n anno_factory_utilization param\n x = x축 좌표 (현재 공정이 끝난 시간에 20000초를 더한 값)\n y = y축좌표 (lot_id에 해당하는 좌표)\n text = 표시할 텍스트 (lot_id별 factory_utilization 값)\n showarrow = annotation에 화살표를 표시할 것 인지 결정\n\n layout update\n xaxis x축 관련 정보\n tick기능: x축 text 표시만 바꿀 수 있음, grid 기능 격자 생성가능\n rangeselector기능: x축의 단위를 미리 버튼에 등록해서 버튼 클릭 만으로 범위 변경\n rangeslider기능: x축의 범위를 설정할 수 있는 슬라이더 생성\n rangebreak기능: x축 범위의 한계를 설정 할 수 있음 설정 하지 않을 경우 무한대\n\n barmode 그래프 바의 모드를 설정 합니다\n mode:stack: 바가 쌓아 지는 구조 (음수가 나오면 그 값에서 음의 방향으로 쌓임)\n relative: 음수는 음수 쪽으로 쌓이고 양수는 양수 쪽으로 쌓이는 stack 구조\n group: 동일한 값에 여러 개의 바가 한개의 그룹으로 묶여서 있는 구조\n overlay: 다른 모드와는 다르게 시작 지점을 원하는 곳으로 지정 가능\n\n\n\n\n\n \"\"\"\n anno_factory_utilization = [dict(x=Analysis.str_to_datetime(self.latest_finish[lot_id] + 20000), y=lot_id,\n text=f\"{self.facility_utilization_rate[lot_id]:.1f}%\", showarrow=False)\n for lot_id in self.id_list]\n\n # self.fig_target_col.layout.hovermode = \"x\" x축 기준 동일 선상에 있는 모든 hover가 출력됩니다.\n self.fig_target_col.layout[\"annotations\"] = anno_factory_utilization\n self.fig_target_col.update_layout(xaxis=dict(type='date'), barmode='overlay')\n\n # TRACK_IN event의 너비와 투명도를 조정하였습니다.\n [(Analysis.modify_opacity(bar, 0.8), Analysis.modify_text_pos(bar, 'outside'), Analysis.modify_width(bar, 0.5))\n for bar in self.fig_target_col.data if ('CM' in bar.legendgroup or 'LM' in bar.legendgroup)]\n\n\n def generate_fig_target_col(self):\n \"\"\"\n target을 기준으로 color를 나눈 figure를 작성합니다.\n\n fig 생성 param\n base 시작 시간\n x 끝 시간\n y y축 변수\n orientation 막대의 방향 (세로= default , 가로=h)\n legendgroup 레전드의 이름\n name 막대 그래프 개체 각각의 이름\n text 그래프 내부에 표시 할 데이터\n hovertemplate 호버에 데이터를 표시할 형식\n customdata 호버에 표시할 데이터\n \"\"\"\n self.fig_target_col = go.Figure()\n\n for targ in self.df.target.unique():\n\n dff = self.df.loc[self.df.target == targ]\n self.fig_target_col.add_trace(\n go.Bar(base=dff.start, x=dff.operation_time, y=dff.lot_id, orientation='h',\n xaxis='x', yaxis='y', name=targ, legendgroup=targ, text=targ,\n hovertemplate='lot_id=%{y}
<br>target='+targ+'<br>
start=%{base}<br>
finish=%{x}'))\n\n self.fig_target_col.add_trace(\n go.Bar(base=self.df_late.start, x=self.df_late.operation_time, y=self.df_late.lot_id, orientation='h',\n name='late', legendgroup='late', opacity=0.6, marker=dict(color='black'),\n customdata=self.df_late.lpst,\n hovertemplate='
<br>start=%{base}<br>
finish=%{x}<br>
lot_id=%{y}<br>
lpst=%{customdata}'))\n\n self.fig_target_col.update_traces(textposition='inside')\n self.update_layout_target()\n\n\n def update_layout_event(self):\n \"\"\"\n figure(fig_target_col)의 layout 항목을 수정합니다.\n\n anno_factory_utilization param\n x = x축 좌표 (현재 공정이 끝난 시간에 20000초를 더한 값)\n y = y축좌표 (lot_id에 해당하는 좌표)\n text = 표시할 텍스트 (lot_id별 factory_utilization 값)\n\n layout update\n xaxis x축 관련 정보\n (tick기능: x축 text 표시만 바꿀 수 있음, grid 기능 격자 생성가능\n rangeselector기능: x축의 단위를 미리 버튼에 등록해서 버튼 클릭 만으로 범위 변경\n rangeslider기능: x축의 범위를 설정할 수 있는 슬라이더 생성\n rangebreak기능: x축 범위의 한계를 설정 할 수 있음 설정 하지 않을 경우 무한대\n )\n barmode 그래프 바의 모드를 설정 합니다\n (mode:stack: 바가 쌓아 지는 구조 (음수가 나오면 그 값에서 음의 방향으로 쌓임)\n relative: 음수는 음수 쪽으로 쌓이고 양수는 양수 쪽으로 쌓이는 stack 구조\n group: 동일한 값에 여러 개의 바가 한개의 그룹으로 묶여서 있는 구조\n overlay: 다른 모드와는 다르게 시작 지점을 원하는 곳으로 지정 가능\n )\n \"\"\"\n anno_fact_util = [dict(x=Analysis.str_to_datetime(self.latest_finish[lot_id] + 20000), y=lot_id,\n text=f\"{self.facility_utilization_rate[lot_id]:.1f}%\", showarrow=False) for lot_id in self.id_list]\n\n self.fig_event_col.layout[\"annotations\"] = anno_fact_util\n self.fig_event_col.update_layout(xaxis=dict(type='date'), barmode='overlay')\n\n [(Analysis.modify_color(bar, '#7FB3D5'), Analysis.modify_legendgroup(bar, 'WH'))\n for bar in self.fig_event_col.data if ('WH' in bar.legendgroup)]\n\n [(Analysis.modify_width(bar, 0.5), Analysis.modify_opacity(bar, 0.8), Analysis.modify_text_pos(bar, 'outside'))\n for bar in self.fig_event_col.data if ('TRACK_IN' in bar.legendgroup)]\n\n\n def generate_fig_event_col(self):\n \"\"\"\n event를 기준으로 color를 나눈 figure를 생성합니다.\n\n event가 5개 이하라는 가정하에 col_list를 적게 설정 (현재 5개 설정) event가 늘어날 경우 더 늘려 줘야 함.\n\n fig 생성 param\n base 시작 시간\n x 끝 시간\n y y축 변수\n orientation 막대의 방향 (세로= default , 가로=h)\n legendgroup 레전드의 이름\n name 막대 그래프 개체 각각의 이름\n text 그래프 내부에 표시 할 데이터\n hovertemplate 호버에 데이터를 표시할 형식\n customdata 호버에 표시할 데이터\n\n \"\"\"\n col_list = ['#20B2AA', '#F5A9D0', 'limegreen', '#F8C471', 'forestgreen']\n colors = iter(col_list)\n\n self.fig_event_col = go.Figure()\n\n\n for eve in self.df.event.unique():\n\n dff = self.df.loc[self.df.event == eve]\n\n self.fig_event_col.add_trace(\n go.Bar(base=dff.start, x=dff.operation_time, y=dff.lot_id, orientation='h',\n text=dff.target, name=eve, legendgroup=eve, marker=dict(color=next(colors)),\n hovertemplate='lot_id=%{y}
<br>event='+eve+'<br>
start=%{base}<br>
finish=%{x}'))\n\n        self.fig_event_col.add_trace(\n            go.Bar(base=self.df_late.start, x=self.df_late.operation_time, y=self.df_late.lot_id, orientation='h',\n                   name='late', legendgroup='late', opacity=0.6, marker=dict(color='black'), customdata=self.df_late.lpst,\n                   hovertemplate='
<br>start=%{base}<br>
finish=%{x}<br>
lot_id=%{y}<br>
lpst=%{customdata}'))\n\n self.fig_event_col.update_traces(textposition='inside')\n self.update_layout_event()\n\n\n def append_track_in_df(self):\n \"\"\"\n track_in event를 df(데이터프레임) 가장 밑 행으로 배치시킵니다.\n 이 작업을 통해 TRACK_IN 이벤트가 다른 중복되는 차트에 가려지는 것을 방지합니다.\n \"\"\"\n track_in_df = self.df.loc[self.df['event'] == 'TRACK_IN']\n self.df = self.df[self.df['event'] != 'TRACK_IN']\n self.df = self.df.append(track_in_df, ignore_index=True)\n\n\n def generate_sankey_dataframe(self, flag_treemap: bool):\n \"\"\"\n 현재 prod명과 다음 prod명을 source와 target에 분류하고, prod value를 1로 설정하여 데이터프레임을 작성.\n prod_ID를 숫자로 변경하여 데이터프레임에 저장합니다.\n \"\"\"\n if self.prodID == []:\n data = [dict(source=prod.currProd, target=prod.nextProd, value=1)\n for prod in ComDataInventory.orpDataInv.orpDepthInfoList if prod.nextProd != prod.currProd]\n\n else:\n data = [dict(source=prod.currProd, target=prod.nextProd, value=1)\n for prod in ComDataInventory.orpDataInv.orpDepthInfoList\n if prod.nextProd != prod.currProd if prod.dmdProdID in self.prodID]\n\n self.df_sankey = pd.DataFrame(data)\n\n if flag_treemap:\n self.df_treemap = self.df_sankey.copy()\n\n source_list = self.df_sankey.source.unique()\n targ_list = self.df_sankey.target.unique()\n\n self.label_name = list(set(np.append(source_list, targ_list)))\n\n for a, b in enumerate(self.label_name):\n self.df_sankey = self.df_sankey.replace(b, a)\n\n\n def generate_fig_sankey_diagram(self):\n \"\"\"\n sankey_diagram\n \"\"\"\n self.fig_sankey = go.Figure(data=[go.Sankey(\n # Define nodes\n node=dict(\n label=self.label_name,\n line=dict(color=\"black\")\n ),\n\n # Add links\n link=dict(\n source=self.df_sankey.source,\n target=self.df_sankey.target,\n value=self.df_sankey.value,\n color='grey'\n ))])\n\n self.fig_sankey.update_layout(\n title_text=\"sankey diagram\")\n\n\n def generate_treemap_dataframe(self, flag_sankey: bool):\n\n if not flag_sankey:\n if self.prodID == []:\n data = [dict(source=prod.currProd, target=prod.nextProd)\n for prod in ComDataInventory.orpDataInv.orpDepthInfoList if prod.nextProd != prod.currProd]\n self.df_treemap = pd.DataFrame(data)\n else:\n data = [dict(source=prod.currProd, target=prod.nextProd)\n for prod in ComDataInventory.orpDataInv.orpDepthInfoList\n if prod.nextProd != prod.currProd if prod.dmdProdID in self.prodID]\n self.df_treemap = pd.DataFrame(data)\n\n self.df_treemap = self.df_treemap.drop_duplicates()\n\n temp = self.df_treemap['target'][~self.df_treemap['target'].isin(self.df_treemap['source'].unique())]\n new_df = pd.DataFrame(dict(source=temp, target=''))\n self.df_treemap = self.df_treemap.append(new_df, ignore_index=True)\n\n\n def generate_fig_treemap(self):\n self.fig_treemap = go.Figure(go.Treemap(labels=self.df_treemap['source'], parents=self.df_treemap['target']))\n\n self.fig_icicle = go.Figure(go.Icicle(labels=self.df_treemap['source'], parents=self.df_treemap['target']))\n\n self.fig_sunburst = go.Figure(go.Sunburst(labels=self.df_treemap['source'], parents=self.df_treemap['target']))\n\n def analysis_engine_result_lot_history(self, *args):\n \"\"\"\n engine의 결과값을 그래프로 시각화 해줍니다.\n\n params\n 'target' : target별로 구분된 gantt chart를 출력합니다.\n 'event' : event별로 구분된 gantt chart를 출력합니다.\n 'sankey' : prod 변환 기록을 보여주는 sankey diagram을 출력합니다.\n 'treemap' : prod 변환 기록을 보여주는 treemap charts를 출력합니다.\n \"\"\"\n if 'event' in args or 'target' in args:\n self.generate_dataframe()\n self.append_track_in_df()\n\n if 'target' in args:\n self.generate_fig_target_col()\n self.fig_target_col.write_html(f\"{PathInfo.xlsx}{os.sep}temp_target.html\", 
default_width=2300, default_height=900)\n self.fig_target_col.show(width=2000, height=750)\n\n if 'event' in args:\n self.generate_fig_event_col()\n self.fig_event_col.write_html(f\"{PathInfo.xlsx}{os.sep}temp_event.html\", default_width=2300, default_height=900)\n self.fig_event_col.show(width=2000, height=750)\n\n if 'sankey' in args:\n if 'treemap' in args:\n self.generate_sankey_dataframe(flag_treemap=True)\n self.generate_fig_sankey_diagram()\n self.fig_sankey.write_html(f\"{PathInfo.xlsx}{os.sep}temp_sankey.html\")\n self.fig_sankey.write_image(f\"{PathInfo.xlsx}{os.sep}temp_sankey.png\")\n self.fig_sankey.show()\n else:\n self.generate_sankey_dataframe(flag_treemap=False)\n self.generate_fig_sankey_diagram()\n self.fig_sankey.write_html(f\"{PathInfo.xlsx}{os.sep}temp_sankey.html\")\n self.fig_sankey.write_image(f\"{PathInfo.xlsx}{os.sep}temp_sankey.png\")\n self.fig_sankey.show()\n\n if 'treemap' in args:\n if 'sankey' in args:\n self.generate_treemap_dataframe(flag_sankey=True)\n self.generate_fig_treemap()\n self.fig_treemap.write_html(f\"{PathInfo.xlsx}{os.sep}temp_treemap.html\")\n self.fig_treemap.show()\n else:\n self.generate_treemap_dataframe(flag_sankey=False)\n self.generate_fig_treemap()\n self.fig_treemap.write_html(f\"{PathInfo.xlsx}{os.sep}temp_treemap.html\")\n self.fig_treemap.show()\n self.fig_icicle.show()\n self.fig_sunburst.show()\n","repo_name":"leegunwon/Internship---Nuerocore","sub_path":"Gantt Chart with engine/History_Gantt_Chart_Engine_ver_12.py","file_name":"History_Gantt_Chart_Engine_ver_12.py","file_ext":"py","file_size_in_byte":22350,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17391723176","text":"from operacoesbd import *\nopcao = 1\nmanifestacoes = []\nconexao = abrirBancoDados('localhost','root','12345','ouvidoria1')\n#cod menu\nwhile opcao != 8:\n print ()\n print('====== Bem-vindo(a) ao sistema de ouvidoria ======')\n print ()\n print('[1] Listar das manifestações')\n print('[2] Listar de manifestações por Tipo')\n print('[3] Criar uma nova manifestações')\n print('[4] Exibir quantidade de manifestações')\n print('[5] Pesquisar uma manifestação por código')\n print('[6] Alterar o título e/ou descrição de uma manifestação')\n print('[7] Excluir uma manifestação pelo Código')\n print('[8] Sair do Sistema')\n print()\n opcao = int(input('Digite a opção desejada: '))\n# cod listar\n if opcao == 1:\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n print('Listagem de manifestações:')\n consultaListagem = 'select * from manifestacao'\n manifestacoes = listarBancoDados(conexao,consultaListagem)\n if len(manifestacoes) > 0:\n for manifestacao in manifestacoes:\n print('Código', manifestacao[0], '-', manifestacao[1], '-', manifestacao[3], '-', manifestacao [5] )\n else:\n print('Nenhuma manifestação encontrada.')\n conexao.close()\n#cod listar por tipo\n elif opcao == 2:\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n def listarManifestacoesPorTipo(conexao):\n print('')\n manifestacao = input(\n 'Digite o tipo de manifestação que deseja listar:\\n [1] Reclamação [2] Elogio [3] Sugestão): ')\n\n if manifestacao.lower() not in ['1', '2', '3']:\n print('Tipo de manifestação inválido.')\n exit()\n\n cursor = conexao.cursor()\n\n sqlListagem = 'select * from manifestacao where tipo = %s'\n\n if manifestacao.lower() == 'reclamacao':\n tipo = 'reclamacao'\n elif manifestacao.lower() == 'elogio':\n tipo = 'elogio'\n else:\n tipo = 'sugestao'\n\n 
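            # The menu above validates the choice as '1'/'2'/'3', but the word
            # comparisons that set `tipo` can never match a digit, so map the
            # numeric choice to its type name before building the query.
            tipo_map = {'1': 'reclamacao', '2': 'elogio', '3': 'sugestao'}
            tipo = tipo_map[manifestacao.lower()]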
valores = (tipo,)\n cursor.execute(sqlListagem, valores)\n manifestacoes = cursor.fetchall()\n\n if len(manifestacoes) > 0:\n print(f'Listagem das Manifestacoes do tipo {manifestacao.capitalize()}:\\n')\n for manifestacao in manifestacoes:\n codigo = manifestacao[0]\n titulo = manifestacao[1]\n autor = manifestacao[3]\n print(f'• Código {codigo} – {titulo} – {autor}')\n else:\n print(f'Não há manifestações do tipo {manifestacao.capitalize()}.')\n cursor.close()\n listarManifestacoesPorTipo(conexao)\n conexao.close()\n elif opcao == 3:\n print('')\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n titulo = input('Digite o título da manifestação: ')\n detalhe = input('Digite a descrição da manifestação: ')\n autor = input('Digite o nome do reclamante: ')\n data = input('Digite a data da reclamação: ')\n tipo = input('Digite o tipo da manifestação (reclamação, sugestão ou elogio): ')\n\n sqlInsercao = 'insert into manifestacao (titulo, detalhe, autor, data, tipo) values (%s, %s, %s, %s, %s)'\n valores = (titulo, detalhe, autor, data, tipo)\n insertNoBancoDados(conexao, sqlInsercao, valores)\n print('Manifestação criada com sucesso!')\n encerrarBancoDados(conexao)\n elif opcao == 4:\n import mysql.connector\n\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n def obterQuantidadeManifestacoes(conexao):\n sql = \"\"\"\n SELECT \n COUNT(*) as quantidade,\n Tipo\n FROM \n manifestacao\n GROUP BY \n Tipo\n \"\"\"\n cursor = conexao.cursor(dictionary=True)\n cursor.execute(sql)\n resultados = cursor.fetchall()\n cursor.close()\n return resultados\n def exibirQuantidadeManifestacoes(resultados):\n print(\"Quantidade de Manifestações:\")\n total_manifestacoes = 0\n for resultado in resultados:\n tipo = resultado['Tipo']\n quantidade = resultado['quantidade']\n print(f\"Quantidade de {tipo.capitalize()}: {quantidade}\")\n total_manifestacoes += int(quantidade)\n print(f\"Total de Manifestações: {total_manifestacoes}\\n\")\n resultados = obterQuantidadeManifestacoes(conexao)\n exibirQuantidadeManifestacoes(resultados)\n conexao.close()\n elif opcao == 5:\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n codigo = input('Digite o código da reclamação: ')\n consultaListagem = 'SELECT * FROM manifestacao WHERE codigo = ' + codigo\n manifestacoes = listarBancoDados(conexao, consultaListagem)\n if len(manifestacoes) > 0:\n for manifestacao in manifestacoes:\n print('Código:', manifestacao[0])\n print('Titulo:', manifestacao[1])\n print('Descrição:', manifestacao[2])\n print('Autor:', manifestacao[3])\n print('Data:', manifestacao[4])\n else:\n print('Nenhuma manifestação encontrada com o código fornecido.')\n conexao.close()\n elif opcao == 6:\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n codigo = input('Digite o código: ')\n novoTitulo = input('Digite o novo título: ')\n novadetalhe = input('Digite a nova descrição: ')\n sqlAtualizar = 'update manifestacao set titulo = %s, detalhe = %s where codigo = %s'\n valores = (novoTitulo, novadetalhe, codigo)\n print('Manifestação alterada com sucesso!')\n atualizarBancoDados(conexao, sqlAtualizar, valores)\n encerrarBancoDados(conexao)\n\n elif opcao == 7:\n conexao = abrirBancoDados('localhost', 'root', '12345', 'ouvidoria1')\n def excluirManifestacao(conexao):\n codigo = input(\"Digite o código da manifestação que deseja excluir: \")\n cursor = conexao.cursor()\n sqlExclusao = \"delete from manifestacao where Codigo = %s\"\n valores = (codigo,)\n 
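            # Binding the code as a query parameter lets the driver escape it
            # safely; unlike the string concatenation used for option 5 above,
            # this form is not open to SQL injection.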
cursor.execute(sqlExclusao, valores)\n            conexao.commit()\n            if cursor.rowcount > 0:\n                print(\"Manifestação excluída com sucesso!\")\n            else:\n                print(\"Manifestação não encontrada!\")\n            cursor.close()\n        excluirManifestacao(conexao)\n        encerrarBancoDados(conexao)\nprint('Obrigado pelo feedback.')\nprint('saindo do sistema')\n","repo_name":"carol1ina/ouvidoria","sub_path":"codigo2.py","file_name":"codigo2.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}{"seq_id":"33931078027","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 29 16:01:20 2020\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\n\r\n\r\n\r\n\r\nheaders ={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \\\r\n           AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'}\r\nurl = 'https://www.ptt.cc/bbs/Gossiping/index.html'\r\nss = requests.session()\r\nss.cookies['over18'] = '1'\r\n\r\nif not os.path.exists('ptt_gossiping'):\r\n    os.mkdir('ptt_gossiping')\r\n    \r\n    \r\n\r\nfor f in range(0,5):\r\n    # open each article on the current index page\r\n    res = ss.get(url, headers=headers)\r\n    soup = BeautifulSoup(res.text, 'html.parser')\r\n    title_list = soup.select('div.title')\r\n    \r\n    \r\n    for title_soup in title_list:\r\n        try:\r\n            #get article title\r\n            title_name = title_soup.select('a')[0].text\r\n            \r\n            # Extract article content\r\n            \r\n            article_url = 'https://www.ptt.cc'+title_soup.select('a')[0]['href']\r\n            \r\n            res_article = ss.get(article_url, headers=headers)\r\n            soup_article = BeautifulSoup(res_article.text, 'html.parser')\r\n            \r\n            author = soup_article.select('span.article-meta-value')[0].text\r\n            title = soup_article.select('span.article-meta-value')[2].text\r\n            datetime = soup_article.select('span.article-meta-value')[3].text\r\n            \r\n            push = 0\r\n            down = 0\r\n            score = 0\r\n            push_info_list = soup_article.select('div.push')\r\n            \r\n            for info in push_info_list:\r\n                if '推' in info.text:\r\n                    push += 1\r\n                if '噓' in info.text:\r\n                    down += 1\r\n            \r\n            score = push - down\r\n            \r\n            article_cont = soup_article.select('div#main-content')[0].text.split('※ 發信站')[0]\r\n            \r\n            article_cont += '\\n---split---\\n'\r\n            article_cont += '推: {}\\n'.format(push)\r\n            article_cont += '噓: {}\\n'.format(down)\r\n            article_cont += '分數: {}\\n'.format(score)\r\n            article_cont += '作者: {}\\n'.format(author)\r\n            article_cont += '標題: {}\\n'.format(title)\r\n            article_cont += '時間: {}\\n'.format(datetime)\r\n            \r\n            n_article_title = title_name.replace('/','').replace('?','').replace(':','')\r\n            \r\n            with open('./ptt_gossiping/{}.txt'.format(n_article_title),'w',encoding = 'utf-8') as w:\r\n                w.write(article_cont)\r\n            \r\n        except IndexError as e:\r\n            print('============')\r\n            print(title_name)\r\n            print(article_url)\r\n            print(e.args)\r\n            print('============')\r\n    \r\n    \r\n    # URL of the previous (older) index page\r\n    url = 'https://www.ptt.cc' + soup.select('a.btn.wide')[1]['href']\r\n    ","repo_name":"p973978/crawler","sub_path":"PyETL/ptt_gossip_article.py","file_name":"ptt_gossip_article.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}{"seq_id":"73638146772","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom 
sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import svm, datasets\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.metrics import average_precision_score\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\nrandom_state = np.random.RandomState(0)\r\ndataset = pd.read_csv('LipfordDatasetUpdated.csv')\r\n\r\n#read only quantitative data in (needs to change to fit one hot in\r\nquantdataset = dataset.select_dtypes(include=[float])\r\nquantdataset.shape\r\nquantdataset.isnull().sum()\r\nquantdataset = quantdataset.fillna(quantdataset.mean())\r\n#print(quantdataset)\r\n\r\nonehotdataset = dataset.select_dtypes(include=[object])\r\nonehotdataset = onehotdataset.fillna('None')\r\nonehotdataset.shape\r\nonehotdataset2 = onehotdataset.drop('exit_desc', axis =1)\r\nprint(onehotdataset)\r\n#print(onehotdataset2)\r\n\r\n\"\"\" Convert to one Hot \"\"\"\r\nenc = OneHotEncoder(handle_unknown = 'ignore')\r\nonehot = pd.DataFrame(enc.fit_transform(onehotdataset).toarray())\r\n#print(onehot)\r\n\r\n\r\n\"\"\" Scaling the Features \"\"\"\r\nscaler = StandardScaler()\r\nscaler.fit(quantdataset)\r\nscaled_quant = scaler.transform(quantdataset)\r\nmodified_quant = pd.DataFrame(scaled_quant)\r\nprint(modified_quant)\r\n\r\n\"\"\" Merge \"\"\"\r\nalldata = pd.concat([modified_quant, onehot], axis = 1)\r\nprint(alldata)\r\n\r\n\r\n\"\"\" Create Train Test \"\"\"\r\nX_train, X_test, y_train, y_test = train_test_split(alldata, onehotdataset['exit_desc'], test_size = 0.25)\r\nprint(X_train)\r\nprint(y_train)\r\n\r\n\r\n\"\"\" Deciding K neighbors \"\"\"\r\nknn = KNeighborsClassifier(n_neighbors = 8)\r\n\r\nknn.fit(X_train, y_train)\r\n#pickle to write object to file\r\npred = knn.predict(X_test) #gets the prediction using the classifier\r\n#print(knn.score(X_test, y_test))\r\n\r\n\r\n\"\"\" Confusion matrix \"\"\"\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\nprint(confusion_matrix(y_test, pred))\r\nprint('\\n')\r\nprint(classification_report(y_test, pred)) #This is the accuracy, avg, weight avg report\r\n\r\n\r\n\"\"\" Choosing a K-value and creating visualization of Error Rate \"\"\"\r\nerror_rate = []\r\n\r\nfor i in range(1, 35):\r\n #print(y_test.shape)\r\n knn = KNeighborsClassifier(n_neighbors = i)\r\n knn.fit(X_train, y_train)\r\n pred_i = knn.predict(X_test)\r\n #print(pred_i[pred_i!= y_test])\r\n #print(y_test)\r\n error_rate.append(np.mean(pred_i != y_test))\r\n#print(error_rate)\r\n\r\nplt.figure(figsize=(10,6))\r\nplt.plot(range(1,35), 1-np.array(error_rate), color = 'blue',\r\n linestyle ='dashed', marker = 'o',\r\n markerfacecolor = 'blue', markersize = 10)\r\n\"\"\"\r\n\r\nplt.plot(range(1,35), error_rate, color = 'blue',\r\n linestyle ='dashed', marker = 'o',\r\n markerfacecolor = 'blue', markersize = 10)\r\n\"\"\"\r\n\r\n\r\nplt.title('Error vs K Value')\r\nplt.xlabel('K')\r\nprint(\"Done with program\")\r\nplt.ylabel('Error Rate')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"MasonCLipford/Programs","sub_path":"ItsTheBestDayEver.py","file_name":"ItsTheBestDayEver.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20082680893","text":"\n# Importowanie bibliotek\nimport pandas as pd\nfrom pandas_datareader import data as pdr\nimport yfinance as yf\nfrom datetime import datetime\nimport 
datetime as dt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom datetime import date\npd.options.mode.chained_assignment = None # default='warn'\n\n\n#Pobranie danych\n\ncompany=[ 'BTC-USD', \"SHIB-USD\", 'DOGE-USD', 'ETH-USD', 'ADA-USD', 'XRP-USD', 'NEO-USD', 'XLM-USD', 'DOT-USD', 'BNB-USD'] # Wybór waluty, dane z https://finance.yahoo.com/ \n#company=[ \"GC=F\"] # Wybór waluty, dane z https://finance.yahoo.com/ \n\nx = 0.4\nwindow = 18\n\n\nfee_buy = 0.0360 * 0.01\n \nfee_sell = 0.0180 * 0.01 \n \ndef download_df_long(company): \n end=date.today() #koniec danych\n delta = 729\n \n \n start= end - dt.timedelta(days = delta) #początek danych\n \n #start= dt.datetime(1990,2,1)\n\n yf.pdr_override() \n df=pdr.get_data_yahoo(c, start, end, interval = \"1H\") \n df.reset_index(inplace=True)\n \n \n try:\n df['Datetime'] = df['Date']\n \n \n except:\n df['Datetime'] = df['index']\n \n \n\n \n \n df = df[['Datetime', 'Close', 'High', 'Low']]\n \n return df\n\ndef short_df(df):\n \n \n xd = df.copy()\n \n xd['Close'] = 1/df['Close']\n xd['High'] = 1/df['High']\n xd['Low'] = 1/df['Low']\n \n \n return xd\n\n\ndef back_trade (df):\n\n\n local_min = 0\n amplitude = 0\n\n buy = 0\n take_profit = 1000000000\n stop_loss = 0\n df['buy'] = np.nan\n df['sell'] = np.nan\n df['profit'] = np.nan\n df['cumulative_profit'] = np.nan\n \n for i in range(window, len(df)):\n sd = df[i-window:i+1]\n sd.reset_index(inplace = True, drop = True)\n \n \n \n if buy == 0 and df['Close'][i] < local_min:\n #if buy == 0 and df['Low'][i] < local_min:\n \n buy = df['Close'][i] * ( 1 + fee_buy)\n #buy = local_min \n \n \n \n max = sd['Close'].max()\n min = sd['Close'].min()\n amplitude = max - min\n take_profit = min + amplitude * x\n stop_loss = min - (amplitude * x / 3) \n df['buy'][i] = buy\n \n \n elif buy != 0 and df['Low'][i] <= stop_loss :\n sell = stop_loss * ( 1 - fee_sell)\n df['sell'][i] = sell \n df['profit'][i] = (sell - buy)/buy \n buy=0\n \n \n \n elif buy != 0 and df['High'][i] >= take_profit :\n sell = take_profit * ( 1 - fee_sell)\n df['sell'][i] = sell\n df['profit'][i] = (sell - buy)/buy\n buy=0\n \n \n \n df['cumulative_profit'][i] = df['profit'][:i].sum() \n \n \n \n\n \n local_min = sd['Close'].min()\n \n\n \n return df\n\n\n\n\n\nwyniki = pd.DataFrame(columns = ['Company', 'Long', 'Short'])\n\nfor c in company:\n\n df_long = download_df_long(c)\n df_short = short_df(df_long)\n\n long = back_trade(df_long)\n \n \n profits_long = long.copy()\n profits_long = profits_long[\"profit\"]\n profits_long.dropna(inplace = True)\n \n plt.title('Long_'+c)\n plt.suptitle(long['cumulative_profit'][len(long)-1]) \n plt.plot(long['cumulative_profit'], color = 'red') \n plt.show()\n print(long['cumulative_profit'][len(long)-1])\n\n\n \n investment = 100\n \n balance_list = [0]\n\n reinvest = 0.33 \n\n account_balance = 100\n \n levar = 10 \n \n\n for profit in profits_long:\n \n lucre = ( profit* levar) * account_balance * reinvest\n \n account_balance = account_balance + lucre\n \n balance_list.append(account_balance)\n \n\n \n \n \n plt.title('Balance_long_'+c)\n plt.suptitle(balance_list[-1]) \n plt.plot(balance_list) \n plt.show()\n \n \n \n \n short =back_trade(df_short)\n \n\n\n profits_short = short.copy()\n profits_short = profits_short[\"profit\"]\n profits_short.dropna(inplace = True)\n\n \n plt.title('Short_'+c)\n plt.suptitle(short['cumulative_profit'][len(short)-1]) \n plt.plot(short['cumulative_profit'], color = 'red') \n plt.show()\n print(short['cumulative_profit'][len(short)-1])\n \n\n\n\n 
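    # Both balance loops in this script apply the same compounding rule: each
    # trade moves the account by profit * leverage * reinvested-fraction of the
    # current balance. A minimal helper capturing that rule (the name
    # `compound_balance` is illustrative, not part of the original script):
    def compound_balance(profits, start=100.0, levar=10, reinvest=0.33):
        balance, history = start, [start]
        for p in profits:
            # per-trade compounding: balance grows/shrinks proportionally
            balance += p * levar * reinvest * balance
            history.append(balance)
        return history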
\n balance_list_s = [0]\n\n reinvest_s = 0.33 \n\n account_balance_s = 100\n \n levar = 10 \n \n\n\n for profit in profits_short:\n \n lucre = ( profit* levar) * account_balance_s * reinvest\n \n account_balance_s = account_balance_s + lucre\n \n balance_list_s.append(account_balance_s)\n \n\n\n\n \n \n plt.title('Balance_short_'+c)\n plt.suptitle(balance_list_s[-1]) \n plt.plot(balance_list_s) \n plt.show()\n \n \n wyniki = wyniki.append({'Company' : c, 'Long' : balance_list[-1], 'Short' : balance_list_s[-1]},\n ignore_index = True)\n","repo_name":"Scarowak/Python_backtrading","sub_path":"window_min_max.py","file_name":"window_min_max.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13938204477","text":"from PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5.uic import loadUi\n\n\nclass Slope1Window(QMainWindow):\n def __init__(self, path=\"\"):\n super(Slope1Window, self).__init__()\n\n self.path = f\"{path}Ui_Base/slope1.ui\"\n loadUi(self.path, self)\n\n self.go_btn.clicked.connect(self.slope)\n self.exit_btn.clicked.connect(self.hide)\n\n def slope(self):\n try:\n y2 = float(self.line_1.text())\n y1 = float(self.line_2.text())\n x2 = float(self.line_3.text())\n x1 = float(self.line_4.text())\n\n slope = (y2 - y1) / (x2 - x1)\n self.line_5.setText(str(slope))\n except:\n self.line_5.setText(\"Try Again ...!\")\n\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n window = Slope1Window()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"JordanLeich/Ultimate-Calculator","sub_path":"uis/slope1_ui.py","file_name":"slope1_ui.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"67"} +{"seq_id":"23567809926","text":"import math\nimport numpy as np\nfrom typing import List, Optional, Tuple, Any, Union\n\nimport kapture\nfrom kapture import flatten\nfrom kapture.core.Sensors import Camera\nfrom kapture.utils.logging import getLogger\n\nfrom .pose_operations import pose_transform_distance\n\n\ndef float_iszero(distance: float, threshold: float = 1e-05) -> bool:\n \"\"\"\n Computes if a distance is close to zero modulo an epsilon.\n\n :param distance: distance to evaluate\n :param threshold: the epsilon value\n :return: true or false if the condition is met\n \"\"\"\n return math.isclose(distance, 0.0, rel_tol=threshold, abs_tol=threshold)\n\n\ndef is_distance_within_threshold(pose_distance: Tuple[float, float],\n pose_thresholds: Tuple[float, float] = (1e-05, 1e-05)\n ) -> bool:\n \"\"\"\n compare a pose distance tuple to (0,0) with some thresholds\n\n :param pose_distance: (translation_distance, rotation_distance)\n :type pose_distance: Tuple[float, float]\n :param pose_thresholds: (translation_threshold, rotation_threshold)\n :type pose_thresholds: Tuple[float, float], optional\n :return: True if both rotation and translation distance are within threshold\n :rtype: bool\n \"\"\"\n translation_distance, rotation_distance = pose_distance\n translation_threshold, rotation_threshold = pose_thresholds\n return float_iszero(translation_distance, translation_threshold) \\\n and float_iszero(rotation_distance, rotation_threshold)\n\n\ndef equal_poses(pose_a: kapture.PoseTransform, pose_b: kapture.PoseTransform) -> bool:\n \"\"\"\n Compare the two pose to check if they are equal.\n\n :param pose_a: first pose\n :param pose_b: second pose\n :return: True if they are equal, 
False otherwise\n \"\"\"\n pose_a_nones = (pose_a.r is None, pose_a.t is None)\n pose_b_nones = (pose_b.r is None, pose_b.t is None)\n if pose_a_nones != pose_b_nones:\n return False\n\n pose_distance = pose_transform_distance(pose_a, pose_b)\n if pose_a_nones == (True, True): # a and b have None rotation, None translation\n return True\n elif pose_a_nones == (True, False): # a and b have None rotation, valid translation\n return float_iszero(pose_distance[0])\n elif pose_a_nones == (False, True): # a and b have valid rotation, None translation\n return float_iszero(pose_distance[1])\n else:\n return is_distance_within_threshold(pose_distance)\n\n\ndef equal_camera_params(camera_params_a: List[float], camera_params_b: List[float]) -> bool:\n \"\"\"\n Checks if the camera parameters are equals.\n\n :param camera_params_a: first camera parameters\n :param camera_params_b: second camera parameters\n :return: True if they are equal, False otherwise\n \"\"\"\n return np.isclose([float(v) for v in camera_params_a], [float(v) for v in camera_params_b]).all()\n\n\ndef equal_sensors(sensors_a: Optional[kapture.Sensors], sensors_b: Optional[kapture.Sensors]) -> bool:\n \"\"\"\n Compare two instances of kapture.Sensors.\n model_params for cameras are considered equal if np.isclose says so.\n\n :param sensors_a: first sensor definition\n :param sensors_b: second sensor definition\n :return: True if they are identical, False otherwise.\n \"\"\"\n if sensors_a is None and sensors_b is None:\n return True\n elif sensors_a is None and sensors_b is not None:\n return False\n elif sensors_a is not None and sensors_b is None:\n return False\n\n flattened_a = list(flatten(sensors_a, is_sorted=True))\n flattened_b = list(flatten(sensors_b, is_sorted=True))\n if len(flattened_a) != len(flattened_b):\n getLogger().debug('equal_sensors: a and b do not have the same number of elements')\n return False\n for (sensor_id_a, sensor_a), (sensor_id_b, sensor_b) in zip(flattened_a, flattened_b):\n # handling special case: name_a='' and name_b=None\n equal_id = sensor_id_a == sensor_id_b\n equal_name = (not sensor_a.name and not sensor_b.name) or (sensor_a.name == sensor_b.name)\n equal_type = sensor_a.sensor_type == sensor_b.sensor_type\n\n if not equal_id or not equal_name or not equal_type:\n getLogger().debug(\n f'equal_sensors: ({sensor_id_a}, {sensor_a}) != ({sensor_id_b}, {sensor_b})')\n return False\n\n equal_params = False\n if sensor_a.sensor_type == 'camera':\n assert isinstance(sensor_a, Camera)\n assert isinstance(sensor_b, Camera)\n if sensor_a.camera_type == sensor_b.camera_type:\n equal_params = equal_camera_params(sensor_a.camera_params, sensor_b.camera_params)\n else:\n equal_params = sensor_a.sensor_params == sensor_b.sensor_params\n\n if not equal_params:\n getLogger().debug(\n f'equal_sensors: ({sensor_id_a}, {sensor_a}) != ({sensor_id_b}, {sensor_b})')\n return False\n return True\n\n\ndef equal_rigs(rigs_a: Optional[kapture.Rigs], rigs_b: Optional[kapture.Rigs]) -> bool:\n \"\"\"\n Compare two instances of kapture.Rigs.\n Poses are compared with is_distance_within_threshold(pose_transform_distance())\n\n :param rigs_a: first set of rigs\n :param rigs_b: second set of rigs\n :return: True if they are identical, False otherwise.\n \"\"\"\n if rigs_a is None and rigs_b is None:\n return True\n elif rigs_a is None and rigs_b is not None:\n return False\n elif rigs_a is not None and rigs_b is None:\n return False\n\n flattened_a = list(flatten(rigs_a, is_sorted=True))\n flattened_b = list(flatten(rigs_b, 
is_sorted=True))\n if len(flattened_a) != len(flattened_b):\n getLogger().debug('equal_rigs: a and b do not have the same number of elements')\n return False\n for (rig_id_a, sensor_id_a, pose_a), (rig_id_b, sensor_id_b, pose_b) in zip(flattened_a, flattened_b):\n if rig_id_a != rig_id_b or sensor_id_a != sensor_id_b:\n getLogger().debug(\n f'equal_rigs: ({rig_id_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) !='\n f' ({rig_id_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')\n return False\n if not equal_poses(pose_a, pose_b):\n getLogger().debug(\n f'equal_rigs: ({rig_id_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) '\n f'is not close to '\n f'({rig_id_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')\n return False\n return True\n\n\ndef equal_trajectories(trajectories_a: Optional[kapture.Trajectories],\n trajectories_b: Optional[kapture.Trajectories]) -> bool:\n \"\"\"\n Compare two instances of kapture.Trajectories.\n Poses are compared with is_distance_within_threshold(pose_transform_distance())\n\n :param trajectories_a: first trajectory\n :param trajectories_b: second trajectory\n :return: True if they are identical, False otherwise.\n \"\"\"\n if trajectories_a is None and trajectories_b is None:\n return True\n elif trajectories_a is None and trajectories_b is not None:\n return False\n elif trajectories_a is not None and trajectories_b is None:\n return False\n\n flattened_a = list(flatten(trajectories_a, is_sorted=True))\n flattened_b = list(flatten(trajectories_b, is_sorted=True))\n if len(flattened_a) != len(flattened_b):\n getLogger().debug('equal_trajectories: a and b do not have the same number of elements')\n return False\n for (timestamp_a, sensor_id_a, pose_a), (timestamp_b, sensor_id_b, pose_b) in zip(flattened_a, flattened_b):\n if timestamp_a != timestamp_b or sensor_id_a != sensor_id_b:\n getLogger().debug(\n f'equal_trajectories: ({timestamp_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) !='\n f' ({timestamp_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')\n return False\n if not equal_poses(pose_a, pose_b):\n getLogger().debug(\n f'equal_trajectories: ({timestamp_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) '\n f'is not close to '\n f'({timestamp_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')\n return False\n return True\n\n\ndef log_difference(a: List[Tuple[Any, ...]], b: List[Tuple[Any, ...]], func_name: str, trim_count: int = 5) -> None:\n \"\"\"\n Records in the logger the difference between two values.\n\n :param a: first value\n :param b: second value\n :param func_name: comparison function to print\n :param trim_count: maximum number of values to record\n \"\"\"\n if len(a) != len(b):\n getLogger().debug(f'{func_name}: a and b do not have the same number of elements')\n else:\n diffs = [(va, vb) for va, vb in zip(a, b) if va != vb]\n diffs = diffs[:trim_count]\n diffs = ['({}) != ({})'.format(', '.join([str(f) for f in va]),\n ', '.join([str(f) for f in vb]))\n for va, vb in diffs]\n getLogger().debug('{}:\\n{}'.format(func_name, '\\n'.join(diffs)))\n\n\ndef equal_nested_dict_or_set(data_a, data_b, name_to_log) -> bool:\n \"\"\"\n Compare two instances of dictionary or set\n\n :return: True if they are identical, False otherwise.\n \"\"\"\n if data_a is None and data_b is None:\n return True\n elif data_a is None and data_b is not None:\n return False\n elif data_a is not None and data_b is None:\n return False\n\n flattened_a = list(flatten(data_a, is_sorted=True))\n flattened_b = list(flatten(data_b, is_sorted=True))\n 
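    # flatten(..., is_sorted=True) yields the nested entries as tuples in a
    # deterministic order, so plain list equality below amounts to a full
    # structural comparison of the two inputs.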
are_equal = (flattened_a == flattened_b)\n if not are_equal:\n log_difference(flattened_a, flattened_b, name_to_log)\n return are_equal\n\n\ndef equal_image_features(data_a: Optional[Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]],\n data_b: Optional[Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]]\n ) -> bool:\n \"\"\"\n Compare two instances of kapture features (keypoints, descriptors or global features).\n\n :param data_a: first set of features\n :param data_b: second set of features\n :return: True if they are identical, False otherwise.\n \"\"\"\n if data_a is None and data_b is None:\n return True\n elif data_a is None and data_b is not None:\n return False\n elif data_a is not None and data_b is None:\n return False\n\n # should not happen because of previous lines, use assert to help your ide figure out the type of data\n assert data_a is not None\n assert data_b is not None\n\n if data_a.type_name != data_b.type_name or data_a.dtype != data_b.dtype or data_a.dsize != data_b.dsize:\n return False\n flattened_a = list(flatten(data_a, is_sorted=True))\n flattened_b = list(flatten(data_b, is_sorted=True))\n are_equal = (flattened_a == flattened_b)\n if not are_equal:\n log_difference(flattened_a, flattened_b, 'equal_image_features')\n return are_equal\n\n\ndef equal_records_camera(records_a: Optional[kapture.RecordsCamera],\n records_b: Optional[kapture.RecordsCamera]) -> bool:\n \"\"\"\n Compare two instances of kapture.RecordsCamera.\n\n :param records_a: first set of records\n :param records_b: second set of records\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(records_a, records_b, 'equal_records_camera')\n\n\ndef equal_records_lidar(records_a: Optional[kapture.RecordsLidar],\n records_b: Optional[kapture.RecordsLidar]) -> bool:\n \"\"\"\n Compare two instances of kapture.RecordsLidar.\n\n :param records_a: first set of records\n :param records_b: second set of records\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(records_a, records_b, 'equal_records_lidar')\n\n\ndef equal_records_wifi(records_a: Optional[kapture.RecordsWifi],\n records_b: Optional[kapture.RecordsWifi]) -> bool:\n \"\"\"\n Compare two instances of kapture.RecordsWifi.\n\n :param records_a: first set of records\n :param records_b: second set of records\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(records_a, records_b, 'equal_records_wifi')\n\n\ndef equal_records_gnss(records_a: Optional[kapture.RecordsGnss],\n records_b: Optional[kapture.RecordsGnss]) -> bool:\n \"\"\"\n Compare two instances of kapture.RecordsGnss.\n\n :param records_a: first set of records\n :param records_b: second set of records\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(records_a, records_b, 'equal_records_gnss')\n\n\ndef equal_matches(matches_a: Optional[kapture.Matches],\n matches_b: Optional[kapture.Matches]) -> bool:\n \"\"\"\n Compare two instances of kapture.Matches.\n\n :param matches_a: first set of matches\n :param matches_b: second set of matches\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(matches_a, matches_b, 'equal_matches')\n\n\ndef equal_observations(data_a: Optional[kapture.Observations],\n data_b: Optional[kapture.Observations]) -> bool:\n \"\"\"\n Compare two instances of kapture.Observations.\n\n :param data_a: 
first set of observations\n :param data_b: second set of observations\n :return: True if they are identical, False otherwise.\n \"\"\"\n return equal_nested_dict_or_set(data_a, data_b, 'equal_observations')\n\n\ndef equal_points3d(points3d_a: Optional[kapture.Points3d],\n points3d_b: Optional[kapture.Points3d]) -> bool:\n \"\"\"\n Compare two instances of kapture.Points3d.\n\n :param points3d_a: first set of points3d\n :param points3d_b: second set of points3d\n :return: True if they are identical, False otherwise.\n \"\"\"\n if points3d_a is None and points3d_b is None:\n return True\n elif points3d_a is None and points3d_b is not None:\n return False\n elif points3d_a is not None and points3d_b is None:\n return False\n\n # ide guidance\n assert points3d_a is not None\n assert points3d_b is not None\n\n if len(points3d_a) != len(points3d_b):\n getLogger().debug('equal_points3d: a and b do not have the same number of elements')\n\n # internally converted to array of bool which cannot be a point3d\n bool_array = np.isclose(points3d_a.as_array(), points3d_b.as_array())\n are_equal = bool_array.all()\n if not are_equal:\n diffs = [n for n, b in enumerate(bool_array) if not b]\n diffs = diffs[:15]\n diffs = ['element {} : {} != {}'.format(n, points3d_a[n], points3d_b[n]) for n in diffs]\n getLogger().debug('equal_points3d:\\n{}'.format('\\n'.join(diffs)))\n return are_equal\n\n\ndef equal_kapture(data_a: kapture.Kapture, data_b: kapture.Kapture) -> bool:\n \"\"\"\n Compare two instances of Kapture.\n Poses are compared with is_distance_within_threshold(pose_transform_distance())\n\n :param data_a: first kapture\n :param data_b: second kapture\n :return: True if they are identical, False otherwise.\n \"\"\"\n # compare sensors\n if not equal_sensors(data_a.sensors, data_b.sensors):\n return False\n\n # compare rigs\n if not equal_rigs(data_a.rigs, data_b.rigs):\n return False\n\n # compare trajectories\n if not equal_trajectories(data_a.trajectories, data_b.trajectories):\n return False\n\n # compare records\n if not equal_records_camera(data_a.records_camera, data_b.records_camera):\n return False\n if not equal_records_lidar(data_a.records_lidar, data_b.records_lidar):\n return False\n if not equal_records_wifi(data_a.records_wifi, data_b.records_wifi):\n return False\n if not equal_records_gnss(data_a.records_gnss, data_b.records_gnss):\n return False\n\n # compare image features (keypoints, descriptors, global_features)\n if not equal_image_features(data_a.keypoints, data_b.keypoints):\n return False\n if not equal_image_features(data_a.descriptors, data_b.descriptors):\n return False\n if not equal_image_features(data_a.global_features, data_b.global_features):\n return False\n\n # compare matches\n if not equal_matches(data_a.matches, data_b.matches):\n return False\n\n # compare observations\n if not equal_observations(data_a.observations, data_b.observations):\n return False\n\n # compare points3d\n if not equal_points3d(data_a.points3d, data_b.points3d):\n return False\n return True\n","repo_name":"zebrajack/kapture","sub_path":"kapture/algo/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":16675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"6256678072","text":"from glob import glob\nfrom distutils.util import convert_path\nfrom distutils import log\nimport distutils.command.sdist as orig\nimport os\nimport re\nimport sys\n\nfrom setuptools import svn_utils\nfrom setuptools.compat import PY3\nimport 
pkg_resources\n\nREADMES = ('README', 'README.rst', 'README.txt')\n\n\ndef walk_revctrl(dirname=''):\n \"\"\"Find all files under revision control\"\"\"\n for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):\n for item in ep.load()(dirname):\n yield item\n\n\n# TODO will need test case\nclass re_finder(object):\n \"\"\"\n Finder that locates files based on entries in a file matched by a\n regular expression.\n \"\"\"\n\n def __init__(self, path, pattern, postproc=lambda x: x):\n self.pattern = pattern\n self.postproc = postproc\n self.entries_path = convert_path(path)\n\n def _finder(self, dirname, filename):\n f = open(filename, 'rU')\n try:\n data = f.read()\n finally:\n f.close()\n for match in self.pattern.finditer(data):\n path = match.group(1)\n # postproc was formerly used when the svn finder\n # was an re_finder for calling unescape\n path = self.postproc(path)\n yield svn_utils.joinpath(dirname, path)\n\n def find(self, dirname=''):\n path = svn_utils.joinpath(dirname, self.entries_path)\n\n if not os.path.isfile(path):\n # entries file doesn't exist\n return\n for path in self._finder(dirname, path):\n if os.path.isfile(path):\n yield path\n elif os.path.isdir(path):\n for item in self.find(path):\n yield item\n\n __call__ = find\n\n\ndef _default_revctrl(dirname=''):\n 'Primary svn_cvs entry point'\n for finder in finders:\n for item in finder(dirname):\n yield item\n\n\nfinders = [\n re_finder('CVS/Entries', re.compile(r\"^\\w?/([^/]+)/\", re.M)),\n svn_utils.svn_finder,\n]\n\n\nclass sdist(orig.sdist):\n \"\"\"Smart sdist that finds anything supported by revision control\"\"\"\n\n user_options = [\n ('formats=', None,\n \"formats for source distribution (comma-separated list)\"),\n ('keep-temp', 'k',\n \"keep the distribution tree around after creating \" +\n \"archive file(s)\"),\n ('dist-dir=', 'd',\n \"directory to put the source distribution archive(s) in \"\n \"[default: dist]\"),\n ]\n\n negative_opt = {}\n\n def run(self):\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n self.filelist = ei_cmd.filelist\n self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))\n self.check_readme()\n\n # Run sub commands\n for cmd_name in self.get_sub_commands():\n self.run_command(cmd_name)\n\n # Call check_metadata only if no 'check' command\n # (distutils <= 2.6)\n import distutils.command\n\n if 'check' not in distutils.command.__all__:\n self.check_metadata()\n\n self.make_distribution()\n\n dist_files = getattr(self.distribution, 'dist_files', [])\n for file in self.archive_files:\n data = ('sdist', '', file)\n if data not in dist_files:\n dist_files.append(data)\n\n def __read_template_hack(self):\n # This grody hack closes the template file (MANIFEST.in) if an\n # exception occurs during read_template.\n # Doing so prevents an error when easy_install attempts to delete the\n # file.\n try:\n orig.sdist.read_template(self)\n except:\n sys.exc_info()[2].tb_next.tb_frame.f_locals['template'].close()\n raise\n\n # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle\n # has been fixed, so only override the method if we're using an earlier\n # Python.\n has_leaky_handle = (\n sys.version_info < (2, 7, 2)\n or (3, 0) <= sys.version_info < (3, 1, 4)\n or (3, 2) <= sys.version_info < (3, 2, 1)\n )\n if has_leaky_handle:\n read_template = __read_template_hack\n\n def add_defaults(self):\n standards = [READMES,\n self.distribution.script_name]\n for fn in standards:\n if isinstance(fn, tuple):\n alts = fn\n 
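                # A tuple entry lists interchangeable filenames (here the README
                # variants); the first one found on disk is added to the file
                # list, and a warning is emitted if none exists.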
got_it = 0\n for fn in alts:\n if os.path.exists(fn):\n got_it = 1\n self.filelist.append(fn)\n break\n\n if not got_it:\n self.warn(\"standard file not found: should have one of \" +\n ', '.join(alts))\n else:\n if os.path.exists(fn):\n self.filelist.append(fn)\n else:\n self.warn(\"standard file '%s' not found\" % fn)\n\n optional = ['test/test*.py', 'setup.cfg']\n for pattern in optional:\n files = list(filter(os.path.isfile, glob(pattern)))\n if files:\n self.filelist.extend(files)\n\n # getting python files\n if self.distribution.has_pure_modules():\n build_py = self.get_finalized_command('build_py')\n self.filelist.extend(build_py.get_source_files())\n # This functionality is incompatible with include_package_data, and\n # will in fact create an infinite recursion if include_package_data\n # is True. Use of include_package_data will imply that\n # distutils-style automatic handling of package_data is disabled\n if not self.distribution.include_package_data:\n for _, src_dir, _, filenames in build_py.data_files:\n self.filelist.extend([os.path.join(src_dir, filename)\n for filename in filenames])\n\n if self.distribution.has_ext_modules():\n build_ext = self.get_finalized_command('build_ext')\n self.filelist.extend(build_ext.get_source_files())\n\n if self.distribution.has_c_libraries():\n build_clib = self.get_finalized_command('build_clib')\n self.filelist.extend(build_clib.get_source_files())\n\n if self.distribution.has_scripts():\n build_scripts = self.get_finalized_command('build_scripts')\n self.filelist.extend(build_scripts.get_source_files())\n\n def check_readme(self):\n for f in READMES:\n if os.path.exists(f):\n return\n else:\n self.warn(\n \"standard file not found: should have one of \" +\n ', '.join(READMES)\n )\n\n def make_release_tree(self, base_dir, files):\n orig.sdist.make_release_tree(self, base_dir, files)\n\n # Save any egg_info command line options used to create this sdist\n dest = os.path.join(base_dir, 'setup.cfg')\n if hasattr(os, 'link') and os.path.exists(dest):\n # unlink and re-copy, since it might be hard-linked, and\n # we don't want to change the source version\n os.unlink(dest)\n self.copy_file('setup.cfg', dest)\n\n self.get_finalized_command('egg_info').save_version_info(dest)\n\n def _manifest_is_not_generated(self):\n # check for special comment used in 2.7.1 and higher\n if not os.path.isfile(self.manifest):\n return False\n\n fp = open(self.manifest, 'rbU')\n try:\n first_line = fp.readline()\n finally:\n fp.close()\n return (first_line !=\n '# file GENERATED by distutils, do NOT edit\\n'.encode())\n\n def read_manifest(self):\n \"\"\"Read the manifest file (named by 'self.manifest') and use it to\n fill in 'self.filelist', the list of files to include in the source\n distribution.\n \"\"\"\n log.info(\"reading manifest file '%s'\", self.manifest)\n manifest = open(self.manifest, 'rbU')\n for line in manifest:\n # The manifest must contain UTF-8. 
See #303.\n if PY3:\n try:\n line = line.decode('UTF-8')\n except UnicodeDecodeError:\n log.warn(\"%r not UTF-8 decodable -- skipping\" % line)\n continue\n # ignore comments and blank lines\n line = line.strip()\n if line.startswith('#') or not line:\n continue\n self.filelist.append(line)\n manifest.close()\n","repo_name":"rootsongjc/kubernetes-handbook","sub_path":"node_modules/gitbook-plugin-sitemap/node_modules/sitemap/env/lib/python2.7/site-packages/setuptools/command/sdist.py","file_name":"sdist.py","file_ext":"py","file_size_in_byte":8559,"program_lang":"python","lang":"en","doc_type":"code","stars":10808,"dataset":"github-code","pt":"67"} +{"seq_id":"71137769173","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\ndef merge_alternate(head1, head2):\r\n # Traverse the first linked list and keep a pointer to the next node to be merged\r\n curr1 = head1\r\n curr2 = head2\r\n while curr1 and curr2:\r\n next1 = curr1.next\r\n # Insert the next node to be merged from the first linked list after the current node in the second linked list\r\n curr1.next = curr2.next\r\n curr2.next = curr1\r\n curr1 = next1\r\n curr2 = curr2.next.next\r\n return head2\r\n","repo_name":"mousumimall20/DSA-Assessments","sub_path":"no3.py","file_name":"no3.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21981514229","text":"import scipy.sparse as sp\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom app.utils.preprocessor import Preprocessor\nfrom app.utils.feature_combiner import FeatureCombiner\nfrom app.bootstrap import sub_obj_clf, sub_obj_vocab, pos_neg_clf, pos_neg_vocab, _obj_sub_idfs, _pos_neg_idfs\n\n\nclass ObjSubVectoriser(TfidfVectorizer):\n \"\"\"Subclass of TfidfVectorizer to instantiate idf values.\"\"\"\n TfidfVectorizer.idf_ = _obj_sub_idfs\n\n\nclass PosNegVectoriser(TfidfVectorizer):\n \"\"\"Subclass of TfidfVectorizer to instantiate idf values.\"\"\"\n TfidfVectorizer.idf_ = _pos_neg_idfs\n\n\ndef get_vectorisor(preprocess, vocabulary, obj_vs_sub):\n\n if obj_vs_sub:\n vec = ObjSubVectoriser(tokenizer=preprocess,\n lowercase=False,\n min_df=1,\n max_df=0.8,\n ngram_range=(1, 1),\n norm='l2')\n vec._tfidf._idf_diag = sp.spdiags(_obj_sub_idfs,\n diags=0,\n m=len(_obj_sub_idfs),\n n=len(_obj_sub_idfs))\n else:\n vec = PosNegVectoriser(tokenizer=preprocess,\n lowercase=False,\n min_df=1,\n max_df=0.8,\n ngram_range=(1, 2),\n norm='l2')\n vec._tfidf._idf_diag = sp.spdiags(_pos_neg_idfs,\n diags=0,\n m=len(_pos_neg_idfs),\n n=len(_pos_neg_idfs))\n\n vec.vocabulary_ = vocabulary\n return vec\n\n\ndef get_prediction(tweet, obj_vs_sub, clf, vocab):\n\n def preprocess(s):\n if obj_vs_sub:\n return preprocessor.tokenise(s, True)\n else:\n return preprocessor.tokenise(s, False)\n\n preprocessor = Preprocessor()\n\n vec = get_vectorisor(preprocess, vocab, obj_vs_sub)\n tfidf_matrix = vec.transform(tweet)\n\n if not obj_vs_sub:\n return clf.predict(tfidf_matrix)\n\n feat_comb = FeatureCombiner()\n feat_matrix = feat_comb.transform(tfidf_matrix.todense(), preprocessor)\n return clf.predict(feat_matrix)\n\n\ndef predict(tweet):\n \"\"\"Classifies tweets as positive, negative or neutral.\n\n Args:\n tweet (list: str): List containing a single tweet string.\n\n Returns:\n list: int: [2] if neutral, [0] if negative, [4] if positive.\n\n \"\"\"\n # Objective vs Subjective clf\n y_pred = get_prediction(tweet, True, sub_obj_clf, 
sub_obj_vocab)\n    if y_pred.tolist() == [0]:\n        return [2]\n\n    # Positive vs Negative clf\n    y_pred = get_prediction(tweet, False, pos_neg_clf, pos_neg_vocab)\n    return y_pred.tolist()\n","repo_name":"jamieconnelly/SentiVis","sub_path":"sentiment_webservice/app/utils/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"36334095381","text":"import random\nimport torch\nimport collections\nimport re\nfrom matplotlib import pyplot as plt\n\n\ndef read_time_machine():\n    \"\"\"Load the time machine dataset into a list of text lines\"\"\"\n    with open('time_machine', 'r') as f:\n        lines = f.readlines()\n    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]\n\n\ndef tokensize(f_lines, f_token = 'word'):\n    if f_token == 'word':\n        return [line.split() for line in f_lines]\n    elif f_token == 'char':\n        return [list(line) for line in f_lines]\n    else: \n        print('error ' + f_token)\n\n\nclass Vocab:\n    \"\"\"Vocabulary for text.\"\"\"\n    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):\n        if tokens is None:\n            tokens = []\n        if reserved_tokens is None:\n            reserved_tokens = []\n        counter = count_corpus(tokens)\n        self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True)\n        self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens\n        uniq_tokens += [\n            token for token, freq in self.token_freqs\n            if freq >= min_freq and token not in uniq_tokens\n        ]\n        self.idx_to_token, self.token_to_idx = [], dict()\n        for token in uniq_tokens:\n            self.idx_to_token.append(token)\n            self.token_to_idx[token] = len(self.idx_to_token) - 1\n\n    def __len__(self):\n        return len(self.idx_to_token)\n\n    def __getitem__(self, tokens):\n        if not isinstance(tokens, (list, tuple)):\n            return self.token_to_idx.get(tokens, self.unk)\n        return [self.__getitem__(token) for token in tokens]\n\n    def to_tokens(self, indices):\n        if not isinstance(indices, (list, tuple)):\n            return self.idx_to_token[indices]\n        return [self.idx_to_token[index] for index in indices]\n\n\ndef count_corpus(f_tokens):\n    \"\"\"Count token frequencies.\"\"\"\n    if len(f_tokens) == 0 or isinstance(f_tokens[0], list):\n        f_tokens = [token for line in f_tokens for token in line]\n    return collections.Counter(f_tokens)\n\n\ndef load_corpus_time_machine(max_tokens = -1):\n    lines = read_time_machine()\n    tokens = tokensize(lines, 'char')\n    vocab = Vocab(tokens)\n    corpus = [vocab[token] for line in tokens for token in line]\n    if max_tokens > 0:\n        corpus = corpus[: max_tokens]\n    return corpus, vocab\n\n\ntokens = tokensize(read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = Vocab(corpus)\n# print(\"vocab.idx_to_token[:10]\", vocab.idx_to_token[:10])\nfreqs = [freq for _, freq in vocab.token_freqs]\n# print('freqs[:10] = ', freqs[:10])\n\n\ndef set_figsize(figsize=(3.5, 2.5)):\n    \"\"\"Set the figure size for matplotlib.\n    Defined in :numref:`sec_calculus`\"\"\"\n    plt.rcParams['figure.figsize'] = figsize\n\n\ndef set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n    \"\"\"Set the axes for matplotlib.\n\n    Defined in :numref:`sec_calculus`\"\"\"\n    axes.set_xlabel(xlabel)\n    axes.set_ylabel(ylabel)\n    axes.set_xscale(xscale)\n    axes.set_yscale(yscale)\n    axes.set_xlim(xlim)\n    axes.set_ylim(ylim)\n    if legend:\n        axes.legend(legend)\n    axes.grid()\n\n\ndef plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,\n         ylim=None, xscale='linear', yscale='linear',\n         fmts=('-', 
'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):\n \"\"\"Plot data points.\n\n Defined in :numref:`sec_calculus`\"\"\"\n if legend is None:\n legend = []\n\n set_figsize(figsize)\n axes = axes if axes else plt.gca()\n\n # Return True if `X` (tensor or list) has 1 axis\n def has_one_axis(X):\n return (hasattr(X, \"ndim\") and X.ndim == 1 or isinstance(X, list)\n and not hasattr(X[0], \"__len__\"))\n\n if has_one_axis(X):\n X = [X]\n if Y is None:\n X, Y = [[]] * len(X), X\n elif has_one_axis(Y):\n Y = [Y]\n if len(X) != len(Y):\n X = X * len(Y)\n axes.cla()\n for x, y, fmt in zip(X, Y, fmts):\n if len(x):\n axes.plot(x, y, fmt)\n else:\n axes.plot(y, fmt)\n set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)\n\n\n# plot(freqs, xlabel='token: x', ylabel='frequency: n(x)', xscale='log', yscale='log')\n# plt.savefig('frequency.png')\n# plt.show()\n\nbigram_tokens = [pair for pair in zip(corpus[:-1], corpus[1:])]\nbigram_vocab = Vocab(bigram_tokens)\n# print('bigram_vovab.token_freqs[:10] = ', bigram_vovab.token_freqs[:10])\n\n# zip_list = [1, 2, 3, 4, 5, 6]\n# print('zip_list[1:3] = ', zip_list[1:6])\n# print('zip_list[:-1] = ', zip_list[:-1])\n# print('zip_list[:-2] = ', zip_list[:-2])\n# print('zip_list[1:-1] = ', zip_list[1:-1])\n# # zip_list[1:3] = [2, 3, 4, 5, 6]\n# # list[a:b]: the first number means the index and \n# the second number means the place(the 6th element)\n# # zip_list[:-1] = [1, 2, 3, 4, 5]\n# # zip_list[:-2] = [1, 2, 3, 4]\n# # zip_list[1:-1] = [2, 3, 4, 5]\n# pair_list = [pair for pair in zip(zip_list[:-1], zip_list[1:])]\n# print(\"pair_list = \", pair_list)\n# # pair_list = [(1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]\n\ntrigram_tokens = [triple for triple in zip(corpus[:-2], corpus[1:-1], corpus[2:])]\ntrigram_vocab = Vocab(trigram_tokens)\n# print('trigram_vocab.token_freqs[:5] = ', trigram_vocab.token_freqs[:5])\n# trigram_vocab.token_freqs[:10] = [(('the', 'time', 'traveller'), 59), \n# (('the', 'time', 'machine'), 30), \n# (('the', 'medical', 'man'), 24), \n# (('it', 'seemed', 'to'), 16), \n# (('it', 'was', 'a'), 15), \n\nbigram_freqs = [freq for _, freq in bigram_vocab.token_freqs]\ntrigram_freqs = [freq for _, freq in trigram_vocab.token_freqs]\nplot([freqs, bigram_freqs, trigram_freqs], xlabel='token: x', ylabel='frequency: n(x)', \n xscale='log', yscale='log', legend=['unigram', 'bigram', 'trigram'])\n# plt.savefig('3frquency.png')\n# plt.show()\n\n\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n \"\"\"Generate a minibatch of subsequences using random sampling.\"\"\"\n corpus = corpus[random.randint(0, num_steps - 1):]\n # both included\n num_subsequences = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subsequences * num_steps, num_steps))\n random.shuffle(initial_indices)\n\n def data(pos):\n return corpus[pos:pos + num_steps]\n \n num_batches = num_subsequences // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i:i + batch_size]\n x = [data(j) for j in initial_indices_per_batch]\n y = [data(j+1) for j in initial_indices_per_batch]\n yield torch.tensor(x), torch.tensor(y)\n\n\n# print('list(range(0, 4, 4)) = ', list(range(0, 4, 4)))\n# # list(range(0, 4, 4)) = [0]\n# my_seq = list(range(35))\n# for x, y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):\n# print('x = ', x, 'y = ', y)\n# # num_subsequences = 6\n# # initial_indices = [0, 5, 10, 15, 20, 25]\n# # initial_indices_per_batch = [20, 0]\n# # x = [[20, 21, 22, 23, 24], [0, 1, 2, 
3, 4]]\n# # y = [[21, 22, 23, 24, 25], [1, 2, 3, 4, 5]]\n# # x = tensor([[20, 21, 22, 23, 24],\n# # [ 0, 1, 2, 3, 4]]) \n# # y = tensor([[21, 22, 23, 24, 25],\n# # [ 1, 2, 3, 4, 5]])\n# # initial_indices_per_batch = [10, 25]\n# # x = [[10, 11, 12, 13, 14], [25, 26, 27, 28, 29]]\n# # y = [[11, 12, 13, 14, 15], [26, 27, 28, 29, 30]]\n# # x = tensor([[10, 11, 12, 13, 14],\n# # [25, 26, 27, 28, 29]]) \n# # y = tensor([[11, 12, 13, 14, 15],\n# # [26, 27, 28, 29, 30]])\n# # initial_indices_per_batch = [15, 5]\n# # x = [[15, 16, 17, 18, 19], [5, 6, 7, 8, 9]]\n# # y = [[16, 17, 18, 19, 20], [6, 7, 8, 9, 10]]\n# # x = tensor([[15, 16, 17, 18, 19],\n# # [ 5, 6, 7, 8, 9]]) \n# # y = tensor([[16, 17, 18, 19, 20],\n# # [ 6, 7, 8, 9, 10]])\n\n\ndef seq_data_iter_sequential(corpus, batch_size, num_steps): \n \"\"\"Generate a minibatch of subsequences using sequential partitioning\"\"\"\n offset = random.randint(0, num_steps)\n print('offset = ', offset)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n print('num_tokens = ', num_tokens)\n xs = torch.tensor(corpus[offset:offset + num_tokens])\n print('xs = ', xs)\n ys = torch.tensor(corpus[offset + 1:offset + num_tokens + 1])\n print('ys = ', ys)\n xs, ys = xs.reshape(batch_size, -1), ys.reshape(batch_size, -1)\n print('xs = ', xs)\n print('ys = ', ys)\n num_batches = xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n x = xs[:, i:i + num_steps]\n y = ys[:, i:i + num_steps]\n yield x, y\n\n\n# my_seq = list(range(35))\n# for x, y in seq_data__iter_sequential(my_seq, batch_size=2, num_steps=5):\n# print('x = ', x, '\\ny = ', y)\n# offset = 2\n# num_tokens = 32\n# xs = tensor([ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n# 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33])\n# ys = tensor([ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n# 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])\n# xs = tensor([[ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\n# [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]])\n# ys = tensor([[ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],\n# [19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]])\n# x = tensor([[ 2, 3, 4, 5, 6],\n# [18, 19, 20, 21, 22]]) \n# y = tensor([[ 3, 4, 5, 6, 7],\n# [19, 20, 21, 22, 23]])\n# x = tensor([[ 7, 8, 9, 10, 11],\n# [23, 24, 25, 26, 27]]) \n# y = tensor([[ 8, 9, 10, 11, 12],\n# [24, 25, 26, 27, 28]])\n# x = tensor([[12, 13, 14, 15, 16],\n# [28, 29, 30, 31, 32]]) \n# y = tensor([[13, 14, 15, 16, 17],\n# [29, 30, 31, 32, 33]])\n\nclass SeqDataLoader: #@save\n \"\"\"An iterator to load sequence data.\"\"\"\n def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):\n if use_random_iter:\n self.data_iter_fn = seq_data_iter_random\n else:\n self.data_iter_fn = seq_data_iter_sequential\n self.corpus, self.vocab = load_corpus_time_machine(max_tokens)\n self.batch_size, self.num_steps = batch_size, num_steps\n\n def __iter__(self):\n return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)\n\n\ndef load_data_time_machine(batch_size, num_steps, use_random_iter=False, max_tokens=10000):\n \"\"\"Return the iterator and the vocabulary of the time machine dataset.\"\"\"\n data_iter = SeqDataLoader(batch_size, num_steps, use_random_iter, max_tokens)\n return data_iter, 
data_iter.vocab\n","repo_name":"MildCloud/LearningDeepLearningByHand","sub_path":"LanguageModel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20861064738","text":"# -*- mode: python -*-\nversion = \"1.3.2\"\na = Analysis([os.path.join(HOMEPATH,'support/_mountzlib.py'),\nos.path.join(HOMEPATH,'support/useUnicode.py'), 'luminoso/app.py'],\n pathex=['pyinstaller', \n 'luminoso/lib/standalone_nlp',\n 'luminoso/lib',\n '.',\n ])\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=1,\n name=os.path.join('build/pyi.darwin/luminoso', 'luminoso'),\n debug=False,\n strip=False,\n upx=True,\n console=1 )\ncoll = COLLECT( exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name=os.path.join('dist', 'luminoso'))\napp = BUNDLE(exe,\n name=os.path.join('dist', 'Luminoso.app'),\n version=version)\n","repo_name":"commonsense/luminoso","sub_path":"luminoso.spec","file_name":"luminoso.spec","file_ext":"spec","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"67"} +{"seq_id":"30755887968","text":"import scrapy\nfrom scrapy.http import FormRequest\nfrom scrapy.utils.response import open_in_browser\n\nfrom ..items import QuotesItem\n\n\nclass LoginQuoteScraper(scrapy.Spider):\n name = \"LoginQuoteScraper\"\n start_urls = [\"http://quotes.toscrape.com/login\"]\n\n def _parse(self, response, **kwargs):\n csrf_token = response.css(\"input[name='csrf_token']::attr(value)\").extract_first()\n print(csrf_token)\n return FormRequest.from_response(response, formdata={\n \"csrf_token\": csrf_token,\n \"username\": \"devil\",\n \"password\": \"password\"\n }, callback=self.start_scraping, dont_filter=True)\n\n def start_scraping(self, response):\n open_in_browser(response)\n item = QuotesItem()\n for quote in response.css(\".quote\"):\n title = quote.css(\".text::text\").extract_first()\n author = quote.css(\".author::text\").extract_first()\n item[\"title\"] = title\n item[\"author\"] = author\n yield item\n","repo_name":"chaudharypraveen98/QuotesTutorial","sub_path":"quotes/quotes/spiders/LoginQuoteScraper.py","file_name":"LoginQuoteScraper.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16391422545","text":"#!/usr/bin/env python3\n# !python3\n# coding: utf-8\n\nimport os\nimport time\nimport sys\n\n\ndef remove_nonascii_characters(input_string):\n output_string = str(input_string).encode('utf-8', 'ignore').decode().encode('ascii', 'ignore').decode()\n return output_string\n\n\ndef write_log(information_string):\n output_file = open('botlog.log', mode='a', encoding='utf-8')\n output_file.write(time.strftime('%Y-%m-%d %H:%M:%S') + ' - ' + str(information_string) + '\\n')\n output_file.close()\n\n\ndef fix_windows_filenames(input_string):\n output_string = input_string\n \n output_string = output_string.replace('\\\\', '')\n output_string = output_string.replace('/', '')\n output_string = output_string.replace(':', '_')\n output_string = output_string.replace('*', '')\n output_string = output_string.replace('?', '')\n output_string = output_string.replace('\"', '')\n output_string = output_string.replace('<', '')\n output_string = output_string.replace('>', '')\n output_string = output_string.replace('|', '')\n \n return output_string\n\n\ndef 
remove_zalgo_from_string(input_string):\n output_string = ''.join(chr(x) for x in map(ord, input_string) if not (\n 767 < x < 880 or 6831 < x < 6912 or 7615 < x < 7680 or 8399 < x < 8448 or 65055 < x < 65072))\n \n return output_string\n\n\ndef replace_unicodes(input_string):\n output_string = str(input_string)\n output_string = output_string.replace(\"\\u2019\", \"'\")\n return output_string\n\n\ndef string_to_list(input_data):\n input_data = str(input_data)\n \n output_list = []\n if os.path.isfile(input_data):\n with open(input_data, mode='r', encoding='utf-8') as f:\n output_list = f.read().splitlines()\n else:\n if ',' in str(input_data):\n output_list = input_data.split(',')\n else:\n output_list.append(input_data)\n \n # Trim all entries\n output_list = list(map(str.strip, output_list))\n \n # Removes empty entries\n output_list = list(filter(None, output_list))\n \n return output_list\n\n\ndef get_nextcloud_path():\n nextcloud_path = ''\n nextcloud_config_path = os.path.join(os.environ['LOCALAPPDATA'], r'Nextcloud\\nextcloud.cfg')\n \n if os.path.isfile(nextcloud_config_path):\n \n with open(nextcloud_config_path, mode='r', encoding='utf-8') as ins:\n for line in ins:\n if 'localPath' in line:\n nextcloud_path = str(line[line.find('=') + 1:]).strip().replace('/', '\\\\')\n \n else:\n sys.exit('Nextcloud not installed on machine or not where expected (' + nextcloud_config_path + ')')\n \n return nextcloud_path\n\n\ndef get_wordcount_approximation_string(wordcount_old, wordcount_new):\n wordcount_updated = wordcount_new - wordcount_old\n\n if wordcount_updated > 1000:\n # wordcount_updated = '~' + str(int(round(wordcount_updated / 1000))) + 'K'\n wordcount_updated = str(round(wordcount_updated / 1000, 1))\n\n # If it happens to be an even number, remove decimals, else approximate if it's above .5, then add a plus\n if wordcount_updated.endswith('.0'):\n wordcount_updated = wordcount_updated[:-len('.0')]\n wordcount_updated = wordcount_updated + 'K' # WordCount in thousands, with one decimal\n else:\n wordcount_pri, wordcount_dec = wordcount_updated.split('.')\n wordcount_updated = wordcount_pri + 'K'\n\n if int(wordcount_dec) > 5:\n wordcount_updated = wordcount_updated + '+'\n else:\n wordcount_updated = str(wordcount_updated)\n\n return wordcount_updated\n\n\nbooks_folder_path = os.path.join(get_nextcloud_path(), 'eBooks')\nrequests_file_path = os.path.join(books_folder_path, 'Requests')\n\nfff_binary_path = r'C:\\Python37\\Scripts\\fanficfare.exe'\nfff_config_path = r'config_fanficfare.ini'\nnotified_books_file = r'config_notified_books.txt'\nsites_supporting_chapter_dates = ['fsb', 'fsv']\nsupported_domains = r'config_supported_domains.yaml'\nliterature_channel_id = r'CBETU8AGH' # dev (Change this variable when running tests)\n# literature_channel_id = r'C947E0DHD' # literature\nslack_token = os.environ[\"SLACK_API_TOKEN\"]\nrequest_max_timeout = 90\n","repo_name":"Knarkoffer/bookbot","sub_path":"bot_general.py","file_name":"bot_general.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71727577492","text":"from typing import TYPE_CHECKING, List\nfrom uuid import UUID\n\nfrom sqlalchemy import Column, ForeignKey, UniqueConstraint, func\nfrom sqlalchemy import types as satypes\nfrom sqlalchemy.dialects.postgresql import BYTEA\nfrom sqlalchemy.dialects.postgresql import UUID as _PGUUID\n\nif TYPE_CHECKING:\n from sqlalchemy.ext.declarative import declarative_base\nelse:\n from 
sqlalchemy.orm import declarative_base\nfrom sqlalchemy.orm import RelationshipProperty, relationship\nfrom sqlalchemy.schema import CheckConstraint, Identity, MetaData # type: ignore\n\nnaming_convention = {\n \"ix\": \"ix_%(column_0_label)s\",\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\",\n}\n\n\nmetadata = MetaData(naming_convention=naming_convention)\n\nBase = declarative_base(metadata=metadata)\n\n# https://github.com/dropbox/sqlalchemy-stubs/issues/94\nif TYPE_CHECKING:\n PGUUID = satypes.TypeEngine[UUID]\nelse:\n PGUUID = _PGUUID(as_uuid=True)\n\n\nMETADATA_SCHEMA_TABLE_ARG = {\"schema\": \"metadata\"}\n\n\nclass User(Base):\n __tablename__ = \"users\"\n __table_args__ = (\n # FIXME: this regex is incorrect - A-z allows other characters, such as _\n # the correct regex should be ^[A-z][-A-Za-z0-9]+$ but first\n # implementing this ban at application level before correcting this\n # here.\n CheckConstraint(\"username ~ '^[A-z][-A-z0-9]+$'\", \"username_format\"),\n CheckConstraint(\"char_length(username) <= 200\", \"username_length\"),\n METADATA_SCHEMA_TABLE_ARG,\n )\n\n user_uuid = Column(PGUUID, primary_key=True)\n username = Column(satypes.String, nullable=False, unique=True, index=True)\n password_hash = Column(satypes.String, nullable=False)\n timezone = Column(satypes.String, nullable=False)\n registered = Column(satypes.DateTime(timezone=True), nullable=False)\n\n email_obj: \"RelationshipProperty[UserEmail]\" = relationship(\n \"UserEmail\", uselist=False, backref=\"user\"\n )\n\n api_key: \"RelationshipProperty[APIKey]\" = relationship(\n \"APIKey\", uselist=False, backref=\"user\"\n )\n\n table_objs: \"RelationshipProperty[List[Table]]\" = relationship(\n \"Table\", uselist=True, backref=\"user\"\n )\n\n praise_objs: \"RelationshipProperty[List[Praise]]\" = relationship(\n \"Praise\", uselist=True, backref=\"user_objs\"\n )\n\n stripe_customers_obj: \"RelationshipProperty[StripeCustomer]\" = relationship(\n \"StripeCustomer\", uselist=False, backref=\"user_obj\"\n )\n\n payment_references: \"RelationshipProperty[List[PaymentReference]]\" = relationship(\n \"PaymentReference\", uselist=True, backref=\"user_objs\"\n )\n\n\nclass UserEmail(Base):\n __tablename__ = \"user_emails\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n user_uuid = Column(PGUUID, ForeignKey(\"metadata.users.user_uuid\"), primary_key=True)\n email_address = Column(satypes.String(length=200), nullable=False, index=True)\n\n\nclass APIKey(Base):\n __tablename__ = \"api_keys\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n user_uuid = Column(PGUUID, ForeignKey(\"metadata.users.user_uuid\"), primary_key=True)\n api_key = Column(BYTEA(length=16), nullable=False, unique=True, index=True)\n\n\nclass Table(Base):\n __tablename__ = \"tables\"\n __table_args__ = (\n CheckConstraint(\"table_name ~ '^[A-z][-A-z0-9]+$'\", \"table_name_format\"),\n CheckConstraint(\"char_length(table_name) <= 200\", \"table_name_length\"),\n CheckConstraint(\"char_length(caption) <= 200\", \"caption_length\"),\n UniqueConstraint(\"user_uuid\", \"table_name\"),\n METADATA_SCHEMA_TABLE_ARG,\n )\n\n table_uuid = Column(PGUUID, primary_key=True)\n user_uuid = Column(PGUUID, ForeignKey(\"metadata.users.user_uuid\"), nullable=False)\n public = Column(satypes.Boolean, nullable=False)\n created = Column(\n satypes.DateTime(timezone=True), default=func.now(), 
nullable=False, index=True\n )\n licence_id = Column(\n satypes.SmallInteger,\n ForeignKey(\"metadata.data_licences.licence_id\"),\n nullable=False,\n )\n table_name = Column(satypes.String, nullable=False, index=True)\n caption = Column(\n satypes.String,\n nullable=False,\n )\n last_changed = Column(\n satypes.DateTime(timezone=True), default=func.now(), nullable=False, index=True\n )\n\n readme_obj: \"RelationshipProperty[TableReadme]\" = relationship(\n \"TableReadme\", uselist=False, backref=\"table\"\n )\n\n praise_objs: \"RelationshipProperty[List[Praise]]\" = relationship(\n \"Praise\", uselist=True, backref=\"table_objs\"\n )\n\n\nclass TableReadme(Base):\n __tablename__ = \"table_readmes\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n table_uuid = Column(\n PGUUID, ForeignKey(\"metadata.tables.table_uuid\"), primary_key=True\n )\n readme_markdown = Column(satypes.String(length=10_000), nullable=False)\n\n\nclass DataLicence(Base):\n __tablename__ = \"data_licences\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n licence_id = Column(satypes.SmallInteger, primary_key=True, autoincrement=False)\n licence_name = Column(satypes.String)\n\n\nclass Praise(Base):\n __tablename__ = \"praise\"\n __table_args__ = (\n UniqueConstraint(\"user_uuid\", \"table_uuid\"),\n METADATA_SCHEMA_TABLE_ARG,\n )\n\n praise_id = Column(satypes.BigInteger, Identity(), primary_key=True)\n table_uuid = Column(\n PGUUID, ForeignKey(\"metadata.tables.table_uuid\"), nullable=False, index=True\n )\n user_uuid = Column(\n PGUUID, ForeignKey(\"metadata.users.user_uuid\"), nullable=False, index=True\n )\n praised = Column(\n satypes.TIMESTAMP(timezone=True),\n nullable=False,\n index=True,\n server_default=func.current_timestamp(),\n )\n\n\nclass ProhibitedUsername(Base):\n __tablename__ = \"prohibited_usernames\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n username = Column(satypes.String, nullable=False, primary_key=True)\n\n\nclass PaymentReference(Base):\n __tablename__ = \"payment_references\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n payment_reference_uuid = Column(PGUUID, primary_key=True)\n user_uuid = Column(\n PGUUID, ForeignKey(\"metadata.users.user_uuid\"), index=True, nullable=False\n )\n payment_reference = Column(satypes.String, index=True, unique=True, nullable=False)\n created = Column(\n satypes.DateTime(timezone=True), default=func.now(), nullable=False, index=True\n )\n\n\nclass StripeCustomer(Base):\n __tablename__ = \"stripe_customers\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n user_uuid = Column(\n PGUUID,\n ForeignKey(\"metadata.users.user_uuid\"),\n primary_key=True,\n )\n stripe_customer_id = Column(satypes.String, nullable=False, index=True, unique=True)\n created = Column(\n satypes.DateTime(timezone=True), default=func.now(), nullable=False, index=True\n )\n\n\nclass StripeSubscription(Base):\n __tablename__ = \"stripe_subscriptions\"\n __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n stripe_subscription_id = Column(satypes.String, primary_key=True)\n user_uuid = Column(\n PGUUID, ForeignKey(\"metadata.users.user_uuid\"), nullable=False, index=True\n )\n stripe_subscription_status_id = Column(\n satypes.SmallInteger,\n ForeignKey(\n \"metadata.stripe_subscription_statuses.stripe_subscription_status_id\"\n ),\n index=True,\n nullable=False,\n )\n created = Column(\n satypes.DateTime(timezone=True), default=func.now(), nullable=False, index=True\n )\n updated = Column(\n satypes.DateTime(timezone=True),\n default=func.now(),\n nullable=False,\n 
index=True,\n        onupdate=func.now(),\n    )\n    ttl = Column(satypes.DateTime(timezone=True), nullable=False, index=True)\n\n\nclass StripeSubscriptionStatus(Base):\n    __tablename__ = \"stripe_subscription_statuses\"\n    __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n    stripe_subscription_status_id = Column(\n        satypes.SmallInteger, primary_key=True, unique=True, autoincrement=False\n    )\n    stripe_subscription_status = Column(satypes.String, nullable=False, unique=True)\n\n\nclass Copy(Base):\n    __tablename__ = \"copies\"\n    __table_args__ = (METADATA_SCHEMA_TABLE_ARG,)\n\n    copy_id = Column(satypes.BigInteger, Identity(), unique=True, index=True)\n    from_uuid = Column(\n        PGUUID,\n        ForeignKey(\"metadata.tables.table_uuid\"),\n        nullable=False,\n        index=True,\n        primary_key=True,\n    )\n    to_uuid = Column(\n        PGUUID,\n        ForeignKey(\"metadata.tables.table_uuid\"),\n        nullable=False,\n        index=True,\n        primary_key=True,\n    )\n\n    created = Column(\n        satypes.DateTime(timezone=True), default=func.now(), nullable=False, index=True\n    )\n\n    from_obj: \"RelationshipProperty[Table]\" = relationship(\n        \"Table\",\n        uselist=False,\n        foreign_keys=[from_uuid],\n    )\n    to_obj: \"RelationshipProperty[Table]\" = relationship(\n        \"Table\",\n        uselist=False,\n        foreign_keys=[to_uuid],\n    )\n","repo_name":"calpaterson/csvbase","sub_path":"csvbase/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"67"}
{"seq_id":"9624805548","text":"import sys\nimport re\n\nfrom django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nfrom core.views import IndexView, MatchesView, PlayersView, TableView, MatchView\n\nurlpatterns = patterns(\n    '',\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^$', IndexView.as_view(), name=\"index\"),\n    url(r'^api/league/(?P<league>\\w+)/matches/$', MatchesView.as_view()),\n    url(r'^api/league/(?P<league>\\w+)/players/$', PlayersView.as_view()),\n    url(r'^api/league/(?P<league>\\w+)/tables/(?P<table>\\w+)/$', TableView.as_view()),\n    url(r'^api/matches/(?P<match_id>[0-9]+)/$', MatchView.as_view()),\n) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n# http://stackoverflow.com/questions/8258417\nif 'test' in sys.argv:\n    static_url = re.escape(settings.STATIC_URL.lstrip('/'))\n    urlpatterns += patterns(\n        '',\n        url(\n            r'^%s(?P<path>.*)$' % static_url,\n            'django.views.static.serve',\n            {\n                'document_root': settings.STATIC_ROOT,\n            }\n        ),\n    )\n\n","repo_name":"artzmb/hhkl-backend","sub_path":"hhkl/hhkl/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73977600212","text":"import numpy as np\nimport pandas as pd\nimport yfinance as yf\n\npayload=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\nfirst_table = payload[0]\nsecond_table = payload[1]\n\ndf = first_table\nsymbols = df['Symbol'].values.tolist()\nprint(len(symbols))\ntest = yf.download(symbols, period='1y')\ntest.head()\ntest_2 = test[test.columns[1:8]]\ntest_2 = test_2['Adj 
Close']\nprint(test_2)\ntest_2.to_csv('dataset_piu_variabili.csv')","repo_name":"ferrantz/Qprove","sub_path":"ottimizzazione_portafoglio/confronto_qiskit_metodo_classico/utils/operazione_dataset.py","file_name":"operazione_dataset.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33418417684","text":"import logging\nfrom flask import Flask, Response, request\nfrom google.appengine.api import wrap_wsgi_app\nfrom google.appengine.api import app_identity\nimport json\nimport base64\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom googleapiclient.discovery import build\nfrom google.cloud import storage\nimport os\nimport config\nimport random\nimport string\nimport re\n\n# logging.basicConfig(level=logging.DEBUG)\n\napp = Flask(__name__)\napp.wsgi_app = wrap_wsgi_app(app.wsgi_app)\n\n\ndef set_last_end_time(project_id, bucket_name, end_time_str, offset):\n \"\"\"Write the end_time as a string value in a JSON object in GCS.\n This file is used to remember the last end_time in case one isn't provided\n \"\"\"\n # get the datetime object\n end_time = datetime.strptime(end_time_str, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n delta = timedelta(seconds=offset)\n # Add offset seconds & convert back to str\n end_time_calc = end_time + delta\n end_time_calc_str = end_time_calc.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n file_name = \"{}.{}\".format(project_id, config.LAST_END_TIME_FILENAME)\n\n logging.debug(\n \"set_last_end_time - end_time_str: {}, end_time_Calc_str: {}\".format(\n end_time_str, end_time_calc_str\n )\n )\n end_time_str_json = {\"end_time\": end_time_calc_str}\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n with blob.open(\"w\") as f:\n f.write(json.dumps(end_time_str_json))\n\n return end_time_calc_str\n\n\ndef get_last_end_time(project_id, bucket_name):\n \"\"\"Get the end_time as a string value from a JSON object in GCS.\n This file is used to remember the last end_time in case one isn't provided\n \"\"\"\n last_end_time_str = \"\"\n file_name = \"{}.{}\".format(project_id, config.LAST_END_TIME_FILENAME)\n logging.debug(\"get_last_end_time - file_name: {}\".format(file_name))\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n try:\n with blob.open(\"r\") as f:\n contents = f.read()\n logging.debug(\"GCS FILE CONTENTS: {}\".format(contents))\n json_contents = json.loads(contents)\n last_end_time_str = json_contents[\"end_time\"]\n except Exception as e:\n logging.error(f\"{e}\")\n\n return last_end_time_str\n\n\ndef publish_metrics(msg_list):\n \"\"\"Call the https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish\n using the googleapiclient to publish a message to Pub/Sub.\n The token and batch_id are included as attributes\n \"\"\"\n if len(msg_list) > 0:\n service = build(\"pubsub\", \"v1\", cache_discovery=True)\n topic_path = \"projects/{project_id}/topics/{topic}\".format(\n project_id=app_identity.get_application_id(), topic=config.PUBSUB_TOPIC\n )\n body = {\"messages\": msg_list}\n logging.debug(\"pubsub msg is {}\".format(body))\n response = (\n service.projects().topics().publish(topic=topic_path, body=body).execute()\n )\n logging.debug(\"response is {}\".format(response, sort_keys=True, indent=4))\n else:\n logging.debug(\"No pubsub messages to publish\")\n\n\ndef get_message_for_publish_metric(request, 
metadata):\n \"\"\"Build a message for the https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish\n using the googleapiclient to publish a message to Pub/Sub.\n The token and batch_id are included as attributes\n \"\"\"\n # logging.debug(\"sending message is {}\".format(json.dumps(request, sort_keys=True, indent=4)))\n\n data = json.dumps(request).encode(\"utf-8\")\n\n message = {\n \"data\": base64.b64encode(data).decode(),\n \"attributes\": {\n \"batch_id\": metadata[\"batch_id\"],\n \"token\": config.PUBSUB_VERIFICATION_TOKEN,\n \"batch_start_time\": metadata[\"batch_start_time\"],\n \"src_message_id\": metadata[\"message_id\"],\n },\n }\n # logging.debug(\"pubsub message is {}\".format(json.dumps(message, sort_keys=True, indent=4)))\n return message\n\n\ndef get_batch_id():\n \"\"\"Generate a unique id to use across the batches to uniquely identify each one\"\"\"\n return \"\".join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(32)\n )\n\n\ndef check_date_format(date_str):\n \"\"\"Check the date to ensure that it's in the proper format\"\"\"\n pattern = re.compile(r\"^\\d{4}-+\\d{2}-+\\d{2}T+\\d{2}:+\\d{2}:+\\d{2}.+\\d{1,}Z+$\")\n matched = pattern.match(date_str)\n return matched\n\n\ndef check_exclusions(metric):\n \"\"\"Check whether to exclude a metric based on the inclusions OR exclusions list.\n Note that this checks inclusions first.\n returns True for metrics to include\n returns False for metrics to exclude\n \"\"\"\n inclusions = config.INCLUSIONS\n if \"include_all\" in inclusions and inclusions[\"include_all\"] == config.ALL:\n # logging.debug(\"including based on include_all setting {},{}\".format(metric['type'],inclusions[\"include_all\"]))\n return True\n\n if \"metricKinds\" in inclusions:\n for inclusion in inclusions[\"metricKinds\"]:\n # logging.debug(\"inclusion check: {},{}\".format(metric['metricKind'],inclusion['metricKind']))\n if (metric.get(\"metricKind\") == inclusion[\"metricKind\"]) and (\n metric.get(\"valueType\") == inclusion[\"valueType\"]\n ):\n # logging.debug(\"including based on metricKind {},{} AND {},{}\".format(metric['metricKind'],inclusion['metricKind'],metric['valueType'],inclusion['valueType']))\n return True\n\n if \"metricTypes\" in inclusions:\n for inclusion in inclusions[\"metricTypes\"]:\n # logging.debug(\"inclusion metricTypes check: {},{}\".format(metric['type'],inclusion['metricType']))\n if metric.get(\"type\", \"\").find(inclusion[\"metricType\"]) != -1:\n # logging.debug(\"including based on metricType {},{}\".format(metric['type'],inclusion['metricType']))\n return True\n\n if \"metricTypeGroups\" in inclusions:\n for inclusion in inclusions[\"metricTypeGroups\"]:\n # logging.debug(\"inclusion metricTypes check: {},{}\".format(metric['type'],inclusion['metricTypeGroup']))\n if metric.get(\"type\", \"\").find(inclusion[\"metricTypeGroup\"]) != -1:\n logging.debug(\n \"including based on metricTypeGroups {},{}\".format(\n metric.get(\"type\", \"\"), inclusion[\"metricTypeGroup\"]\n )\n )\n return True\n\n exclusions = config.EXCLUSIONS\n if \"exclude_all\" in exclusions and exclusions[\"exclude_all\"] == config.ALL:\n # logging.debug(\"excluding based on exclude_all setting {},{}\".format(metric['type'],exclusions[\"exclude_all\"]))\n return False\n\n if \"metricKinds\" in exclusions:\n for exclusion in exclusions[\"metricKinds\"]:\n # logging.debug(\"exclusion check: {},{}\".format(metric['metricKind'],exclusion['metricKind']))\n if (metric.get(\"metricKind\") == 
exclusion[\"metricKind\"]) and (\n metric.get(\"valueType\") == exclusion[\"valueType\"]\n ):\n # logging.debug(\"excluding based on metricKind {},{} AND {},{}\".format(metric['metricKind'],exclusion['metricKind'],metric['valueType'],exclusion['valueType']))\n return False\n\n if \"metricTypes\" in exclusions:\n for exclusion in exclusions[\"metricTypes\"]:\n # logging.debug(\"exclusion metricTypes check: {},{}\".format(metric['type'],exclusion['metricType']))\n if metric.get(\"type\", \"\").find(exclusion[\"metricType\"]) != -1:\n # logging.debug(\"excluding based on metricType {},{}\".format(metric['type'],exclusion['metricType']))\n return False\n\n if \"metricTypeGroups\" in exclusions:\n for exclusion in exclusions[\"metricTypeGroups\"]:\n # logging.debug(\"exclusion metricTypeGroups check: {},{}\".format(metric['type'],exclusion['metricTypeGroup']))\n if metric.get(\"type\", \"\").find(exclusion[\"metricTypeGroup\"]) != -1:\n # logging.debug(\"excluding based on metricTypeGroup {},{}\".format(metric['type'],exclusion['metricTypeGroup']))\n return False\n return True\n\n\ndef get_metrics(project_id, next_page_token):\n \"\"\"Call the https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors/list\n using the googleapiclient to get all the metricDescriptors for the project\n \"\"\"\n\n service = build(\"monitoring\", \"v3\", cache_discovery=True)\n project_name = \"projects/{project_id}\".format(project_id=project_id)\n\n metrics = (\n service.projects()\n .metricDescriptors()\n .list(name=project_name, pageSize=config.PAGE_SIZE, pageToken=next_page_token)\n .execute()\n )\n\n logging.debug(\n \"project_id: {}, size: {}\".format(project_id, len(metrics[\"metricDescriptors\"]))\n )\n return metrics\n\n\ndef get_and_publish_metrics(message_to_publish, metadata):\n \"\"\"Publish the direct JSON results of each metricDescriptor as a separate Pub/Sub message\"\"\"\n\n stats = {}\n msgs_published = 0\n msgs_excluded = 0\n metrics_count_from_api = 0\n\n next_page_token = \"\"\n while True:\n json_msg_list = []\n pubsub_msg_list = []\n\n project_id = message_to_publish[\"project_id\"]\n metric_list = get_metrics(project_id, next_page_token)\n\n metrics_count_from_api += len(metric_list[\"metricDescriptors\"])\n for metric in metric_list[\"metricDescriptors\"]:\n logging.debug(\"Processing metric {} for publish\".format(metric))\n metadata[\"payload\"] = json.dumps(metric)\n metadata[\"error_msg_cnt\"] = 0\n\n message_to_publish[\"metric\"] = metric\n if check_exclusions(metric):\n pubsub_msg = get_message_for_publish_metric(\n message_to_publish, metadata\n )\n pubsub_msg_list.append(pubsub_msg)\n metadata[\"msg_written_cnt\"] = 1\n metadata[\"msg_without_timeseries\"] = 0\n msgs_published += 1\n else:\n # logging.debug(\"Excluded the metric: {}\".format(metric['name']))\n msgs_excluded += 1\n metadata[\"msg_written_cnt\"] = 0\n metadata[\"msg_without_timeseries\"] = 1\n\n # build a list of stats messages to write to BigQuery\n if config.WRITE_BQ_STATS_FLAG:\n json_msg = build_bigquery_stats_message(message_to_publish, metadata)\n json_msg_list.append(json_msg)\n\n # Write to pubsub if there is 1 or more\n logging.debug(\"Start publish_metrics\")\n publish_metrics(pubsub_msg_list)\n\n # write the list of stats messages to BigQuery\n if config.WRITE_BQ_STATS_FLAG:\n write_to_bigquery(json_msg_list)\n\n if \"nextPageToken\" in metric_list:\n next_page_token = metric_list[\"nextPageToken\"]\n else:\n break\n stats[\"msgs_published\"] = msgs_published\n 
stats[\"msgs_excluded\"] = msgs_excluded\n stats[\"metrics_count_from_api\"] = metrics_count_from_api\n\n return stats\n\n\ndef write_stats(stats, stats_project_id, batch_id):\n \"\"\"Write 3 custom monitoring metrics to the Monitoring API\"\"\"\n logging.debug(\"write_stats: {}\".format(json.dumps(stats)))\n service = build(\"monitoring\", \"v3\", cache_discovery=True)\n project_name = \"projects/{project_id}\".format(\n project_id=app_identity.get_application_id()\n )\n\n end_time = datetime.now()\n end_time_str = end_time.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n metric_type = \"custom.googleapis.com/stackdriver-monitoring-export/msgs-published\"\n body = {\n \"timeSeries\": [\n {\n \"metric\": {\n \"type\": metric_type,\n \"labels\": {\n \"batch_id\": batch_id,\n \"metrics_project_id\": stats_project_id,\n },\n },\n \"resource\": {\n \"type\": \"generic_node\",\n \"labels\": {\n \"project_id\": app_identity.get_application_id(),\n \"location\": \"us-central1-a\",\n \"namespace\": \"stackdriver-metric-export\",\n \"node_id\": \"list-metrics\",\n },\n },\n \"metricKind\": \"GAUGE\",\n \"valueType\": \"INT64\",\n \"points\": [\n {\n \"interval\": {\"endTime\": end_time_str},\n \"value\": {\"int64Value\": stats[\"msgs_published\"]},\n }\n ],\n }\n ]\n }\n\n metrics = (\n service.projects().timeSeries().create(name=project_name, body=body).execute()\n )\n logging.debug(\n \"wrote a response is {}\".format(json.dumps(metrics, sort_keys=True, indent=4))\n )\n\n body[\"timeSeries\"][0][\"metric\"][\n \"type\"\n ] = \"custom.googleapis.com/stackdriver-monitoring-export/msgs-excluded\"\n body[\"timeSeries\"][0][\"points\"][0][\"value\"][\"int64Value\"] = stats[\"msgs_excluded\"]\n metrics = (\n service.projects().timeSeries().create(name=project_name, body=body).execute()\n )\n logging.debug(\n \"response is {}\".format(json.dumps(metrics, sort_keys=True, indent=4))\n )\n\n body[\"timeSeries\"][0][\"metric\"][\n \"type\"\n ] = \"custom.googleapis.com/stackdriver-monitoring-export/metrics-from-api\"\n body[\"timeSeries\"][0][\"points\"][0][\"value\"][\"int64Value\"] = stats[\n \"metrics_count_from_api\"\n ]\n metrics = (\n service.projects().timeSeries().create(name=project_name, body=body).execute()\n )\n logging.debug(\n \"response is {}\".format(json.dumps(metrics, sort_keys=True, indent=4))\n )\n\n\ndef build_bigquery_stats_message(metric, metadata):\n processing_end_time = datetime.now()\n processing_end_time_str = processing_end_time.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n # Write the stats to the BigQuery stats tabledata\n bq_msg = {\n \"app_name\": \"list_metrics\",\n \"batch_id\": metadata[\"batch_id\"],\n \"message_id\": metadata[\"message_id\"],\n # \"src_message_id\": src_message_id,\n \"metric_type\": metric[\"metric\"][\"type\"],\n \"error_msg_cnt\": metadata[\"error_msg_cnt\"],\n \"msg_written_cnt\": metadata[\"msg_written_cnt\"],\n \"msg_without_timeseries\": metadata[\"msg_without_timeseries\"],\n \"payload\": metadata[\"payload\"],\n \"batch_start_time\": metadata[\"batch_start_time\"],\n \"processing_end_time\": processing_end_time_str,\n }\n json_msg = {\"json\": bq_msg}\n # logging.debug(\"json_msg {}\".format(json.dumps(json_msg, sort_keys=True, indent=4)))\n return json_msg\n\n\ndef write_to_bigquery(json_row_list):\n \"\"\"Write rows to the BigQuery stats table using the googleapiclient and the streaming insertAll method\n https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll\n \"\"\"\n # logging.debug(\"write_to_bigquery\")\n\n if len(json_row_list) > 
0:\n bigquery = build(\"bigquery\", \"v2\", cache_discovery=True)\n\n body = {\n \"kind\": \"bigquery#tableDataInsertAllRequest\",\n \"skipInvalidRows\": \"false\",\n \"rows\": json_row_list,\n }\n # logging.debug('body: {}'.format(json.dumps(body, sort_keys=True, indent=4)))\n\n response = (\n bigquery.tabledata()\n .insertAll(\n projectId=app_identity.get_application_id(),\n datasetId=config.BIGQUERY_DATASET,\n tableId=config.BIGQUERY_STATS_TABLE,\n body=body,\n )\n .execute()\n )\n # logging.debug(\"BigQuery said... = {}\".format(response))\n\n bq_msgs_with_errors = 0\n if \"insertErrors\" in response:\n if len(response[\"insertErrors\"]) > 0:\n logging.error(\"Error: {}\".format(response))\n bq_msgs_with_errors = len(response[\"insertErrors\"])\n else:\n logging.debug(\n \"By amazing luck, there are no errors, response = {}\".format(response)\n )\n logging.debug(\"bq_msgs_written: {}\".format(bq_msgs_with_errors))\n return response\n else:\n logging.debug(\"No BigQuery records to write\")\n return None\n\n\ndef write_input_parameters_to_bigquery(project_id, metadata, msg):\n \"\"\"Write rows to the BigQuery input parameters table using the\n googleapiclient and the streaming insertAll method\n https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll\n \"\"\"\n # logging.debug(\"write_input_parameters_to_bigquery\")\n\n bigquery = build(\"bigquery\", \"v2\", cache_discovery=True)\n\n body = {\n \"kind\": \"bigquery#tableDataInsertAllRequest\",\n \"skipInvalidRows\": \"false\",\n \"rows\": [\n {\n \"json\": {\n \"start_time\": msg[\"start_time\"],\n \"end_time\": msg[\"end_time\"],\n \"aggregation_alignment_period\": msg[\"aggregation_alignment_period\"],\n \"message_id\": metadata[\"message_id\"],\n \"project_list\": {\"project_id\": [project_id]},\n \"batch_id\": metadata[\"batch_id\"],\n \"batch_start_time\": metadata[\"batch_start_time\"],\n }\n }\n ],\n }\n # logging.debug('body: {}'.format(json.dumps(body, sort_keys=True, indent=4)))\n\n response = (\n bigquery.tabledata()\n .insertAll(\n projectId=app_identity.get_application_id(),\n datasetId=config.BIGQUERY_DATASET,\n tableId=config.BIGQUERY_PARAMS_TABLE,\n body=body,\n )\n .execute()\n )\n # logging.debug(\"BigQuery said... 
= {}\".format(response))\n\n bq_msgs_with_errors = 0\n if \"insertErrors\" in response:\n if len(response[\"insertErrors\"]) > 0:\n logging.error(\"Error: {}\".format(response))\n bq_msgs_with_errors = len(response[\"insertErrors\"])\n else:\n logging.debug(\n \"By amazing luck, there are no errors, response = {}\".format(response)\n )\n logging.debug(\"bq_msgs_written: {}\".format(bq_msgs_with_errors))\n return response\n\n\n@app.route(\"/push-handlers/receive_messages\", methods=[\"POST\"])\ndef receive_messages_handler():\n \"\"\"Handle the Pub/Sub push messages\n Validate the input and then process the message\n \"\"\"\n logging.debug(\"received message\")\n ret_val = \"\"\n ret_code = 200\n try:\n if not request.data:\n raise ValueError(\"No request data received\")\n envelope = json.loads(request.data.decode(\"utf-8\"))\n logging.debug(\"Raw pub/sub message: {}\".format(envelope))\n if \"message\" not in envelope:\n raise ValueError(\"No message in envelope\")\n if \"messageId\" in envelope[\"message\"]:\n logging.debug(\"messageId: {}\".format(envelope[\"message\"][\"messageId\"]))\n message_id = envelope[\"message\"].get(\"messageId\", \"\")\n if \"publishTime\" in envelope[\"message\"]:\n publish_time = envelope[\"message\"][\"publishTime\"]\n if \"data\" not in envelope[\"message\"]:\n raise ValueError(\"No data in message\")\n payload = base64.b64decode(envelope[\"message\"][\"data\"])\n logging.debug(\"payload: {} \".format(payload))\n data = json.loads(payload.decode(\"utf-8\"))\n logging.debug(\"data: {} \".format(data))\n # Add any of the parameters to the pubsub message to send\n message_to_publish = {}\n # if the pubsub PUBSUB_VERIFICATION_TOKEN isn't included or doesn't match, don't continue\n if \"token\" not in data:\n raise ValueError(\"token missing from request\")\n if not data[\"token\"] == config.PUBSUB_VERIFICATION_TOKEN:\n raise ValueError(\n \"token from request doesn't match, received: {}\".format(data[\"token\"])\n )\n # if the project has been passed in, use that, otherwise use default project of App Engine app\n if \"project_id\" not in data:\n project_id = project_id = app_identity.get_application_id()\n else:\n project_id = data[\"project_id\"]\n message_to_publish[\"project_id\"] = project_id\n # if the alignment_period is supplied, use that, otherwise use default\n if \"aggregation_alignment_period\" not in data:\n aggregation_alignment_period = config.AGGREGATION_ALIGNMENT_PERIOD\n else:\n aggregation_alignment_period = data[\"aggregation_alignment_period\"]\n pattern = re.compile(r\"^\\d{1,}s+$\")\n matched = pattern.match(aggregation_alignment_period)\n if not matched:\n raise ValueError(\n \"aggregation_alignment_period needs to be digits followed by an 's' such as 3600s, received: {}\".format(\n aggregation_alignment_period\n )\n )\n alignment_seconds = int(\n aggregation_alignment_period[: len(aggregation_alignment_period) - 1]\n )\n if alignment_seconds < 60:\n raise ValueError(\n \"aggregation_alignment_period needs to be more than 60s, received: {}\".format(\n aggregation_alignment_period\n )\n )\n message_to_publish[\n \"aggregation_alignment_period\"\n ] = aggregation_alignment_period\n # get the App Engine default bucket name to store a GCS file with last end_time\n bucket_name = os.environ.get(\n \"BUCKET_NAME\", app_identity.get_default_gcs_bucket_name()\n )\n # Calculate the end_time first\n if \"end_time\" not in data:\n # the end_time should be set here for all metrics in the batch\n # setting later in the architecture would mean that 
the end_time may vary\n end_time = datetime.now()\n end_time_str = end_time.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n else:\n end_time_str = data[\"end_time\"]\n matched = check_date_format(end_time_str)\n if not matched:\n raise ValueError(\n \"end_time needs to be in the format 2019-02-08T14:00:00.311635Z, received: {}\".format(\n end_time_str\n )\n )\n message_to_publish[\"end_time\"] = end_time_str\n # if the start_time is supplied, use the previous end_time\n sent_in_start_time_flag = False\n if \"start_time\" not in data:\n start_time_str = get_last_end_time(project_id, bucket_name)\n # if the file hasn't been found, then start 1 alignment period in the past\n if not start_time_str:\n start_time_str = set_last_end_time(\n project_id, bucket_name, end_time_str, (alignment_seconds * -1)\n )\n # raise ValueError(\"start_time couldn't be read from GCS, received: {}\".format(start_time_str))\n logging.debug(\n \"start_time_str: {}, end_time_str: {}\".format(\n start_time_str, end_time_str\n )\n )\n else:\n sent_in_start_time_flag = True\n start_time_str = data[\"start_time\"]\n matched = check_date_format(start_time_str)\n if not matched:\n raise ValueError(\n \"start_time needs to be in the format 2019-02-08T14:00:00.311635Z, received: {}\".format(\n start_time_str\n )\n )\n message_to_publish[\"start_time\"] = start_time_str\n # Create a unique identifier for this batch\n batch_id = get_batch_id()\n logging.debug(\"batch_id: {}\".format(batch_id))\n # Publish the messages to Pub/Sub\n logging.info(\n \"Running with input parameters - {}\".format(\n json.dumps(message_to_publish, sort_keys=True, indent=4)\n )\n )\n metadata = {\n \"batch_id\": batch_id,\n \"message_id\": message_id,\n \"batch_start_time\": publish_time,\n }\n if config.WRITE_BQ_STATS_FLAG:\n write_input_parameters_to_bigquery(project_id, metadata, message_to_publish)\n stats = get_and_publish_metrics(message_to_publish, metadata)\n logging.debug(\"Stats are {}\".format(json.dumps(stats)))\n \"\"\" Write the late end_time_str to GCS to use in a subsequent run,\n but only if the start_time was not sent in. 
If the start_time is \n supplied, then we consider that an ad hoc run, and won't set the\n previous end_time\n \"\"\"\n if not sent_in_start_time_flag:\n set_last_end_time(project_id, bucket_name, end_time_str, 1)\n # Write the stats to custom monitoring metrics\n if config.WRITE_MONITORING_STATS_FLAG:\n write_stats(stats, project_id, batch_id)\n ret_val = str(stats)\n except Exception as e:\n logging.error(\"Error: {}\".format(e))\n ret_val = str(e)\n ret_code = 500\n return Response(ret_val, status=ret_code, mimetype=\"text/plain\")\n","repo_name":"GoogleCloudPlatform/stackdriver-metrics-export","sub_path":"list_metrics/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25269,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"67"} +{"seq_id":"3861827487","text":"\"\"\"Balance command-line utility configuration is defined here.\"\"\"\nfrom __future__ import annotations\n\nimport datetime\nimport sys\nimport traceback\n\nimport click\nimport pandas as pd\nfrom loguru import logger\nfrom sqlalchemy import create_engine\n\nfrom population_restorator.divider import divide_houses, save_houses_distribution_to_db\nfrom population_restorator.models.parse.social_groups import parse_distribution\nfrom population_restorator.utils import read_file\nfrom population_restorator.utils.data_saver import to_file\n\nfrom .main_group import main\n\n\n@main.command(\"divide\")\n@click.option(\n \"--houses\",\n \"-h\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Path to the social groups json file with 'social_groups' attribute, each object having 'ages_men' and\"\n \" 'ages_women' lists and optional 'total' 'is_additional' bool (default=false) attributes\",\n required=True,\n)\n@click.option(\n \"--social_groups\",\n \"-s\",\n type=click.Path(exists=True, dir_okay=False),\n help=\"Path to the social groups json file with 'social_groups' attribute, each object having 'ages' list\"\n \" and 'total' attributes with optional 'is_additional' bool (default=false)\",\n required=True,\n)\n@click.option(\n \"--output\",\n \"-o\",\n type=click.Path(dir_okay=False),\n help=\"Filename for a SQLite database file to store buildings with people divided by age, sex and social group\",\n default=\"houses_divided.sqlite\",\n show_default=True,\n)\n@click.option(\n \"--output_ids\",\n \"-oi\",\n type=click.Path(dir_okay=False),\n help=\"Filename for a copy of input houses file with 'id' attribute added (created only if it was missing)\",\n default=None,\n show_default=\"_with_ids.csv\",\n)\n@click.option(\n \"--year\",\n \"-y\",\n type=int,\n help=\"Year to save database as\",\n default=None,\n show_default=\"\",\n)\n@click.option(\n \"--verbose\", \"-v\", is_flag=True, help=\"Increase logger verbosity to DEBUG and print some additional stataments\"\n)\ndef divide( # pylint: disable=too-many-arguments,too-many-locals\n houses: str,\n social_groups: str,\n output: str,\n output_ids: str | None,\n year: int | None,\n verbose: bool,\n) -> None:\n \"\"\"Divide dwellings people by sex, age and social group\n\n Model houses population sex, age and social group parameters based on given social_groups distribution.\n The distribution is given in json format as an object with single 'social_groups' key containing a list of social\n group objects, each of them having keys: 'name', 'ages_men', 'ages_women', and optionally 'total' and\n 'is_additional' with default value of False.\n\n 'total' attribute can be absolute (number of people) or relative (number of people 
of social group divided by\n    total). Though, if the additional social groups distribution is set in absolute numbers, the primary social\n    groups' 'total' must also be set in absolute form.\n    The 'ages_men' and 'ages_women' lists can likewise contain absolute or relative numbers, but absolute values must\n    sum up to 'total' if it is also given in absolute form.\n    \"\"\"\n\n    if not verbose:\n        logger.remove()\n        logger.add(sys.stderr, level=\"INFO\")\n\n    if year is None:\n        year = datetime.datetime.now().year\n        logger.opt(colors=True).info(\"Using <cyan>{}</cyan> as a year to save a forecast\", year)\n\n    try:\n        houses_df = read_file(houses)\n    except Exception as exc:  # pylint: disable=broad-except\n        logger.critical(\"Exception on reading input data: {!r}\", exc)\n        if verbose:\n            traceback.print_exc()\n        sys.exit(1)\n\n    id_added = \"id\" not in houses_df.columns\n    if id_added:\n        houses_df[\"id\"] = range(houses_df.shape[0])\n        if output_ids is None:\n            output_ids = f\"{houses[:houses.rindex('.')]}_with_ids.csv\"\n        logger.warning(f\"Adding identifier column 'id' and saving file with ids to {output_ids}\")\n        to_file(houses_df.set_index(\"id\"), output_ids)\n\n    houses_df = houses_df.set_index(\"id\")\n\n    logger.info(\"Parsing sex-age-social_groups distribution from file {}\", social_groups)\n    distribution = parse_distribution(social_groups)\n\n    logger.info(\"Dividing houses ({}) population\", houses_df.shape[0])\n    distribution_series = pd.Series(\n        divide_houses(houses_df[\"population\"].astype(int).to_list(), distribution), index=houses_df.index\n    )\n\n    logger.info(\"Saving results to {}\", output)\n    engine = create_engine(f\"sqlite:///{output}\")\n    save_houses_distribution_to_db(\n        engine.connect(),\n        distribution_series,\n        houses_df[\"living_area\"] if \"living_area\" in houses_df.columns else houses_df[\"population\"],\n        distribution,\n        year,\n        verbose,\n    )\n","repo_name":"kanootoko/population-restorator","sub_path":"population_restorator/click/divider.py","file_name":"divider.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17678802671","text":"from tkinter import * \n\nroot = Tk() \n\n# Title bar icon\nroot.iconbitmap(r'D:\college project\gui\image\icon.ico')\n# Defining the window size\nroot.geometry(\"200x150\")\n# Fixing the window size\nroot.resizable(False, False)\n# Title bar title\nroot.title('cms project')\n\ndef submit():\n    # create a channel object registered under the name typed into the entry\n    globals()[E1.get()]=channel(E1.get())\n    \n\n\nchannelmake=Button(root,text=\"new channel\",command=submit)\nchannelmake.pack()\n\nchname=Label(root,text=\"Enter Channel name:\")\nchname.pack()\n\nE1 = Entry(root)\nE1.pack()\n\n\n\nclass channel:\n    def __init__(self,name):\n        self.name= name \n        globals()[self.name+\"btn\"] = Button(root, text=self.name)\n        globals()[self.name+\"btn\"].pack()\n    \n\n\n\n\n\nroot.mainloop() ","repo_name":"nithinsikinam/collegeproject","sub_path":"gui/nithinplannedhs.py","file_name":"nithinplannedhs.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"41107196268","text":"import sys\nsys.path.insert(0,'/home/pi/project0/ADserial')\nsys.path.insert(0,'/home/pi/project0/Rotator')\nimport rotator as rt\nimport ADserial as ADser\n\nimport math\nimport time\nimport matplotlib.pyplot as plt\n
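# The main loop below implements a simple PI (proportional-integral)\n# controller: each cycle it averages the photodiode samples, computes\n# error = target - average, accumulates the error into an integral term,\n# and steps the rotator by output = KP*error + KI*integral. A minimal\n# sketch of one update step (names match the loop below; with KI = 0\n# this reduces to a pure proportional controller):\n#\n#     error = target - dataave\n#     integral = integral + error\n#     output = KP * error + KI * integral\n#\nimport RPi.GPIO as 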
GPIO\n\ntarget=200\n\nKI=0\nKP=0.065\n\nrt.setup()\n\n\n#----------------------------------------------------------------\nmainloopflag=1\nreadloopflag=0\ncommandflag=0\n\nGPIO.setmode(GPIO.BOARD)\n\nchan=3\n\nGPIO.setup(chan,GPIO.IN)\n\ntotangle=0\n\nerror=0\nintegral=0\noutput=0\n\nwhile mainloopflag==1:\n    datalist=[]\n    ADser.flush()\n    \n    GPIO.wait_for_edge(3,GPIO.RISING)\n    t1=time.time()\n    \n    while (time.time()-t1)<0.001:\n        data=ADser.rd()\n        datalist.append(data)\n        \n    idatalist=[]\n    lend=len(datalist)\n    print('number of measurements received:',lend)\n    \n    # convert the raw serial readings to integers and average them\n    i=0\n    while(i<lend):\n        idatalist.append(int(datalist[i]))\n        i=i+1\n    dataave=sum(idatalist)/lend\n    \n    # PI update: step the rotator towards the target power\n    error=target-dataave\n    integral=integral+error\n    output=KP*error+KI*integral\n    totangle=totangle+output\n    \n    if dataave>0:\n        print('Power=:',dataave,\" Moving:+;Angle=\",output)\n        rt.moverel(output)\n    \n    if totangle> 90 or totangle<-90:\n        print(\"Request unattainable: input power too low.\")\n        mainloopflag=0\n    \n\n\nGPIO.cleanup()\n#-----------------------------------------------------------------\n    \n","repo_name":"xjiang103/PowerStablizer","sub_path":"project0/Control2.21.py","file_name":"Control2.21.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
+{"seq_id":"1777755367","text":"# -*- coding: utf-8 -*-\n\"\"\"WAVE DPCTF QR code decoder\n\nTranslates the detected QR codes into the different QR code types.\nDPCTF specific QR codes - MezzanineDecodedQr, TestStatusDecodedQr and PreTestDecodedQr\n\nThe Software is provided to you by the Licensor under the License, as\ndefined below, subject to the following condition.\n\nWithout limiting other conditions in the License, the grant of rights under\nthe License will not include, and the License does not grant to you, the\nright to Sell the Software.\n\nFor purposes of the foregoing, “Sell” means practicing any or all of the\nrights granted to you under the License to provide to third parties, for a\nfee or other consideration (including without limitation fees for hosting\nor consulting/ support services related to the Software), a product or\nservice whose value derives, entirely or substantially, from the\nfunctionality of the Software. 
Any license notice or attribution required\nby the License must also include this Commons Clause License Condition\nnotice.\n\nSoftware: WAVE Observation Framework\nLicense: Apache 2.0 https://www.apache.org/licenses/LICENSE-2.0.txt\nLicensor: Consumer Technology Association\nContributor: Eurofins Digital Product Testing UK Limited\n\"\"\"\nimport json\nimport logging\nimport re\nfrom datetime import datetime\nfrom fractions import Fraction\n\nfrom qr_recognition.qr_decoder import DecodedQr, QrDecoder\n\nlogger = logging.getLogger(__name__)\n\n_mezzanine_qr_data_re = re.compile(\n    r\"(.+);(\\d{2}:[0-6][0-9]:[0-6][0-9].\\d{3});(\\d{7});([0-9.]+)\"\n)\n\n\nclass MezzanineDecodedQr(DecodedQr):\n    data: str\n    \"\"\"qr code string\"\"\"\n    location: list\n    \"\"\"qr code location\"\"\"\n    detection_count: int\n    \"\"\"qr code detection count\"\"\"\n\n    \"\"\"A decoded QR code from Mezzanine content\n    ID;HH:MM:SS.MMM;<frame number>;<frame rate>\n    \"\"\"\n    content_id: str\n    \"\"\"The content id encoded in this QR code\"\"\"\n    media_time: float\n    \"\"\"The media time encoded in this QR code\"\"\"\n    frame_number: int\n    \"\"\"The frame number encoded in this QR code\"\"\"\n    frame_rate: Fraction\n    \"\"\"The frame rate encoded in this QR code\"\"\"\n\n    first_camera_frame_num: int\n    \"\"\"recorded camera frame number that the QR code is detected on\"\"\"\n    last_camera_frame_num: int\n    \"\"\"recorded camera frame number that the QR code last appears on\"\"\"\n\n    def __init__(\n        self,\n        data: str,\n        location: list,\n        detection_count: int,\n        content_id: str,\n        media_time: float,\n        frame_number: int,\n        frame_rate: Fraction,\n        camera_frame_num: int,\n    ):\n        super().__init__(data, location)\n        self.data = data\n        self.location = location\n        self.detection_count = detection_count\n        self.content_id = content_id\n        self.media_time = media_time\n        self.frame_number = frame_number\n        self.frame_rate = frame_rate\n        self.first_camera_frame_num = camera_frame_num\n        self.last_camera_frame_num = camera_frame_num\n\n\nclass TestStatusDecodedQr(DecodedQr):\n    data: str\n    \"\"\" qr code string\"\"\"\n    location: list\n    \"\"\"qr code location\"\"\"\n\n    \"\"\"A decoded QR code for Test Runner status\n    QR code in json format contain following info\n    \"\"\"\n    status: str\n    last_action: str\n    current_time: float\n    delay: int\n\n    camera_frame_num: int\n    \"\"\"recorded camera frame number that the QR code is detected on\"\"\"\n\n    def __init__(\n        self,\n        data: str,\n        location: list,\n        status: str,\n        last_action: str,\n        current_time: float,\n        delay: int,\n        camera_frame_num: int,\n    ):\n        super().__init__(data, location)\n        self.data = data\n        self.location = location\n        self.status = status\n        self.last_action = last_action\n        self.current_time = current_time\n        self.delay = delay\n        self.camera_frame_num = camera_frame_num\n\n\nclass PreTestDecodedQr(DecodedQr):\n    data: str\n    \"\"\" qr code string\"\"\"\n    location: list\n    \"\"\"qr code location\"\"\"\n\n    \"\"\"A decoded QR code for pre test\n    QR code in json format contain following info\n    \"\"\"\n    session_token: str\n    \"\"\"session token encoded in the test runner QR code.\n    \"\"\"\n    test_id: str\n    \"\"\"test id encoded in the test runner QR code.\n    \"\"\"\n\n    camera_frame_num: int\n    \"\"\"recorded camera frame number that the QR code is detected on\"\"\"\n\n    def __init__(\n        self,\n        data: str,\n        location: list,\n        session_token: str,\n        test_id: str,\n        camera_frame_num: int,\n    ):\n        super().__init__(data, location)\n        self.data = data\n        self.location = location\n        self.session_token = session_token\n        self.test_id = test_id\n        
self.camera_frame_num = camera_frame_num\n\n\nclass DPCTFQrDecoder(QrDecoder):\n    @staticmethod\n    def translate_qr_test_runner(\n        data: str, location: list, json_data, camera_frame_num: int\n    ) -> DecodedQr:\n        \"\"\"translate the different types of test runner qr code\"\"\"\n        code = DecodedQr(\"\", [])\n\n        try:\n            code = TestStatusDecodedQr(\n                data,\n                location,\n                json_data[\"s\"],\n                json_data[\"a\"],\n                float(json_data[\"ct\"]),\n                int(json_data[\"d\"]),\n                camera_frame_num,\n            )\n        except Exception:\n            try:\n                code = TestStatusDecodedQr(\n                    data,\n                    location,\n                    json_data[\"s\"],\n                    json_data[\"a\"],\n                    0,\n                    int(json_data[\"d\"]),\n                    camera_frame_num,\n                )\n            except Exception:\n                try:\n                    code = TestStatusDecodedQr(\n                        data,\n                        location,\n                        json_data[\"s\"],\n                        json_data[\"a\"],\n                        0,\n                        0,\n                        camera_frame_num,\n                    )\n                except Exception:\n                    try:\n                        code = PreTestDecodedQr(\n                            data,\n                            location,\n                            json_data[\"session_token\"],\n                            json_data[\"test_id\"],\n                            camera_frame_num,\n                        )\n                    except Exception:\n                        logger.error(f\"Unrecognised QR code detected: {data}\")\n\n        return code\n\n    @staticmethod\n    def media_time_str_to_ms(media_time_str: str) -> float:\n        \"\"\"Change media time string to ms\n        return media time from mezzanine QR code in milliseconds\n        \"\"\"\n        media_time_datetime = datetime.strptime(media_time_str, \"%H:%M:%S.%f\")\n\n        ms = media_time_datetime.microsecond / 1000\n        s_to_ms = media_time_datetime.second * 1000\n        min_to_ms = media_time_datetime.minute * 60 * 1000\n        h_to_ms = media_time_datetime.hour * 60 * 60 * 1000\n        media_time = ms + s_to_ms + min_to_ms + h_to_ms\n\n        return media_time\n\n    @staticmethod\n    def frame_rate_str_to_fraction(frame_rate_str: str) -> Fraction:\n        \"\"\"Convert a frame rate string to a Fraction.\n        Fractional frame rates are looked up in a map so that the accurate number is used.\"\"\"\n        frame_rate_map = {}\n        with open(\"frame_rate_map.json\") as f:\n            frame_rate_map = json.load(f)\n        try:\n            res = frame_rate_map[frame_rate_str].split(\"/\")\n            frame_rate = Fraction(int(res[0]), int(res[1]))\n        except Exception:\n            frame_rate = Fraction(float(frame_rate_str))\n        return frame_rate\n\n    def translate_qr(\n        self, data: str, location: list, camera_frame_num: int\n    ) -> DecodedQr:\n        \"\"\"Given a QR code as reported by pyzbar, parse the data and convert it to\n        the format we use.\n\n        Returns the translated QR code, or an empty DecodedQr if it's not a valid QR code.\n\n        Mezzanine QR codes are matched first, then test status, then the pre-test QR code.\n        \"\"\"\n        code = DecodedQr(\"\", [])\n\n        match = _mezzanine_qr_data_re.match(data)\n        if match:\n            # matches a mezzanine signature so decode it as such\n            media_time = DPCTFQrDecoder.media_time_str_to_ms(match.group(2))\n            frame_rate = DPCTFQrDecoder.frame_rate_str_to_fraction(match.group(4))\n            code = MezzanineDecodedQr(\n                data,\n                location,\n                1,\n                match.group(1),\n                media_time,\n                int(match.group(3)),\n                frame_rate,\n                camera_frame_num,\n            )\n        else:\n            try:\n                json_data = json.loads(data)\n                code = DPCTFQrDecoder.translate_qr_test_runner(\n                    data, location, json_data, camera_frame_num\n                )\n            except json.decoder.JSONDecodeError as e:\n                logger.error(\n                    f\"Unrecognised QR code JSON detected in '{data}'. 
JSON err: {e}\"\n                )\n\n        return code\n","repo_name":"cta-wave/device-observation-framework","sub_path":"dpctf_qr_decoder.py","file_name":"dpctf_qr_decoder.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"72998604054","text":"import numpy as np\nfrom text import *\nimport time\nfrom sympy import symbols, solve, Add, Eq, Symbol\n\n\ndef prob_choice(counted_elements):\n    \"\"\"\n    Function to choose with probability based on number of occurrences left\n    \"\"\"\n    raw_p = counted_elements / np.max(counted_elements)\n\n    exprs = []\n    for i in raw_p:\n        exprs.append(Symbol(\"x\") * i / len(raw_p))\n\n    ex = Eq(1, Add(*exprs))\n    print(ex)\n\n    sol = solve(ex)\n\n    ar = [(v * sol[0]) / len(raw_p) for v in raw_p]\n\n    return ar\n\n\ndef rem_chosen(val_left, current_chosen):\n    \"\"\"\n    Function to remove one occurrence of an integer in a numpy array\n    \"\"\"\n    where_chosen = np.argwhere(val_left == current_chosen).flatten()\n    remove_one_chosen = np.random.choice(where_chosen)\n    val_left = np.delete(val_left, remove_one_chosen)\n    return val_left\n\n\ndef checkTwoD(ordered_array, current_chosen, i, coords):\n    \"\"\"\n    Function to check whether a chosen position is valid in a 2-D grid (n x n)\n    \"\"\"\n    previous_chosen = ordered_array[-int(i)]\n    previous_coords = coords[str(int(previous_chosen))]\n    current_coords = coords[str(int(current_chosen))]\n\n    # print(previous_chosen)\n    # print(previous_coords)\n    # print(current_coords)\n    # time.sleep(1)\n\n    row_diff = abs(current_coords[0] - previous_coords[0])\n    column_diff = abs(current_coords[1] - previous_coords[1])\n\n    if (\n        row_diff != column_diff\n        and any(c == 1 for c in [row_diff, column_diff])\n        and any(c == 0 for c in [row_diff, column_diff])\n    ):\n        print(f\"\\nInvalid choice at position {-int(i)}\\n\")\n        return False\n    elif previous_chosen == current_chosen[0]:\n        print(f\"\\nInvalid choice at position {-int(i)}\\n\")\n        return False\n    else:\n        print(f\"\\nValid choice at position {-int(i)}\\n\")\n        return True\n\n\ndef checkLinear(ordered_array, current_chosen, i, coords=None):\n    \"\"\"\n    Function to check whether a chosen position is valid in a line\n    \"\"\"\n    try:\n        previous_chosen = ordered_array[-int(i)]\n        # print('previous_chosen', previous_chosen)\n        # print('current_chosen', current_chosen)\n\n        if previous_chosen == current_chosen[0]:\n            # print(f'\\nInvalid choice at position {-int(i)}\\n')\n            return False\n        else:\n            # print(f'\\nValid choice at position {-int(i)}\\n')\n            return True\n    except IndexError:\n        return True\n\n\ndef expRand(init_rep, func, restart=100, coor_cells=None):\n    \"\"\"\n    Function to run the constrained randomisation until it converges\n    \"\"\"\n    while True:\n        final_order = []\n        final_order = randomise_constraints(\n            init_rep, final_order, 0, func, restart, limit=2, coords=coor_cells\n        )\n        if len(final_order) < len(init_rep):\n            printme(\"Didn't converge...\")\n        else:\n            # printme('Constraint randomisation done')\n            break\n    return final_order\n\n\ndef randomise_constraints(\n    val_left, ordered_array, count, func, restart, limit=2, coords=None\n):\n    \"\"\"\n    Recursive function to randomise with constraints\n    \"\"\"\n    count += 1\n    # print(count)\n    if len(val_left) == 0:\n        printme(\"Constraint randomisation done...\")\n\n    elif count > restart:\n        print(count)\n        print(len(val_left))\n        print(len(ordered_array))\n        # time.sleep(2)\n        return False\n\n    elif len(ordered_array) == 0:\n        printme(\"First value in...\")\n
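        # Draw the first element uniformly at random from the values that are\n        # still available. (prob_choice() above sketches a frequency-weighted\n        # alternative; the commented-out block in the else branch below shows\n        # how it would be wired in, but the uniform draw is what is used.)\n        current_chosen = np.random.choice(val_left, 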
1, replace=False)\n        ordered_array.append(int(current_chosen))\n        val_left = rem_chosen(val_left, current_chosen)\n        randomise_constraints(\n            val_left, ordered_array, count, func, restart, limit, coords\n        )\n\n    else:\n        # unique_elements, counts_elements = np.unique(val_left, return_counts=True)\n        # print(unique_elements, counts_elements)\n        # probs = prob_choice(counts_elements)\n        # current_chosen = np.random.choice(unique_elements, 1, replace=False, p=probs)\n\n        current_chosen = np.random.choice(val_left, 1, replace=False)\n\n        backwards = []\n        for i in np.arange(1, limit + 0.1, 1):\n            if len(ordered_array) < i:\n                break\n\n            current_check = func(ordered_array, current_chosen, i, coords)\n            backwards.append(current_check)\n\n        if np.all(backwards):\n            print(\"Another value in...\")\n            ordered_array.append(int(current_chosen))\n            val_left = rem_chosen(val_left, current_chosen)\n            randomise_constraints(\n                val_left, ordered_array, count, func, restart, limit, coords\n            )\n        else:\n            randomise_constraints(\n                val_left, ordered_array, count, func, restart, limit, coords\n            )\n\n    return ordered_array\n","repo_name":"iezqrom/tools-phd-ier","sub_path":"rand_cons.py","file_name":"rand_cons.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37715738490","text":"import bisect\nN,Q=map(int, input().split())\narray=list(map(int, input().split()))\narray.sort()\ndic={}\nfor i,a in enumerate(array):\n    dic[a]=i\nmemo=[sum(array)-array[0]*N]\nfor i in range(1,N):\n    d=array[i]-array[i-1]\n    memo.append(memo[-1]+d*i-d*(N-i))\n# print(array)\n# print(memo)\nfor _ in range(Q):\n    x=int(input())\n    if x in dic:\n        print(memo[dic[x]])\n    elif x<array[0]:\n        print(memo[0]+(array[0]-x)*N)\n    elif x>array[-1]:\n        print(memo[-1]+(x-array[-1])*N)\n    else:\n        index=bisect.bisect(array,x)\n        p=array[index]-x\n        q=x-array[index-1]\n        d=memo[index]-memo[index-1]\n        ret=memo[index-1]+d*q//(p+q)\n        print(ret)\n\n","repo_name":"mfujiwara/atcoder-ruby","sub_path":"ABC/abc255/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37850615020","text":"from sqlalchemy import Column, Table\nfrom sqlalchemy.sql.sqltypes import Integer, String\nfrom config.database import meta, engine\n\nmexico_cities = Table(\n    \"mexico_cities\",\n    meta,\n    Column(\"id\", Integer, primary_key= True, autoincrement= True),\n    Column(\"name\", String(50))\n)\n\nmeta.create_all(engine)","repo_name":"GerardoVR7/TricksForTrips","sub_path":"service/models/mexico_cities.py","file_name":"mexico_cities.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31747457083","text":"from collections import deque\n\nclass aho:\n    def __init__(self):\n        self.go = {}\n        self.output = []\n        self.breaks = None\n\n\ndef aho_trie(list1):\n    root = aho()\n\n    for way in list1:\n        cur = root\n        for i in way:\n            cur = cur.go.setdefault(i, aho())\n\n        cur.output.append(way)\n\n    return root\n\ndef aho_set_failure_links(list1):\n    root = aho_trie(list1)\n    queue = deque()\n\n    for child in root.go.values():\n        queue.append(child)\n        child.breaks = root\n\n    while len(queue) > 0:\n        right_child = queue.popleft()\n\n        for clue, uniquechild in right_child.go.items():\n            queue.append(uniquechild)\n            firstpoint = right_child.breaks\n\n            while firstpoint != None and clue not in firstpoint.go:\n                firstpoint = firstpoint.breaks\n
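            # Failure link: walk up the existing failure links until a node\n            # with an outgoing edge for this character is found; the child's\n            # failure link then points to that node's matching child (or to\n            # the root if no such node exists). Merging the outputs means\n            # every pattern ending at the linked node is also reported here.\n            uniquechild.breaks = firstpoint.go[clue] if 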
firstpoint else root\n            uniquechild.output += uniquechild.breaks.output\n\n    return root\n\ndef aho_search(y, root, call):  # searching the input\n    point = root\n\n    for i in range(len(y)):\n        while point != None and y[i] not in point.go:\n            point = point.breaks\n        if point == None:\n            point = root\n            continue\n        point = point.go[y[i]]\n        for design in point.output:\n            call(i - len(design) + 1, design)\n\ndef found(loc, list1):  # printing the results\n    print(f\"The Design found at position {loc}, found pattern: {list1}\")\n\nlist1 = ['a', 'ab', 'aa', 'abc', 'bc', 'bca', 'cc', 'c', 'cba', 'cab']\ny = \"abcbaacab\"\nmain = aho_set_failure_links(list1)\naho_search(y, main, found)\n\n","repo_name":"FloraQinQvQ/Aho-Corasick-Python","sub_path":"naive_aho_corasick.py","file_name":"naive_aho_corasick.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"40332374049","text":"import random\nimport characters\n\n\ndef create_board(number_of_rooms, width, height):\n    '''\n    Creates a new game board based on input parameters.\n    \n    Args:\n        int: The number of rooms on the board\n        int: The width of each room\n        int: The height of each room\n    \n    Returns:\n        list: Game board\n    '''\n    board = []\n    for _ in range(number_of_rooms):\n        room = []\n        \n        top_wall = [\"🌫️\"] * (width + 2)\n        room.append(top_wall)\n        \n        for _ in range(height):\n            row = ['🌫️']\n            for _ in range(width):\n                row.append(\" \")\n            row.append(\"🌫️\")\n            room.append(row)\n        \n        bottom_wall = [\"🌫️\"] * (width + 2)\n        room.append(bottom_wall)\n        
board.append(room)\n \n return board\n\n\ndef generate_random_gate(board, mid_board, last_wall_choice):\n width = len(board[0])\n height = len(board)\n \n wall_choices = [\"top\", \"bottom\", \"left\", \"right\"]\n\n if last_wall_choice in wall_choices:\n wall_choices.remove(last_wall_choice)\n\n wall_choice = random.choice(wall_choices)\n \n if wall_choice == \"top\":\n gate_x = random.randint(1, width - 2)\n gate_y = 0\n gate_x1 = gate_x\n gate_y1 = gate_y + height - 1\n elif wall_choice == \"bottom\":\n gate_x = random.randint(1, width - 2)\n gate_y = height - 1\n gate_x1 = gate_x\n gate_y1 = gate_y - height + 1\n elif wall_choice == \"left\":\n gate_x = 0\n gate_y = random.randint(1, height - 2)\n gate_x1 = gate_x + width -1\n gate_y1 = gate_y\n elif wall_choice == \"right\":\n gate_x = width - 1\n gate_y = random.randint(1, height - 2)\n gate_x1 = gate_x - width + 1\n gate_y1 = gate_y\n\n board[gate_y][gate_x] = '🔒'\n mid_board[gate_y1][gate_x1] = '🔒'\n return board, mid_board, wall_choice, gate_x, gate_y, gate_x1, gate_y1\n\n\ndef get_gates(board):\n board[0], board[1], last_wall_choice, gate_x_0_1, gate_y_0_1, gate_x_1_0, gate_y_1_0 = generate_random_gate(board[0], board[1], None)\n board[2], board[1], last_wall_choice, gate_x_2_1, gate_y_2_1, gate_x_1_2, gate_y_1_2 = generate_random_gate(board[2], board[1], last_wall_choice)\n gates = [[gate_x_0_1, gate_y_0_1], [gate_x_1_0, gate_y_1_0], [gate_x_1_2,gate_y_1_2], [gate_x_2_1,gate_y_2_1]]\n\n return gates\n\n\ndef check_free_space(board):\n free_spaces = [[] for _ in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[i])):\n for k in range(len(board[i][j])):\n if board[i][j][k] == \" \":\n free_spaces[i].append([j, k])\n\n return free_spaces\n\n\ndef check_boss_neighborhood(boss, player):\n neighborhood = []\n min_around_boss = -3\n max_around_boss = 3\n\n for x in range(min_around_boss, max_around_boss + 1):\n for y in range(min_around_boss, max_around_boss + 1):\n if min_around_boss in (x, y) or max_around_boss in (x, y):\n neighborhood.append([x, y])\n\n for position in neighborhood:\n x = boss[\"position x\"] + position[0]\n y = boss[\"position y\"] + position[1]\n if [x, y] == [player[\"position x\"], player[\"position y\"]]:\n return True\n return False\n\n\ndef check_player_neighborhood():\n neighborhood = []\n min_around_item = -1\n max_around_item = 1\n\n for x in range(min_around_item, max_around_item + 1):\n for y in range(min_around_item, max_around_item + 1):\n if min_around_item in (x, y) or max_around_item in (x, y):\n neighborhood.append([x, y])\n \n return neighborhood\n \n\ndef update_inventory(x, y, item, player):\n if (x, y) == (item[\"position x\"], item[\"position y\"]):\n item[\"colected\"] = 1\n if item['icon'] == \"🍎\":\n player[\"used inventory\"].append(item[\"icon\"])\n characters.develop_player(item['icon'], player)\n elif item['icon'] in \"🥼🪄\":\n player[\"inventory\"].append(item[\"icon\"])\n characters.develop_player(item['icon'], player)\n elif item['icon'] == \"🗝️\":\n player[\"inventory\"].append(item[\"icon\"])\n \n\ndef collect_inventory(board, player, items):\n neighborhood = check_player_neighborhood()\n\n for position in neighborhood:\n x = player[\"position x\"] + position[0]\n y = player[\"position y\"] + position[1]\n for item in items:\n update_inventory(x, y, item, player)\n \n\ndef get_number_od_monsters(monsters, player):\n number_of_monsters = 0\n for monster in monsters:\n if monster[\"board\"] == player[\"board\"] and monster[\"lives\"] > 0:\n 
number_of_monsters += 1\n    return number_of_monsters","repo_name":"FilipKoncewicz/Roguelike_game","sub_path":"engine_board.py","file_name":"engine_board.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"5814259157","text":"\"\"\"\n    Splits a class roster of names into random groups, with the number of\n    participants per group chosen at runtime.\n\"\"\"\n\nimport random\nimport csv\n\nfrom all_files import take_csv, choice_file\n\ndic_file = take_csv()\n\nif dic_file:\n    print(\"File\")\n\n    for key in dic_file:\n        print(f\"\\t{key}: {dic_file[key][0]}\")\n\n    # choice_file = input(\"\\nChoose the list \")\n\n    list_parter = []\n    with open(choice_file(dic_file)[1], 'r') as csv_file:\n        csv_reader = csv.reader(csv_file)\n\n        for line in csv_file:\n            if \"*\" not in line:\n                list_parter.append(line.replace(\"\\\\n\", ''))\n\n    print(f\"Total count: {len(list_parter)}\")\n\n    qtd = int(input(\"\\nEnter the number of members per group \"))  # number of students per group\n    list_group = []\n    while qtd < len(list_parter):\n        tmp = []\n        while len(tmp) < qtd:\n            name = random.choice(list_parter)\n            list_parter.remove(name)\n            tmp.append(name)\n\n        list_group.append(tmp)\n    else:\n        list_group.append(list_parter)\n\n    count = 1\n    for group in list_group:\n        print(count)\n        for parter in sorted(group):\n            print(f\"\\t{parter}\")\n\n        count = count + 1\nelse:\n    print(\"\\nNo files to analyze.\\n\")\n","repo_name":"mephessivolc/TestesPythonicos","sub_path":"CriarListaGrupos/criarGrupos.py","file_name":"criarGrupos.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"278782602","text":"from contextlib import closing\nimport hashlib\n\nfrom selenium import webdriver\n\nfrom .models import Blog, Post, Comment\nfrom project.celery import app\n\n\ndef explore_posts(blog_id, url):\n    blog = Blog.objects.get(id=blog_id)\n\n    with closing(webdriver.Firefox()) as browser:\n        browser.get(url)\n        for link in browser.find_elements_by_xpath(blog.post_url_xpath):\n            post, created = Post.objects.get_or_create(\n                blog_id=blog.pk,\n                url=link.get_attribute('href'))\n            crawl_post.delay(post.pk)\n\n\n@app.task()\ndef crawl_blog(blog_id):\n    blog = Blog.objects.get(id=blog_id)\n\n    with closing(webdriver.Firefox()) as browser:\n        url = blog.url\n        while True:\n            explore_posts(blog.pk, url)\n            browser.get(url)\n            next_page_sel = browser.find_element_by_xpath(blog.next_page_xpath)\n            url = next_page_sel.get_attribute('href')\n            if not url:\n                break\n\n\n@app.task()\ndef crawl_post(post_id):\n    post = Post.objects.get(id=post_id)\n    blog = post.blog\n\n    with closing(webdriver.Firefox()) as browser:\n        browser.get(post.url)\n\n        # extract post title\n        sel = browser.find_element_by_xpath(blog.post_title_xpath)\n        post.title = sel.text\n        # extract post content\n        sel = browser.find_element_by_xpath(blog.post_content_xpath)\n        post.content = sel.text\n        # extract post date\n        sel = browser.find_element_by_xpath(blog.post_date_xpath)\n        post.date = sel.text\n        post.save()\n\n        # find and save the comments\n        for sel in browser.find_elements_by_xpath(blog.post_comments_xpath):\n            content = sel.text\n            comment, created = Comment.objects.get_or_create(\n                post_id=post.pk,\n                md5_hash=hashlib.md5(content.encode('utf-8')).hexdigest())\n            if created:\n                comment.content = content\n                
comment.save()\n","repo_name":"AugustLONG/django_crawler","sub_path":"crawler/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31350259686","text":"import os\n\nfrom pathlib import Path\nfrom zipfile import ZipFile, ZIP_DEFLATED\n\n\ndef create_zip(path):\n    with ZipFile(f'{path}.zip', 'w', ZIP_DEFLATED) as zipf:\n        for root, dirs, files in os.walk(path):\n            for f in files:\n                print(f'Zipping {f}...')\n                zipf.write(os.path.join(root, f))\n\n\ndef make_zipfile(zip_file_path, folder_or_file_to_zip, exclude_function=None):\n    \"\"\"Create an archive while optionally excluding files or directories. Adapted from shutil._make_zipfile.\n\n    :param zip_file_path: Path of zip file to create.\n    :param folder_or_file_to_zip: Directory or file that will be zipped.\n    :param exclude_function: Function used to decide which files or directories to exclude\n    \"\"\"\n    with ZipFile(zip_file_path, \"w\") as zf:\n        if os.path.isfile(folder_or_file_to_zip):\n            zf.write(folder_or_file_to_zip, os.path.basename(folder_or_file_to_zip))\n        else:\n            for dirpath, dirnames, filenames in os.walk(folder_or_file_to_zip):\n                relative_dirpath = os.path.relpath(dirpath, folder_or_file_to_zip)\n                for name in sorted(dirnames):\n                    full_path = os.path.normpath(os.path.join(dirpath, name))\n                    relative_path = os.path.normpath(os.path.join(relative_dirpath, name))\n                    if exclude_function and exclude_function(full_path):\n                        continue\n                    zf.write(full_path, relative_path)\n                for name in filenames:\n                    full_path = os.path.normpath(os.path.join(dirpath, name))\n                    relative_path = os.path.normpath(os.path.join(relative_dirpath, name))\n                    if exclude_function and exclude_function(full_path):\n                        continue\n                    if os.path.isfile(full_path):\n                        zf.write(full_path, relative_path)\n\n\nif __name__ == '__main__':\n    for p in Path('.').iterdir():\n        if p.is_dir():\n            print(f'Processing {p} ...')\n            make_zipfile(\n                zip_file_path=str(p) + '.zip',\n                folder_or_file_to_zip=p,\n            )\n","repo_name":"zzn2/sample_modules","sub_path":"build_zip.py","file_name":"build_zip.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"74427394452","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Marker interfaces for models and containers.\"\"\"\n\n__all__ = [\n    \"IDeclarativeBase\",\n    \"IModel\",\n    \"IModelContainer\",\n]\n\nfrom zope.interface import Interface\n\n\nclass IDeclarativeBase(Interface):\n    \"\"\"Implemented by the declarative base and all classes that inherit from it.\"\"\"\n\n\nclass IModel(IDeclarativeBase):\n    \"\"\"Provided by models.\"\"\"\n\n\nclass IModelContainer(Interface):\n    \"\"\"Provided by model containers.\"\"\"\n","repo_name":"fizyk/pyramid_basemodel","sub_path":"pyramid_basemodel/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"}
+{"seq_id":"30263284412","text":"import pygame\nimport tkinter as tk\nfrom tkinter import *\nimport os\n\n\nroot = tk.Tk()\n\nembed = tk.Frame(root, width = 500, height = 500)  # creates embed frame for pygame window\nembed.grid(columnspan = (600), rowspan = 500)  # Adds grid\nembed.pack(side = LEFT)  # packs window to the left\n\nbuttonwin = tk.Frame(root, width = 75, height = 500)\nbuttonwin.pack(side = LEFT)\n\n\nroot.update()  # Required to prevent x-server error from preventing the code from running\n
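# SDL (which pygame uses for its display) can render into an existing native\n# window instead of opening its own: setting SDL_WINDOWID to the Tk frame's\n# window id before pygame.display.set_mode() makes pygame draw inside the\n# tkinter frame. This relies on SDL 1.2 behaviour; on Windows the 'windib'\n# video driver line below would also need to be enabled.\nos.environ['SDL_WINDOWID'] 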
= str(embed.winfo_id())\n# os.environ['SDL_VIDEODRIVER'] = 'windib' # <- This is for Windows support\n\nscreen = pygame.display.set_mode((500,500))\nscreen.fill(pygame.Color('white'))\n\npygame.display.init()\npygame.display.flip()\n\n\ndef draw():\n    screen.fill(pygame.Color('white'))\n    pygame.draw.circle(screen, pygame.Color('black'), (250,250), 125)\n    pygame.display.flip()\n\ndef clear():\n    screen.fill(pygame.Color('white'))\n    pygame.display.flip()\n\ndef write():\n    screen.fill(pygame.Color('white'))\n    pygame.font.init()\n    text = entry1.get()\n    font = pygame.font.SysFont('quicksandmedium',25, bold=1)\n    displaytext = font.render(text, 1, pygame.Color('black'))\n    screen.blit(displaytext, (250-displaytext.get_width()//2,250-displaytext.get_height()//2))\n    pygame.display.flip()\n\n\n# Controls to interact with PyGame window.\nbutton1 = Button(buttonwin,text = 'Draw', command=draw)\nbutton1.pack(side=TOP)\nbutton2 = Button(buttonwin,text = 'Clear', command=clear)\nbutton2.pack(side=BOTTOM)\nentry1 = Entry(buttonwin)\nentry1.pack(side=BOTTOM)\nbutton3 = Button(buttonwin,text = 'Write', command=write)\nbutton3.pack(side=BOTTOM)\n\nroot.update()\n\nwhile True:\n    pygame.display.flip()\n    root.update()","repo_name":"menaaziz27/my-python-scripts","sub_path":"02-playground/OOP/pygame.py","file_name":"pygame.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"26689046442","text":"import boto3\nfrom pprint import pprint\nimport hashlib\nimport time\nimport os\n'''\nAWS can be accessed through the boto3 library.\n'''\nfileKey = ''\n# stores the file key\n\nfileName = 'test.jpg'\n# file name; including the full path is fine, since it is split before use\n\nbucketName = ''\n# my bucket name\n\n\ns3 = boto3.resource('s3')\n# object shared by all functions below\n\nstatinfo = os.stat(fileName)\n\n\n# helper for reporting transfer progress\nprogressBytes = 0\n\ndef progressPercent(chunk):  # receives the number of bytes transferred; accumulate it\n    global progressBytes\n    if progressBytes >= statinfo.st_size :\n        progressBytes = 0\n    progressBytes += chunk\n    print(progressBytes / statinfo.st_size * 100)\n\n\ndef getHash(string):  # turn file name + current time into a sha1 digest\n\n    # hash the input string plus the current time (format: 1510647686.5457149)\n    changeString = string.encode('utf-8') + str(time.time()).encode('utf-8')\n    sha = hashlib.sha1(changeString)\n\n    # use hexdigest to get the hash value from the object in hexadecimal form\n    hexSha1 = sha.hexdigest()\n    return hexSha1\n\n\ndef getMetadate(fileKeyGet) :\n    client = boto3.client('s3')\n    response = client.get_object(\n        Bucket = bucketName,\n        Key = fileKeyGet\n    )\n    # response['Metadata']['filename'] is how the file name is pulled out of the response (a dict)\n    return response\n\n\ndef upload(filePathUp) :\n    fileNameUp = filePathUp.split('/').pop()  # last element of the path split on '/'\n    fileKeyUp = getHash((fileNameUp))\n    s3.Bucket(bucketName).upload_file(filePathUp, fileKeyUp, Callback = progressPercent)\n\n    '''\n    s3.Bucket(MyBucketName).upload_file(uploadFileInMyComputer, uploadFileNameInS3)\n    '''\n    # attach metadata\n    s3_object = s3.Object(bucketName, fileKeyUp)\n    s3_object.metadata.update({'fileName': fileNameUp})\n    s3_object.metadata.update({'test2': 'testes2'})\n    s3_object.copy_from(CopySource={'Bucket': bucketName, 'Key': fileKeyUp}, Metadata=s3_object.metadata,\n                        MetadataDirective='REPLACE')\n    # done\n    return fileKeyUp\n\n\ndef download(fileKeyDown) :\n    metadata = getMetadate(fileKeyDown)\n    fileNameDown = metadata['Metadata']['filename']\n    s3.Bucket(bucketName).download_file(fileKeyDown, fileNameDown, Callback = progressPercent)\n    # change fileNameDown if you want a different name for the downloaded file\n    '''\n    
s3.Bucket(MyBucketName).download_file(target_Key_Name_In_S3, file_name_As_MyComputer)\n    '''\n\n\ndef deleteObject(fileKeyDelete) :\n    client = boto3.client('s3')\n    obj = s3.Object(bucketName, fileKeyDelete)\n    response = client.delete_object(\n        Bucket=obj.bucket_name,\n        Key=obj.key\n    )\n\n\ndef getList() :\n    keys = []\n    client = boto3.client('s3')\n    response = client.list_objects(\n        Bucket=bucketName\n    )\n    for obj in response['Contents']:\n        keys.append(obj['Key'])\n    print(keys)\n    # for now this only fetches the keys; adjust later as needed\n\n\nfileKey = upload(fileName)\ndownload(fileKey)\n","repo_name":"devjyno96/S3Control_Python","sub_path":"S3Control.py","file_name":"S3Control.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"32697441272","text":"from matplotlib import pyplot as plt\nimport openpyxl as xl\n\n# Load excel file\ndf=xl.load_workbook('sakana.xlsx')\nsheet = df['Sheet1']\n\n# Load cell values\nx_con=[]\ny_con=[]\nz_con=[]\nx_test=[]\ny_test=[]\nz_test=[]\nfor i in range(15):\n    c_x_con = sheet.cell(i+2,5)\n    c_y_con = sheet.cell(i+2,6)\n    c_z_con= sheet.cell(i+2,7)\n    c_x_test = sheet.cell(i+19,5)\n    c_y_test = sheet.cell(i+19,6)\n    c_z_test= sheet.cell(i+19,7)\n    x_con.append(c_x_con.value)\n    y_con.append(c_y_con.value)\n    z_con.append(c_z_con.value)\n    x_test.append(c_x_test.value)\n    y_test.append(c_y_test.value)\n    z_test.append(c_z_test.value)\n\n# Create a figure object\nfig = plt.figure()\n \n# Create bubble chart\nplt.scatter(x_con, y_con, s=z_con, alpha=0.5, label='contcol', c='b')\nplt.scatter(x_test, y_test, s=z_test, alpha=0.5, label='test', c='r')\n\n# Add labels\nplt.rcParams['font.family'] = 'Meiryo'\nplt.xlabel(sheet.cell(1,5).value)\nplt.ylabel(sheet.cell(1,6).value)\n \n# Display Graph\nplt.legend()\nplt.show()\n","repo_name":"fin358/fish","sub_path":"trout/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"30735328298","text":"from read_file import read_mat,expectedOutputs,normalize\nfrom neural_network import Neural_Network\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nimport argparse\n\n\"\"\"\nThis function draws plots of the model accuracy or loss values\n\"\"\"\ndef plot(pkl_name_create, train_accuracy_list, validation_accuracy_list, epoch_list):\n\n    x1 = train_accuracy_list\n    x2 = validation_accuracy_list\n\n    plt.plot(epoch_list, x1, color='green')\n    plt.plot(epoch_list, x2, color='orange')\n\n    plt.legend(['train', 'validation'], loc='upper right')\n\n    plt.title(\"Optimum Train and Validation Accuracy with 2 Hidden Layer\")\n    plt.xlabel(\"Epoch\")\n    plt.ylabel(\"Accuracy\");\n\n    figure_name = \"Optimum_Train_and_Validation_Accuracy_with_2_Hidden_Layer\" + \".png\"\n\n    plt.savefig(figure_name)\n    plt.show()\n\n\"\"\"\nepochProcess takes parameters such as the epoch size, batch size, learning rate and the neural network.\nThe number of nodes per layer and the number of hidden layers can be changed dynamically when the network\nis created; the code is written generically, so the network builds its weights and connections\nwith the same function from the given parameters.
\n\"\"\"\ndef epochProcess(epoch_size, batch_size, learning_rate, NN, size_den_inp, deneme_input, deneme_expected):\n validation_image_values = read_mat(\"validation.mat\")[0] # images\n normalized_images_validation = normalize(validation_image_values) # normalized images\n expected_classes_validation = read_mat(\"validation.mat\")[1] # expected flower types\n expected_outputs_validation = expectedOutputs(expected_classes_validation) # flatten outputs\n X_validation = normalized_images_validation # normalized input images\n size_of_validation = len(X_validation)\n loss_error_list = []\n epoch_list = []\n train_accuracy_list = []\n validation_accuracy_list = []\n for i in range(epoch_size):\n print(\"\\nepoch\",i+1,\"-->\")\n NN.hit = 0\n NN.total_loss_value = 0\n for j in range(0, size_den_inp, batch_size):\n batch_input = deneme_input[j:j + batch_size]\n batch_expect = deneme_expected[j:j + batch_size]\n hit = NN.trainModel(batch_input, batch_expect, learning_rate, batch_size)\n\n print(\"train hit: \", NN.hit, \", Accuracy: \", NN.hit / size_den_inp * 100)\n train_accuracy_list.append(NN.hit / size_den_inp * 100)\n print(\"train loss: \", NN.total_loss_value )\n loss_error_list.append(NN.total_loss_value )\n epoch_list.append(i)\n\n # print(\"validation size: \", size_of_validation)\n predicted_outputs_validation = NN.forwardPropagation(X_validation, expected_outputs_validation)\n valid_hit = NN.hit_count(predicted_outputs_validation, expected_outputs_validation)\n print(\"\\nvalidation hit: \", valid_hit, \"accuracy: \", valid_hit / size_of_validation * 100)\n validation_accuracy_list.append(valid_hit / size_of_validation * 100)\n\n loss_error_list = np.asarray(loss_error_list)\n print(\"number of layer: \", NN.numberOfHidden, \"layer size: \", NN.hiddenSize, \"learning rate: \", learning_rate,\n \"batch size: \", batch_size)\n pkl_name_create = str(NN.numberOfHidden) + \"_\" + str(NN.hiddenSize) + \"_\" + str(learning_rate) + \"_\" + str(batch_size)\n pkl_name_create += \"_\" + str(epoch_size)\n # Save to file in the current working directory\n pkl_file = pkl_name_create + \".pkl\"\n with open(pkl_file, 'wb') as file:\n pickle.dump(NN, file, pickle.HIGHEST_PROTOCOL)\n\n #plot(pkl_name_create, train_accuracy_list, validation_accuracy_list, epoch_list)\n\n\"\"\"\nprogramWorkStation is the baseline of the assignment. 
Calls specific functions.\n\"\"\"\ndef programWorkStation(train_file):\n\n    image_values = read_mat(train_file)[0]  # images\n    normalized_images = normalize(image_values)  # normalized images\n    expected_classes = read_mat(train_file)[1]  # expected flower types\n    expected_outputs = expectedOutputs(expected_classes)  # flattened outputs\n    X = normalized_images  # normalized input images\n    size_of_one_image = len(normalized_images[0])\n    size_of_input = size_of_one_image\n\n    # parameters of neural network\n    hidden_node_number = 100\n    hidden_layer_number = 2\n    size_of_output = 5\n    learning_rate = 0.005\n    epoch_size = 300\n    batch_size = 20\n\n    # neural network object is created here.\n    Beauty_Neural_Network = Neural_Network(size_of_input, hidden_node_number, size_of_output, hidden_layer_number)\n\n    deneme_input = X\n    size_den_inp = len(deneme_input)\n    den_expected = expected_outputs\n\n    # run the code according to epoch and batch sizes.\n    epochProcess(epoch_size, batch_size, learning_rate, Beauty_Neural_Network, size_den_inp, deneme_input, den_expected)\n\n\nparser = argparse.ArgumentParser(description='train the model')\nparser.add_argument('train_data_file', type=argparse.FileType('rb'))\nargs = parser.parse_args()\n\n# the program work station is called here.\nprogramWorkStation(args.train_data_file)\n\n\n\n","repo_name":"zetrowski/Classification-of-flowers-using-NeuralNetwork","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33630561994","text":"from sys import argv\r\nfrom Lexer import Lexer, LexerException\r\nfrom Parser import Parser, ParserException\r\n\r\nwith open(argv[1], 'r') as i:\r\n    try:\r\n        l = Lexer(i)\r\n        if len(argv) > 2 and argv[2] == 'l':\r\n            a = l.get_token()\r\n            while not a.ttype is None:\r\n                print(a.to_cats())\r\n                a = l.get_token()\r\n        else:\r\n            p = Parser(l)\r\n            path = argv[1].split('\\\\')\r\n            print(p.parse_stmt().print_str(), end='') if path[1] == 'parser-stmt'\\\r\n                else print(p.parse_expr().print_str(), end='') if path[1] == 'parser-expr'\\\r\n                else print(p.parse_program().print_str(), end='') if path[1] == 'parser-decl'\\\r\n                else None\r\n\r\n    except LexerException as e:\r\n        print(e.to_cats(), end = '')\r\n    except ParserException as e:\r\n        e.eprint()\r\n    ","repo_name":"pinkslot/2015translator","sub_path":"com.py","file_name":"com.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31155506303","text":"import os\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nimport socket\r\nimport threading\r\n\r\n\r\nclass SenderApp:\r\n    def __init__(self, root):\r\n        self.root = root\r\n        self.root.title(\"Sender App\")\r\n\r\n        self.filepath = \"\"\r\n        self.destination_ip = \"\"\r\n\r\n        self.create_widgets()\r\n\r\n    def create_widgets(self):\r\n        self.file_label = tk.Label(self.root, text=\"File to send:\")\r\n        self.file_label.pack()\r\n\r\n        self.select_file_button = tk.Button(self.root, text=\"Select a file\", command=self.select_file)\r\n        self.select_file_button.pack()\r\n\r\n        self.destination_label = tk.Label(self.root, text=\"Recipient IP address:\")\r\n        self.destination_label.pack()\r\n\r\n        self.destination_entry = tk.Entry(self.root)\r\n        self.destination_entry.pack()\r\n\r\n        self.send_button = tk.Button(self.root, text=\"Send\", command=self.send_file)\r\n        self.send_button.pack()\r\n\r\n
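    # Note on the wire format used by send_file() below: the file name goes\r\n    # out as the first send() and the raw file bytes follow on the same TCP\r\n    # connection (port 12345 here). TCP has no message boundaries, so the\r\n    # receiver is assumed to treat its first recv() as the name; on a real\r\n    # network the name and the first data chunk may arrive coalesced.\r\n    def 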
select_file(self):\r\n        self.filepath = filedialog.askopenfilename()\r\n\r\n    def send_file(self):\r\n        self.destination_ip = self.destination_entry.get()\r\n        if not self.filepath or not self.destination_ip:\r\n            return\r\n\r\n        try:\r\n            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n            s.connect((self.destination_ip, 12345))  # Use whichever port you prefer\r\n\r\n            filename = os.path.basename(self.filepath)\r\n            s.send(filename.encode())\r\n\r\n            with open(self.filepath, 'rb') as file:\r\n                while True:\r\n                    data = file.read(1024)\r\n                    if not data:\r\n                        break\r\n                    s.send(data)\r\n\r\n            s.close()\r\n            print(\"File sent successfully.\")\r\n        except Exception as e:\r\n            print(f\"Error while sending: {str(e)}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    root = tk.Tk()\r\n    app = SenderApp(root)\r\n    root.mainloop()\r\n","repo_name":"NemraV1/FileSharing","sub_path":"file_share_app.py","file_name":"file_share_app.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"44337087410","text":"from django.conf import settings\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nimport solr\nimport os\nimport uuid\n\nfrom nomr.models import Book, BookPart, Page\nfrom nomr.resources.generateglyphs import GlyphGen\n\ndef glyphs(request):\n    sanitized_q = u\"\"\n\n    q = request.GET.get('q')\n    if not q:\n        sanitized_q = \"*:*\"\n    else:\n        # perform search on general text field (concatenation of all fields)\n        sanitized_q = \"text:%s\" % q\n    \n    # get facet queries\n    fq = request.GET.getlist('fq')\n\n    start_date = request.GET.get('startdate')\n    end_date = request.GET.get('enddate')\n    if start_date and end_date:\n        sanitized_q += (' AND publication_date:[%sT00:00:00.000Z TO %sT00:00:00.000Z]' % (start_date, end_date))\n\n    s_conn = solr.SolrConnection(settings.SOLR_SERVER)\n    response = s_conn.select(sanitized_q, fq=fq)\n\n    # create a folder in the media root for the glyphs to be generated\n    # folder name will be a generated UUID for now\n    glyph_folder_name = 'glyphs/%s' % uuid.uuid4()\n    glyph_collection_path = os.path.join(settings.MEDIA_ROOT, glyph_folder_name)\n    glyph_collection_url = os.path.join(settings.MEDIA_URL, glyph_folder_name)\n\n    try:\n        os.makedirs(glyph_collection_path)\n    except OSError:\n        # uuids are unique, so this should never be thrown\n        err_msg = 'There was an error generating the glyphs for the given search query. 
Please try again.'\n        return render_to_response('error.html', {'msg': err_msg}, context_instance=RequestContext(request))\n\n    # for each book, get the pages\n    for b in response.results:\n        # get page image links\n        book_part = BookPart.objects.get(book=b['uuid'])\n        pages = Page.objects.filter(book_part=book_part.uuid)\n        \n        image_paths = [str(os.path.join(settings.MEDIA_ROOT, p.image.name)) for p in pages]\n        mei_paths = [str(os.path.join(settings.MEDIA_ROOT, p.mei.name)) for p in pages]\n        for image_path, mei_path in zip(image_paths, mei_paths):\n            # for each image and mei pair\n            gg = GlyphGen(image_path, mei_path)\n            gg.gen_glyphs(glyph_collection_path)\n\n    # get list of relative urls to the generated glyphs\n    glyph_urls = []\n    for glyph_file in os.listdir(glyph_collection_path):\n        glyph_urls.append(os.path.join(glyph_collection_url, glyph_file))\n\n    return render_to_response('glyphs.html', {'glyphs': glyph_urls}, context_instance=RequestContext(request))\n","repo_name":"DDMAL/Nomr","sub_path":"nomr/glyphs.py","file_name":"glyphs.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"32617612762","text":"from UserInput import HandleUserInput\nfrom StateMachine import StateMachine\n\nif __name__ == \"__main__\":\n    \"\"\"Runner for the Tape Player\n    \"\"\"\n    tape_len = int(input(\"Enter the tape length: \"))\n\n    tape = StateMachine(abs(tape_len))\n    tape.handle_start_tape()\n\n    input_handler = HandleUserInput(tape)\n    input_handler.start_user_input()\n","repo_name":"protoi/tape-player","sub_path":"TapePlayer.py","file_name":"TapePlayer.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31993686234","text":"#!/usr/bin/env python3\n\nimport sys\nfrom string import *\nimport pefile\n\ndef main(functionName):\n    lenStr = len(functionName)\n    counter = 1\n    fNameLen = lenStr\n\n    hashValue = 0\n\n    while(fNameLen):\n        reverseCounter = lenStr - counter\n        if lenStr == counter:\n            reverseCounter = 1\n        # Get char from the beginning | bChar means chars from the beginning\n        bChar = functionName[counter - 1]\n        bUpperChar = bChar.upper()\n        bCharXOR = ord(bChar) ^ lenStr\n        bUpperCharXOR = ord(bUpperChar) ^ lenStr\n        \n        # Get chars from the end | eChar means chars from the end\n        eChar = functionName[reverseCounter - 1]\n        eCharUpper = eChar.upper()\n        eCharXOR = ord(eChar) ^ lenStr\n        eCharUpperXOR = ord(eCharUpper) ^ lenStr\n\n        hashValue = eCharUpperXOR ^ (hashValue + (bUpperCharXOR * eCharXOR * bCharXOR))\n        counter += 1\n        fNameLen -= 1\n    \n    return hashValue\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 3:\n        libPath = sys.argv[1]\n        searchedHash = sys.argv[2]\n        # Parse export table\n        pe = pefile.PE(libPath)\n        pe.parse_data_directories()\n        for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:\n            if exp.name is not None:\n                hashVal = main((exp.name).decode())\n                if hashVal == int(searchedHash, 16):\n                    print(f\"Function name : {exp.name} -> Hash : {hex(hashVal)}\")\n    else:\n        print(\"[!] This script must be used with two arguments : Library path (ntdll / kernel32 ...) 
and hash\")\n exit(1)","repo_name":"Yafnag/danabot","sub_path":"hashFunctionName.py","file_name":"hashFunctionName.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18579145740","text":"import types\nimport re\nimport os\nimport requests\nfrom requests.exceptions import MissingSchema\nimport logging\nimport json\nfrom tempfile import TemporaryFile\nfrom pathvalidate import sanitize_filename\nimport urllib\nfrom bs4 import BeautifulSoup\nfrom markdownify import markdownify as md\nfrom canvasapi import Canvas\nfrom canvasapi.exceptions import Unauthorized, ResourceDoesNotExist\n\nfrom canvasapi.canvas_object import CanvasObject\nfrom canvasapi.file import File\nfrom canvasapi.paginated_list import PaginatedList\nfrom canvasapi.util import combine_kwargs\n\n\nclass MediaObject(CanvasObject):\n pass\n\n\ndef get_media_objects(self, *args, **kwargs):\n return PaginatedList(\n MediaObject,\n self._requester,\n \"GET\",\n \"courses/{}/media_objects\".format(self.id),\n {\"course_id\": self.id},\n _kwargs=combine_kwargs(**kwargs),\n )\n\n\n\nclass CanvasScraper:\n def __init__(\n self, base_url, api_key, path, overwrite,\n videos, markdown, logger=None):\n self.api_key = api_key\n self.base_url = self._create_base_url(base_url)\n self.headers = {'Authorization': f'Bearer {self.api_key}'}\n self._path = path\n self.overwrite = overwrite\n self.videos = videos\n self.markdown = markdown\n self._logger = logger\n self._canvas = Canvas(self.base_url, self.api_key)\n self.user = self._canvas.get_current_user()\n self.visited_page_links = []\n\n if not self._logger:\n self._logger = logging\n\n self._loggers = [self._logger]\n self._names = []\n self._ids = []\n\n def scrape(self):\n courses = self.user.get_courses()\n for c in courses:\n try:\n print(c)\n except AttributeError:\n print(\"Null course\")\n #import pdb\n #pdb.set_trace()\n self.recurse_course(c)\n\n def recurse_course(self, course):\n try:\n try:\n self.push(course, \"course\")\n except KeyError:\n return\n\n try:\n external_tools = course.get_external_tools()\n external_tools = list(external_tools)\n self.logger.info(str(course.name))\n self.logger.info(external_tools)\n if external_tools:\n import pdb\n pdb.set_trace()\n except (Unauthorized, ResourceDoesNotExist) as e:\n self.logger.warning(e)\n self.logger.warning(f\"External tools not accesible\")\n\n self.push_raw(f\"assignments_{course.id}\", \"assignments\", 0)\n try:\n assignments = course.get_assignments()\n for a in assignments:\n self.push_raw(f\"assignment_{a.name}\", \"assignment\", 0)\n try:\n self.handle_assignment(a)\n finally:\n self.pop()\n except (Unauthorized, ResourceDoesNotExist) as e:\n self.logger.warning(e)\n self.logger.warning(f\"Assignments not accesible\")\n finally:\n self.pop()\n\n self.push_raw(f\"pages_{course.id}\", \"pages\", 0)\n try:\n pages = course.get_pages()\n for p in pages:\n self.push_raw(f\"page_{p.title}\", \"page\", 0)\n try:\n self.handle_page(p)\n finally:\n self.pop()\n except (Unauthorized, ResourceDoesNotExist) as e:\n self.logger.warning(e)\n self.logger.warning(f\"Pages not accesible\")\n finally:\n self.pop()\n\n try:\n fp_path = os.path.join(self.path, \"front_page.html\")\n fp_md_path = os.path.join(self.path, \"front_page.md\")\n fp = course.show_front_page().body\n\n if self._dl_page(fp, fp_path) and self.markdown:\n self._dl_page_data(fp_path, course._requester)\n self._markdownify(fp_path, fp_md_path)\n except 
(Unauthorized, ResourceDoesNotExist) as e:\n                self.logger.warning(e)\n                self.logger.warning(f\"Front page not accessible\")\n\n            try:\n                modules = course.get_modules()\n                for m in modules:\n                    self.recurse_module(m)\n            except (Unauthorized, ResourceDoesNotExist) as e:\n                self.logger.warning(e)\n                self.logger.warning(f\"Modules not accessible\")\n\n            try:\n                groups = course.get_groups()\n                for g in groups:\n                    self.recurse_group(g)\n            except (Unauthorized, ResourceDoesNotExist) as e:\n                self.logger.warning(e)\n                self.logger.warning(f\"Groups not accessible\")\n\n\n            self.scrape_files(course)\n\n            self.scrape_media(course)\n        finally:\n            self.pop()\n\n    def recurse_group(self, group):\n        try:\n            try:\n                self.push(group, \"group\")\n            except KeyError:\n                return\n            json_path = os.path.join(self.path, \"group.json\")\n            self._dl_obj(group, json_path)\n            self.scrape_files(group)\n        finally:\n            self.pop()\n\n    def scrape_files(self, obj):\n        try:\n            # Hack to put files under a separate subfolder from modules\n            self.push_raw(f\"files_{obj.id}\", \"files\", 0)\n            try:\n                # get_folders() returns a flat list of all folders\n                folders = obj.get_folders()\n                for f in folders:\n                    self.recurse_folder(f)\n            except Unauthorized:\n                self.logger.warning(f\"Files not accessible\")\n        finally:\n            self.pop()\n\n    def scrape_media(self, obj):\n        try:\n            # Hack to put media under a separate subfolder from modules\n            self.push_raw(f\"media_{obj.id}\", \"media\", 0)\n            try:\n                obj.__class__.get_media_objects = get_media_objects\n                media_objs = obj.get_media_objects()\n                for m in media_objs:\n                    if \"video\" in m.media_type:\n                        self.handle_media_video(m)\n                    else:\n                        self.logger.warning(\n                            f\"Media '{m.title}' type {m.media_type} is unsupported\")\n                        import pdb\n                        pdb.set_trace()\n            except (Unauthorized, ResourceDoesNotExist) as e:\n                self.logger.warning(e)\n                self.logger.warning(f\"Media objects not accessible\")\n        finally:\n            self.pop()\n\n    def recurse_folder(self, folder):\n        self.push(folder, \"folder\", name_key=\"full_name\")\n        try:\n            files = folder.get_files()\n            try:\n                for f in files:\n                    try:\n                        f_name = f.title\n                    except AttributeError:\n                        try:\n                            f_name = f.display_name\n                        except Exception as e:\n                            import pdb\n                            pdb.set_trace()\n\n                    f_path = os.path.join(self.path, f_name)\n\n                    if self._should_write(f_path):\n                        self.logger.info(f\"Downloading {f_path}\")\n                        try:\n                            f.download(f_path)\n                            self.logger.info(f\"{f_path} downloaded\")\n                        except (Unauthorized, ResourceDoesNotExist) as e:\n                            self.logger.warning(f\"file not accessible\")\n                            self.logger.warning(str(e))\n            except (Unauthorized, ResourceDoesNotExist) as e:\n                self.logger.warning(f\"folder not accessible\")\n                self.logger.warning(str(e))\n        finally:\n            self.pop()\n\n    def recurse_module(self, module):\n        self.push(module, \"module\")\n        try:\n            items = module.get_module_items()\n            for i in items:\n                self.recurse_item(i)\n        finally:\n            self.pop()\n\n    def recurse_item(self, item):\n        self.push(item, \"item\", name_key=\"title\")\n        try:\n            if item.type == \"File\":\n                self.logger.info(\"Handling file\")\n                self.handle_file(item)\n            elif item.type == \"Page\":\n                self.logger.info(\"Handling page\")\n                self.handle_page(item)\n            elif item.type == \"Assignment\":\n                self.logger.info(\"Handling assignment\")\n                self.handle_assignment(item)\n            elif item.type == \"Quiz\":\n                self.logger.info(\"Handling quiz\")\n                self.handle_quiz(item)\n            elif item.type == \"SubHeader\":\n                # TODO: Assuming you can't nest subheaders, it's probably enough\n                # to just pop the stack if the top contains a subheader, and then \n                # push a new folder for each subheader.\n                self.logger.warning(\n                    
\"SubHeader's are not supported for now, skipping\")\n #self.handle_subheader(item)\n elif item.type == \"ExternalUrl\":\n self.logger.info(\"Handling external URL\")\n self.handle_external_url(item)\n else:\n self.logger.warning(f\"Unsupported type {item.type}\")\n import pdb\n pdb.set_trace()\n finally:\n self.pop()\n\n def handle_external_url(self, item):\n file_path = os.path.join(self.path, f\"{item.title}.txt\")\n url = item.external_url\n if self._should_write(file_path):\n with open(file_path, \"w\") as f:\n f.write(url)\n self.logger.info(f\"{file_path} downloaded\")\n\n def handle_file(self, item):\n file_name = item.title\n file_url = item.url\n file_path = os.path.join(self.path, file_name)\n requester = item._requester\n self.logger.info(f\"Downloading {file_name}\")\n self._dl_canvas_file(\n file_url, file_path, requester)\n\n def handle_media_video(self, item):\n media_name = item.title\n media_path = os.path.join(self.path, media_name)\n sources = item.media_sources\n sources.sort(key=lambda s: int(s['size']), reverse=True)\n media_url = sources[0]['url']\n self._dl(media_url, media_path)\n\n def handle_page(self, item):\n if getattr(item, \"page_url\", None):\n url = item.page_url\n elif getattr(item, \"url\", None):\n url = item.url\n else:\n self.logger.error(\"Could not get url for page item\")\n import pdb;pdb.set_trace()\n page = self._canvas.get_course(\n item.course_id).get_page(url)\n try:\n page_body = page.body\n except AttributeError:\n if page.locked_for_user:\n self.logger.info(\"Page locked, reason:\")\n self.logger.info(page.lock_explanation)\n self.logger.error(\"Page not accessible\")\n return\n\n page_path = os.path.join(self.path, \"page.html\")\n page_md_path = os.path.join(self.path, \"page.md\")\n\n if self.markdown and self._dl_page(page_body, page_path):\n self._markdownify(page_path, page_md_path)\n self._dl_page_data(page_path, item._requester)\n\n def handle_assignment(self, item):\n if getattr(item, \"content_id\", None):\n asn_id = item.content_id\n elif getattr(item, \"id\", None):\n asn_id = item.id\n else:\n self.logger.error(\"Could not get url for assignment item\")\n import pdb;pdb.set_trace()\n\n page_path = os.path.join(self.path, \"assignment.html\")\n page_md_path = os.path.join(self.path, \"assignment.md\")\n json_path = os.path.join(self.path, \"assignment.json\")\n assignment = self._canvas.get_course(\n item.course_id).get_assignment(asn_id)\n\n self._dl_obj(assignment, json_path)\n\n page = assignment.description\n if page:\n if self.markdown and self._dl_page(page, page_path):\n self._markdownify(page_path, page_md_path)\n self._dl_page_data(page_path, item._requester)\n\n submission = assignment.get_submission(self.user)\n self.handle_submission(submission)\n\n def handle_quiz(self, item):\n page_path = os.path.join(self.path, \"quiz.html\")\n page_md_path = os.path.join(self.path, \"quiz.md\")\n json_path = os.path.join(self.path, \"quiz.json\")\n quiz = self._canvas.get_course(\n item.course_id).get_quiz(item.content_id)\n page = quiz.description\n if page:\n if self.markdown and self._dl_page(page, page_path):\n self._markdownify(page_path, page_md_path)\n self._dl_page_data(page_path, item._requester)\n self._dl_obj(quiz, json_path)\n\n def handle_submission(self, submission):\n self.push(submission, \"submission\", name_key=\"id\")\n try:\n json_path = os.path.join(self.path, f\"submission_{submission.id}.json\")\n\n try:\n attachments = submission.attachments\n for a in attachments:\n f_path = os.path.join(self.path, 
a[\"filename\"])\n url = a[\"url\"]\n self._dl(url, f_path)\n except AttributeError:\n self.logger.warning(\"No attachments found\")\n\n self._dl_obj(submission, json_path)\n finally:\n self.pop()\n\n def push(self, obj, type, name_key=\"name\"):\n id = obj.id\n try:\n name = str(getattr(obj, name_key))\n except:\n name = str(id)\n\n self.push_raw(name, type, id)\n\n def push_raw(self, name, type, id):\n self._push_logger(f\"{type}_{id}\")\n self._push_name(name)\n self._push_id(id)\n self.logger.info(name)\n\n def pop(self):\n self._pop_logger()\n self._pop_name()\n self._pop_id()\n\n def get_all_objects(self, url):\n self.logger.debug(f\"Grabbing all pages for {url}\")\n objects = []\n page = 1\n while True:\n r = self._get(url, params={\"page\": page})\n if not r.json():\n break\n objects.extend(r.json())\n self.logger.debug(f\"Grabbed page {page}\")\n page += 1\n return objects\n\n @property\n def logger(self):\n return self._loggers[-1]\n\n @property\n def path(self):\n return os.path.join(\n self._path, *[sanitize_filename(n) for n in self._names])\n\n @property\n def name(self):\n return self._names[-1]\n\n @property\n def id(self):\n return self._ids[-1]\n\n @staticmethod\n def _create_base_url(base_url):\n if \"https\" not in base_url:\n base_url = f\"https://{base_url}\"\n return base_url\n\n def _courses_url(self):\n return f\"{self.base_url}/courses\"\n\n def _course_url(self, course_id):\n return f\"{self._courses_url()}/{course_id}\"\n\n def _course_frontpage_url(self, course_id):\n return f\"{self._course_url(course_id)}/front_page\"\n\n def _modules_url(self, course_id):\n return f\"{self._course_url(course_id)}/modules\"\n\n def _kaltura_manifest_url(self, base_url, entry_id, flavor_id):\n base_url = base_url[:base_url.index(\"embedIframeJs\")]\n return os.path.join(\n base_url,\n \"playManifest/entryId\",\n str(entry_id),\n \"flavorIds\",\n str(flavor_id),\n \"format/applehttp/protocol/https/a.m3u8\")\n\n def _get(self, url, params=None):\n return requests.get(url, params=params, headers=self.headers)\n\n def _mkd(self, path):\n return os.makedirs(path, exist_ok=True)\n\n def _dl(self, url, path):\n if self._should_write(path):\n try:\n self.logger.info(f\"Downloading {path}\")\n r = self._get(url)\n with open(path, \"wb\") as f:\n f.write(r.content)\n self.logger.info(f\"{path} downloaded\")\n return True\n except MissingSchema as e:\n self.logger.error(f\"{url} is not a valid url\")\n return False\n except Exception as e:\n self.logger.error(\"file download failed\")\n import pdb\n pdb.set_trace()\n self.logger.error(e)\n\n def _dl_page(self, page, path):\n if self._should_write(path):\n with open(path, \"w\") as f:\n f.writelines(page)\n self.logger.info(f\"{path} downloaded\")\n return True\n\n def _dl_obj(self, obj, path):\n if self._should_write(path):\n with open(path, \"w\") as f:\n json.dump(obj.__dict__, f, indent=2, default=str)\n self.logger.info(f\"{path} downloaded\")\n\n def _dl_page_data(self, src_path, requester):\n self.logger.info(f\"Downloading page data for {src_path}\")\n with open(src_path, \"r\") as f:\n src = f.read()\n\n soup = BeautifulSoup(src, \"html.parser\")\n links = soup.find_all('a')\n\n if links:\n self._mkd(os.path.join(self.path, \"files\"))\n for link in links:\n href = link.get(\"href\")\n title = link.get(\"title\")\n if not title:\n title = link.text\n if not href:\n self.logger.warning(f\"Link not found for title {title}\")\n continue\n self.logger.info(f\"Downloading link for: {title}\")\n self.logger.info(href)\n if href in 
self.visited_page_links:\n self.logger.warning(\"Page has been visited before, skipping\")\n continue\n self.visited_page_links.append(href)\n if link.get(\"class\") and \"instructure_file_link\" in link[\"class\"] and \"canvas\" in href:\n # This is necessary because files don't always show up\n # under the files section of a course for some reason\n self.logger.info(\n \"Canvas file detected, using Canvas API for download\")\n try:\n self._dl_canvas_file(\n href, os.path.join(self.path, \"files\"), requester)\n except (Unauthorized, ResourceDoesNotExist) as e:\n self.logger.error(\"Could not download file\")\n elif href.startswith(\"mailto\"):\n self.logger.info(\"mailto link detected, saving email\")\n mail_path = os.path.join(self.path, \"files\", title)\n with open(mail_path, \"w\") as f:\n f.write(href)\n elif self._is_page_url(href):\n self.logger.info(\"Canvas page detected, handling page\")\n page_item = self._page_url_to_item(href, requester)\n self.push_raw(f\"page_{page_item.page_url}\", \"page\", 0)\n try:\n self.handle_page(page_item)\n except:\n self.logger.info(\"Could not handle page item\")\n finally:\n self.pop()\n elif self._is_assignment_url(href):\n self.logger.info(\"Canvas assignment detected, handling assignment\")\n assignment_item = self._assignment_url_to_item(href, requester)\n self.push_raw(f\"assignment_{assignment_item.content_id}\", \"assignment\", 0)\n try:\n self.handle_assignment(assignment_item)\n except:\n self.logger.info(\"Could not handle assignment item\")\n finally:\n self.pop()\n else:\n self.logger.warning(\n \"Non Canvas file link, attempting generic download\")\n dl_path = os.path.join(self.path, \"files\", title)\n self._dl(link[\"href\"], dl_path)\n\n if self.videos:\n # Download Kaltura videos\n videos = soup.find_all('iframe', **{'id': 'kaltura_player'})\n for idx, video in enumerate(videos):\n video_path = os.path.join(self.path, \"videos\", f\"{idx}.mp4\")\n self._dl_video(video[\"src\"], video_path)\n\n def _dl_canvas_file(self, url, path, requester):\n canvas_path = urllib.parse.urlparse(url).path\n canvas_path = canvas_path.replace(\"/api/v1\", \"\")\n resp = requester.request(\"GET\", canvas_path)\n file = File(requester, resp.json())\n dl_path = os.path.join(path, file.filename)\n if not self._should_write(dl_path):\n return\n file.download(dl_path)\n self.logger.info(f\"{dl_path} downloaded\")\n return True\n\n def _dl_video(self, base_url, path):\n if not self._should_write(path):\n return\n # Get data from Kaltura iframe\n lines = requests.get(base_url).text.splitlines()\n iframe_data = next(\n (l for l in lines if \"kalturaIframePackageData\" in l), None)\n if not iframe_data:\n self.logger.warning(f\"iframe data not found for {base_url}\")\n return\n # Ignore js syntax, pull json text out of line\n iframe_data = iframe_data[iframe_data.index(\"{\"):-1]\n iframe_data = json.loads(iframe_data)\n try:\n flavor_assets = (iframe_data[\"entryResult\"]\n [\"contextData\"]\n [\"flavorAssets\"])\n except KeyError:\n self.logger.warning(f\"flavorAssets not found in {base_url}\")\n return\n\n flavor_asset = next(\n (f for f in flavor_assets if f.get(\"flavorParamsId\") == 5),\n None)\n if not flavor_asset:\n self.logger.warning(\n f\"Could not find correct flavorAsset for {base_url}\")\n return\n try:\n entry_id = flavor_asset[\"entryId\"]\n flavor_id = flavor_asset[\"id\"]\n except KeyError:\n self.logger.warning(\n f\"Could not find keys inside flavorAsset for {base_url}\")\n return\n manifest_url = self._kaltura_manifest_url(\n 
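# Build the Kaltura playManifest URL that yields the HLS (m3u8) manifest for this flavor.\n 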
base_url, entry_id, flavor_id)\n lines = requests.get(manifest_url).text.splitlines()\n index_url = next((l for l in lines if \"index\" in l), None)\n if not index_url:\n self.logger.warning(\n f\"Could not find index url for {base_url}\")\n return\n index = filter(\n lambda l: not l.startswith(\"#\"),\n requests.get(index_url).text.splitlines())\n streaming_url = index_url.replace(\"index.m3u8\", \"\")\n with TemporaryFile() as tf:\n for i in index:\n self.logger.info(f\"Downloading video segment {i}\")\n segment_url = os.path.join(streaming_url, i)\n tf.write(requests.get(segment_url).content)\n with open(path, \"wb\") as f:\n tf.seek(0)\n f.write(tf.read())\n self.logger.info(f\"Downloaded {path} successfully\")\n\n def _is_page_url(self, url):\n page_regex = re.compile(r\".+courses/\\d+/pages/.+\")\n matches = page_regex.match(url)\n return bool(matches)\n\n def _is_assignment_url(self, url):\n page_regex = re.compile(r\".+courses/\\d+/assignments/.+\")\n matches = page_regex.match(url)\n return bool(matches)\n\n def _page_url_to_item(self, url, requester):\n return self._url_to_item(url, requester, \"page_url\")\n\n def _assignment_url_to_item(self, url, requester):\n return self._url_to_item(url, requester, \"content_id\")\n\n def _url_to_item(self, url, requester, attrname):\n segments = url.split(\"/\")\n course_idx = segments.index(\"courses\")\n course_id = segments[course_idx + 1]\n name = segments[-1]\n item = types.SimpleNamespace()\n item.course_id = course_id\n item._requester = requester\n setattr(item, attrname, name)\n return item\n\n\n def _markdownify(self, src_path, dest_path):\n if self._should_write(dest_path):\n self.logger.info(f\"Converting {src_path} to markdown\")\n with open(src_path, \"r\") as f:\n src = f.read()\n with open(dest_path, \"w\") as f:\n f.writelines(md(src))\n\n def _should_write(self, path):\n if os.path.isfile(path) and self.overwrite == \"no\":\n self.logger.debug(f\"Skipping file {path}\")\n return False\n elif (self.overwrite == \"ask\" and\n input(f\"{path} already exists, overwrite? 
(y/n)\") != \"y\"):\n return False\n # Ensure folder exists before writing\n os.makedirs(os.path.dirname(path), exist_ok=True)\n return True\n\n def _push_logger(self, name):\n self._loggers.append(self.logger.getChild(name))\n\n def _pop_logger(self):\n self._loggers.pop(-1)\n\n def _push_name(self, name):\n self._names.append(name)\n self._mkd(self.path)\n\n def _pop_name(self):\n self._names.pop(-1)\n\n def _push_id(self, id):\n self._ids.append(id)\n\n def _pop_id(self):\n self._ids.pop(-1)\n\n\n\n\n","repo_name":"Gigahawk/canvas-file-scraper","sub_path":"canvas_file_scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":25502,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"9901391355","text":"import torch\nimport torchmetrics\nimport pytorch_lightning as pl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nfrom typing import Dict, List\n\nclass ModelBase(pl.LightningModule):\n def __init__(self, metrics: Dict, classes: List[str]):\n super().__init__()\n self.save_hyperparameters(ignore='metrics')\n self.cm_metric = metrics.pop('CM', None)\n metrics = torchmetrics.MetricCollection(metrics)\n self.val_metrics = metrics.clone(f\"val/\")\n self.test_metrics = metrics.clone(f\"test/\")\n self.classes = classes\n self.example_input_array = torch.zeros(1,2,1024)\n\n def training_step(self, batch, batch_nb):\n data, target = batch\n output = self.forward(data)\n loss = self.loss(output, target)\n if self.global_step!= 0: self.logger.log_metrics({'train/loss': loss, 'epoch': self.current_epoch}, self.global_step)\n return loss\n\n def validation_step(self, batch, batch_nb):\n data, target = batch\n output = self.forward(data)\n self.val_metrics.update(output, target)\n if self.cm_metric: self.cm_metric.update(output, target)\n\n def validation_epoch_end(self, outputs):\n metrics_dict = self.val_metrics.compute()\n self.val_metrics.reset()\n if self.global_step!=0: self.logger.log_metrics(metrics_dict, self.global_step)\n \n if self.cm_metric:\n mpl.use(\"Agg\")\n fig = plt.figure(figsize=(13, 13))\n cm = self.cm_metric.compute().cpu().detach().numpy()\n self.cm_metric.reset()\n ax = sns.heatmap(cm, annot=True, fmt=\".2f\", cbar=False)\n # labels, title and ticks\n ax.set_xlabel('Predicted labels')\n ax.set_ylabel('True labels')\n ax.set_title('Confusion Matrix')\n ax.xaxis.set_ticklabels(self.classes, rotation=90)\n ax.yaxis.set_ticklabels(self.classes, rotation=0)\n plt.tight_layout()\n self.logger.experiment.add_figure(\"val/cm\", fig, global_step=self.global_step)\n \n def test_step(self, batch, batch_nb):\n data, target = batch\n output = self.forward(data)\n self.test_metrics.update(output, target)\n if self.cm_metric: self.cm_metric.update(output, target)\n return {\"test_out\": output, \"test_true\": target}\n\n def test_epoch_end(self, outputs):\n metrics_dict = self.test_metrics.compute()\n self.test_metrics.reset()\n if self.global_step!= 0: self.logger.log_metrics(metrics_dict, self.global_step)\n \n if self.cm_metric:\n mpl.use(\"Agg\")\n fig = plt.figure(figsize=(13, 13))\n cm = self.cm_metric.compute().cpu().detach().numpy()\n self.cm_metric.reset()\n ax = sns.heatmap(cm, annot=True, fmt=\".2f\", cbar=False)\n # labels, title and ticks\n ax.set_xlabel('Predicted labels')\n ax.set_ylabel('True labels')\n ax.set_title('Confusion Matrix')\n ax.xaxis.set_ticklabels(self.classes, rotation=90)\n ax.yaxis.set_ticklabels(self.classes, 
rotation=0)\n plt.tight_layout()\n self.logger.experiment.add_figure(\"test/cm\", fig, global_step=self.global_step)\n \n test_true = torch.cat([x['test_true'] for x in outputs])\n test_out = torch.cat([x['test_out'] for x in outputs])\n","repo_name":"SpontaneousDuck/pytorch_lightning_base","sub_path":"models/ModelBase.py","file_name":"ModelBase.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30071436444","text":"from falcon import HTTPMethodNotAllowed, HTTP_ACCEPTED\nfrom zopsm.lib.rest.serializers import ZopsBaseDBSerializer\n\nfrom zopsm.lib.rest.custom import ZopsRetrieveUpdateDeleteApi\n\nclass ContactSerializer(ZopsBaseDBSerializer):\n pass\n\n\nclass Contact(ZopsRetrieveUpdateDeleteApi):\n \"\"\"\n Allows to remove a contact from contact list.\n\n #### DELETE:\n Removes the contact with given id from contact list.\n ##### Request:\n ```bash\n #bash\n curl \\\\\n --request DELETE \\\\\n --header \"Content-Type: application/json; charset=utf-8\" \\\\\n --header \"Authorization: Token a3449bd777754fd1ab284ebcc8c878c23cea6b2295bd41ddbe289f03801402e6\" \\\\\n https://api_baseurl/v1/roc/contacts/c06d227d7f36477db80d76c3a5d643d4\n\n ```\n\n ```python\n # python\n import requests\n\n header = {\"Content-Type\": \"application/json; charset=utf-8\",\n \"Authorization\": \"Token a3449bd777754fd1ab284ebcc8c878c23cea6b2295bd41ddbe289f03801402e6\"}\n\n req = requests.delete(\"https://api_baseurl/v1/roc/contacts/c06d227d7f36477db80d76c3a5d643d4\",\n headers=header)\n\n ```\n ##### Response:\n 202 Accepted.\n ```json\n {\n \"content\": {\n \"trackingId\": \"178c700a-f01f-4db8-93d8-8060e51f38ff\"\n },\n \"meta\": {\n \"params\": {\n \"indent\": 0\n }\n }\n }\n ```\n > Warning\n >\n > Error response of this request, if any, will be delivered via WebSocket connection with\n > `trackingId` obtained from the response.\n\n ### Possible Errors\n - __Bad Request__: Probably it was made a request with invalid resource id.\n - __Object Not Found__: Probably you try to get, update or delete a non-existent resource.\n - __Method Not Allowed__: Probably it was made a request except DELETE or OPTIONS.\n\n \"\"\"\n\n serializer = ContactSerializer()\n\n def __repr__(self):\n return \"Contact Delete\"\n\n def __str__(self):\n return self.__repr__()\n\n def delete(self, params, meta, **kwargs):\n user = kwargs.get('context').get('user') # user dict\n user_id = user.get('user')\n project_id = user.get('project')\n service = user.get('service')\n contact_id = kwargs.get('subscriber_id')\n\n self.check_resource_id(contact_id)\n body = {\n \"project_id\": project_id,\n \"subscriber_id\": user_id,\n \"contact_id\": contact_id,\n \"service\": service,\n }\n rpc_params = self.rpc_client.rpc_call(\"delete_contact\", body, blocking=False)\n return {\"trackingId\": rpc_params['tracking_id']}\n\n def retrieve(self, params, meta, **kwargs):\n \"\"\"Not Implemented\"\"\"\n raise HTTPMethodNotAllowed(self.allowed_methods())\n\n def update(self, params, meta, **kwargs):\n \"\"\"Not Implemented\"\"\"\n raise HTTPMethodNotAllowed(self.allowed_methods())\n\n\n","repo_name":"kunthar/zops-platform","sub_path":"zopsm/roc/resources/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"44956161323","text":"import os\nimport random\nimport autoaugment\nimport numpy as np\nfrom PIL import 
Image\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom collections import Counter\n\ndataset_stats = {\n 'CIFAR10' : {'mean': (0.49139967861519607, 0.48215840839460783, 0.44653091444546567),\n 'std' : (0.2470322324632819, 0.24348512800005573, 0.26158784172796434),\n 'size' : 32},\n 'CIFAR100': {'mean': (0.5070751592371323, 0.48654887331495095, 0.4409178433670343),\n 'std' : (0.2673342858792409, 0.25643846291708816, 0.2761504713256834),\n 'size' : 32}\n}\n\ndef get_transform(dataset_name='CIFAR100', aug_type='none'):\n\n if aug_type == 'weak':\n transform_weak = transforms.Compose(\n [\n transforms.RandomCrop(dataset_stats[dataset_name]['size'], padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(dataset_stats[dataset_name]['mean'], dataset_stats[dataset_name]['std']),\n ]\n )\n return transform_weak\n elif aug_type == 'strong':\n transform_strong = transforms.Compose(\n [\n autoaugment.RandomAugment(),\n transforms.ToTensor(),\n transforms.Normalize(dataset_stats[dataset_name]['mean'], dataset_stats[dataset_name]['std']),\n autoaugment.Cutout()\n ]\n )\n return transform_strong\n else:\n transform_none = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize(dataset_stats[dataset_name]['mean'], dataset_stats[dataset_name]['std']),\n ]\n )\n return transform_none\n\nclass dataset(Dataset):\n def __init__(self, args, task, train=True, lab=True, buffer=None):\n self.root = os.path.join(args.root, args.dataset)\n self.args = args\n self.train = train\n self.lab = lab\n self.transform = {\n \"labeled\": [\n get_transform(args.dataset, 'weak')\n ],\n \"unlabeled\": [\n get_transform(args.dataset, 'weak'),\n get_transform(args.dataset, 'strong')\n ],\n \"test\": [get_transform(args.dataset, 'none')]\n }\n\n if self.train:\n if self.lab:\n # load labeled data & label\n labeled_image_file = self.root + '/Train/Labeled/' + args.dataset + '_Images_Task' + str(task) + '_' + args.mode + '.npy'\n labeled_file = self.root + '/Train/Labeled/' + args.dataset + '_Labels_Task' + str(task) + '_' + args.mode + '.npy'\n\n train_xl = np.squeeze(np.load(labeled_image_file))\n train_yl = np.squeeze(np.load(labeled_file))\n self.train_xl = train_xl\n self.train_yl = train_yl\n\n if buffer is not None:\n max_size = len(buffer[0]) + len(self.train_xl)\n if max_size > args.buffer_size:\n buffer_size, remainder = divmod(args.buffer_size, (task + 1))\n else:\n buffer_size = len(self.train_xl)\n remainder = 0\n \n sample_list = list(range(len(self.train_xl)))\n sample_list = random.sample(sample_list, buffer_size)\n\n if task == 0:\n self.buffer_x, self.buffer_y = self.train_xl[sample_list], self.train_yl[sample_list]\n else:\n # Update buffer\n temp_buffer_x = []\n temp_buffer_y = []\n pre_size = self.args.buffer_size // (task)\n\n for k in range(task + 1):\n if k == task:\n if remainder != 0:\n sample_list = list(range(len(self.train_xl)))\n sample_list = random.sample(sample_list, buffer_size+remainder)\n temp_buffer_x.extend(self.train_xl[sample_list])\n temp_buffer_y.extend(self.train_yl[sample_list])\n else:\n step = (pre_size*k)+buffer_size\n temp_buffer_x.extend(buffer[0][pre_size*k:step])\n temp_buffer_y.extend(buffer[1][pre_size*k:step])\n \n temp_buffer_x = np.array(temp_buffer_x)\n temp_buffer_y = np.array(temp_buffer_y)\n\n self.buffer_x, self.buffer_y = temp_buffer_x, temp_buffer_y\n\n self.train_xl = np.concatenate((self.train_xl, buffer[0]), axis=0)\n self.train_yl = np.concatenate((self.train_yl, 
buffer[1]), axis=0)\n\n else:\n # load unlabeled data & label\n unlabeled_image_file = self.root + '/Train/Unlabeled/' + args.dataset + '_Images_Task' + str(task) + '_' + args.mode + '.npy'\n unlabeled_file = self.root + '/Train/Unlabeled/' + args.dataset + '_Labels_Task' + str(task) + '_' + args.mode + '.npy'\n\n train_xul = np.squeeze(np.load(unlabeled_image_file))\n train_yul = np.squeeze(np.load(unlabeled_file))\n self.train_xul = train_xul\n self.train_yul = train_yul\n\n else:\n # load test data & label\n self.test_x = []\n self.test_y = []\n\n for task_idx in range(task+1):\n test_image_file = self.root + '/Test/' + args.dataset + '_Images_Task' + str(task_idx) + '_' + args.mode + '.npy'\n test_label_file = self.root + '/Test/' + args.dataset + '_Labels_Task' + str(task_idx) + '_' + args.mode + '.npy'\n\n test_x = np.squeeze(np.load(test_image_file))\n test_y = np.squeeze(np.load(test_label_file))\n self.test_x.extend(test_x)\n self.test_y.extend(test_y)\n\n self.test_x = np.array(self.test_x)\n self.test_y = np.array(self.test_y)\n\n def __len__(self):\n if self.train:\n if self.lab: return len(self.train_xl)\n else: return len(self.train_xul)\n else:\n return len(self.test_x)\n\n def __getitem__(self, index):\n if self.train:\n if self.lab:\n img, target = 255*self.train_xl[index], self.train_yl[index]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img = self.transform[\"labeled\"][0](img)\n return img, target\n else:\n img, target = 255*self.train_xul[index], self.train_yul[index] \n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n weak = self.transform[\"unlabeled\"][0](img)\n strong = self.transform[\"unlabeled\"][1](img)\n return weak, strong, target\n else:\n img, target = 255*self.test_x[index], self.test_y[index]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img = self.transform['test'][0](img) \n return img, target\n\n\nclass dataloader():\n def __init__(self, args):\n self.args = args\n self.dataset_name = args.dataset\n self.split_size = 0\n self.buffer_x = []\n self.buffer_y = []\n\n def load(self, task, train=True):\n if train:\n labeled_dataset = dataset(self.args, task, train, lab=True, buffer=(self.buffer_x, self.buffer_y))\n unlabeled_dataset = dataset(self.args, task, train, lab=False, buffer=False)\n mu = int(unlabeled_dataset.__len__() / labeled_dataset.__len__())\n if mu == 0: mu = 1\n\n self.buffer_x = labeled_dataset.buffer_x\n self.buffer_y = labeled_dataset.buffer_y\n\n labeled_trainloader = DataLoader(labeled_dataset, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.num_workers)\n unlabeled_trainloader = DataLoader(unlabeled_dataset, batch_size=self.args.batch_size*mu, shuffle=True, num_workers=self.args.num_workers)\n\n return labeled_trainloader, unlabeled_trainloader\n\n else:\n test_dataset = dataset(self.args, task, train)\n test_loader = DataLoader(test_dataset, batch_size=self.args.test_size, shuffle=False, num_workers=self.args.num_workers)\n\n return test_loader","repo_name":"hopo55/On-Device-SSCL","sub_path":"CNLL-Based/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25043395896","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nANALYSIS_CHOICES = [\n (\"MD\", \"Matrix Decomposition\"),\n (\"FDBSS\", \"Fourier Domain Blind Source Separation\")\n]\n\nLOCATION_ID_CHOICES = [\n (\"31\", \"Right Cheek\"),\n 
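# The numeric IDs appear to index facial landmark positions used for each ROI.\n 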
(\"35\", \"Left Cheek\"),\n (\"27\", \"Upper Nose\"),\n (\"28\", \"Mid-Upper Nose\"),\n (\"29\", \"Mid-Lower Nose\"),\n (\"30\", \"Lower Nose\"),\n (\"17\", \"Left Outer Brow\"),\n (\"18\", \"Left Mid-Outer Brow\"),\n (\"19\", \"Left Mid Brow\"),\n (\"20\", \"Left Mid-Inner Brow\"),\n (\"21\", \"Left Inner Brow\"),\n (\"22\", \"Right Inner Brow\"),\n (\"23\", \"Right Mid-Inner Brow\"),\n (\"24\", \"Right Mid Brow\"),\n (\"25\", \"Right Mid-Outer Brow\"),\n (\"26\", \"Right Outer Brow\"),\n]\n\nROLE_CHOICES = [\n (\"RN\", \"Registered Nurse\"),\n (\"PHYS\", \"Physician\"),\n (\"PHARM\", \"Pharmacist\"),\n (\"RT\", \"Respiratory Therapist\"),\n]\n\nGENDER_CHOICES = [\n (\"M\", \"Male\"),\n (\"F\", \"Female\"),\n]\n\n# Create your models here.\nclass Device(models.Model):\n # Device ID (PK)\n\n # Model\n device_model = models.CharField(\n max_length=100,\n )\n\n serial_number = models.CharField(\n max_length=100,\n unique=True,\n )\n\n def __str__(self):\n return \"_\".join([\"PT\", format(self.id, \"04\")])\n\n\nclass Batch(models.Model):\n # Time the batch was created\n creation_time = models.DateTimeField(\n auto_now=False,\n )\n\n # Preprocessing analysis type\n preprocessing_analysis = models.CharField(\n max_length=100,\n choices=ANALYSIS_CHOICES,\n )\n\n device = models.ForeignKey(\n \"Device\",\n on_delete=models.CASCADE\n )\n\n analyzed = models.BooleanField(\n default=False\n )\n\n # When the object PK is displayed\n def __str__(self):\n return \"_\".join([\"B\", format(self.id, \"05\")])\n\n\nclass ROI(models.Model):\n # ROI ID (PK)\n\n # Location ID\n location_id = models.CharField(\n max_length=100,\n choices=LOCATION_ID_CHOICES,\n )\n\n # Batch ID\n batch = models.ForeignKey(\n 'Batch',\n on_delete=models.CASCADE\n )\n \n # Data\n red_data = models.TextField()\n blue_data = models.TextField()\n green_data = models.TextField()\n \n # Collection Time\n collection_time = models.DateTimeField(\n auto_now=True,\n )\n\n # Analyzed by the Heart Rate Algorithm\n hr_analyzed = models.BooleanField(\n default=False\n )\n\n # Analyzed by the Respiratory Rate Algorithm\n rr_analyzed = models.BooleanField(\n default=False\n )\n\n # Currently being analyzed\n analysis_in_progress = models.BooleanField(\n default=False\n )\n\n # Device ID (FK)\n device = models.ForeignKey(\n \"Device\",\n on_delete=models.CASCADE\n )\n\n preprocessing_analysis = models.CharField(\n max_length=100,\n choices=ANALYSIS_CHOICES,\n )\n\n # When the object PK is displayed\n def __str__(self):\n return \"_\".join([\"ROI\", format(self.id, \"05\")])\n\n\nclass User(AbstractUser):\n class Meta:\n ordering = ['id']\n\n is_patient = models.BooleanField(default=False)\n is_health_care = models.BooleanField(default=False)\n \n USERNAME_FIELD = 'username'\n\n\nclass Patient(models.Model):\n\n # Patient ID (PK)\n user = models.OneToOneField(\n User, \n on_delete=models.CASCADE,\n )\n \n # Date of Birth\n birth_date = models.DateField(\n )\n\n # Gender\n gender = models.CharField(\n max_length=100,\n choices=GENDER_CHOICES,\n )\n\n # TODO: some sort of convention for lbs or kgs\n # Weight (in kg)\n weight = models.DecimalField(\n max_digits=6,\n decimal_places=2,\n )\n\n # TODO: some sort of convention for ft or m\n # Height (in cm)\n height = models.DecimalField(\n max_digits=6,\n decimal_places=2\n )\n\n # Health Conditions\n health_conditions = models.TextField(\n )\n\n # Current Medications\n\n # Device ID (FK)\n device = models.OneToOneField(\n \"Device\",\n on_delete=models.CASCADE\n )\n\n # Health Care Provider (FK)\n 
health_care_provider = models.ManyToManyField(\n 'HealthCare'\n )\n \n\n # When the object PK is displayed\n def __str__(self):\n return \"_\".join([\"P\", format(self.id, \"04\")])\n\n\nclass HealthCare(models.Model):\n # Health Care ID (PK) \n user = models.OneToOneField(\n User, \n on_delete=models.CASCADE,\n #primary_key=True\n )\n\n role = models.CharField(\n max_length=100,\n choices=ROLE_CHOICES,\n )\n\n # When the object PK is displayed\n def __str__(self):\n return \" \".join([self.user.first_name, self.user.last_name])\n\n\nclass HeartRate(models.Model):\n # Heart Rate ID (PK)\n\n # Heart Rate Number\n heart_rate = models.DecimalField(\n blank=True,\n max_digits=5,\n decimal_places=2,\n )\n\n # Data\n data = models.TextField(\n blank=True,\n null=True\n )\n\n # Collection Time\n analyzed_time = models.DateTimeField(\n auto_now=True,\n )\n\n # Patient ID (FK)\n patient = models.ForeignKey(\n \"Patient\",\n on_delete=models.CASCADE,\n )\n\n # ROI (FK)\n # TODO: Use the following once windowing is introduced\n '''\n roi = models.ForeignKey(\n \"ROI\",\n on_delete=models.CASCADE\n )\n '''\n # TODO: Remove once we introduce windowing?\n batch = models.ForeignKey(\n 'Batch',\n on_delete=models.CASCADE,\n )\n\n # When the object PK is displayed\n def __str__(self):\n return \"_\".join([\"HR\", format(self.id, \"04\")])\n\n\nclass RespiratoryRate(models.Model):\n # Respiratory Rate ID (PK)\n\n # Respiratory Rate Number\n respiratory_rate = models.DecimalField(\n blank=True,\n max_digits=5,\n decimal_places=2,\n )\n\n # Data\n data = models.TextField(\n blank=True,\n null=True\n )\n\n # Collection Time\n analyzed_time = models.DateTimeField(\n auto_now=True,\n )\n\n # Patient ID (FK)\n patient = models.ForeignKey(\n \"Patient\",\n on_delete=models.CASCADE,\n )\n\n # ROI (FK)\n # TODO: Use the following once windowing is introduced?\n '''\n roi = models.ForeignKey(\n \"ROI\",\n on_delete=models.CASCADE\n )\n '''\n # TODO: Remove once we introduce windowing?\n # Batch (FK)\n batch = models.ForeignKey(\n 'Batch',\n on_delete=models.CASCADE\n )\n\n # When the object PK is displayed\n def __str__(self):\n return \"_\".join([\"RR\", format(self.id, \"04\")])\n","repo_name":"brittanyhewitson/pulse_tracer","sub_path":"spectrum_metrics/pulse_tracer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30032431375","text":"import subprocess\nimport sys\nimport time\nimport os\nimport socket\nfrom tabulate import tabulate\nfrom dotenv import dotenv_values\n\nenv = dotenv_values(\".env\")\nHOST = env['SOCKETIP'] # Standard loopback interface address (localhost)\nPORT = int(env['SOCKETPORT']) # Port to listen on (non-privileged ports are > 1023)\nUSERDATAPATH = env['USERDATAPATH']\nAPPLICATIONPATH = env['APPLICATIONPATH']\n\ndef show_help():\n print(\"Usage:\")\n print(tabulate([[\"--help\", \"-h\", \"shows this help\"],\n [\"--world worldname\", \"-w worldname\", \"(re-)start foundry with given world\"],\n [\"--socket\", \"-s\", \"start script in socket-mode\"],\n [\"--list-worlds\", \"-l\", \"lists all available worlds\"]]))\n\n pass\n\n\ndef check_foundry_running():\n screens = subprocess.getoutput(\"/usr/bin/screen -ls\")\n if \"foundryserver\" in screens:\n return True\n else:\n return False\n\n\ndef kill_foundry():\n subprocess.call([\"screen\", \"-r\", \"foundryserver\", \"-X\", \"stuff\", \"'^C'\"])\n return\n\n\ndef get_worlds(userdatapath):\n folder_scan = 
os.scandir(userdatapath + \"Data/worlds/\")\n folder_list = []\n for entry in folder_scan:\n if entry.is_dir():\n folder_list.append(entry.name.lower())\n return folder_list\n\n\ndef check_if_world_exits(user_data_path, world):\n folder_list = get_worlds(user_data_path)\n if world.lower() in folder_list:\n return True\n else:\n return False\n\n\ndef restart_foundry(user_data_path, world=None):\n if (world is None) or (check_if_world_exits(user_data_path, world)):\n if check_foundry_running():\n kill_foundry()\n\n time.sleep(5)\n x = 0\n while check_foundry_running():\n x += 1\n time.sleep(5)\n if x > 5:\n x = 0\n kill_foundry()\n\n if world is None:\n world_parameter = \"\"\n else:\n world_parameter = f\"--world={world}\"\n subprocess.call([\"screen\", \"-dmS\", \"foundryserver\", \"node\", APPLICATIONPATH,\n f\"--dataPath={user_data_path}\", world_parameter])\n else:\n print(f\"World {world} doesn't exist! Choose one of the following:\")\n print(get_worlds(user_data_path))\n # raise NotFound(f\"World {world} doesn't exist! Choose one of the following:\\n\" + get_worlds(user_data_path))\n\n\n\narguments = sys.argv\nif len(arguments) > 3:\n print(f\"Too many arguments\")\n show_help()\n sys.exit()\nelif len(arguments) == 1:\n restart_foundry(USERDATAPATH, world=None)\nelif len(arguments) >= 2:\n if arguments[1] in [\"--socket\", \"-s\"]:\n if not check_foundry_running():\n restart_foundry(USERDATAPATH)\n # Bind under a distinct name so the socket module itself is not shadowed\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n server.bind((HOST, PORT))\n server.listen()\n while True:\n conn, addr = server.accept()\n with conn:\n conn.settimeout(30)\n while True:\n data = conn.recv(1048576).decode()\n if data == \"getworlds\":\n conn.send(str(get_worlds(USERDATAPATH)).encode())\n elif data.startswith(\"setworld\"):\n world = data.split(\" \")[1]\n if world.lower() == \"none\":\n world = None\n restart_foundry(USERDATAPATH, world)\n conn.send(\"The world has been changed\".encode())\n elif data == \"exit\" or not data:\n break\n elif arguments[1] in [\"--list-worlds\", \"-l\"]:\n print(get_worlds(USERDATAPATH))\n elif arguments[1] in [\"-w\", \"--world\"]:\n try:\n world = arguments[2]\n except IndexError:\n print(\"Please provide a world\")\n print(get_worlds(USERDATAPATH))\n else:\n restart_foundry(USERDATAPATH, world=arguments[2])\n elif arguments[1] in [\"--help\", \"-h\"]:\n show_help()\n else:\n print(f\"Unknown parameter: {arguments[1]}\")\n show_help()\n","repo_name":"Grizzly-ger/PnPBot","sub_path":"foundrysocket.py","file_name":"foundrysocket.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71733457174","text":"import analyzer\n\nif __name__ == \"__main__\":\n sequence_miner_path = '../build/app/miner'\n\n # files = ['synthetic10k.spmf', 'synthetic20k.spmf', 'synthetic50k.spmf']\n files = ['bible.spmf']\n files_directory = '../data/'\n min_support = [0.3, 0.2, 0.1]\n algorithms = ['PrefixSpan', 'SPADE']\n # algorithms = ['PrefixSpan']\n\n results_file = './prefixspan_vs_spade_bibile.csv'\n # results_file = './temp.csv'\n\n with open(results_file, 'w') as fw:\n fw.write('algorithm,input_file,all_sequences,min_support,frequent_sequences,time,real_time\\n')\n\n for f in files:\n file_path = files_directory + f\n lines = analyzer.count_lines(file_path)\n for sup in min_support:\n for alg in algorithms:\n print('Running {} for file {} with support {}'.format(alg, f, sup))\n out_ps = 'out_{}_{}_{}.txt'.format(f[:-5], sup, alg)\n ps = analyzer.benchmark(sequence_miner_path, 
file_path, alg, sup, out_ps, True)\n\n with open(results_file, 'a') as fa:\n fa.write('{},{},{},{},{},{},{}\\n'.format(alg,f,lines,sup,ps['sequences_count'],ps['time'], ps['real_time']))\n","repo_name":"adamnapieralski/sequence-miner","sub_path":"performance_analysis/prefixspan_vs_spade.py","file_name":"prefixspan_vs_spade.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26274286240","text":"from draw import draw_pixbuf, propagate_expose, draw_vlinear, cairo_state\nfrom theme import ui_theme\nfrom skin_config import skin_config\nfrom utils import get_window_shadow_size\nimport gobject\nimport gtk\n\nclass EventBox(gtk.EventBox):\n '''\n Event box, not like Gtk.EventBox, it don't show visible window default.\n '''\n\t\n def __init__(self):\n '''\n Initialize the EventBox class.\n '''\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n \nclass ImageBox(gtk.EventBox):\n '''\n ImageBox.\n \n @undocumented: expose_image_box\n '''\n\t\n def __init__(self, image_dpixbuf):\n '''\n Initialize the ImageBox class.\n\n @param image_dpixbuf: Image dynamic pixbuf.\n '''\n # Init.\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n self.image_dpixbuf = image_dpixbuf\n \n # Set size.\n pixbuf = self.image_dpixbuf.get_pixbuf()\n self.set_size_request(pixbuf.get_width(), pixbuf.get_height())\n \n # Connect expose signal.\n self.connect(\"expose-event\", self.expose_image_box)\n \n def expose_image_box(self, widget, event):\n '''\n Callback for `expose-event` signal.\n\n @param widget: Gtk.Widget instance.\n @param event: Expose event.\n '''\n # Init.\n cr = widget.window.cairo_create()\n rect = widget.allocation\n pixbuf = self.image_dpixbuf.get_pixbuf()\n \n # Draw.\n draw_pixbuf(cr, pixbuf, rect.x, rect.y)\n \n # Propagate expose.\n propagate_expose(widget, event)\n \n return True\n \ngobject.type_register(ImageBox)\n\nclass BackgroundBox(gtk.VBox):\n '''\n BackgroundBox is container for clip background.\n \n @undocumented: expose_background_box\n '''\n\t\n def __init__(self):\n '''\n Initialize the BackgroundBox class.\n '''\n # Init.\n gtk.VBox.__init__(self)\n self.set_can_focus(True)\n \n self.connect(\"expose-event\", self.expose_background_box)\n \n def draw_mask(self, cr, x, y, w, h):\n '''\n Mask render function.\n \n @param cr: Cairo context.\n @param x: X coordinate of draw area.\n @param y: Y coordinate of draw area.\n @param w: Width of draw area.\n @param h: Height of draw area.\n '''\n draw_vlinear(cr, x, y, w, h,\n ui_theme.get_shadow_color(\"linear_background\").get_color_info()\n )\n \n def expose_background_box(self, widget, event):\n '''\n Callback for `expose-event` signal.\n\n @param widget: BackgroundBox self.\n @param event: Expose event. \n @return: Always return False. 
\n '''\n cr = widget.window.cairo_create()\n rect = widget.allocation\n toplevel = widget.get_toplevel()\n coordinate = widget.translate_coordinates(toplevel, rect.x, rect.y)\n (offset_x, offset_y) = coordinate\n \n with cairo_state(cr):\n cr.rectangle(rect.x, rect.y, rect.width, rect.height)\n cr.clip()\n \n (shadow_x, shadow_y) = get_window_shadow_size(toplevel)\n skin_config.render_background(cr, widget, shadow_x, shadow_y)\n \n self.draw_mask(cr, rect.x, rect.y, rect.width, rect.height) \n\n return False\n \ngobject.type_register(BackgroundBox)\n\n","repo_name":"liuhuan520/deepin-ui","sub_path":"dtk/ui/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"70339117974","text":"\n# Simple loop for displaying predictions for random slices from the test dataset\n#\n# Usage:\n#\n# python test_loop.py path/to/experiment_logs\n#\n#\n# Authors:\n# Christian F. Baumgartner (c.f.baumgartner@gmail.com)\n# Lisa M. Koch (lisa.margret.koch@gmail.com)\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport glob\nfrom importlib.machinery import SourceFileLoader\nimport argparse\nimport h5py\nimport config.system as sys_config\nimport utils\nfrom image_utils import print_coloured, print_grayscale\nimport image_utils\nimport matplotlib.pyplot as plt\nimport model\nfrom random_walker import segment\nimport acdc_data\nfrom medpy.metric.binary import dc\n\nOUTPUT_FOLDER = sys_config.project_root + 'poster/images/'\nfig = plt.figure()\ndef dice(result, reference):\n d = 0\n res = np.squeeze(result)\n ref = np.squeeze(reference)\n for layer in range(1, 3):\n d+=dc(res==layer, ref==layer)\n\n return d/3\n\n\n\ndef main(ws_exp_config, slices, test):\n # Load data\n exp_dir = sys_config.project_root + 'acdc_logdir/' + ws_exp_config.experiment_name + '/'\n base_data = h5py.File(os.path.join(exp_dir, 'base_data.hdf5'), 'r')\n\n # Get number of recursions\n num_recursions = acdc_data.most_recent_recursion(sys_config.project_root + 'acdc_logdir/' + ws_exp_config.experiment_name)\n print(num_recursions)\n\n num_recursions+=1\n # Get images\n batch_size = len(slices)\n\n if test:\n slices = slices[slices < len(base_data['images_test'])]\n images = base_data['images_test'][slices, ...]\n gt = base_data['masks_test'][slices, ...]\n prefix='test'\n else:\n slices = slices[slices < len(base_data['images_train'])]\n images = base_data['images_train'][slices, ...]\n gt = base_data['masks_train'][slices, ...]\n scr = base_data['scribbles_train'][slices, ...]\n prefix='train'\n\n image_tensor_shape = [batch_size] + list(ws_exp_config.image_size) + [1]\n images_pl = tf.placeholder(tf.float32, shape=image_tensor_shape, name='images')\n feed_dict = {\n images_pl: np.expand_dims(images, -1),\n }\n\n #Get weak supervision predictions\n mask_pl, softmax_pl = model.predict(images_pl, ws_exp_config.model_handle, ws_exp_config.nlabels)\n saver = tf.train.Saver()\n init = tf.global_variables_initializer()\n predictions = np.zeros([batch_size] + list(ws_exp_config.image_size) + [num_recursions])\n predictions_klc = np.zeros_like(predictions)\n predictions_rw = np.zeros_like(predictions)\n with tf.Session() as sess:\n sess.run(init)\n for recursion in range(num_recursions):\n try:\n try:\n checkpoint_path = utils.get_latest_model_checkpoint_path(ws_model_path,\n 'recursion_{}_model_best_xent.ckpt'.format(recursion))\n except:\n try:\n checkpoint_path = 
utils.get_latest_model_checkpoint_path(ws_model_path,\n 'recursion_{}_model_best_dice.ckpt'.format(recursion))\n except:\n checkpoint_path = utils.get_latest_model_checkpoint_path(ws_model_path,\n 'recursion_{}_model.ckpt'.format(recursion))\n saver.restore(sess, checkpoint_path)\n mask_out, _ = sess.run([mask_pl, softmax_pl], feed_dict=feed_dict)\n predictions[..., recursion] = mask_out\n for i in range(batch_size):\n predictions_klc[i, :, :, recursion] = image_utils.keep_largest_connected_components(mask_out[i, ...])\n\n predictions_rw[..., recursion] = segment(images, np.squeeze(predictions_klc[..., recursion]), beta=ws_exp_config.rw_beta, threshold=0)\n\n print(\"Classified for recursion {}\".format(recursion))\n except Exception:\n predictions[..., recursion] = -1*np.zeros_like(predictions[..., recursion])\n print(\"Could not find checkpoint for recursion {} - skipping\".format(recursion))\n\n\n for i in range(batch_size):\n pref = '{}{}'.format(prefix, slices[i])\n\n print_grayscale(images[i, ...], filepath=OUTPUT_FOLDER, filename='{}_image'.format(pref))\n print_coloured(gt[i, ...], filepath=OUTPUT_FOLDER, filename='{}_gt'.format(pref))\n for recursion in range(num_recursions):\n if np.max(predictions[i, :, :, recursion]) >= -0.5:\n print_coloured(predictions[i, :, :, recursion], filepath=OUTPUT_FOLDER, filename=\"{}_ws_pred_r{}\".format(pref, recursion))\n print_coloured(predictions_klc[i, :, :, recursion], filepath=OUTPUT_FOLDER, filename=\"{}_ws_pred_klc_r{}\".format(pref, recursion))\n print_coloured(predictions_rw[i, :, :, recursion], filepath=OUTPUT_FOLDER, filename=\"{}_ws_pred_klc_rw_r{}\".format(pref,recursion))\n print(\"Dice coefficient for slice {} is {}\".format(slices[i],\n dice(predictions_rw[i, :, :, recursion], gt[i, ...])))\n if not test:\n print_coloured(scr[i, ...], filepath=OUTPUT_FOLDER, filename='{}_scribble'.format(pref))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description=\"Script for a simple test loop evaluating a 2D network on slices from the test dataset\")\n parser.add_argument(\"WS_EXP_PATH\", type=str, help=\"Path to weakly supervised experiment folder \"\n \"(assuming you are in the working directory)\")\n\n parser.add_argument(\"TEST_DATA\", type=int, help=\"If 1, will use test data. 
Else will use training data\")\n\n parser.add_argument(\"SLICE_NUMBERS\", type=int, help=\"Indices of desired images\", nargs=\"+\")\n\n args = parser.parse_args()\n base_path = sys_config.project_root\n\n ws_model_path = os.path.join(base_path, args.WS_EXP_PATH)\n ws_config_file = glob.glob(ws_model_path + '/*py')[0]\n ws_config_module = ws_config_file.split('/')[-1][:-len('.py')]\n\n ws_exp_config = SourceFileLoader(ws_config_module, os.path.join(ws_config_file)).load_module()\n\n test_data = (args.TEST_DATA == 1)\n\n main(ws_exp_config=ws_exp_config,\n slices=np.sort(np.unique(args.SLICE_NUMBERS)),\n test=test_data)","repo_name":"baumgach/basil_scribble_supervision","sub_path":"get_ws_images.py","file_name":"get_ws_images.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34797895490","text":"from gpiozero import DistanceSensor\nimport numpy as np\nimport simpleaudio\nimport time\nimport json\n\nTRIG_PIN, ECHO_PIN = 23, 24\nDIST_M_MAX = 2\nTO_CM_SCALING = 100\n\nUPDATE_ALERT_PERIOD_SEC = 0.5\nUPDATE_ALERT_PRI = 3\nupdate_conditions = None\n\nALERT_PERIOD_SEC = 1\nALERT_PRI = 2\nalert_mode = 3\nALERT_SAMPLING_RATE = 44100\naudio_waves = []\n\n\ndef alert_user(scheduler):\n\tscheduler.enter(ALERT_PERIOD_SEC, ALERT_PRI, alert_user, (scheduler, ))\n\t# print(f\"ALERT TYPE {alert_mode}! @ {time.ctime()}\")\n\tif alert_mode is not None:\n\t\tsimpleaudio.play_buffer(audio_waves[alert_mode], 1, 2, ALERT_SAMPLING_RATE)\n\n\ndef update_alert_mode(scheduler, distance_sensor):\n\tscheduler.enter(UPDATE_ALERT_PERIOD_SEC, UPDATE_ALERT_PRI, \n\t\t\t\t\tupdate_alert_mode, (scheduler, distance_sensor, ))\n\t\n\tglobal alert_mode\n\tif distance_sensor.value == 1:\n\t\talert_mode = None\n\t\treturn\n\treading = distance_sensor.distance * TO_CM_SCALING\n\tfor mode, cond in enumerate(update_conditions):\n\t\tif reading <= cond[\"threshold\"]:\n\t\t\talert_mode = mode\n\t\t\tbreak\n\n\ndef setup_audio_tracks():\n\tglobal audio_waves\n\tfor cond in update_conditions:\n\t\tT = cond[\"duration\"]\n\t\t# Setup waveform according to the sound config\n\t\ttimes = np.linspace(0, T, int(T * ALERT_SAMPLING_RATE), False)\n\t\twave = np.sin(cond[\"frequency\"] * 2 * np.pi * times)\n\t\t# Normalize to 16-bit range\n\t\twave *= 32767 / np.max(np.abs(wave))\n\t\twave = wave.astype(np.int16)\n\t\t# Save waveform as 16-bit samples to match play_buffer's 2 bytes per sample\n\t\taudio_waves.append(wave)\n\n\ndef setup_tasks(scheduler):\n\tdistance_sensor = DistanceSensor(trigger=TRIG_PIN, echo=ECHO_PIN, max_distance=DIST_M_MAX)\n\tglobal update_conditions\n\twith open(\"config/alert.json\", 'r') as config_file:\n\t\tupdate_conditions = json.load(config_file)\n\tsetup_audio_tracks()\n\t\n\tscheduler.enter(ALERT_PERIOD_SEC, ALERT_PRI, alert_user, (scheduler, ))\n\tscheduler.enter(UPDATE_ALERT_PERIOD_SEC, UPDATE_ALERT_PRI, \n\t\t\t\t\tupdate_alert_mode, (scheduler, distance_sensor, ))\n","repo_name":"n3slami/Smart_Shoe","sub_path":"high_freq/distance_manager.py","file_name":"distance_manager.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20622249228","text":"def Main():\n RandomWord = GetWord()\n print(RandomWord)\n RandomLetter = GetLetter(RandomWord)\n GuessWord(RandomWord, RandomLetter)\n \ndef GetWord():\n WordsList = []\n LetterList = []\n \n HangmanWords = open('words.txt', 'r')\n\n word = HangmanWords.readline()\n word = word.rstrip('\\n')\n\n while word:\n 
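# readline() returns an empty string at end of file, which ends this loop.\n 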
WordsList.append(word)\n word = HangmanWords.readline()\n word = word.rstrip('\\n')\n\n import random\n RandomWord = random.choice(WordsList)\n return RandomWord\n\ndef GetLetter(RandomWord): \n import random\n RandomLetterIndex = random.randint(0, len(RandomWord)- 1)\n RandomLetter = RandomWord[RandomLetterIndex]\n return RandomLetter\n\ndef GuessWord(RandomWord, RandomLetter):\n ResultString = ''\n UpdatedDashString = ''\n DashString = '_ '\n score = 0\n guessed = set([RandomLetter])\n ResultList = []\n\n for num in range(len(RandomWord)):\n if RandomWord[num] == RandomLetter:\n UpdatedDashString += RandomLetter\n else:\n UpdatedDashString += DashString\n print(UpdatedDashString)\n \n UserInput = GetInput()\n ResultList = UpdatedDashString.split(' ')\n if UserInput in guessed:\n print(\"Incorrect entry\")\n else:\n for num in range(len(RandomWord)):\n if RandomWord[num] == UserInput:\n ResultList[num] = UserInput\n score += 1 \n guessed.add(UserInput)\n print(ResultList)\n \n ResultString = \" \".join(ResultList)\n print(ResultString)\n print(guessed)\n \ndef GetInput():\n print('')\n UserInput = input('Please enter a letter from \"a\" to \"z\": ')\n return UserInput\n\nMain()\n\n\n\n\n \n \n\n\n\n","repo_name":"ahabib1999/python-hangman","sub_path":"proj code.py","file_name":"proj code.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"896602898","text":"# -*- coding: utf-8 -*-\r\n\r\n__author__ = 'Takashi Yahata (@paoneJP)'\r\n__copyright__ = 'Copyright (c) 2014, Takashi Yahata'\r\n__license__ = 'MIT License'\r\n\r\n\r\nSERVER_NAME = 'localhost'\r\nSERVER_PORT = 8080\r\n\r\nLOG_FILENAME = 'logs/access_log'\r\nLOG_MAX_BYTES = 1000000\r\nLOG_BACKUP_COUNT = 1\r\n","repo_name":"paoneJP/python-SelfIop","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39895870582","text":"import os\n\n\ndef main(argv):\n # Create the my_project directory (if it does not exist)\n dir_name = 'my_project'\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n\n # Go into the my_project directory\n os.chdir('my_project')\n\n # Create all the other directories inside my_project (if they do not exist)\n dir_name = 'settings'\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n dir_name = 'mainapp'\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n dir_name = 'authapp'\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n\n # Create the required files in the settings directory\n open('settings/__init__.py', 'w')\n open('settings/dev.py', 'w')\n open('settings/prod.py', 'w')\n\n # Create the required files in the mainapp directory\n open('mainapp/__init__.py', 'w')\n open('mainapp/models.py', 'w')\n open('mainapp/views.py', 'w')\n # Go inside, create the extra directories there, and come back out\n os.chdir('mainapp')\n dir_path = os.path.join('templates', 'mainapp')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n os.chdir('..')\n # Create the required files in these directories\n open('mainapp/templates/mainapp/base.html', 'w')\n open('mainapp/templates/mainapp/index.html', 'w')\n\n\n # Create the required files in the authapp directory\n open('authapp/__init__.py', 'w')\n open('authapp/models.py', 'w')\n open('authapp/views.py', 'w')\n # Go inside, create the extra directories there, and come back out\n os.chdir('authapp')\n dir_path = os.path.join('templates', 'authapp')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n os.chdir('..')\n # Create the required files in these directories\n open('authapp/templates/authapp/base.html', 'w')\n open('authapp/templates/authapp/index.html', 'w')\n\n # Leave the my_project directory\n os.chdir('..')\n\n return 0\n\n\nif __name__ == '__main__':\n import sys\n\n exit(main(sys.argv))\n","repo_name":"Krooper/PythonHW","sub_path":"Python_HW7/2_create_script.py","file_name":"2_create_script.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5861210409","text":"from __future__ import division\nimport argparse\n\nfrom PIL import Image\nimport numpy as np\nimport gym\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Convolution2D, Permute\nfrom keras.optimizers import Adam\nimport keras.backend as K\n\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import LinearAnnealedPolicy, BoltzmannQPolicy, EpsGreedyQPolicy\nfrom rl.memory import SequentialMemory\nfrom rl.core import Processor\nfrom rl.callbacks import FileLogger, ModelIntervalCheckpoint\n\n\nINPUT_SHAPE = (84, 84)\nWINDOW_LENGTH = 4\n\n\nclass AtariProcessor(Processor):\n def process_observation(self, observation):\n assert observation.ndim == 3 # (height, width, channel)\n img = Image.fromarray(observation)\n img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale\n processed_observation = np.array(img)\n assert processed_observation.shape == INPUT_SHAPE\n return processed_observation.astype('uint8') # saves storage in experience memory\n\n def process_state_batch(self, batch):\n # We could perform this processing step in `process_observation`. In this case, however,\n # we would need to store a `float32` array instead, which is 4x more memory intensive than\n # a `uint8` array. This matters if we store 1M observations.\n processed_batch = batch.astype('float32') / 255.\n return processed_batch\n\n def process_reward(self, reward):\n return np.clip(reward, -1., 1.)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', choices=['train', 'test'], default='train')\nparser.add_argument('--env-name', type=str, default='BreakoutDeterministic-v4')\nparser.add_argument('--weights', type=str, default=None)\nargs = parser.parse_args()\n\n# Get the environment and extract the number of actions.\nenv = gym.make(args.env_name)\nnp.random.seed(123)\nenv.seed(123)\nnb_actions = env.action_space.n\n\n# Next, we build our model. We use the same model that was described by Mnih et al. 
(2015).\ninput_shape = (WINDOW_LENGTH,) + INPUT_SHAPE\nmodel = Sequential()\nif K.image_dim_ordering() == 'tf':\n # (width, height, channels)\n model.add(Permute((2, 3, 1), input_shape=input_shape))\nelif K.image_dim_ordering() == 'th':\n # (channels, width, height)\n model.add(Permute((1, 2, 3), input_shape=input_shape))\nelse:\n raise RuntimeError('Unknown image_dim_ordering.')\nmodel.add(Convolution2D(32, (8, 8), strides=(4, 4)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64, (4, 4), strides=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64, (3, 3), strides=(1, 1)))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dense(nb_actions))\nmodel.add(Activation('linear'))\nprint(model.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nmemory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)\nprocessor = AtariProcessor()\n\n# Select a policy. We use eps-greedy action selection, which means that a random action is selected\n# with probability eps. We anneal eps from 1.0 to 0.1 over the course of 1M steps. This is done so that\n# the agent initially explores the environment (high eps) and then gradually sticks to what it knows\n# (low eps). We also set a dedicated eps value that is used during testing. Note that we set it to 0.05\n# so that the agent still performs some random actions. This ensures that the agent cannot get stuck.\npolicy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,\n nb_steps=1000000)\n\n# The trade-off between exploration and exploitation is difficult and an on-going research topic.\n# If you want, you can experiment with the parameters or use a different policy. Another popular one\n# is Boltzmann-style exploration:\n# policy = BoltzmannQPolicy(tau=1.)\n# Feel free to give it a try!\n\ndqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,\n processor=processor, nb_steps_warmup=50000, gamma=.99, target_model_update=10000,\n train_interval=4, delta_clip=1.)\ndqn.compile(Adam(lr=.00025), metrics=['mae'])\n\nif args.mode == 'train':\n # Okay, now it's time to learn something! We capture the interrupt exception so that training\n # can be prematurely aborted. 
Notice that now you can use the built-in Keras callbacks!\n weights_filename = 'dqn_{}_weights.h5f'.format(args.env_name)\n checkpoint_weights_filename = 'dqn_' + args.env_name + '_weights_{step}.h5f'\n log_filename = 'dqn_{}_log.json'.format(args.env_name)\n callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=250000)]\n callbacks += [FileLogger(log_filename, interval=100)]\n dqn.fit(env, callbacks=callbacks, nb_steps=1750000, log_interval=10000)\n\n # After training is done, we save the final weights one more time.\n dqn.save_weights(weights_filename, overwrite=True)\n\n # Finally, evaluate our algorithm for 10 episodes.\n dqn.test(env, nb_episodes=10, visualize=False)\nelif args.mode == 'test':\n weights_filename = 'dqn_{}_weights.h5f'.format(args.env_name)\n if args.weights:\n weights_filename = args.weights\n dqn.load_weights(weights_filename)\n dqn.test(env, nb_episodes=10, visualize=True)\n","repo_name":"keras-rl/keras-rl","sub_path":"examples/dqn_atari.py","file_name":"dqn_atari.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":5450,"dataset":"github-code","pt":"67"} +{"seq_id":"21125731148","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n self.parent = None\n\"\"\"\n\nclass Solution:\n def lowestCommonAncestor(self, p: 'Node', q: 'Node') -> 'Node':\n i, j = p, q\n while i != j:\n i = i.parent if i.parent else q\n j = j.parent if j.parent else p\n return i","repo_name":"fxrcode/FG","sub_path":"1650-lowest-common-ancestor-of-a-binary-tree-iii/1650-lowest-common-ancestor-of-a-binary-tree-iii.py","file_name":"1650-lowest-common-ancestor-of-a-binary-tree-iii.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"10259694041","text":"import random\nimport math\nfrom pandac.PandaModules import NodePath, Point3\nfrom direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func\nfrom direct.interval.LerpInterval import LerpFunc\nfrom direct.task import Task\nfrom panda3d.core import TextNode\nimport FishingGlobals\nfrom FishFSM import FishFSM\nfrom BlendActor import BlendActor\nfrom pirates.effects.FishIdleBubbleEffect import FishIdleBubbleEffect\nfrom pirates.effects.FishBitingBubbleEffect import FishBitingBubbleEffect\nfrom pirates.effects.FishFightingHookedBubbleEffect import FishFightingHookedBubbleEffect\nimport MinigameUtils\nfrom pirates.uberdog.UberDogGlobals import InventoryType\n\nclass Fish(NodePath):\n\n def __init__(self, fishManager, myData, index, trophy=0):\n NodePath.__init__(self, '%s_%d' % (myData['name'], index))\n self.trophy = trophy\n self.myData = myData\n if not self.trophy:\n self.fishManager = fishManager\n self.index = index\n self.fsm = FishFSM(self)\n self.weight = random.randint(self.myData['weightRange'][0], self.myData['weightRange'][1])\n else:\n self.weight = trophy\n self.adjustedScale = (self.myData['scaleRange'][1] - self.myData['scaleRange'][0]) * (self.weight - self.myData['weightRange'][0]) / (self.myData['weightRange'][1] - self.myData['weightRange'][0]) + self.myData['scaleRange'][0]\n self.initActor()\n if not self.trophy:\n self.initVariables()\n self.initFishStatusIcon()\n if FishingGlobals.wantDebugCollisionVisuals:\n self.initCollisions()\n self.avoidingFish = False\n self.biteBubbleEffect = None\n self.idleBubbleEffect = None\n self.fightBubbleEffect = None\n 
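# Dispatch table mapping a behavior name from the fish data to its movement handler; looked up in updateBasedOnBehavior.\n 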
self.behaviorNameToFunction = {'straight': self.performStraightBehavior,'sineStraight': self.performSineStraightBehavior,'erratic': self.performErraticBehavior}\n self.sineDtAccumulator = 0.0\n self.erraticDtAccumulator = 0.0\n self.myZ = 0.0\n if not self.trophy:\n self.setLightOff()\n return\n\n def initActor(self):\n self.animDict = {}\n for anim in FishingGlobals.fishAnimations:\n self.animDict[anim] = 'models/char/pir_a_gam_fsh_%s_%s.bam' % (self.myData['model'], anim)\n\n self.actor = BlendActor('models/char/pir_r_gam_fsh_%s.bam' % self.myData['model'], self.animDict, FishingGlobals.defaultFishBlendTime, FishingGlobals.fishBlendTimeDict)\n self.actor.reparentTo(self)\n self.actor.setScale(self.adjustedScale)\n self.mouthJoint = self.actor.exposeJoint(None, 'modelRoot', 'hookAttach')\n self.attractionPoint = NodePath('AttractionPoint')\n self.attractionPoint.reparentTo(self.mouthJoint)\n self.attractionPoint.setPos(0.0, 0.0, 0.0)\n self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdle')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdleOpposite')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turn')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turnOpposite')\n if not self.trophy:\n self.setBin('fishingGame', 10)\n return\n\n def codeReload(self):\n self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdle')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdleOpposite')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turn')\n self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turnOpposite')\n\n def initFishStatusIcon(self):\n self.fishStatusIconTextNode = TextNode('fishBitingIcon')\n self.fishStatusIconNodePath = NodePath(self.fishStatusIconTextNode)\n self.fishStatusIconNodePath.setPos(0.0, 0.0, self.myData['indicatorHeightOffset'])\n self.fishStatusIconTextNode.setText('?')\n self.fishStatusIconTextNode.setTextColor(1.0, 0.0, 0.0, 1.0)\n self.fishStatusIconNodePath.reparentTo(self.mouthJoint)\n self.fishStatusIconNodePath.setBillboardPointEye()\n self.fishStatusIconNodePath.hide()\n self.fishStatusIconNodePath.setShaderOff()\n\n def initVariables(self):\n self.attractionVisual = None\n self.collisionVisual = None\n self.movingRight = True\n self.turnSpeed = 160.0\n self.turnTowardLureInterval = None\n self.velocity = FishingGlobals.baseFishVelocity * self.myData['speed']\n self.accel = FishingGlobals.baseFishAccel * self.myData['speed']\n self.fishMoveSequence = None\n self.bubbleEffect = None\n return\n\n def initCollisions(self):\n self.collisionVisual = loader.loadModel('models/props/crate')\n self.collisionVisual.setTransparency(1)\n self.collisionVisual.setColor(1.0, 1.0, 1.0, 0.3)\n self.collisionVisual.setScale(*self.myData['collisionBoxSize'])\n self.collisionVisual.setPos(*self.myData['collisionBoxOffset'])\n self.collisionVisual.reparentTo(self)\n self.collisionVisual.hide()\n self.attractionVisual = loader.loadModel('models/ammunition/cannonball')\n self.attractionVisual.setTransparency(1)\n self.attractionVisual.setColor(0.0, 1.0, 0.0, 0.3)\n self.attractionVisual.setScale(self.myData['attractionRadius'])\n self.attractionVisual.reparentTo(self.attractionPoint)\n self.attractionVisual.hide()\n self.collisionVisualVisible = False\n\n def 
hide(self):\n NodePath.hide(self)\n if self.idleBubbleEffect:\n self.idleBubbleEffect.hide()\n\n def show(self):\n NodePath.show(self)\n if self.idleBubbleEffect:\n self.idleBubbleEffect.show()\n\n def reloadCollisions(self):\n if FishingGlobals.wantDebugCollisionVisuals:\n self.collisionVisual.removeNode()\n self.attractionVisual.removeNode()\n self.initCollisions()\n\n def cleanFishData(self):\n pass\n\n def destroy(self):\n self.closeFish = []\n self.actor.destroy()\n self.stopIdleBubbleEffect()\n self.stopFightBubbleEffect()\n if self.fishMoveSequence:\n self.fishMoveSequence.pause()\n self.fishMoveSequence = None\n if self.fsm:\n del self.fsm\n self.fsm = None\n self.behaviorNameToFunction = {}\n self.removeNode()\n return\n\n def pickPositionAndSwim(self):\n self.initVariables()\n self.actor.clearControlEffectWeights()\n if self.myData['depth'] == 0:\n depth = random.uniform(FishingGlobals.fishingLevelBoundaries[self.myData['depth']], self.fishManager.gameObject.waterLevel + FishingGlobals.fishSpawnBelowWaterLevelHeight)\n else:\n depth = random.uniform(FishingGlobals.fishingLevelBoundaries[self.myData['depth']], FishingGlobals.fishingLevelBoundaries[self.myData['depth'] - 1])\n startX = random.uniform(FishingGlobals.leftFishBarrier + 5.0, FishingGlobals.rightFishBarrier - 5.0)\n self.setPos(startX, 0.0, depth)\n if random.randint(0, 1):\n self.fsm.request('TurnAround', 'Swimming', False)\n else:\n self.fsm.request('Swimming')\n\n def turnAround(self, nextState, shouldMoveRight):\n if self.velocity[0] < 0 and shouldMoveRight:\n self.velocity[0] = -self.velocity[0]\n elif self.velocity[0] > 0 and not shouldMoveRight:\n self.velocity[0] = -self.velocity[0]\n self.movingRight = self.velocity[0] > 0\n if self.fishMoveSequence:\n self.fishMoveSequence.pause()\n self.fishMoveSequence.clearToInitial()\n animationToTurn = 'turn'\n if self.movingRight:\n animationToTurn = 'turnOpposite'\n durationOfFishTurn = self.myData['durationOfFishTurn']\n self.fishMoveSequence = Parallel(Sequence(Func(self.actor.changeAnimationTo, animationToTurn, False), Wait(durationOfFishTurn), Func(self.fsm.request, nextState)), Sequence(Wait(durationOfFishTurn * 0.33), Func(self.setXVelocity, 0.0), Wait(durationOfFishTurn * 0.66), Func(self.setXVelocity, self.velocity[0])), name='%s_turnAroundInterval' % self.getName())\n self.velocity[0] = -self.velocity[0]\n self.fishMoveSequence.start()\n\n def setXVelocity(self, newVel):\n self.velocity[0] = newVel\n\n def checkForBiting(self):\n if self.fishManager.activeFish is not None:\n return\n if self.fishManager.gameObject.fsm.getCurrentOrNextState() not in ['Fishing', 'Reeling', 'LureStall', 'LegdFishShow']:\n return\n inv = localAvatar.getInventory()\n rodLvl = inv.getItemQuantity(InventoryType.FishingRod)\n if self.myData['depth'] + 1 > rodLvl:\n return\n self.fsm.request('Biting')\n return\n\n def checkForBoxOverlap(self, otherFish):\n pos = self.getPos(self.fishManager.gameObject.fishingSpot)\n size = self.myData['collisionBoxSize']\n offset = list(self.myData['collisionBoxOffset'])\n otherPos = otherFish.getPos()\n otherSize = otherFish.myData['collisionBoxSize']\n otherOffset = list(otherFish.myData['collisionBoxOffset'])\n if pos[0] + size[0] / 2.0 + offset[0] > otherPos[0] - otherSize[0] / 2.0 + otherOffset[0] and pos[0] - size[0] / 2.0 + offset[0] < otherPos[0] + otherSize[0] / 2.0 + otherOffset[0] and pos[2] + size[2] / 2.0 + offset[2] > otherPos[2] - otherSize[2] / 2.0 + otherOffset[2] and pos[2] - size[2] / 2.0 + offset[2] < otherPos[2] + otherSize[2] / 2.0 
+ otherOffset[2]:\n return True\n return False\n\n def checkForCloseFish(self, index):\n if index < len(self.fishManager.uncaughtFish) - 1:\n for i in range(index + 1, len(self.fishManager.uncaughtFish)):\n if self.fishManager.uncaughtFish[i].index != self.index:\n if self.checkForBoxOverlap(self.fishManager.uncaughtFish[i]):\n self.closeFish.append(self.fishManager.uncaughtFish[i])\n if FishingGlobals.wantDebugCollisionVisuals:\n self.collisionVisual.setColor(1, 0, 0, 0.3)\n\n if len(self.closeFish) == 0:\n if FishingGlobals.wantDebugCollisionVisuals:\n self.collisionVisual.setColor(1, 1, 1, 0.3)\n\n def checkForLures(self, currentState, lurePos):\n if self.getX() + FishingGlobals.fishAttractionOffset < lurePos[0] and self.movingRight or self.getX() - FishingGlobals.fishAttractionOffset > lurePos[0] and not self.movingRight:\n if self.attractionPoint.getDistance(self.fishManager.gameObject.lure) < self.myData['attractionRadius'] + self.fishManager.gameObject.lure.lureAttractRadius:\n self.checkForBiting()\n\n def update(self, dt, index, lurePos):\n currentState = self.fsm.getCurrentOrNextState()\n self.closeFish = []\n if currentState in ['ScareAway', 'Swimming', 'Flee', 'TurnAround']:\n self.checkForCloseFish(index)\n if currentState in ['Swimming']:\n self.checkForLures(currentState, lurePos)\n self.updateBasedOnBehavior(dt, lurePos)\n elif currentState in ['Hooked', 'AboutToFight', 'HookedFighting']:\n self.checkForCloseFish(-1)\n for fish in self.closeFish:\n self.makeFishRunFromMe(fish)\n\n def makeFishRunFromMe(self, otherFish):\n if otherFish.fsm.getCurrentOrNextState() == 'Flee' or otherFish.fsm.getCurrentOrNextState() == 'TurnAround':\n return\n if otherFish.getX() < self.getX(self.fishManager.gameObject.fishingSpot) and otherFish.movingRight:\n otherFish.fsm.request('TurnAround', 'Flee', False)\n elif otherFish.getX() > self.getX(self.fishManager.gameObject.fishingSpot) and not otherFish.movingRight:\n otherFish.fsm.request('TurnAround', 'Flee', True)\n else:\n otherFish.fsm.request('Flee')\n\n def updateBasedOnBehavior(self, dt, lurePos):\n currentState = self.fsm.getCurrentOrNextState()\n newX = self.getX()\n newY = self.getY()\n newZ = self.getZ()\n for fish in self.closeFish:\n if self.myData['size'] == 'small' and fish.myData['size'] == 'large':\n if self.checkForEating(fish):\n return\n self.avoidingFish = True\n if fish.velocity[1] > 0.0 and fish.avoidingFish:\n self.velocity[1] = -FishingGlobals.fishAvoidYVelocity\n else:\n self.velocity[1] = FishingGlobals.fishAvoidYVelocity\n if abs(fish.getY() - self.getY()) > self.myData['collisionBoxSize'][1] + fish.myData['collisionBoxSize'][1]:\n self.velocity[1] = 0.0\n\n if len(self.closeFish) == 0 and abs(self.getY()) > FishingGlobals.fishYTolerance:\n self.avoidingFish = False\n if self.getY() > 0:\n self.velocity[1] = -FishingGlobals.fishAvoidYVelocity\n else:\n self.velocity[1] = FishingGlobals.fishAvoidYVelocity\n elif len(self.closeFish) == 0 and abs(self.getY()) < FishingGlobals.fishYTolerance:\n self.avoidingFish = False\n self.velocity[1] = 0.0\n self.setY(0.0)\n newY = self.getY() + self.velocity[1] * dt + self.accel[1] * dt * dt\n if currentState in ['Swimming', 'TurnAround', 'Flee', 'ScareAway']:\n if currentState == 'ScareAway':\n newX, newZ = self.performScareAwayBehavior(dt, self.velocity, self.accel)\n else:\n if currentState == 'Flee':\n newX, newZ = self.performFleeBehavior(dt, self.velocity, self.accel)\n else:\n newX, newZ = self.behaviorNameToFunction[self.myData['behaviorDict']['name']](dt, self.velocity, 
self.accel)\n currentState = self.fsm.getCurrentOrNextState()\n if newX < FishingGlobals.leftFishBarrier:\n if currentState == 'ScareAway':\n if newX < FishingGlobals.leftFishBarrier - FishingGlobals.fullyOffscreenXOffset:\n self.fsm.request('Offscreen')\n return\n elif currentState != 'TurnAround' and not self.movingRight:\n self.fsm.request('TurnAround', 'Swimming', True)\n elif newX > FishingGlobals.rightFishBarrier:\n if currentState != 'TurnAround' and self.movingRight:\n self.fsm.request('TurnAround', 'Swimming', False)\n newZ = min(max(FishingGlobals.fishingLevelBoundaries[len(FishingGlobals.fishingLevelBoundaries) - 1], newZ), self.fishManager.gameObject.waterLevel + FishingGlobals.fishSpawnBelowWaterLevelHeight)\n self.setPos(newX, newY, newZ)\n\n def checkForEating(self, fishThatWillEat):\n if (self.getX() < fishThatWillEat.getX() and not fishThatWillEat.movingRight or self.getX() > fishThatWillEat.getX() and fishThatWillEat.movingRight) and self.fsm.getCurrentOrNextState() == 'Swimming' and fishThatWillEat.fsm.getCurrentOrNextState() == 'Swimming' and random.random() < 1.0:\n self.fsm.request('BeingEaten', fishThatWillEat)\n fishThatWillEat.fsm.request('Eating', self.weight)\n return True\n return False\n\n def startIdleBubbleEffect(self):\n self.idleBubbleEffect = FishIdleBubbleEffect.getEffect(unlimited=True)\n if self.idleBubbleEffect:\n self.idleBubbleEffect.reparentTo(self.mouthJoint)\n self.idleBubbleEffect.setScale(1.0)\n self.idleBubbleEffect.setHpr(0, 0, 0)\n self.idleBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))\n self.idleBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)\n self.idleBubbleEffect.particleDummy.setBin('fishingGame', 5)\n self.idleBubbleEffect.startLoop()\n\n def stopIdleBubbleEffect(self):\n if self.idleBubbleEffect:\n self.idleBubbleEffect.stopLoop()\n self.idleBubbleEffect = None\n return\n\n def startBiteBubbleEffect(self):\n self.biteBubbleEffect = FishBitingBubbleEffect.getEffect(unlimited=True)\n if self.biteBubbleEffect:\n self.biteBubbleEffect.reparentTo(self.mouthJoint)\n self.biteBubbleEffect.setScale(1.0)\n self.biteBubbleEffect.setHpr(0, 0, 0)\n self.biteBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))\n self.biteBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)\n self.biteBubbleEffect.particleDummy.setBin('fishingGame', 5)\n self.biteBubbleEffect.play()\n\n def stopBiteBubbleEffect(self):\n if self.biteBubbleEffect:\n self.biteBubbleEffect.stopLoop()\n self.biteBubbleEffect = None\n return\n\n def startFightBubbleEffect(self):\n self.fightBubbleEffect = FishFightingHookedBubbleEffect.getEffect(unlimited=True)\n if self.fightBubbleEffect:\n self.fightBubbleEffect.reparentTo(self.mouthJoint)\n self.fightBubbleEffect.setScale(1.0)\n self.fightBubbleEffect.setHpr(0, 0, 0)\n self.fightBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))\n self.fightBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)\n self.fightBubbleEffect.particleDummy.setBin('fishingGame', 5)\n self.fightBubbleEffect.startLoop()\n\n def stopFightBubbleEffect(self):\n if self.fightBubbleEffect:\n self.fightBubbleEffect.stopLoop()\n self.fightBubbleEffect = None\n return\n\n def performStraightBehavior(self, dt, velocity, accel):\n newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt\n newZ = self.getZ() + velocity[2] * dt + accel[2] * dt * dt\n return (\n newX, newZ)\n\n def performSineStraightBehavior(self, dt, velocity, accel):\n self.sineDtAccumulator += dt\n newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt\n newZ = self.myZ + 
math.sin(self.sineDtAccumulator) * self.myData['behaviorDict']['sineMultiplier']\n return (\n newX, newZ)\n\n def performScareAwayBehavior(self, dt, velocity, accel):\n newX = self.getX() + velocity[0] * FishingGlobals.scareAwayVelocityMultiplier * dt + accel[0] * dt * dt\n newZ = self.getZ() + velocity[2] * FishingGlobals.scareAwayVelocityMultiplier * dt + accel[2] * dt * dt\n return (\n newX, newZ)\n\n def performFleeBehavior(self, dt, velocity, accel):\n newX = self.getX() + velocity[0] * FishingGlobals.fleeVelocityMultiplier * dt + accel[0] * dt * dt\n newZ = self.getZ() + velocity[2] * FishingGlobals.fleeVelocityMultiplier * dt + accel[2] * dt * dt\n return (\n newX, newZ)\n\n def performErraticBehavior(self, dt, velocity, accel):\n self.erraticDtAccumulator += dt\n self.sineDtAccumulator += dt\n newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt\n newZ = self.myZ + math.sin(self.sineDtAccumulator) * self.myData['behaviorDict']['sineMultiplier']\n if self.erraticDtAccumulator > self.myData['behaviorDict']['secondsBetweenChanges']:\n self.erraticDtAccumulator = 0\n if random.random() < self.myData['behaviorDict']['chanceOfTurning']:\n if self.fsm.getCurrentOrNextState() != 'TurnAround':\n self.fsm.request('TurnAround', 'Swimming', not self.movingRight)\n return (\n newX, newZ)\n\n def showAttractionCollisionVisuals(self):\n if FishingGlobals.wantDebugCollisionVisuals:\n self.attractionVisual.show()\n\n def hideAttractionCollisionVisuals(self):\n if FishingGlobals.wantDebugCollisionVisuals:\n self.attractionVisual.hide()\n\n def showAvoidanceCollisionVisuals(self):\n if FishingGlobals.wantDebugCollisionVisuals:\n self.collisionVisual.show()\n\n def hideAvoidanceCollisionVisuals(self):\n if FishingGlobals.wantDebugCollisionVisuals:\n self.collisionVisual.hide()","repo_name":"PiratesOnlineRewritten/Pirates-Online-Rewritten","sub_path":"pirates/minigame/Fish.py","file_name":"Fish.py","file_ext":"py","file_size_in_byte":20273,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"67"}
{"seq_id":"20672587198","text":"from flask import Flask\nimport os\nimport numpy as np\nimport json\nimport requests\nfrom sklearn.cluster import KMeans\n\n#from kmeans-python import TokenizeCases\n\n\napp = Flask(__name__)\n\nport = int(os.getenv(\"PORT\", 64781))\n\n#for some reason the user has to be postgres and not root\n'''Db connection'''\n#conn =psycopg2.connect(database='jim', user='postgres', password='root', host='127.0.0.1', port='5432')\nconn=''\n\n# class Object:\n# def toJSON(self):\n# return json.dumps(self, default=lambda o: o.__dict__,\n# sort_keys=True, indent=4)\n\n\n\n'''\nFunction Name:getIntiialize\nPurpose: Reading the normalized cases based on the equipment type\nVariables:\n -conn: the connection object\n -equipmentType: The category of cases\n\n'''\ndef getIntiialize(conn,equipmentType):\n # cur = conn.cursor()\n # # cur.mogrify(\"Select id,description from cases.smartsignal_jim_allfields where smartsignal_jim_allfields.equipmentType=%s\",(equipmentType,))\n # cur.execute(\"Select id,\\\"normalizedCase\\\" from cases.smartsignal_normalized_case where \\\"equipmentType\\\"=%s\",\n # (equipmentType,))\n #\n # rows = cur.fetchall()\n #\n # cases = {}\n # # display the rows\n # for row in rows:\n # # print (row[0],row[1])\n # # cases[row[0]]={\"original\":row[1]}\n # cases[row[0]] = row[1]\n # url = 'https://data-ingestion-api.run.aws-usw02-pr.ice.predix.io/'\n
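 # NOTE: the commented-out code above and below appears to be an earlier DB/Predix API path for fetching cases; they are instead loaded from a local text dump further down.\n # 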
url='https://text-mining-212470820-02.run.asv-pr.ice.predix.io/processData/30?Tenant=PredixForum&Token=Token'\n #\n # headers = {'content-type': \"application/x-www-form-urlencoded\",\n # 'Authorization': \"Basic aW5nZXN0b3IuZWExYTAyNDUtMTc1OC00ZTY2LWFiZjAtZjI1Mzk1NmE3ODIwLmY4ZGFhYTUwLTBhOGYtNGU1ZS1hZjJhLTkyNjEyMDQwNGJhMTo=\"\n # }\n #\n # resp = requests.post('https://4126b27b-6860-48ee-9dc1-9cba313eac9f.predix-uaa.run.asv-pr.ice.predix.io/oauth/token',\n # data='grant_type=password&username=f743b7ef-42df-4d7e-89dd-90dc3b53b0ac_ingestor&password=Pa55w0rd',\n # headers=headers)\n\n\n #authString = json.loads(resp.text)\n\n print(\"------------------\")\n #print(authString)\n\n #now using the authstring find out the cases\n\n\n\n fname = \"normalized_cases_jim.txt\"\n with open(fname, 'r') as myfile:\n data = myfile.read()\n data = data.split('---------BREAK---------')\n cases = [case.strip() for case in data]\n #print(cases[1:10])\n #print(len(cases))\n #lengthCases = len(cases)\n #lines = cases\n\n #cases = pickle.load(open(\"casesDict.p\", \"rb\"))\n\n stopwordsFile = open('stopwordsss.txt', 'r')\n stopwords = stopwordsFile.read()\n stopwordList = stopwords.split(\",\")\n\n return (cases, stopwordList)\n\n\n\n''' Utility functions'''\n\n\n'''\nFunction Name:getKeywordIndexInVocabulary\nPurpose: Given the keyword we want to know what is its index in the vocab list\nVariables:\n -vocab: the vocabulary\n -keyword: for which the index needs to be found\n'''\ndef getKeywordIndexInVocabulary(vocab,keyword):\n idx = list(vocab).index(keyword)\n return(idx)\n\n\n\n'''\nFunction Name:getDocumentsContainingKeywords\nPurpose: Given the keyword we want to know in which documents it has appeared. We will use the countDtm that is a matrix of documents and the keywords\nfor this task. The vocabulary is required to find out the index of the keyword. The same index is the column index in the countDtm\nVariables:\n -vocab: the vocabulary\n -keyword: for which the documents need to be found\n -countDtm: document-term matrix\n'''\ndef getDocumentsContainingKeywords(countDtm,vocab,keyword):\n #GET THE COLUMN NUM OF THE KEYWORD\n keywordIndex=getKeywordIndexInVocabulary(vocab,keyword)\n tt=list(countDtm[:,keywordIndex]>0)\n res=[]\n for idx,w in enumerate(tt):\n if(w==True):\n res.append(idx)\n return(res)\n\n'''\nFunction Name:getKeywordsOfDocument\nPurpose: Given the document we want to find out all the keywords. Basically using the dtm we find the column indexes\nwhich are non zero for the particular document and then using the vocabulary we get the words that are in those indexes\nVariables:\n -vocab: the vocabulary\n -docNo: the document for which the keywords need to be found\n -dtm: the document-term matrix\n'''\ndef getKeywordsOfDocument(dtm,vocab,docNo):\n nn=np.flatnonzero(dtm[docNo,])\n print(nn)\n #print(nn[1])\n #get the corresponding word from the vocabulary list\n return(vocab[nn].tolist())\n\n\n\n'''\nFunction Name:getFreqOfKeyword\nPurpose: How many times a particular word has appeared in the corpus (not document frequency as we will consider the\nabsolute number and not the binary)\nVariables:\n -freqDict: the dictionary containing the frequency of all the keywords. 
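Each entry has the shape {'word': w, 'count': n}. 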
This is created at the time of creating the countDTM\n -keyword: the word for which the frequency needs to be found\n\n'''\ndef getFreqOfKeyword(freqDict,keyword):\n for key in freqDict:\n if(key['word']==keyword):\n return(key['count'])\n\n\n\n'''\nFunction Name:getNumberOfWordsForCluster\nPurpose: How many words to be shown for each cluster. This is based on the proportion of the size of the cluster. Basically what percentage of\n documents are in a cluster. Then out of 100 words in the wordcloud, how many words come from this cluster\nVariables:\n -km: The kmeans object containing the results of the algorithm\n -clusterNum: the cluster id for which we want to determine the number of words\n -N: Number of documents which were considered for clustering\n'''\ndef getNumberOfWordsForCluster(km, clusterNum, N):\n\tret = np.unique(km.labels_, return_counts=True)\n\tclusterSize = ret[1][clusterNum]\n\ttotalWords = float(clusterSize / N) * 100\n\tprint(totalWords)\n\tif (totalWords < 10):\n\t\treturn (10)\n\treturn (int(totalWords) + 1)\n\n\n# casesDict, stopwordList = getIntiialize(conn,equipmentType)\n# print(casesDict)\n# lines = []\n# countToCaseIdMap={}\n# # # maximum is 4997\n# count=0\n# for key in casesDict:\n# lines.append(casesDict[key]) # now we need to vectorize the corpus\n# countToCaseIdMap[count]=key\n# count=count+1\n#\n# print(lines)\n# print(countToCaseIdMap)\n\n# def getNumberOfWordsForCluster(km,clusterNum,N):\n# ret=np.unique(km.labels_, return_counts=True)\n# clusterSize=ret[1][clusterNum]\n# totalWords=float(clusterSize/N)*100\n# print(totalWords)\n# if(totalWords<10):\n# return(10)\n# return(int(totalWords)+1)\n\n\n'''\nFunction Name:getVectorized\nPurpose: This is the main workhorse. The following are the tasks it performs:\n1. countToCaseIdMap: A map containing the index of the document in the resultset (casesDict) and the actual caseid.\nThis caseId is the primary key to retrieve the original case from the db at any time\n2.tfSparseMatrix: this is the tf-idf document-term matrix that will be used for clustering\n3.countDtm: This is the document-term matrix without tf-idf. 
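Cell [i][j] holds the raw count of term j in document i, so 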
we will use it for finding the frequency of keywords, vocab etc\n4.vocab: list of keywords obtained from countDtm\n\n\nVariables:\n -conn: The postgres connection object\n -equipmentType: WIND_TURBINE, STEAM_TURBINE etc\n'''\ndef getVectorized(conn,equipmentType):\n\n casesDict, stopwordList = getIntiialize(conn,equipmentType)\n print(casesDict)\n lines = casesDict\n countToCaseIdMap={}\n # # maximum is 4997\n count=0\n for key in range(len(casesDict)):\n #lines.append(casesDict[key]) # now we need to vectorize the corpus\n countToCaseIdMap[count] = key\n count=count+1\n\n from sklearn.feature_extraction.text import TfidfVectorizer\n vectorizer = TfidfVectorizer(min_df=0.006, stop_words=stopwordList, strip_accents='unicode', norm='l2',\n sublinear_tf=True)\n\n\n tfSparseMatrix = vectorizer.fit_transform(lines)\n tfDtm = tfSparseMatrix.toarray()\n tfDtm = np.array(tfDtm)\n\n\n from sklearn.feature_extraction.text import CountVectorizer\n count_vect = CountVectorizer(min_df=0.006, stop_words=stopwordList, strip_accents='unicode', binary=False)\n rawdtm = count_vect.fit_transform(lines)\n vocab = count_vect.get_feature_names()\n # convert the dtm to regular array\n countDtm = rawdtm.toarray()\n # convert the dtm to numpy array\n countDtm = np.array(countDtm)\n print(countDtm)\n # need to convert it to numpy array so that we can easily perform the operations on it\n vocab = np.array(vocab)\n\n\n ##########\n # freqsum = np.sum(countDtm, axis=0)\n #\n # # for each of the vocabulary word create a dictionary containing the count\n # freqDict = []\n # for idx, v in enumerate(vocab):\n # freqDict.append({'word': v, 'count': freqsum[idx]})\n\n # from sklearn.cluster import KMeans\n # # the max_iter is how many iterations before the convergence is assumed\n # # n_init is the number of times the algo is run\n # K_Cluster = 3\n # km = KMeans(n_clusters=K_Cluster, init='k-means++', max_iter=1000, n_init=10, verbose=False)\n # # you need to call km.fit_predict so that the kmeans can be run and then each of the points can be assigned a cluster index\n # km.fit_predict(tfSparseMatrix)\n\t# # return (format(tfSparseMatrix.shape))\n\t#return(\"skjdhkjsd\")\n\n\n return(lines,countToCaseIdMap,tfSparseMatrix,count_vect,countDtm)\n\n\n'''\nFunction Name:performKmeans\nPurpose: performs kmeans clustering using the tf-idf matrix\n\n\nVariables:\n -tfSparseMatrix: tf-idf matrix\n\n'''\ndef performKmeans(tfSparseMatrix):\n\n # the max_iter is how many iterations before the convergence is assumed\n # n_init is the number of times the algo is run\n K_Cluster = 3\n km = KMeans(n_clusters=K_Cluster, init='k-means++', max_iter=1000, n_init=10, verbose=False)\n # you need to call km.fit_predict so that the kmeans can be run and then each of the points can be assigned a cluster index\n km.fit_predict(tfSparseMatrix)\n return(km)\n\n'''\nFunction Name:getWordMapForCluster\nPurpose: This function will create a map of the keyword and the frequency of its occurrence in that cluster.\nIt takes as an input a set of documents that have appeared in the cluster. 
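Counts are accumulated rather than binarized, e.g. a word seen three times in one member document and twice in another ends up with a count of 5. 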
Now for each document it will find out the number of times\na particular word has appeared.\nIt will finally sort the word list as per the frequency and return\n\n\nVariables:\n -documents: A list of document ids that are to be considered\n -countDtm: This is the Document-term matrix whose cells will tell the frequency of the word in the document\n -vocab: a list of keywords\n'''\ndef getWordMapForCluster(documents, countDtm, vocab):\n wordMap = {}\n #for each document find out which all keywords appear\n for d in documents:\n #print(d)\n # this gives the index of columns and hence keywords\n nn = np.flatnonzero(countDtm[int(d),])\n\n # find the word and the count on that index. Using the vocabulary you will know the word\n for ind in nn:\n if (vocab[ind] not in wordMap.keys()):\n wordMap[vocab[ind]] = countDtm[int(d), ind]\n else:\n wordMap[vocab[ind]] += countDtm[int(d), ind]\n\n #sort the map as per the frequency of the words\n from operator import itemgetter\n sortedWordMap = sorted(wordMap.items(), key=itemgetter(1), reverse=True)\n return (sortedWordMap)\n\n\n\n'''\nFunction Name:getwordList\nPurpose: This function will create the final word list: a map of each keyword and the frequency of its occurrence across all the clusters.\nIt will iteratively find out the wordlist for each cluster. Then pick up the top n words (based on the size of the cluster) and then create a final\nlist\nSome of the words can be common in all the clusters (multiple words list), so they will be merged\n\nVariables:\n -km: kmeans clustering object\n -N: The number of documents in the corpus\n -k: The number of clusters\n -casesDistributionPerCluster: distribution of cases per cluster\n -countDtm: the document term matrix\n -vocab: the list of the keywords\n'''\ndef getwordList(km, N, k,casesDistributionPerCluster, countDtm, vocab):\n finalList = []\n numberOfClusters = k\n numberOfCases = N\n for clusterNum in range(numberOfClusters):\n #print(clusterNum)\n n = getNumberOfWordsForCluster(km, clusterNum, numberOfCases)\n # get wordmap for a cluster\n wordList = getWordMapForCluster(casesDistributionPerCluster[clusterNum].split(','), countDtm, vocab)\n finalList.append(wordList[:n])\n\n print(finalList)\n '''We have got a list of lists that needs to be flattened'''\n\n flatten = lambda l: [item for sublist in l for item in sublist]\n flattenFinalList = flatten(finalList)\n print(flattenFinalList)\n\n '''some of the words in the wordlist can be repetitive so we will merge them together'''\n wordListForWordCloud = {}\n for listItem in flattenFinalList:\n\n key = listItem[0]\n value = listItem[1]\n if (key not in wordListForWordCloud.keys()):\n wordListForWordCloud[key] = value\n else:\n wordListForWordCloud[key] += value\n\n print(wordListForWordCloud)\n\n '''Sort the list and print. 
the list contains the word and its frequency'''\n from operator import itemgetter\n sortedWordMap = sorted(wordListForWordCloud.items(), key=itemgetter(1), reverse=True)\n\n return (sortedWordMap)\n\n\n'''\nFunction Name:getWordCloudListJson\nPurpose: returns a JSON of wordlist that can be used to generate a wordcloud\n\nVariables:\n -km: kmeans clustering object\n -N: The number of documents in the corpus\n -k: The number of clusters\n -casesDistributionPerCluster: distribution of cases per cluster\n -countDtm: the document term matrix\n -vocab: the list of the keywords\n'''\ndef getWordCloudListJson(km,N,k,casesDistributionPerCluster,countDtm,vocab):\n wl = getwordList(km,N, k, casesDistributionPerCluster, countDtm, vocab)\n wlJson = []\n for key in wl:\n vocabWord = key[0]\n vocabFreq = key[1]\n wlJson.append({\"keyword\":vocabWord,\"frequency\":str(vocabFreq)})\n\n return(json.dumps(wlJson))\n\n\n'''\nFunction Name:getWordCloudListWithCasesJson\nPurpose: returns a JSON of wordlist along with which all cases are associated with that word. It will also highlight\n the section of the case where the word has appeared\n\nVariables:\n -km: kmeans clustering object\n -N: The number of documents in the corpus\n -k: The number of clusters\n -casesDistributionPerCluster: distribution of cases per cluster\n -countDtm: the document term matrix\n -vocab: the list of the keywords\n'''\ndef getWordCloudListWithCasesJson(km, N, k, casesDistributionPerCluster, countDtm, vocab,countToCaseIdMap,cases):\n wlWithCases = []\n wl = getwordList(km, N,k, casesDistributionPerCluster, countDtm, vocab)\n\n for key in wl:\n vocabWord = key[0]\n\n index = vocab.index(vocabWord)\n print(vocabWord)\n # get all the rows for which the column at this index is nonzero\n nn = np.flatnonzero(countDtm[:, index])\n caseIds = []\n count=0\n for n in nn:\n caseIds.append({'caseId': str(countToCaseIdMap[n]),\n 'description': cases[n].replace(vocabWord, \"\" + vocabWord + \"\")})\n # caseIds.append(countToCaseIdMap[n])\n count+=1\n if(count>10):\n break\n wlWithCases.append({'tag':vocabWord,'size': str(key[1]), 'caseIds': caseIds})\n finalResult = {}\n finalResult[\"name\"] = \"WordCloud\"\n finalResult[\"errorCode\"] = \"Null\"\n finalResult[\"wordFrequencyModel\"] = wlWithCases\n\n return (json.dumps(finalResult))\n\n\n\n\n'''\nFunction Name:getCasesDistributionPerCluster\nPurpose: This function will find out the distribution of the cases in the cluster. Basically which cases belong to which cluster\n\nVariables:\n -km: kmeans clustering object\n\n'''\n#which all cases belong to which cluster\ndef getCasesDistributionPerCluster(km):\n casesPerCluster = {}\n for i, cluster in enumerate(km.labels_):\n if cluster not in casesPerCluster.keys():\n casesPerCluster[cluster] = str(i)\n else:\n casesPerCluster[cluster] += \",\" + str(i)\n return(casesPerCluster)\n\n\n\n'''\nFunction Name:getFreqDistribution\nPurpose: How many times a particular word has appeared in the corpus (not document frequency as we will consider the\nabsolute number and not the binary). A dictionary of word and its overall frequency is created\n\nVariables:\n -countDtm: document-term matrix containing all the keywords in the columns. 
the cell determines the tf score\n\n'''\ndef getFreqDistribution(countDtm,vocab):\n freqsum = np.sum(countDtm, axis=0)\n\n # for each of the vocabulary word create a dictionary containing the count\n freqDict = []\n for idx, v in enumerate(vocab):\n freqDict.append({'word': v, 'count': freqsum[idx]})\n return(freqDict)\n\ndef printAsJSON(obj):\n return(json.dumps(obj))\n\n\n'''The main code'''\n\n@app.route('/')\ndef getWordListAPi():\n equipmentType=\"STEAM_TURBINE\"\n #conn=''\n cases,countToCaseIdMap,tfSparseMatrix,count_vect,countDtm=getVectorized(conn,equipmentType)\n km=performKmeans(tfSparseMatrix)\n\n #return(\"sjdjksdk\")\n '''Find out how many documents are in each cluster'''\n clusterDistribution = np.unique(km.labels_, return_counts=True)\n numberOfClusters=len(clusterDistribution[0])\n\n print(numberOfClusters)\n #return(str(numberOfClusters))\n\n # '''the vocabulary of the words'''\n vocab=count_vect.get_feature_names()\n print(vocab)\n #return(printAsJSON(vocab))\n # ''' How the documents are distributed in each cluster..which all cases lie in cluster 1 ..3'''\n #\n casesDistributionPerCluster=getCasesDistributionPerCluster(km)\n print(casesDistributionPerCluster[1])\n #return(printAsJSON(casesDistributionPerCluster[1]))\n\n '''\n Get the word list in JSON\n '''\n wl=getWordCloudListJson(km,len(cases),3,casesDistributionPerCluster,countDtm,vocab)\n print(wl)\n #\n # return(wl)\n\n # ''''\n # Now find out the various cases in which the words have appeared.\n # '''\n wlWithCases=getWordCloudListWithCasesJson(km,len(cases),3,casesDistributionPerCluster,countDtm,vocab,countToCaseIdMap,cases)\n return(wlWithCases)\n\n # # dump the json to a file\n #outF = open(\"kmeansOutput.json\", \"w\")\n # outF.write(json.dumps(wlWithCases))\n # outF.close()\n #\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=port)\n\n\n#create a map\n\n","repo_name":"rohdimp24/kmeans-clustering-python-cf","sub_path":"kmeans_without_tokenize.py","file_name":"kmeans_without_tokenize.py","file_ext":"py","file_size_in_byte":18424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"20678376081","text":"\nN = int(input())\nx, a = 1, 1\n\nwhile True:\n if N < x + a: # a is the current layer number; true when N falls within this layer's bounds\n gap = N - x # the numerator/denominator split by gap depends on whether the layer is odd or even\n if a % 2 == 0:\n print(1 + gap, \"/\", a - gap,sep=\"\")\n else:\n print(a - gap, \"/\", gap + 1,sep=\"\")\n break\n else:\n x = x + a\n a += 1\n \n\n","repo_name":"Minoolian/Coding_Test","sub_path":"Baekjoon/단계별 코딩테스트/08. 
Math_1/1193 fraction.py","file_name":"1193 fraction.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"42037567812","text":"# To change this license header, choose License Headers in Project Properties.\r\n# To change this template file, choose Tools | Templates\r\n# and open the template in the editor.\r\n\r\nimport cv2\r\nimport filtros\r\n\r\nimport numpy as np\r\n\r\n\r\nimagenes = filtros.Filtros(\"cul.jpg\")\r\n#imagenes.mi_filtro()\r\n#imagenes = imagenes.cierre()\r\n#imagenes = imagenes.apertura()\r\n#imagenes = imagenes.erosion()\r\nimagenes = imagenes.dilatacion()\r\n#imagenes = imagenes.gradiente()\r\n#imagenes = imagenes.tophat1()\r\n\r\n\r\n#imagenes.dos_grises()\r\n\"\"\"OpenCV's execution time was: 0.021999835968\r\nOur own implementation's execution time was: 178.450999975\"\"\"\r\ncv2.imwrite(\"nooo.jpg\", imagenes)\r\ncv2.imshow(\"cierre.jpg\", imagenes)\r\ncv2.waitKey(0)\r\n\r\ndef mario():\r\n img_rgb = cv2.imread('mario.jpg')\r\n template = cv2.imread('moneda.jpg')\r\n h, w = template.shape[:-1] # shape is (rows, cols, channels), i.e. (height, width)\r\n\r\n res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)\r\n threshold = .8\r\n loc = np.where(res >= threshold)\r\n for pt in zip(*loc[::-1]): \r\n cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 1)\r\n\r\n cv2.imwrite('result.png', img_rgb)\r\n \r\n#mario()\r\n","repo_name":"BRPedro/pythonPatrones","sub_path":"pythonPatrones/src/pythonpatrones.py","file_name":"pythonpatrones.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"40969355291","text":"from typing import List, Dict\n# from flask import Flask\nfrom flask import Flask, render_template, url_for, flash, redirect, request\nimport mysql.connector\nimport json\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\nfrom prometheus_client import start_http_server, Summary, Counter, make_wsgi_app, generate_latest, Gauge\nimport random\nimport time\nimport logging\nimport datetime\nfrom multiprocessing.pool import ThreadPool\n\n\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.DEBUG)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template('layout.html')\n\n@app.route(\"/get-messages\", methods=[\"GET\"])\n@cross_origin()\ndef get_stock_report() -> List[Dict]:\n config = {\n 'user': 'root',\n 'password': 'root',\n 'host': 'db',\n 'port': '3306',\n 'database': 'store'\n }\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n query = \"SELECT * FROM messages\"\n cursor.execute(query)\n results = [[date.strftime(\"%m/%d/%Y\"), name, email, phone_number, message] for (date, name, email, phone_number, message) in cursor]\n cursor.close()\n connection.close()\n return json.dumps(results)\n\n@app.route(\"/post-message\", methods=[\"POST\"])\n@cross_origin()\ndef post_message() -> str:\n body = request.get_json()\n name = ''\n email = ''\n phone_number = ''\n message = ''\n if \"name\" in body:\n name = body['name']\n if \"email\" in body:\n email = body['email']\n if \"phone_number\" in body:\n phone_number = body['phone_number']\n if \"message\" in body:\n message = body['message']\n config = {\n 'user': 'root',\n 'password': 'root',\n 'host': 'db',\n 'port': '3306',\n 'database': 'store'\n }\n\n connection = 
mysql.connector.connect(**config)\n cursor = connection.cursor()\n update_query = \"INSERT INTO messages VALUES (\" + \"\\'\" + time.strftime('%Y-%m-%d %H:%M:%S') + \"\\'\" + \",\" + \"\\'\" + name + \"\\'\" + \",\" + \"\\'\"+email + \"\\',\\'\" + phone_number + \"\\',\" + \"\\'\" + message + \"\\'\" + \")\"\n app.logger.info(\"QUERY: \" + update_query)\n # cursor.execute(\"INSERT INTO messages (date, name, email, phone_number, message) VALUES ('%s',%s, %s, %s)\")\n cursor.execute(update_query)\n connection.commit()\n\n cursor.close()\n connection.close()\n return json.dumps(\"OK\")\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)","repo_name":"cristinabuciu/sales-store","sub_path":"customer-request-service/customer-request.py","file_name":"customer-request.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14376246773","text":"from cityMap.citymap import Coordinate\nfrom typing import List\nfrom commons.decorators import auto_str\nfrom drones.drone import Drone\nimport copy\n\n\n@auto_str\nclass DroneGenerator:\n \"\"\"Drone generator: generate drones\"\"\"\n \n def __init__(self, warehouses: List[Coordinate]):\n self.warehouses = warehouses\n self.warehouse_pointer = 0\n self.ids = 0\n \n def get_drones(self, num) -> List[Drone]:\n \"\"\"\n Create and initialize a list of Drone instances.\n\n The drone will be initialized to a warehouse. The strategy for determining which warehouse to be initialized\n in is Round Robin.\n The warehouse pointer will increment by 1 when a new drone is generated.\n\n :return: a list of new Drone instances\n \"\"\"\n drones = list()\n for i in range(num):\n self.ids += 1\n start_location = copy.deepcopy(self.warehouses[self.warehouse_pointer])\n drone = Drone(drone_id=self.ids, warehouses=self.warehouses,\n start_location=start_location, height=0)\n self.warehouse_pointer = (self.warehouse_pointer + 1) % len(self.warehouses)\n drones.append(drone)\n return drones\n","repo_name":"zewei94yomi/msc-project","sub_path":"src/drones/dronegenerator.py","file_name":"dronegenerator.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33967419928","text":"import datetime as dt\nimport requests\nimport statistics\nfrom dataclasses import dataclass\nfrom typing import Optional, Dict\nfrom itertools import groupby\nfrom sqlmodel import select\nimport schedule\n\nfrom core import log as log_, config\nfrom db.db import service as db_service\nfrom db.models import OpenWeatherData\n\nlog = log_.service.logger('open_weather')\n\n\n@dataclass\nclass WeatherStatistics:\n span: dt.timedelta\n temperature: float\n wind: float\n clouds: float\n humidity: float\n pressure: float\n\n\nclass OpenWeatherService(config.Component):\n\n def __init__(self, name):\n super().__init__(name)\n self.apikey = config.ConfigOption(required=True).secret # type: str\n self.longitude = config.ConfigOption(required=True).float # type: float\n self.latitude = config.ConfigOption(required=True).float # type: float\n self.days_of_keeping_data = config.ConfigOption(required=True).integer # type: int\n\n def schedule_jobs(self):\n log.debug(\"scheduling open weather related jobs\")\n schedule.every(10).minutes.do(self.get_weather_data)\n schedule.every().day.do(self._clear_data)\n\n def _clear_data(self):\n db_service.clear_old_data(OpenWeatherData, dt.datetime.now() - 
dt.timedelta(days=self.days_of_keeping_data))\n\n @property\n def raw_data(self):\n try:\n raw = requests.get(\n f'https://api.openweathermap.org/data/2.5/weather?lat={self.latitude}&lon={self.longitude}&appid={self.apikey}&units=metric').json()\n log.info('weather request was successful')\n log.debug(f\"raw_data = {raw}\")\n return raw\n except Exception as e:\n log.error(f'request failed: {e}')\n return None\n\n @staticmethod\n def get_last_data():\n return db_service.get_last(OpenWeatherData)\n\n def get_weather_data(self):\n weather = self.get_last_data()\n if not weather or weather.timestamp < dt.datetime.now() - dt.timedelta(minutes=5):\n data = self.raw_data\n if data:\n log.info('Retrieved Open weather data')\n weather = OpenWeatherData(\n temperature=round(data['main']['temp'], 2),\n wind=data['wind']['speed'],\n clouds=data['clouds']['all'],\n pressure=data['main']['pressure'],\n humidity=data['main']['humidity'],\n timezone=data['timezone'],\n sunrise=dt.datetime.fromtimestamp(data['sys']['sunrise']),\n sunset=dt.datetime.fromtimestamp(data['sys']['sunset']))\n log.info(f\"Open weather data: {weather}\")\n db_service.add(weather)\n return weather\n\n @staticmethod\n def get_average_weather(timedelta: dt.timedelta) -> Optional[WeatherStatistics]:\n weathers = db_service.session.exec(select(OpenWeatherData).where(OpenWeatherData.timestamp > dt.datetime.now() - timedelta)).all()\n if weathers:\n average = WeatherStatistics(\n span=timedelta,\n temperature=statistics.mean([w.temperature for w in weathers]),\n wind=statistics.mean([w.wind for w in weathers]),\n clouds=statistics.mean([w.clouds for w in weathers]),\n humidity=statistics.mean([w.humidity for w in weathers]),\n pressure=statistics.mean([w.pressure for w in weathers]))\n log.info(f\"Average weather from arduino for the past {timedelta}: {average}\")\n return average\n return None\n\n @staticmethod\n def get_hourly_average_weather_for_last_day() -> Optional[Dict[int, WeatherStatistics]]:\n weathers = db_service.session.exec(select(OpenWeatherData).where(OpenWeatherData.timestamp > dt.datetime.now() - dt.timedelta(hours=24))).all()\n weathers_by_hour = {key: list(value) for key, value in groupby(weathers, key=lambda w: w.timestamp.hour)}\n if len(weathers_by_hour) > 0:\n averages_by_hour = dict()\n log.info('calculating hourly average weather')\n for name, data in weathers_by_hour.items():\n averages_by_hour[name] = WeatherStatistics(\n span=dt.timedelta(hours=1),\n temperature=statistics.mean([w.temperature for w in data if w is not None]),\n wind=statistics.mean([w.wind for w in data if w is not None]),\n clouds=statistics.mean([w.clouds for w in data if w is not None]),\n humidity=statistics.mean([w.humidity for w in data if w is not None]),\n pressure=statistics.mean([w.pressure for w in data if w is not None]))\n log.info(f\"Averages: {averages_by_hour}\")\n return averages_by_hour\n return None\n\n\nservice = OpenWeatherService('OpenWeatherService')\n","repo_name":"koltony/scarlet","sub_path":"scarlet/services/open_weather.py","file_name":"open_weather.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"29964081234","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for ppmoney project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'ppmoney'\n\nSPIDER_MODULES = ['ppmoney.spiders']\nNEWSPIDER_MODULE = 'ppmoney.spiders'\nITEM_PIPELINES={\n 'ppmoney.pipelines.PpmoneyPipeline':400,\n}\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'ppmoney (+http://www.yourdomain.com)'\n","repo_name":"yfjelley/scrapy_1214049153","sub_path":"crawl/ppmoney/ppmoney/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25928758432","text":"## movement.py Version 1.0\n## Written by Magnus Berg Sletfjerding (eembees)\n###########################################################\n\"\"\"\nMovement function:\nMoves a set of points along a randomly generated direction vector\nINPUTS:\npoints -- Points as ndarray\ndist -- Distance points move\n\nOUTPUTS:\nnewpoints -- New Points as ndarray\n\"\"\"\n\n\"\"\"\nImporting modules\n\"\"\"\nimport numpy as np\n\n\"\"\"\nDefining move function\n\"\"\"\ndef randommove(points, dist):\n # # Standard factors\n dimension = len(points[0]) # # working dimension of coordinate system, extracted from first point\n # # Generate random direction vector\n direction = np.random.random_sample(dimension)\n direction = [(x-0.5)*2 for x in direction]\n # print direction\n sqrtsum_random = np.sqrt(sum([x**2 for x in direction]))\n direction = [direction[i] / sqrtsum_random for i in range(dimension)]\n\n # # Obtain movement vector\n movement = [direction[i]*dist for i in range(dimension)]\n\n # # Initiate movement\n newpoints = [] # # List of output points\n # print type(newpoints)\n\n for i in range(len(points)):\n newpoint = np.add(points[i], movement)\n newpoints.append(newpoint)\n\n return np.asarray(newpoints)\n","repo_name":"eembees/molstat_water","sub_path":"movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72892272533","text":"TYPE_COMMAND = \"type_command\"\r\nTYPE_MENU = \"type_menu\"\r\n\r\nACTION_ADD = \"action_add\"\r\nACTION_ADD_LIST = \"action_add_list\"\r\nACTION_INSERT = \"action_insert\"\r\nACTION_REMOVE = \"action_remove\"\r\nACTION_REMOVE_DAY = \"action_remove_day\"\r\nACTION_REMOVE_RANGE = \"action_remove_range\"\r\nACTION_REMOVE_CATEGORY = \"action_remove_category\"\r\nACTION_LIST_ALL = \"action_list_all\"\r\nACTION_LIST_CATEGORY = \"action_list_category\"\r\nACTION_LIST_CATEGORY_CONDITION = \"action_list_category_condition\"\r\nACTION_SUM_CATEGORY = \"action_sum_category\"\r\nACTION_MAX_DAY = \"action_max_day\"\r\nACTION_SORT_DAY = \"action_sort_day\"\r\nACTION_SORT_CATEGORY = \"action_sort_category\"\r\nACTION_FILTER_CATEGORY = \"action_filter_category\"\r\nACTION_FILTER_CATEGORY_CONDITION = \"action_filter_category_condition\"\r\nACTION_UNDO = \"action_undo\"\r\nACTION_CHANGE_UI = \"action_change_ui\"\r\nACTION_EXIT = \"action_exit\"\r\nACTION_ERROR = \"action_error\"\r\nACTION_HELP = \"action_help\"\r\n\r\nCATEGORY_HOUSEKEEPING = \"housekeeping\"\r\nCATEGORY_FOOD = \"food\"\r\nCATEGORY_TRANSPORT = \"transport\"\r\nCATEGORY_CLOATHING = \"cloathing\"\r\nCATEGORY_INTERNET = \"internet\"\r\nCATEGORY_OTHERS = \"others\"\r\n","repo_name":"Akitektuo/University","sub_path":"1st 
year/PF/Lab/Assignment3/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"10771025309","text":"import csv\nimport logging\nimport os\nimport subprocess\n\nimport shutil\nimport time\n\nfrom emuvim.api.osm.pre_configured_osm import PreConfiguredOSM\nfrom emuvim.api.util.docker_utils import build_dockerfile_dir\nfrom mininet.log import setLogLevel\n\nlogging.basicConfig(level=logging.DEBUG)\nsetLogLevel('debug') # set Mininet loglevel\nlogging.getLogger('werkzeug').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.base').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.compute').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.keystone').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.nova').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.neutron').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.heat').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.heat.parser').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.glance').setLevel(logging.DEBUG)\nlogging.getLogger('api.openstack.helper').setLevel(logging.DEBUG)\n\nprefix = os.path.dirname(os.path.abspath(__file__))\n\nbuild_dockerfile_dir('../images/sshcontainer/', 'sshcontainer')\n\nlayers_folder = os.path.join(prefix, '../charms/layers')\nsimple_charm_folder = os.path.join(layers_folder, 'simple')\ncharm_target_dir = os.path.join(prefix, '../vnfs/simple_charmed_vnfd/charms/')\nshutil.rmtree(charm_target_dir, ignore_errors=True)\nif not subprocess.call(['/snap/bin/charm', 'build'], cwd=simple_charm_folder, env={\n 'CHARM_BUILD_DIR': charm_target_dir,\n 'CHARM_LAYERS_DIR': layers_folder\n}) in [0, 100]: # 100 means tests skipped\n raise RuntimeError('charm build failed')\n\n\ndef get_detailed_configuration_status(osm):\n status = osm.ns_get(ns_id)['_admin']['deployed']['VCA'][0]['detailed-status']\n print('current status: %s' % status)\n return status\n\n\ndef wait_for_detailed_configuration_status(osm, status):\n while get_detailed_configuration_status(osm) != status:\n time.sleep(1)\n\n\nwith open('charmed-%d.csv' % time.time(), 'w') as csvfile:\n fieldnames = ['ns_create', 'charm_deployment_start', 'waiting_for_machine', 'installing_charm_software',\n 'ns_action', 'ns_delete']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for n in range(1, 10 + 1):\n with PreConfiguredOSM() as osm:\n osm.onboard_vnfd('../vnfs/simple_charmed_vnfd')\n nsd_id = osm.onboard_nsd('../services/simple_charmed_nsd')\n ns_create = time.time()\n ns_id = osm.ns_create('charmed-ns-%d' % n, nsd_id)\n osm.ns_wait_until_all_in_status('running')\n ns_created = time.time()\n\n wait_for_detailed_configuration_status(osm, 'waiting for machine')\n waiting_for_machine_start = time.time()\n\n wait_for_detailed_configuration_status(osm, 'installing charm software')\n installing_charm_start = time.time()\n\n wait_for_detailed_configuration_status(osm, 'Ready!')\n ready = time.time()\n\n instance = osm.api.compute.find_server_by_name_or_id('dc1_charmed-ns-%d-1--1' % n).emulator_compute\n osm.ns_action(ns_id, 1, 'touch')\n while instance.cmd('cat /testmanual') != '':\n time.sleep(0.1)\n ns_action_done = time.time()\n\n osm.ns_delete(ns_id)\n osm.ns_wait_until_all_in_status('terminated')\n ns_deleted = time.time()\n\n writer.writerow({\n 'ns_create': ns_created - ns_create,\n 'charm_deployment_start': waiting_for_machine_start - 
ns_created,\n 'waiting_for_machine': installing_charm_start - waiting_for_machine_start,\n 'installing_charm_software': ready - installing_charm_start,\n 'ns_action': ns_action_done - ready,\n 'ns_delete': ns_deleted - ns_action_done,\n })\n csvfile.flush()\n","repo_name":"containernet/vim-emu","sub_path":"examples/performance_measurements/charm_deployment.py","file_name":"charm_deployment.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"}
{"seq_id":"73469477334","text":"import discord\nfrom discord.ext import commands\nfrom discord import Embed, Color\nfrom dotenv import load_dotenv\nimport os\nimport json\nimport requests\n\nfrom bgg import (\n BggCollectionError, \n BggCollectionTimeoutError, \n get_bgg_collection, \n get_game_details,\n get_game_from_collection,\n search_bgg,\n combine_bgg_collections,\n collections_known,\n delete_cache # needed by refresh_collection below\n)\n\nfrom utils.text import normalize\n\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix='/', intents=intents)\n\nknown_users = collections_known\n\n@bot.command()\nasync def ping(ctx):\n await ctx.send(\"pong bitch\")\n\n\n@bot.command()\nasync def game(ctx, *, game_name):\n game_name = normalize(game_name, True)\n\n collections = []\n for user in known_users:\n try: \n collections.append(await get_bgg_collection(user))\n except BggCollectionTimeoutError as e: \n await ctx.send(f\"{str(e)}; creating partial combined collection\")\n except BggCollectionError as e: \n await ctx.send(f\"{str(e)}; creating partial combined collection\")\n\n combined_collection = await combine_bgg_collections(collections)\n collection_search_results = [collection_game for collection_game in combined_collection['games'] if game_name in collection_game['name']]\n\n game_in_collection = False\n 
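# Defaults assume a BGG-wide fallback search; both values are overwritten below when the game is found in a known collection.\n 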
The best match has been provided via BGG search.\"\n    game_owners = f\"```No one currently owns this game```\"\n\n    if len(collection_search_results) > 0:\n        game_in_collection = True\n        game_description = None\n        print(\"game in collection\")\n        found_game = collection_search_results[0]\n        game_owners = f\"```{', '.join(found_game['owned_by'])}```\"\n        game_details = await get_game_details(found_game['objectid'])\n    else:\n        print(\"game not in collection\")\n        search_results = await search_bgg(game_name)\n        boardgame_names = []\n\n        if len(search_results) < 1:\n            return await ctx.send(f\"No game found using the provided search criteria: `{game_name}`\")\n        elif len(search_results) == 1:\n            preferred_game_id = int(search_results[0]['objectid'])\n        else:\n            boardgame_game_ids = [int(search_result['objectid']) for search_result in search_results]\n            preferred_game_id = boardgame_game_ids[int(len(boardgame_game_ids)/2)]\n        \n        for search_result in search_results:\n            if int(search_result['objectid']) != preferred_game_id:\n                boardgame_names.append(search_result['name'])\n\n\n        game_details = await get_game_details(preferred_game_id)\n        num_games_to_return = 10\n\n\n    embed = Embed(\n        title=f\"{game_details['label']} ({game_details['yearpublished']})\",\n        url=f\"https://boardgamegeek.com/boardgame/{game_details['objectid']}\",\n        description=game_description,\n        colour=discord.Color.dark_purple(),\n    )\n\n    embed.set_thumbnail(url=game_details['image'])\n    embed.add_field(name=\"Avg Rating\", value=game_details['averagerated'], inline=True)\n    # embed.add_field(name=\"Play Time\", value=game_details['playtime'], inline=True)\n    embed.add_field(name=\"Weight\", value=f\"{round(float(game_details['averageweight']), 2)} / 5\", inline=True)\n    embed.add_field(name=\"Player Count\", value=game_details['playercount'], inline=True)\n    embed.add_field(name=f\"Game Description ({game_details['playtime']} min)\", value=f\"*{game_details['descriptionshort']}*\", inline=False)\n    embed.add_field(name=\"Owned By\",value=game_owners, inline=False)\n    embed.set_footer(text=f\"{', '.join(category['label'] for category in game_details['categories'])}\")\n\n    # Set other search results field based on additional results being from the collection or via bgg search\n    if game_in_collection:\n        if len(collection_search_results) > 1:\n            # add bot command links for each found game, that can be clicked to send the command lookup for that game: https://stackoverflow.com/questions/73741997/clickable-command-in-text-discord\n            all_found_games = \"\\n\".join([search_game['label'] for search_game in collection_search_results[1:]])\n            embed.add_field(name=\"Other Search Matches in Combined Collection\", value=f\"{all_found_games}\", inline=False)\n    else:\n        if len(boardgame_names) == 0:\n            title = f\"Other Search Results ({len(boardgame_names)})\"\n            embed.add_field(name=title,value=\"*No other games found matching search*\", inline=False)\n        elif len(boardgame_names) >= num_games_to_return:\n            title = f\"Other Search Results ({num_games_to_return} of {len(boardgame_names)})\"\n            embed.add_field(name=title,value=\"\\n\".join(boardgame_names[:num_games_to_return]), inline=False)\n        else:\n            title = f\"Other Search Results ({len(boardgame_names)} of {len(boardgame_names)})\"\n            embed.add_field(name=title,value=\"\\n\".join(boardgame_names[:num_games_to_return]), inline=False) \n\n    await ctx.send(embed=embed)\n\n\n
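# Hedged usage note (not in the original file): with command_prefix='/', the command above is\n# invoked in Discord as e.g. '/game terraforming mars'. When several BGG search results come back,\n# the middle id is preferred, e.g. ids [11, 22, 33, 44, 55] -> preferred_game_id = 33.\n\n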
cache\")\n delete_cache(\"collection\", username)\n user_collection = await get_bgg_collection(username)\n await ctx.send(f\"{username}'s collection cache updated: {len(user_collection['game_id_list'])} games\")\n\n\n@bot.command()\nasync def collection(ctx, username):\n collection = await get_bgg_collection(username)\n collection_formatted = f\"\"\"```json {json.dumps(collection[\"games\"][0])}```\"\"\"\n for game in collection['games']:\n await ctx.send(game['thumbnail'])\n\n\n@bot.command()\nasync def known_collections(ctx):\n # TODO: Add link to bgg user collection https://boardgamegeek.com/collection/user/\n kc = []\n for user in known_users:\n crown = \"\"\n # TODO: find actual top game count and assign crown\n if user.lower() == \"jchamilton\":\n crown = \":crown:\"\n\n user_link = f\"[{user}](https://boardgamegeek.com/collection/user/{user})\"\n user_collection = await get_bgg_collection(user)\n user_collection_size = len(user_collection['games'])\n\n kc.append(f\"{user_link} ({user_collection_size} Games) {crown}\")\n\n kc = \"\\n\".join(kc)\n\n embed = Embed(\n title=\"Known Collections\",\n colour=discord.Color.dark_purple(),\n )\n embed.add_field(name=f\"\"\"Currently I know the board game collections of:\"\"\", value=kc)\n await ctx.send(embed=embed)\n\n# TODO: hot list command\n# TODO: list all games in combined collection command\n\nload_dotenv()\nbot.run(os.getenv('BOT_TOKEN', None))\n","repo_name":"PrimordialLight/bggbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73073189012","text":"from unittest.mock import MagicMock\n\nimport pytest\nfrom authlib.jose import jwt\nfrom flask import session\n\nfrom auth.permissions import make_jwt_permissions_reader\n\n\n@pytest.mark.skip\ndef test_register(client, app):\n assert client.get(\"/auth/register\").status_code == 200\n response = client.post(\"/auth/register\", data={\"username\": \"a\", \"password\": \"a\"})\n assert \"http://localhost/auth/login\" == response.headers[\"Location\"]\n\n # with app.app_context():\n # assert (\n # get_db()\n # .execute(\n # \"select * from user where username = 'a'\",\n # )\n # .fetchone()\n # is not None\n # )\n\n\n@pytest.mark.skip\n@pytest.mark.parametrize(\n (\"username\", \"password\", \"message\"),\n (\n (\"\", \"\", b\"Username is required.\"),\n (\"a\", \"\", b\"Password is required.\"),\n ),\n)\ndef test_register_validate_input(client, username, password, message):\n response = client.post(\n \"/auth/register\", data={\"username\": username, \"password\": password}\n )\n assert message in response.data\n\n\n@pytest.mark.parametrize(\n (\"username\", \"password\"),\n [\n [\"test\", \"test\"],\n [\"other\", \"othertest\"],\n ],\n)\ndef test_login(\n app, client, mock_auth_repo: MagicMock, auth_service_url, username, password, mocker\n):\n # Arrange\n\n mock_token = jwt.encode(\n {\"alg\": \"HS256\"},\n {\"sub\": 2, \"name\": username, \"permissions\": \"\"},\n app.secret_key,\n ).decode(\"utf-8\")\n\n _get_response = mocker.MagicMock()\n _get_response.json.return_value = {\"JWT\": mock_token}\n\n mock_auth_repo.post.return_value = _get_response\n\n # Act\n client.post(\"/auth/login\", data={\"username\": username, \"password\": password})\n\n # Assert\n mock_auth_repo.post.assert_called_once_with(\n auth_service_url + \"/auth/login\",\n json={\"username\": username, \"password\": password},\n )\n\n # ..The user data is stored in the 
session\n # TODO\n # with app.test_request_context(\n # \"/auth/login\", data={\"username\": username, \"password\": password}\n # ):\n # assert session[\"user\"] == {\"user_id\": 2}\n\n\ndef test_logout(client, auth):\n auth.login()\n\n with client:\n auth.logout()\n assert \"user_id\" not in session\n\n\nclass TestPermissionsReader:\n \"\"\"\n * Class Name: TestPermissions\n * Purpose: This purpose of this class is to test the PermissionsReader\n * classes.\n \"\"\"\n\n @pytest.mark.parametrize(\n (\"payload\", \"resource\", \"expected\"),\n [\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"read: files\", \"read:disk_storage\"],\n },\n \"test.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\", \"write:disk_storage\"],\n },\n \"test.txt\",\n True,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\", \"write:disk_storage\"],\n },\n \"otherfile.txt\",\n False,\n ),\n (\n {\n \"sub\": \"2\",\n \"name\": \"test\",\n \"permissions\": [\"write: files\", \"write: disk_storage\"],\n \"iat\": 1516239022,\n },\n \"otherfile.txt\",\n True,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\"],\n },\n \"test.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: disk_storage\"],\n },\n \"test.txt\",\n False,\n ),\n ],\n )\n def test_may_delete(self, payload, resource, expected):\n # Arrange\n auth_token = payload\n\n # Act\n may_delete = make_jwt_permissions_reader(auth_token).may_delete(resource)\n\n # Assert\n assert may_delete is expected\n\n @pytest.mark.parametrize(\n (\"payload\", \"resource\", \"expected\"),\n [\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"read: files\", \"read:disk_storage\"],\n },\n \"test.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\", \"write:disk_storage\"],\n },\n \"test.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\", \"write:disk_storage\"],\n },\n \"otherfile.txt\",\n False,\n ),\n (\n {\n \"sub\": \"2\",\n \"name\": \"test\",\n \"permissions\": [\"read: files\", \"read: disk_storage\"],\n \"iat\": 1516239022,\n },\n \"otherfile.txt\",\n True,\n ),\n (\n {\n \"sub\": \"2\",\n \"name\": \"test\",\n \"permissions\": [\"read: files\"],\n \"iat\": 1516239022,\n },\n \"otherfile.txt\",\n True,\n ),\n (\n {\n \"sub\": \"2\",\n \"name\": \"test\",\n \"permissions\": [\"write: files\", \"write: disk_storage\"],\n \"iat\": 1516239022,\n },\n \"otherfile.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: files\"],\n },\n \"test.txt\",\n False,\n ),\n (\n {\n \"sub\": \"test.txt\",\n \"iss\": \"1\",\n \"aud\": \"public\",\n \"permissions\": [\"write: disk_storage\"],\n },\n \"test.txt\",\n False,\n ),\n ],\n )\n def test_may_share(self, payload, resource, expected):\n # Arrange\n auth_token = payload\n\n # Act\n may_share = make_jwt_permissions_reader(auth_token).may_share(resource)\n\n # Assert\n assert may_share is 
expected\n","repo_name":"KaiPrince/RippedWebServer","sub_path":"server/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12320839149","text":"from random import choice\nfrom verify import verify\nimport pandas as pd\nimport os\n\n# output_file holds the path the programs output will be stored\noutput_file = \"exam_formation.xlsx\"\n\n# Data provided is stored in stored_file\nstored_file = \"classes_data.xlsx\"\n\ndef StoreDB(hall_db_filename):\n\n # CLASSES holds information about the school class and student.\n CLASSES = {'JSS1':['Chukwujekwu Pius','Akintoye Tolu','Adefisoye Sam','Ofoke Max','Ofoke James','16','17','18','19','110','111','112','113','114'],\n 'JSS2':['21','22','23','24','25','26','27','28','29','210','211','212','213','214'],\n 'JSS3':['31','32','33','34','35','36','37','38','39','310','311','312','313','314'],\n 'SS1A':['1A1','1A2','1A3','1A4','1A5','1A6','1A7','1A8','1A9','1A10','1A11','1A12','1A13','1A14'],\n 'SS1B':['1B1','1B2','1B3','1B4','1B5','1B6','1B7','1B8','1B9','1B10','1B11','1B12','1B13','1B14'],\n 'SS2A':['2A1','2A2','2A3','2A4','2A5','2A6','2A7','2A8','2A9','2A10','2A11','2A12','2A13','2A14'],\n 'SS2B':['2B1','2B2','2B3','2B4','2B5','2B6','2B7','2B8','2B9','2B10','2B11','2B12','2B13','2B14'],\n 'SS3A':['3A1','3A2','3A3','3A4','3A5','3A6','3A7','3A8','3A9','3A10','3A11','3A12','3A13','3A14'],\n 'SS3B':['3B1','3B2','3B3','3B4','3B5','3B6','3B7','3B8','3B9','3B10','3B11','3B12','3B13','3B14']}\n\n # halls holds where random student will be stored.\n halls = {'A':[],'B':[],'C':[],'D':[],'E':[],'F':[],'G':[],'H':[],'I':[],'J':[],'K':[],'L':[],'M':[]}\n\n\n print(f'''\n THE TOTAL NUMBER OF CLASS YOU WILL BE GIVING SHOULD INCLUDE THE *A,B,& ...* OF THE CLASSES;\n SO COUNT THEM ALL TOGETHER.\n \\n\\n\\n''')\n\n\n ''' Getting names of class writing exam'''\n def get_class_name():\n class_name = input(f\"\\n\\t\\t.....WHAT CLASS IS THIS?.....: \").upper()\n if class_name in CLASSES:\n return class_name\n else:\n print(f\"\\n!!!The class '{class_name}' you entered does not match the one in our database!!!.\")\n response = input(f\"\\nDO YOU WANT TO CREATE AND ADD {class_name} TO YOUR DATABASE?. [Y/n]: \")\n if response == \"Y\":\n CLASSES[class_name] = list()\n return class_name\n elif response == \"n\":\n return get_class_name()\n else:\n print(\"INVALID INPUT, TRY AGAIN. \")\n return get_class_name()\n\n ''' Getting students in each class'''\n def get_students(class_name):\n class_limit = verify(input(f\"\\n\\t\\t |*| HOW MANY STUDENTS ARE IN {class_name}: \"))\n for i in range(class_limit):\n CLASSES[class_name].append(input(f\"\\n\\t\\t\\t[*] GIVE ME ALL {class_name} STUDENTS NAME, ONE AFTER THE OTHER.: \"))\n\n ''' Storing the class data \"Class name & It's students\". 
'''\n    def class_data():\n        class_name = get_class_name()\n        get_students(class_name)\n\n    ''' Getting the total number of the class writing the exam'''\n    all_class = verify(input(f\" ___HOW MANY CLASS ARE PRESENTLY WRITING THIS EXAM___?: \"))\n\n    ''' Gathering data for each class '''\n    for klass in range(all_class):\n        class_data()\n\n    # This function will be used to store the CLASSES permanently as a database.\n    def store_classes_data(_CLASSES):\n        with pd.ExcelWriter(stored_file) as writer:\n            for Class, Students in _CLASSES.items():\n                df = pd.DataFrame(Students, columns = [Class]) \n                sheet_name= Class\n                df.to_excel(writer, sheet_name=sheet_name, index=False)\n\n    user_response = input(\"\\n\\nDO YOU WANT TO SAVE THE INFORMATION PROVIDED ABOUT ALL CLASS AND STUDENT [Y/n]: \")\n\n    while True:\n        if user_response == \"Y\":\n            store_classes_data(CLASSES)\n            break\n        elif user_response == \"n\":\n            print(\"Okay. \\n INFO NOT SAVED.\")\n            break\n        else:\n            print(\"Response provided does not correspond; TRY AGAIN. \")\n            user_response = input(\"\\n\\nDO YOU WANT TO SAVE THE INFORMATION PROVIDED ABOUT ALL CLASS AND STUDENT [Y/n]: \")\n\n    ''' Getting the hall limit and checking that it is not greater than the number of students left.'''\n    def h_lim():\n        hall_limit = verify(input(f\"\\n\\t\\t~~~~~WHAT'S HALL {hall} LIMIT?~~~~~: \"))\n        all_students = sum([len(CLASSES[clas]) for clas in CLASSES])\n        if hall_limit > all_students:\n            print(f\" Your hall limit '{hall_limit}', is greater than the number of students '{all_students}' left. \".upper())\n            return h_lim()\n\n        else:\n            return hall_limit\n\n    # Generating the tabulated output\n    for hall in halls:\n        hall_limit = h_lim()\n        ''' Randomly splitting all students into their halls'''\n        while len(halls[hall]) < hall_limit:\n            for clas in CLASSES:\n                if len(CLASSES[clas]) > 0:\n                    random_student = choice(CLASSES[clas])\n                    rnd_student_detail = random_student,clas\n                    halls[hall].append(list(rnd_student_detail))\n                    CLASSES[clas].remove(random_student)\n                    if len(halls[hall]) == hall_limit:\n                        break\n\n                else:\n                    pass\n        r = sum([len(CLASSES[clas]) for clas in CLASSES])\n        print(\"\\nThe total number of students left is: \", r,\"\\n\")\n\n    # Sorting student by class\n    def sort_by_class(student):\n        return student[1]\n\n    # Sorting the output and writing the output to an Excel file.\n    while True:\n        user_input = input(f\"\"\" HOW DO YOU WANT TO ARRANGE YOUR OUTPUT?\n        A) ACCORDING TO NAME ALPHABETICAL ORDER\n        B) ACCORDING TO CLASS ORDER\n        ENTER:  \"\"\")\n\n        if user_input == \"A\":\n            # Writing the output to Excel with different sheets for each hall\n            with pd.ExcelWriter(hall_db_filename) as writer:\n                for hall, students in halls.items():\n                    df = pd.DataFrame(sorted(students), columns = [\"Name\", \"Class\"])\n                    # Reset index to start from 1\n                    df = df.reset_index(drop=True)\n                    df.index += 1\n                    # Write the DataFrame to Excel\n                    sheet_name = \"Hall \" + hall\n                    df.to_excel(writer, sheet_name=sheet_name, index=True)\n\n            print(\"Your output has been stored in an Excel file in your present working directory/path.\")\n            break\n        elif user_input == \"B\":\n            # Writing the output to Excel with different sheets for each hall\n            with pd.ExcelWriter(hall_db_filename) as writer:\n                for hall, students in halls.items():\n                    sorted_students = sorted(students, key= sort_by_class)\n                    df = pd.DataFrame(sorted_students, columns = [\"Name\", \"Class\"])\n                    # Reset index to start from 1\n                    df = df.reset_index(drop=True)\n                    df.index += 1\n                    # Write the DataFrame to Excel\n                    sheet_name = \"Hall \" + hall\n                    df.to_excel(writer, sheet_name=sheet_name, 
index=True)\n\n            print(\"Your output has been stored in an Excel file in your present working directory/path.\")\n            break\n        else:\n            print(\"\\n\\t[*] Your input does not correspond to required input; 'TRY AGAIN' [*]\")\n\nclas_dict = {}\nclas_list = []\n\ndef loadDB(class_db_filename):\n    df_Excel = pd.read_excel(class_db_filename, sheet_name=None)\n    clas = input(\"WHICH CLASS DO YOU WANT TO MODIFY?: \").upper()\n    while True:\n        if clas not in df_Excel.keys():\n            print(f\"\\n.....'{clas}' CLASS IS NOT PRESENT IN YOUR STORED CLASS..... \")\n            clas = input(\"\\n.....SO WHICH CLASS DO YOU WANT TO MODIFY?.....: \").upper()\n        else:\n            print(f\"\"\"WHAT DO YOU WANT TO DO TO \"{clas}\" CLASS?\n            1) DELETE CLASS\n            2) REMOVE STUDENT\n            3) ADD STUDENT\n            4) ADD CLASS\n            \"\"\")\n            reply = input(\"  ENTER HERE ===>: \")\n            if reply == \"1\":\n                del(df_Excel[clas])\n                print(df_Excel)\n                break\n            elif reply == \"2\":\n                print(\"\\n\", df_Excel[clas])\n                name_index = int(input(f\"\\n USING THE PERSON S/N \\n WHO DO YOU WANT TO REMOVE?: \"))\n                # drop the selected row from the sheet's DataFrame (a del on a local copy would not modify it)\n                df_Excel[clas] = df_Excel[clas].drop(df_Excel[clas].index[name_index])\n                print(\"After removal: \", df_Excel[clas]) \n                break\n\n            elif reply == \"3\":\n                pass\n            elif reply == \"4\":\n                pass\n\n\n\n\nStoreDB(output_file)\n# loadDB(stored_file)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# response = input(\"\"\"DO YOU WANT TO MAKE USE OF THE STORED DATA TO GENERATE THE HALL ARRANGEMENT OR MODIFY THE DATA?\n#                 [M] Modify\n#                 [Y] Yes\n#                 [N] No\n#                 ENTER ==>: \"\"\")\n# if os.path.exists(stored_file):\n#     if response == \"Y\":\n#         loadDB(stored_file)\n#     elif response == \"N\":\n#         pass\n#     elif response == \"M\":\n#         pass \n#     else:\n#         print(\"INVALID INPUT.\")\n    \n\n\n\n\n\n\n\n\n\n\n\n    ","repo_name":"Alpha1Guru/python_folder","sub_path":"Mujeeb/exam_formation1.py","file_name":"exam_formation1.py","file_ext":"py","file_size_in_byte":9373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26126247469","text":"def new_value(old, operator, value):\n    if value=='old':\n        val = old\n    else:\n        val = int(value)\n    if operator=='+':\n        return (old+val)//3\n    elif operator=='*':\n        return (old*val)//3\n    else:\n        print(f'ERROR {operator}')\n\n\nwith open('input') as f:\n\tdata = f.read()\n\t\n\t\n\"\"\"\nMonkey {3}:\n  Starting items: {71, 64, 75}\n  Operation: new = old {+} {2}\n  Test: divisible by {17}\n    If true: throw to monkey {6}\n    If false: throw to monkey {2}\n\"\"\"\nmonkeys = list()\nraw_monkeys = data.split('\\n\\n')\nfor raw_monkey in raw_monkeys:\n    monkey = raw_monkey.split('\\n')\n    monkey_dict = dict()\n    \n    num_monkey = int(monkey[0].split(' ')[-1][:-1])\n    monkey_dict['starting_items'] = list(map(int, monkey[1][17:].split(',')))\n    monkey_dict['operation'] = monkey[2].split(' ')[-2:]\n    monkey_dict['test'] = int(monkey[3].split(' ')[-1])\n    monkey_dict['if_true'] = int(monkey[4].split(' ')[-1])\n    monkey_dict['if_false'] = int(monkey[5].split(' ')[-1])\n    \n    monkeys.append(monkey_dict)\n\nrounds = 20\nnum_of_monkeys = len(monkeys)\n\ninspected_objects = dict()\nfor i in range(num_of_monkeys):\n    inspected_objects[i] = 0\n\nfor _ in range(rounds):\n    for i in range(num_of_monkeys):\n        inspected_objects[i] += len(monkeys[i]['starting_items'])\n        for item in monkeys[i]['starting_items']:\n            new = new_value(item, monkeys[i]['operation'][0], monkeys[i]['operation'][1])\n            if new % monkeys[i]['test']==0:\n                monkeys[monkeys[i]['if_true']]['starting_items'].append(new)\n            else:\n                monkeys[monkeys[i]['if_false']]['starting_items'].append(new)\n        monkeys[i]['starting_items'] = []\n    
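\n# Worked check for new_value (sample values from the puzzle statement, not from the input\n# file): inspecting item 79 with operation ['*', '19'] gives (79 * 19) // 3 == 500, i.e.\n# the worry level is divided by 3 after every inspection.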
\n\nsorted_values = sorted(inspected_objects.values())\nmonkey_business = sorted_values[-1]*sorted_values[-2]\n\nprint(f'monkey business {monkey_business}')","repo_name":"rubzip/adventofcode_22","sub_path":"day_11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33124110313","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/10 21:49\n# @Author : EmperorHons\n# @File : Rookie069.py\n# @Software : PyCharm\n\"\"\"\nhttps://www.runoob.com/python3/python-selection-sort.html\nPython selection sort\nSelection sort is a simple, intuitive sorting algorithm. It works as follows:\nfirst, find the smallest (largest) element in the unsorted sequence and place it at\nthe start of the sorted sequence; then keep finding the smallest (largest) element\namong the remaining unsorted elements and append it to the end of the sorted\nsequence, until every element is sorted.\n\"\"\"\n\nimport pysnooper\n\n\n@pysnooper.snoop()\ndef Select_sort(List):\n    for i in range(len(List)):\n        min_idx = i\n        # print(\"value of i\", i)\n        for j in range(i+1, len(List)):\n            # print(\"value of j\", j)\n            if List[min_idx] > List[j]:\n                min_idx = j\n\n        List[i], List[min_idx] = List[min_idx], List[i]\n    print(\"Sorted array:\")\n    for i in range(len(List)):\n        print(\"%d\" % List[i])\n    return List  # return the list, sorted in place\n\n\nif __name__ == '__main__':\n    List = [64, 25, 12, 22, 11]\n    Select_sort(List)","repo_name":"yusheng88/RookieInstance","sub_path":"Rookie069.py","file_name":"Rookie069.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30294811350","text":"\"\"\"\n    Python script for solo control. \n\"\"\"\n\nimport math\nimport os\nimport csv\nfrom scipy.interpolate import interp1d\nimport numpy as np\nfrom config import *\nimport time\nfrom control.free_solo_ctrl import *\n\ntry:\n    import libmaster_board_sdk_pywrap as mbs\nexcept ImportError:\n    # print(\"- Cannot find libmaster_board_sdk_pywrap. PyBullet Simulation will work as normal but SOLO control will not work.\")\n    pass \n\nclass SoloControlClass():\n\n    def __init__(self,\n                name_interface = 'enp9s0f1',\n                n_slaves = 6,\n                csv_joint_positions_file_name = TRAJ_JOINTS_FILE_NAME,\n                solo_output_file = SOLO_DATA_OUTPUT_FILE_NAME,\n                phase_0_calibration = False,\n                phase_1_calibration = False,\n                phase_2_calibration = False,\n                ):\n        self.debug_mode = False\n        self.counter = 0 \n        self.current_time = 0\n        self.frequency = 1000 # Control frequency\n        self.dt = 1/self.frequency # Time step\n        self.n_slaves = n_slaves\n        self.phase_0_calibration = phase_0_calibration\n        self.phase_1_calibration = phase_1_calibration\n        self.phase_2_calibration = phase_2_calibration\n        self.name_interface = name_interface\n        self.csv_joint_positions_file_name = GLOBAL_AUTOGENERATED_DIRECTORY+csv_joint_positions_file_name\n        self.solo_output_file = GLOBAL_OUTPUT_DIRECTORY+solo_output_file\n        self.unique_solo_output_file = GLOBAL_OUTPUT_DIRECTORY + HISTORY_DIR + SOLO_DATA_OUTPUT_FILE_UNIQUE\n\n        self.init_starting_params()\n        if self.phase_0_calibration:\n            self.init_masterboard_params()\n            print(\"PHASE 0 Calibration. Motors and masterboard initialized. Exiting... \nRun Phase 1 with -c flag. \nRun Phase 2 with -v flag. 
\\nRun sequences using calibrated parameters with no flag.\\nEnd of program.\")\n exit(1)\n\n self.sequence_motion_trajectory = self.load_trajectory(self.csv_joint_positions_file_name)\n self.init_constant_positions_joint_angles() # loads smooth_home_pos and smooth_landing_pos\n\n if self.phase_1_calibration:\n self.phase_2_calibration = False \n \n if not self.phase_1_calibration and not phase_2_calibration:\n print(\"Loading Phase 2 Calibration Indices.\")\n self.load_calibrated_zero_angles = True \n else:\n if self.phase_1_calibration:\n print(\"Running Phase 1 Calibration\")\n else:\n print(\"Running Phase 2 Calibration\")\n self.load_calibrated_zero_angles = False \n\n self.init_masterboard_params()\n self.init_controller_params()\n\n if self.phase_1_calibration or self.phase_2_calibration: \n self.init_calibration()\n\n self.main_loop()\n\n self.robot_if.Stop() # Shut down the interface between the computer and the master board\n\n print('---')\n\n if self.robot_if.IsTimeout():\n print(\"Masterboard timeout detected.\")\n print(\"Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.\")\n\n \n def main_loop(self):\n \"\"\"\n Main execution block. Runs until robot is not timeout. Maintains frequency of 1000 Hz. \n\n :return: None.\n :rtype: None.\n \"\"\"\n self.prev_controller_time = time.time()\n self.prev_target = [0.0] * self.n_slaves * 2\n\n while ((not self.robot_if.IsTimeout()) or 1): \n\n if self.program_complete:\n break\n\n if ((time.time() - self.last) > self.dt):\n self.last = time.time()\n\n if self.counter == 0:\n self.data_file = open(self.solo_output_file, 'w')\n self.data_file_c = open(self.unique_solo_output_file, 'w')\n self.data_writer = csv.writer(self.data_file)\n self.data_writer_c = csv.writer(self.data_file_c)\n # header = [\"counter[0]\", \"motor_pos[1-12]\", \"target_pos[13-24]\", \"current[25-36]\", \"imu[37-48]\"] # update this later\n header = [\n \"timestamp[0]\",\n \"pos_bl_hip[1]\", \"pos_br_hip\", \"pos_bl_lower\", \"pos_bl_upper\", \"pos_br_lower\", \"pos_br_upper\", \"pos_fl_hip\", \"pos_fr_hip\", \"pos_fl_lower\", \"pos_fl_upper\", \"pos_fr_lower\", \"pos_fr_upper\",\n \"target_pos_bl_hip[13]\", \"target_pos_br_hip\", \"target_pos_bl_lower\", \"target_pos_bl_upper\", \"target_pos_br_lower\", \"target_pos_br_upper\", \"target_pos_fl_hip\", \"target_pos_fr_hip\", \"target_pos_fl_lower\", \"target_pos_fl_upper\", \"target_pos_fr_lower\", \"target_pos_fr_upper\",\n \"current_A_bl_hip[25]\", \"current_A_br_hip\", \"current_A_bl_lower\", \"current_A_bl_upper\", \"current_A_br_lower\", \"current_A_br_upper\", \"current_A_fl_hip\", \"current_A_fr_hip\", \"current_A_fl_lower\", \"current_A_fl_upper\", \"current_A_fr_lower\", \"current_A_fr_upper\",\n \"imu_accelerometer_x[37]\", \"imu_accelerometer_y\", \"imu_accelerometer_z\", \"imu_gyroscope_x\", \"imu_gyroscope_y\", \"imu_gyroscope_z\", \"imu_attitude_x\", \"imu_attitude_y\", \"imu_attitude_z\", \"imu_linear_acceleration_x\", \"imu_linear_acceleration_y\", \"imu_linear_acceleration_z\",\n \"adc[49]\"\n ]\n self.data_writer.writerow(header)\n self.data_writer_c.writerow(header)\n\n self.counter += 1\n\n if not self.phase_1_calibration and not self.phase_2_calibration:\n self.sequence_counter += 1\n\n if self.phase_1_calibration or self.phase_2_calibration:\n if not self.reset_calibration_complete:\n self.reset_calibration_counter += 1\n\n self.current_time += self.dt\n self.robot_if.ParseSensorData() # Read sensor data sent by the masterboard\n\n if (self.state 
== 0): #  If the system is not ready\n self.state = 1\n\n # for all motors on a connected slave\n for i in self.motors_spi_connected_indexes: # Check if all motors are enabled and ready\n if not (self.robot_if.GetMotor(i).IsEnabled() and self.robot_if.GetMotor(i).IsReady()):\n self.state = 0\n self.init_pos_motors[i] = self.robot_if.GetMotor(i).GetPosition()\n self.current_time = 0\n\n\n else: # If the system is ready\n # for all motors on a connected slave\n for i in self.motors_spi_connected_indexes:\n\n if i % 2 == 0 and self.robot_if.GetDriver(i // 2).GetErrorCode() == 0xf:\n #print(\"Transaction with SPI{} failed\".format(i // 2))\n continue #user should decide what to do in that case, here we ignore that motor\n \n self.global_motor_i = i\n \n if self.robot_if.GetMotor(i).IsEnabled():\n\n if self.debug_mode:\n self.debug_joint.get_joint()\n self.debug_joint.set_joint_position()\n\n elif self.phase_1_calibration:\n self.fill_index_calibration()\n if self.is_calibration_complted(self.index_calibration_array):\n self.completed_phase_1_exit = False \n if not self.calibrated_offsets_saved:\n self.save_calibrated_offsets(phase=1, f_name=self.name_of_calibration_saved_csv)\n else:\n self.phase_1_calibration = False\n self.calibrated_zero_position = self.zero_position\n # if not self.phase_1_calibration:\n else:\n self.run_calibration_synced()\n\n elif not self.completed_phase_1_exit:\n print(\"Calibration Phase 1 Completed. In Free SOLO Control.\")\n self.robot_if.Stop()\n FreeSoloClass()\n exit(1)\n\n elif self.phase_2_calibration:\n if self.calibrated_offsets == []:\n self.calibrated_offsets = self.load_offsets(f_name=self.name_of_calibration_saved_csv)\n \n self.fill_index_calibration()\n\n if self.is_calibration_complted(self.phase_2_index_calibration_array):\n if not self.offsets_to_calibrated_zeros_saved:\n self.save_calibrated_offsets(phase=2, f_name=self.name_of_offset_calibrated_zeros_csv)\n self.phase_2_calibration = False \n self.use_i = True\n self.calibrated_zero_position = self.calibration_target_position\n self.target_position = self.calibrated_zero_position\n \n else:\n self.run_calibration_synced()\n self.do_zero_position_after_calibration = True\n\n elif self.load_calibrated_zero_angles:\n self.calibrated_zero_position = self.load_offsets(f_name=self.name_of_offset_calibrated_zeros_csv)\n self.target_position = self.calibrated_zero_position\n self.load_calibrated_zero_angles = False \n self.use_i = False \n\n elif not self.phase_1_calibration and \\\n not self.phase_2_calibration and \\\n not self.in_motion_trajectory_sequence and \\\n not self.end_sequence:\n # self.robot_if.Stop()\n # exit(1)\n # self.go_to_home_position()\n if self.do_zero_position_after_calibration:\n self.go_to_zero_position()\n else:\n self.go_to_home_position()\n\n elif not self.trigger_is_triggered:\n self.maintain_home_position()\n self.read_trigger_signal()\n \n\n elif (self.in_home_position and self.start_sequence) or self.in_motion_trajectory_sequence:\n if self.start_sequence:\n self.sequence_counter = 0\n print(\"Running sequence ... 
\")\n print('---')\n print(\"Sequence Start Time:\", self.sequence_start_time)\n\n self.start_sequence_motion_trajectory()\n\n if self.sequence_counter % 1000 == 0:\n print('Sequence Run Time:', int(self.last - self.sequence_start_time),'s.', end='\\r')\n\n\n else: # ending\n if self.sequence_counter == 0 and self.global_motor_i==2:\n print(\"Sequence completed!\")\n print('---')\n print(\"Sequence End Time: \", self.sequence_end_time)\n print(\"Total Sequence Runtime:\", self.sequence_end_time - self.sequence_start_time)\n print('---')\n self.do_smooth_landing()\n\n self.controller()\n\n if not self.phase_0_calibration and not self.phase_1_calibration and not self.phase_2_calibration and self.trigger_is_triggered: \n self.get_imu_data()\n self.save_pos_in_arr()\n\n self.robot_if.SendCommand() # Send the reference currents to the master board\n \n \n def maintain_home_position(self):\n \"\"\"\n Maintains home position until trigger is pressed by setting target position as smooth home position. \n\n :return: None.\n :rtype: None.\n \"\"\"\n self.target_position = self.smooth_home_pos\n if ((self.counter % 3000) == 0 and self.global_motor_i == 4):\n print(\"Maintaining position.\")\n print('---')\n \n \n def init_calibration(self):\n \"\"\"\n Initialize calibration parameters.\n\n :return: None.\n :rtype: None.\n \"\"\"\n self.zero_position = [0] * self.n_slaves * 2\n self.home_position = [0] * self.n_slaves * 2\n self.index_pos_array = [0] * self.n_slaves * 2\n self.index_calibration_array = [None] * self.n_slaves * 2\n self.phase_2_index_calibration_array = [None] * self.n_slaves * 2\n # self.calibration_joint_offset_step_value = 0.00000001\n self.calibration_joint_offset_step_value = 0.000001 # 0.000001 rad/ms = 0.00005729578 degrees/ms = 0.05729578 degrees/s in motor\n self.number_of_steps_for_calibration = 1000\n self.max_motor_angle_in_degrees = 25 # input in degrees (leg angle)\n self.min_motor_angle_in_degrees = -25 # input in degrees (leg angle)\n self.max_motor_angle = math.radians(self.max_motor_angle_in_degrees) * 9 # motor angle\n self.min_motor_angle = math.radians(self.min_motor_angle_in_degrees) * 9 # motor angle\n self.tolerance_for_startup_position_in_degrees = 10 # tolerance of leg at startup\n self.tolerance_for_startup_position = math.radians(self.tolerance_for_startup_position_in_degrees) * 9 # tolerance angle in radians at motor\n\n self.move_in_max_dir = True \n self.calibration_target_position = self.zero_position\n self.calibration_joint_offset_array = [0] * self.n_slaves *2\n self.calibration_counter_i = 0\n self.calibration_counter_j = 0\n self.calibrated_offsets_saved = False \n self.calibrated_hips_phase_1 = False \n self.calibrated_lower_ls_phase_1 = False\n self.calibrated_upper_ls_phase_1 = False\n self.calibrated_hips_phase_2 = False \n self.calibrated_hips_phase_2_prev = False \n self.calibrated_lower_ls_phase_2 = False\n self.calibrated_lower_ls_phase_2_prev = False\n self.calibrated_upper_ls_phase_2 = False\n self.calibrated_offsets = []\n self.multiple_indices_found = [False] * self.n_slaves * 2\n self.target_hip_angles = None\n self.target_lower_ls_angles = None\n self.target_upper_ls_angles = None\n self.offsets_to_calibrated_zeros_saved = False\n\n self.reset_calibration_complete = True \n self.reset_calibration_counter = 0\n self.smooth_reset_trajectory = [[None] * self.n_slaves * 2] \n\n \n def load_trajectory(self, name_of_csv_file):\n \"\"\"\n Loads PyBullet trajectory, does time scaling, does interpolation of trajectory to run in 1000 Hz, and 
outputs sequence with correct index mapping.\n\n :param name_of_csv_file: Name of csv file where trajectory is being loaded from.\n :type name_of_csv_file: str.\n :return: Time scaled, interpolated, and correct index trajectory for csv file.\n :rtype: list[float].\n \"\"\"\n print('Loading trajectory for: '+name_of_csv_file+'...', end='')\n trajectory_from_csv = self.read_from_csv(name_of_csv_file, True, 2)\n mapped_trajectory = list([self.get_mapped_joints_from_pybullet_to_robot(trajector) for trajector in trajectory_from_csv])\n print(\" Done!\")\n return mapped_trajectory\n\n\n def read_from_csv(self, name_of_csv, header, num_headers=1):\n \"\"\"\n Reads data from any csv file.\n\n :param name_of_csv: name of csv file.\n :type name_of_csv: str.\n :param header: Specify whether csv file have header or not.\n :type header: Bool.\n :return: Returns all data in the csv file.\n :rtype: list[list[float]].\n \"\"\"\n t_num_headers = num_headers\n input_file = open(name_of_csv)\n csv_reader = csv.reader(input_file)\n data = []\n header_skipped = False\n for line in csv_reader:\n temp = []\n if header and not header_skipped:\n t_num_headers -= 1\n if t_num_headers == 0:\n header_skipped = True\n\n if line[0] == 'pybullet':\n print(\"\\n==================================================================\\n\")\n print(\"ERROR: SEQUENCE WAS GENERATED FOR PYBULLET ENV (240 HZ). EXITING!\")\n print(\"==================================================================\\n\")\n exit(-1)\n\n continue\n for e in line:\n try:\n temp.append(float(e))\n except:\n temp.append(e)\n data.append(temp)\n return data\n\n\n def init_constant_positions_joint_angles(self):\n \"\"\"\n Initializes constant positions joint angles.\n\n :return: None\n :rtype: None\n \"\"\"\n self.smooth_home_pos = self.sequence_motion_trajectory[0]\n try:\n self.smooth_landing_pos = self.load_offsets(GLOBAL_CALIBRATION_FILES_DIRECTORY+LANDING_POS_FILE)\n except:\n self.smooth_landing_pos = [0.0]*12\n\n\n def get_mapped_joints_from_pybullet_to_robot(self, pybullet_trajectory):\n \"\"\"\n Converts PyBullet joint/motor indices into robot joint/motor indices and returns updated trajectory.\n\n :param pybullet_trajectory: Trajectory from pybullet whose indices need to be updated.\n :type pybullet_trajectory: list[list[float]].\n :return: Trajectory with updated indices.\n :rtype: list[list[float]].\n \"\"\"\n if len(pybullet_trajectory) != 12 and len(pybullet_trajectory) != 16:\n print(\"Number of joint positions != 12 or 16. 
Exiting!\")\n exit(-1) \n\n\n if len(pybullet_trajectory) == 16: # removing dummy joints if input\n dummy_joints = [3,7,11,15]\n updated_pybullet_trajectory = []\n for ti, tv in enumerate(pybullet_trajectory):\n if ti in dummy_joints:\n continue\n else:\n updated_pybullet_trajectory.append(tv)\n \n pybullet_trajectory = updated_pybullet_trajectory\n\n\n mapped_trajectory = [None] * self.n_slaves * 2\n ''' mapped_trajectory[robot_motor_index] = pybullet_trajectory[correpsonding_pybullet_joint_index] '''\n mapped_trajectory[self.motor_mapping[\"bl_hip\"]] = - pybullet_trajectory[6] # bl_hip = hip_left_back \n mapped_trajectory[self.motor_mapping[\"br_hip\"]] = pybullet_trajectory[9] # br_hip = hip_right_back\n mapped_trajectory[self.motor_mapping[\"bl_lower\"]] = pybullet_trajectory[8] # bl_lower = lower_leg_left_back\n mapped_trajectory[self.motor_mapping[\"bl_upper\"]] = pybullet_trajectory[7] # bl_upper = upper_leg_left_back\n mapped_trajectory[self.motor_mapping[\"br_lower\"]] = pybullet_trajectory[11] # br_lower = lower_leg_right_back\n mapped_trajectory[self.motor_mapping[\"br_upper\"]] = pybullet_trajectory[10] # br_upper = upper_leg_right_back\n mapped_trajectory[self.motor_mapping[\"fl_hip\"]] = pybullet_trajectory[0] # fl_hip = hip_left_front\n mapped_trajectory[self.motor_mapping[\"fr_hip\"]] = - pybullet_trajectory[3] # fr_hip = hip_right_front\n mapped_trajectory[self.motor_mapping[\"fl_lower\"]] = pybullet_trajectory[2] # fl_lower = lower_leg_left_front\n mapped_trajectory[self.motor_mapping[\"fl_upper\"]] = pybullet_trajectory[1] # fl_upper = upper_leg_left_front\n mapped_trajectory[self.motor_mapping[\"fr_lower\"]] = pybullet_trajectory[5] # fr_lower = lower_leg_right_front\n mapped_trajectory[self.motor_mapping[\"fr_upper\"]] = pybullet_trajectory[4] # fr_upper = upper_leg_right_front\n \n mapped_trajectory = np.array(mapped_trajectory) * 9 # accounting for gearing\n\n return mapped_trajectory\n\n\n def init_starting_params(self):\n \"\"\"\n Initialize starting parameters.\n\n :return: None.\n :rtype: None.\n \"\"\"\n self.in_home_position = False \n self.start_sequence = False \n self.in_motion_trajectory_sequence = False \n self.sequence_motion_trajectory = None \n self.sequence_counter = 0 \n self.end_sequence = False \n self.going_home = False \n self.going_zero = False\n self.interploate_zero_complete = False\n self.completed_phase_1_exit = True \n self.name_of_calibration_saved_csv = GLOBAL_CALIBRATION_FILES_DIRECTORY+CALIBRATION_PHASE_1_FILE\n self.name_of_offset_calibrated_zeros_csv = GLOBAL_CALIBRATION_FILES_DIRECTORY+CALIBRATION_PHASE_2_FILE\n self.target_position = [0] * self.n_slaves * 2\n self.calibrated_zero_position = [0] * self.n_slaves * 2\n self.save_curr_pos = []\n self.save_curr_target = []\n self.save_time = [] \n self.data = []\n self.doing_smooth_landing = False \n self.interploate_smooth_landing_complete = False \n self.use_i = False\n self.finished_smooth_landing = False\n self.diff_threshold = 5.0\n self.motors_fighting = [False] * self.n_slaves * 2\n self.interploate_home_complete = False \n self.interpolate_home_trajectory = [[None] * self.n_slaves * 2] \n self.interpolate_zero_trajectory = [[None] * self.n_slaves * 2] \n self.interpolate_landing_trajectory = [[None] * self.n_slaves * 2] \n self.motor_pos = [0] * self.n_slaves * 2\n self.program_complete = False \n self.trigger_is_triggered = False \n self.imu_data = []\n self.do_zero_position_after_calibration = False\n\n self.adc_trigger_threshold = 0.50\n\n self.sequence_start_time = 0\n\n 
self.motor_mapping = {\n \"bl_hip\" : 0,\n \"br_hip\" : 1,\n \"bl_lower\" : 2,\n \"bl_upper\" : 3,\n \"br_lower\" : 4,\n \"br_upper\" : 5,\n \"fl_hip\" : 6,\n \"fr_hip\" : 7,\n \"fl_lower\" : 8,\n \"fl_upper\" : 9,\n \"fr_lower\" : 10,\n \"fr_upper\" : 11,\n 0 : \"bl_hip\",\n 1 : \"br_hip\",\n 2 : \"bl_lower\",\n 3 : \"bl_upper\",\n 4 : \"br_lower\",\n 5 : \"br_upper\",\n 6 : \"fl_hip\",\n 7 : \"fr_hip\",\n 8 : \"fl_lower\",\n 9 : \"fl_upper\",\n 10 : \"fr_lower\",\n 11 : \"fr_upper\"\n }\n\n\n def init_masterboard_params(self):\n \"\"\"\n Initialize masterboard parameters.\n\n :return: None.\n :rtype: None.\n \"\"\"\n self.state = 0 # State of the system (ready (1) or not (0))\n self.global_motor_i = 0\n os.nice(-20) #  Set the process to highest priority (from -20 highest to +20 lowest)\n self.init_motor_drivers()\n \n\n def init_motor_drivers(self):\n \"\"\"\n Initialize motor drivers.\n\n :return: None.\n :rtype: None.\n \"\"\"\n self.init_pos_motors = [0.0 for i in range(self.n_slaves * 2)] # List that will store the initial position of motors\n \n self.motors_spi_connected_indexes = [] # indexes of the motors on each connected slaves\n self.motors_spi_connected_indexes_array = np.zeros(self.n_slaves*2) # 1 if motor at index is connected\n\n self.robot_if = mbs.MasterBoardInterface(self.name_interface)\n self.robot_if.Init() # Initialization of the interface between the computer and the master board\n\n for i in range(self.n_slaves): #  We enable each controler driver and its two associated motors\n self.robot_if.GetDriver(i).motor1.SetCurrentReference(0)\n self.robot_if.GetDriver(i).motor2.SetCurrentReference(0)\n self.robot_if.GetDriver(i).motor1.Enable()\n self.robot_if.GetDriver(i).motor2.Enable()\n self.robot_if.GetDriver(i).EnablePositionRolloverError()\n self.robot_if.GetDriver(i).SetTimeout(5)\n self.robot_if.GetDriver(i).Enable()\n\n self.last = time.time()\n\n while (not self.robot_if.IsTimeout() and not self.robot_if.IsAckMsgReceived()):\n if ((time.time() - self.last) > self.dt):\n self.last = time.time()\n self.robot_if.SendInit()\n\n if self.robot_if.IsTimeout():\n print(\"Timeout while waiting for ack.\")\n else:\n # fill the connected motors indexes array\n for i in range(self.n_slaves):\n if self.robot_if.GetDriver(i).IsConnected():\n # if slave i is connected then motors 2i and 2i+1 are potentially connected\n self.motors_spi_connected_indexes.append(2 * i)\n self.motors_spi_connected_indexes.append(2 * i + 1)\n\n for i in self.motors_spi_connected_indexes:\n self.motors_spi_connected_indexes_array[i] = 1\n\n\n def init_controller_params(self):\n \"\"\"\n Initialize controller parameters.\n\n :return: None.\n :rtype: None.\n \"\"\"\n self.seq_kp = 3.5\n self.seq_kd = 0.0375\n\n self.seq_ki = 0.0\n self.i_sat = 0.\n self.iq_sat = 12.0\n self.cur = np.zeros(self.n_slaves*2)\n self.controller_i = np.zeros(self.n_slaves*2)\n self.p_err = 0.0\n\n\n def interpolate_smooth_trajectory(self, prev_sequence=None, next_sequence=None, step_size=1000):\n \"\"\"\n Returns interpolated transition trajectory between two sequences/states.\n\n :param prev_sequence: Optional previous sequence.\n :type prev_sequence: list[list[float]] or None.\n :param next_sequence: Optional next sequence.\n :type next_sequence: list[list[float]] or None.\n :param step_size: Desired transition time between previous sequence and next_sequence in milli-seconds.\n :type step_size: Integer.\n :return: Interpolated transition trajectory.\n :rtype: list[list[float]].\n \"\"\"\n yx = np.arange(0, 2) \n\n if 
next_sequence is None:\n next_sequence = self.home_position_trajectory\n\n if prev_sequence is None:\n prev_sequence = [[self.robot_if.GetMotor(m).GetPosition() for m in range(self.n_slaves*2)]]\n\n y = [prev_sequence[-1], next_sequence[0]]\n f = interp1d(yx, y, kind='linear', axis=0)\n\n interpolate_x = np.arange(0, 1, step=1/step_size, dtype=float)\n transition_traj = f(interpolate_x)\n\n transition_traj[0] = prev_sequence[-1]\n\n return transition_traj\n\n\n def controller(self):\n \"\"\"\n PD/PID Controller for Robot.\n\n :return: None.\n :rtype: None.\n \"\"\"\n v_ref = 0 # desired velocity\n self.motor_pos = np.zeros(self.n_slaves*2)\n motor_vel = np.zeros(self.n_slaves*2)\n\n # Individual controller gains for different tasks\n if self.going_home:\n # self.iq_sat = 1.0\n self.kp = 4\n self.ki = 0.0\n self.kd = 0.03\n\n elif self.in_motion_trajectory_sequence:\n # self.iq_sat = 1.0\n self.kp = self.seq_kp\n self.ki = self.seq_ki\n self.kd = self.seq_kd\n\n elif self.phase_1_calibration or self.phase_2_calibration or self.going_zero or self.doing_smooth_landing:\n # self.iq_sat = 1.0\n self.kp = 3\n self.ki = 0.1\n self.kd = 0.03\n\n else: \n # self.iq_sat = 1.0\n self.kp = 4\n self.ki = 0.0\n self.kd = 0.03\n \n\n for i in range(self.n_slaves*2):\n self.motor_pos[i] = self.robot_if.GetMotor(i).GetPosition()\n motor_vel[i] = self.robot_if.GetMotor(i).GetVelocity()\n if self.phase_1_calibration or self.phase_2_calibration or self.going_zero:\n self.p_err = (self.calibration_target_position - self.motor_pos) * self.motors_spi_connected_indexes_array # 0 for not connected indexes\n elif self.doing_smooth_landing:\n self.p_err = (self.target_position - self.motor_pos) * self.motors_spi_connected_indexes_array # 0 for not connected indexes\n else: \n self.target_position = np.array(self.target_position) + np.array(self.calibrated_zero_position) \n self.p_err = (self.target_position - self.motor_pos) * self.motors_spi_connected_indexes_array # 0 for not connected indexes\n \n if self.phase_2_calibration or self.use_i: # only use PID when calibrating\n for i in range(self.n_slaves*2):\n self.controller_i[i] += self.p_err[i]\n if self.controller_i[i] > self.i_sat:\n self.controller_i[i] = self.i_sat\n elif self.controller_i[i] < - self.i_sat:\n self.controller_i[i] = - self.i_sat \n else: # else use PD controller \n self.ki = 0\n\n v_err = (v_ref - motor_vel) * self.motors_spi_connected_indexes_array # 0 for not connected indexes\n \n # I part is 0 if not calibrating \n self.cur = (self.kp * self.p_err) + (self.ki * self.controller_i) + (self.kd * v_err)\n\n for e in range(len(self.cur)):\n if self.cur[e] > self.iq_sat:\n self.cur[e] = self.iq_sat\n elif self.cur[e] < -self.iq_sat:\n self.cur[e] = -self.iq_sat\n \n for i in self.motors_spi_connected_indexes:\n if self.debug_mode:\n self.robot_if.GetMotor(i).SetCurrentReference(0.) 
# sets currents to 0 so nothing happens \n                # print(self.motor_pos)\n            else:\n                self.robot_if.GetMotor(i).SetCurrentReference(self.cur[i])\n        \n        # TODO: add anti-gravity torque code here\n        \n\n    def go_to_zero_position(self):\n        \"\"\"\n        Sets target position to zero position by using an interpolated trajectory to the zero position.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        if not self.going_zero:\n            print(\"Going Zero Position!\")\n            self.sequence_counter = 0\n            self.interploate_zero_complete = False \n            self.new_zero_position = self.calibrated_zero_position\n        \n        if not self.interploate_zero_complete:\n            if self.interpolate_zero_trajectory[0][0] is None:\n                self.interpolate_zero_trajectory = self.interpolate_smooth_trajectory(next_sequence=[self.new_zero_position], step_size=5000)\n                self.sequence_counter = 0 \n                self.going_zero = True \n                print(\"Starting interpolated trajectory for 5 seconds!\")\n                print(\"Starting Free Solo Control after 5 seconds. Move platform to landing position and then press the ADC trigger.\")\n                print('---')\n\n            elif self.sequence_counter == len(self.interpolate_zero_trajectory)-1:\n                self.interploate_zero_complete = True \n                self.sequence_counter = 0\n                self.interpolate_zero_trajectory = [[None] * self.n_slaves * 2]\n                print(\"Completed interpolated trajectory!\")\n            else:\n                self.target_position = self.interpolate_zero_trajectory[self.sequence_counter]\n        else:\n            self.going_zero = True\n            \n            print(\"Completed zero pos!\")\n            print('---')\n            self.robot_if.Stop()\n            FreeSoloClass() # call free solo class for saving landing position\n            exit(1)\n\n\n    def go_to_home_position(self):\n        \"\"\"\n        Sets target position to home position by using an interpolated trajectory to the home position.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        if not self.going_home:\n            print(\"Going Home!\")\n            self.sequence_counter = 0\n            self.interploate_home_complete = False \n        \n        if not self.interploate_home_complete:\n            if self.interpolate_home_trajectory[0][0] is None:\n                self.interpolate_home_trajectory = self.interpolate_smooth_trajectory(next_sequence=[self.smooth_home_pos], step_size=TIME_INTERPOLATE_HOME)\n                self.sequence_counter = 0 \n                self.going_home = True \n                print(\"Starting interpolated trajectory!\")\n                print('---')\n\n            elif self.sequence_counter == len(self.interpolate_home_trajectory)-1:\n                self.interploate_home_complete = True \n                self.sequence_counter = 0\n                self.interpolate_home_trajectory = [[None] * self.n_slaves * 2]\n                print(\"Completed interpolated trajectory!\")\n            else:\n                self.target_position = self.interpolate_home_trajectory[self.sequence_counter]\n        else:\n            self.going_home = True\n            \n            print(\"Completed homing!\")\n            print('---')\n            self.in_home_position = True\n            self.going_home = False\n\n            if self.sequence_motion_trajectory is not None: \n                self.sequence_counter = 0\n                self.start_sequence = True\n                self.in_motion_trajectory_sequence = True \n\n\n
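    # Illustrative sketch (not from the original file) of what interpolate_smooth_trajectory\n    # does for the homing and landing transitions: a linear blend from the current pose to the\n    # target pose, one sample per control tick. With step_size=4 and two joints:\n    #   prev = [0.0, 1.0]; nxt = [1.0, 3.0]\n    #   f = interp1d([0, 1], [prev, nxt], kind='linear', axis=0)\n    #   f(np.arange(0, 1, 1/4)) -> [[0.0, 1.0], [0.25, 1.5], [0.5, 2.0], [0.75, 2.5]]\n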
    def do_smooth_landing(self):\n        \"\"\"\n        Sets target position along an interpolated trajectory to the smooth landing position.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        if not self.doing_smooth_landing:\n            if self.global_motor_i == 4:\n                print(\"Doing Smooth Landing!\")\n            self.sequence_counter = 0\n            self.interploate_smooth_landing_complete = False \n\n        if self.finished_smooth_landing:\n            if self.global_motor_i == 4:\n                print(\"Program completed!\")\n            self.program_complete = True \n            return\n\n        else:\n            if not self.interploate_smooth_landing_complete:\n                if self.interpolate_landing_trajectory[0][0] is None:\n                    self.interpolate_landing_trajectory = self.interpolate_smooth_trajectory(next_sequence=[self.smooth_landing_pos], step_size=TIME_INTERPOLATE_LANDING)\n                    self.sequence_counter = 0 \n                    self.doing_smooth_landing = True \n                    print('---')\n                elif self.sequence_counter == len(self.interpolate_landing_trajectory)-1:\n                    self.finished_smooth_landing = True \n                    print(\"Completed interpolated trajectory!\")\n                else:\n                    self.target_position = self.interpolate_landing_trajectory[self.sequence_counter]\n\n\n    def get_imu_data(self):\n        \"\"\"\n        Gets data from imu and stores in imu_data list. \n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        self.imu_data = [] \n        for ii in range(3):\n            self.imu_data.append(self.robot_if.imu_data_accelerometer(ii))\n        for ii in range(3):\n            self.imu_data.append(self.robot_if.imu_data_gyroscope(ii))\n        for ii in range(3):\n            self.imu_data.append(self.robot_if.imu_data_attitude(ii))\n        for ii in range(3):\n            self.imu_data.append(self.robot_if.imu_data_linear_acceleration(ii))\n    \n    \n    def save_pos_in_arr(self):\n        \"\"\"\n        Saves joint angles into csv file.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        if not self.phase_0_calibration and not self.phase_1_calibration and not self.phase_2_calibration and self.trigger_is_triggered:\n            line = [self.counter, *self.motor_pos, *self.target_position, *self.cur, *self.imu_data, self.robot_if.GetDriver(3).adc[0]]\n            self.data_writer.writerow(line)\n            self.data_writer_c.writerow(line)\n\n\n    def start_sequence_motion_trajectory(self):\n        \"\"\"\n        Sets target position to sequence motion trajectory.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        self.start_sequence = False \n        self.in_home_position = False \n        self.in_motion_trajectory_sequence = True \n\n        if self.sequence_counter != len(self.sequence_motion_trajectory):\n            self.target_position = np.array(self.sequence_motion_trajectory[self.sequence_counter])\n        else:\n            self.sequence_counter = 0 \n            self.in_motion_trajectory_sequence = False \n            self.end_sequence = True \n            self.sequence_end_time = time.time()\n\n\n    def check_motor_torques(self):\n        \"\"\"\n        Checks motor torques and prints warning output if motors are fighting against each other.\n\n        :return: Returns whether motors are fighting or not.\n        :rtype: Bool.\n        \"\"\"\n        offset_i_arr = []\n        avg_offset_not_i_arr = []\n        for motor_i_to_check in range(self.n_slaves*2):\n            offset_i = abs(self.p_err[motor_i_to_check])\n            offset_arr_not_i = list([abs(self.p_err[motor_j]) for motor_j in range(self.n_slaves*2) if motor_j!= motor_i_to_check])\n            avg_offset_not_i = np.average(offset_arr_not_i)\n            if offset_i > 2 * avg_offset_not_i and offset_i > self.diff_threshold:\n                self.motors_fighting[motor_i_to_check] = True \n            else:\n                self.motors_fighting[motor_i_to_check] = False\n            offset_i_arr.append(offset_i)\n            avg_offset_not_i_arr.append(avg_offset_not_i)\n\n        # print debugger\n        if any(self.motors_fighting):\n            print(\" Motor no. Motor name. Diff i. Diff !i Diff threshold\")\n            for motor_i in range(self.n_slaves*2):\n                if self.motors_fighting[motor_i]: \n                    flag=' ->' \n                else: \n                    flag='    '\n                print(flag, ' ', motor_i, ' ', self.motor_mapping[motor_i], ' %.3f'% offset_i_arr[motor_i], ' %.3f'% avg_offset_not_i_arr[motor_i], ' ', self.diff_threshold)\n        are_motors_fighting = any(self.motors_fighting)\n        self.motors_fighting = [False] * self.n_slaves * 2\n        return are_motors_fighting # return true if any motors are fighting, else return false\n\n\n
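    # Hedged numeric example for check_motor_torques (values invented for illustration): with\n    # p_err = [6.0, 0.5, 0.4, 0.6, 0.5, 0.4, 0.5, 0.6, 0.5, 0.4, 0.5, 0.6], motor 0 is flagged,\n    # since 6.0 > 2 * mean(|err| of the others) = 2 * 0.5 and 6.0 > diff_threshold = 5.0.\n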
    def run_calibration_synced(self):\n        \"\"\"\n        Calls calibration process in sync for the hip joints, lower leg joints, and upper leg joints. Used in Calibration Phase 1 and 2.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        hips = [0, 1, 6, 7]\n        lower_ls = [2, 4, 8, 10]\n        upper_ls = [3, 5, 9, 11]\n\n        if self.phase_1_calibration:\n            if not self.calibrated_hips_phase_1 and not self.calibrated_lower_ls_phase_1 and not self.calibrated_upper_ls_phase_1:\n                self.calibrate_together(hips)\n            if self.calibrated_hips_phase_1 and not self.calibrated_lower_ls_phase_1 and not self.calibrated_upper_ls_phase_1:\n                self.calibrate_together(lower_ls)\n            if self.calibrated_hips_phase_1 and self.calibrated_lower_ls_phase_1 and not self.calibrated_upper_ls_phase_1: \n                self.calibrate_together(upper_ls)\n            \n            if self.calibrated_hips_phase_1 and self.calibrated_lower_ls_phase_1 and self.calibrated_upper_ls_phase_1:\n                return\n        \n        elif self.phase_2_calibration:\n            \n            if not self.calibrated_hips_phase_2 and not self.calibrated_lower_ls_phase_2 and not self.calibrated_upper_ls_phase_2: \n                self.calibrate_together(hips)\n\n            if self.calibrated_hips_phase_2 and not self.calibrated_lower_ls_phase_2 and not self.calibrated_upper_ls_phase_2:\n                if not self.calibrated_hips_phase_2_prev == self.calibrated_hips_phase_2:\n                    self.calibration_target_position = self.zero_position \n                self.calibrate_together(lower_ls)\n\n            if self.calibrated_hips_phase_2 and self.calibrated_lower_ls_phase_2 and not self.calibrated_upper_ls_phase_2: \n                if not self.calibrated_lower_ls_phase_2_prev == self.calibrated_lower_ls_phase_2:\n                    self.calibration_target_position = self.zero_position \n                self.calibrate_together(upper_ls)\n            self.calibrated_hips_phase_2_prev = self.calibrated_hips_phase_2\n            self.calibrated_lower_ls_phase_2_prev = self.calibrated_lower_ls_phase_2\n            \n            if self.calibrated_hips_phase_2 and self.calibrated_lower_ls_phase_2 and self.calibrated_upper_ls_phase_2:\n                self.set_calibrated_zero_pos()\n\n                self.use_i = True\n                self.calibrated_zero_position = self.calibration_target_position\n                self.target_position = self.calibrated_zero_position\n                \n                self.phase_2_calibration = False\n                if not self.offsets_to_calibrated_zeros_saved:\n                    self.save_calibrated_offsets(phase=2, f_name=self.name_of_offset_calibrated_zeros_csv)\n\n        return\n\n\n    def set_calibrated_zero_pos(self):\n        \"\"\"\n        Chooses, for each motor, which encoder index pulse to use (the index repeats every motor revolution) and computes the corresponding calibrated zero position.\n\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        sign_indices = np.array([1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1])\n\n        # all angles in radians and at the motor\n        starting_position = 0 # position of motor at startup\n        min_angle = starting_position + self.min_motor_angle * sign_indices\n        max_angle = starting_position + self.max_motor_angle * sign_indices\n\n        calibration_offset = self.calibrated_offsets\n        index_found_at = self.phase_2_index_calibration_array\n        tolerance = self.tolerance_for_startup_position\n\n        if any(index_found_a is None for index_found_a in index_found_at):\n            print('not all indices are found')\n            exit()\n\n
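        # Hedged worked example (numbers invented): if a motor's index pulse was seen at 5.9 rad\n        # and phase 1 measured a 0.2 rad offset between index and mechanical zero, the zero sits\n        # at 5.7 rad; folding by 2*pi gives 5.7 - 2*pi = -0.58 rad, inside the startup tolerance\n        # of radians(10) * 9 = 1.57 rad. The while-loops below perform exactly that folding.\n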
        for i in range(self.n_slaves * 2):\n\n            second_index_found = False # True if two index pulses fit inside this calibration sweep\n            final_index = None\n            \n            # check if there's a second index in this calibration sequence\n            # positive direction\n            if index_found_at[i] < max_angle[i] - math.pi * 2 and sign_indices[i] > 0:\n                second_index_found = True\n            # negative direction\n            elif index_found_at[i] > max_angle[i] + math.pi * 2 and sign_indices[i] < 0:\n                second_index_found = True\n\n            if not second_index_found:\n                final_index = 1\n            # check if the calibration offset of phase one is possible with respect to the angle restrictions\n            # positive direction\n            elif index_found_at[i] - calibration_offset[i] < min_angle[i] and index_found_at[i] + math.pi * 2 - calibration_offset[i] < max_angle[i] and sign_indices[i] > 0:\n                final_index = 2\n            # negative direction\n            elif index_found_at[i] - calibration_offset[i] > min_angle[i] and index_found_at[i] - math.pi * 2 - calibration_offset[i] > max_angle[i] and sign_indices[i] < 0:\n                final_index = 2\n            else:\n                final_index = 1\n\n            # calculate position to calibrated zero position\n            if final_index == 1:\n                calibrated_zero_position = index_found_at[i] - calibration_offset[i]\n                # check if calculated position is in tolerance zone\n                while calibrated_zero_position > tolerance:\n                    calibrated_zero_position -= math.pi * 2\n                while calibrated_zero_position < -tolerance:\n                    calibrated_zero_position += math.pi * 2\n            else:\n                calibrated_zero_position = index_found_at[i] + math.pi * 2 * sign_indices[i] - calibration_offset[i]\n\n            self.calibration_target_position[i] = calibrated_zero_position\n\n            print('motor index', i, 'zero_calc:', calibrated_zero_position, 'index found at:', index_found_at[i], 'calibration_offset', calibration_offset[i])\n\n\n    def calibrate_together(self, motor_arr):\n        \"\"\"\n        Runs actual calibration process where target position is updated with small steps until either maximum or minimum angle threshold is reached, or until all motor indices are found. \n\n        :param motor_arr: A list of motor ids that need to be calibrated together (hips, upper legs, lower legs).\n        :type motor_arr: list[int].\n        :return: None.\n        :rtype: None.\n        \"\"\"\n        if self.counter % 1 == 0 and ((self.global_motor_i+1) % 12) == 0: # counter % 1 is always 0; effectively runs once per tick, after the last motor (index 11) has been processed\n\n            if not self.reset_calibration_complete:\n                self.reset_calibration(motor_arr)\n                return \n            else:\n                self.reset_calibration_counter = -1\n\n            motor_angles = [] # stores current motor angles \n            for motor_angles_index, actual_motor_i in enumerate(motor_arr):\n                m_angle = self.robot_if.GetMotor(actual_motor_i).GetPosition() \n                motor_angles.append(m_angle) \n\n            if self.check_is_motor_arr_complete(motor_arr):\n                print(\"Motor indices array is complete\")\n                self.reset_calibration_complete = False \n\n            # go 1 direction\n            if self.move_in_max_dir: \n                # print(\"Move in forward direction\")\n                if any(motor_angles[mi] < self.max_motor_angle for mi in range(len(motor_angles)) if mi%2 == 0):\n                    # move in forward direction \n                    for motor_angles_index, actual_motor_i in enumerate(motor_arr):\n                        if motor_angles_index % 2 == 0:\n                            self.calibration_joint_offset_array[actual_motor_i] += self.calibration_joint_offset_step_value\n                        elif motor_angles_index % 2 != 0:\n                            self.calibration_joint_offset_array[actual_motor_i] -= self.calibration_joint_offset_step_value\n                    self.calibration_target_position = np.asarray(self.calibration_target_position) + 
np.asarray(self.calibration_joint_offset_array)\n else:\n print('All motor angles < min')\n self.reset_calibration_complete = False \n\n\n def check_is_motor_arr_complete(self, motor_arr):\n \"\"\"\n Checks if all motor indices are found for motors with id specified in motor_arr.\n\n :param motor_arr: A list of motor ids to check if all motor indices are found.\n :type motor_arr: list[int].\n :return: Returns True if all motor indices are found and False if all motor indices are not found.\n :rtype: Bool.\n \"\"\"\n if self.phase_1_calibration:\n return not any(list([self.index_calibration_array[m] is None for m in motor_arr])) \n elif self.phase_2_calibration:\n return not any(list([self.phase_2_index_calibration_array[m] is None for m in motor_arr])) \n\n\n def reset_calibration(self, motor_arr):\n \"\"\"\n Resets temporary calibration arrays and parameters after one calibration step is complete. Interpolates smooth trajectory to go back to calibration position after each calibration step is complete.\n\n :param motor_arr: A list of motor ids which will be moved back to calibration position after interpolation.\n :type motor_arr: list[int].\n :return: None.\n :rtype: None.\n \"\"\"\n if self.reset_calibration_counter == 0 and self.smooth_reset_trajectory[0][0] is None:\n print(\"Interpolated smooth reset calibration.\")\n self.smooth_reset_trajectory = self.interpolate_smooth_trajectory(next_sequence=[self.zero_position], step_size=2000)\n\n if (self.reset_calibration_counter != len(self.smooth_reset_trajectory)-1) or (self.reset_calibration_counter == 0):\n self.calibration_target_position = self.smooth_reset_trajectory[self.reset_calibration_counter]\n else:\n self.reset_calibration_complete = True \n self.smooth_reset_trajectory = [[None] * self.n_slaves * 2] \n\n if self.reset_calibration_complete:\n if self.phase_1_calibration:\n if motor_arr[0] == 0: self.calibrated_hips_phase_1 = True \n elif motor_arr[0] == 2: self.calibrated_lower_ls_phase_1 = True \n elif motor_arr[0] == 3: self.calibrated_upper_ls_phase_1 = True \n self.print_calibration_status(self.calibrated_hips_phase_1, self.calibrated_upper_ls_phase_1, self.calibrated_lower_ls_phase_1)\n elif self.phase_2_calibration:\n if motor_arr[0] == 0: self.calibrated_hips_phase_2 = True \n elif motor_arr[0] == 2: self.calibrated_lower_ls_phase_2 = True \n elif motor_arr[0] == 3: self.calibrated_upper_ls_phase_2 = True \n self.print_calibration_status(self.calibrated_hips_phase_2, self.calibrated_upper_ls_phase_2, self.calibrated_lower_ls_phase_2)\n \n self.move_in_max_dir = True\n self.calibration_counter_i += 1\n self.calibration_counter_j = 0\n self.calibration_joint_offset_array = [0] * self.n_slaves *2\n\n self.reset_calibration_counter = -1\n\n print('---')\n\n \n def print_calibration_status(self, hips, upper, lower):\n \"\"\"\n Prints calibration status for hips, upper, and lower legs. \n\n :param hips: True if hips are calibrated, False if not. \n :type hips: Bool.\n :param upper: True if upper legs are calibrated, False if not. \n :type upper: Bool.\n :param lower: True if lower legs are calibrated, False if not. 
\n :type lower: Bool.\n :return: None.\n :rtype: None.\n \"\"\"\n print(\"Hips Calibrated :\", hips)\n print(\"Upper Legs Calibrated :\", upper)\n print(\"Lower Legs Calibrated :\", lower)\n\n\n def fill_index_calibration(self):\n \"\"\"\n Checks if motor index has been detected and fills index_calibration_array if index is found.\n\n :return: None.\n :rtype: None.\n \"\"\"\n for i in range(self.n_slaves*2):\n if self.robot_if.GetMotor(i).HasIndexBeenDetected():\n if self.phase_1_calibration:\n if self.index_calibration_array[i] is None: # if array not filled \n self.index_calibration_array[i] = self.robot_if.GetMotor(i).GetPosition() # in rad \n print(\"Phase 1 Calibration Array:\",self.index_calibration_array)\n elif self.phase_2_calibration:\n if self.phase_2_index_calibration_array[i] is None: # if array not filled \n self.phase_2_index_calibration_array[i] = self.robot_if.GetMotor(i).GetPosition() # in rad \n print(\"Phase 2 Calibration Array:\",self.phase_2_index_calibration_array)\n\n\n def is_calibration_complted(self, cali_array):\n \"\"\"\n Checks if calibration array is filled.\n\n :param cali_array: Calibration array is filled with motor position (float) if found index, else is None \n :type cali_array: list[None, float].\n :return: Returns True if all motor indices are found and False if all motor indices are not found.\n :rtype: Bool.\n \"\"\"\n for i in range(self.n_slaves*2):\n if cali_array[i] is None:\n return False \n return not self.phase_2_calibration\n\n \n def read_trigger_signal(self): \n \"\"\"\n Reads ADC trigger from masterboard and sets trigger_is_triggered if trigger is detected. \n\n :return: None.\n :rtype: None.\n \"\"\"\n if ((self.counter % 3000) == 0 and self.global_motor_i == 4): \n print(\"Waiting for ADC trigger.\")\n print(\"---\")\n\n if (self.robot_if.GetDriver(3).adc[0]) > self.adc_trigger_threshold:\n print(\"ADC Triggered! Starting Sequence Movement...\")\n self.trigger_is_triggered = True\n self.sequence_start_time = time.time()\n print(\"---\")\n\n\n def load_offsets(self, f_name):\n \"\"\"\n Loads calibration offsets from given file.\n\n :param f_name: Name of file.\n :type f_name: str.\n :return: Returns calibration offset array.\n :rtype: list[float].\n \"\"\"\n row = []\n try:\n f = open(f_name, 'r')\n reader = csv.reader(f)\n for line in reader:\n row.append(line)\n row = list([float(co) for co in row[-1]])\n print(\"CSV:\", f_name, \"loaded successfully!\")\n print('---')\n if f_name == self.name_of_calibration_saved_csv:\n print(\"Calibration offsets from Phase 1:\", row)\n elif f_name == self.name_of_offset_calibrated_zeros_csv:\n print(\"Calibration zero offset from Phase 2:\", row)\n else:\n print(\"Loaded values:\", row)\n except Exception:\n print(\"CSV:\", f_name, \"not found! 
Exiting.\")\n exit(1)\n print('---')\n return row \n\n\n def save_calibrated_offsets(self, phase, f_name):\n \"\"\"\n Saves calibration offsets into given file.\n\n :param phase: Calibration phase 1 or 2.\n :type phase: int.\n :param f_name: Name of file.\n :type f_name: str.\n :return: None.\n :rtype: None.\n \"\"\"\n f = open(f_name, 'w')\n writer = csv.writer(f)\n if phase == 1:\n row = self.index_calibration_array\n writer.writerow(row)\n f.close()\n self.calibrated_offsets_saved = True\n elif phase == 2:\n row = self.calibrated_zero_position\n writer.writerow(row)\n f.close() \n self.offsets_to_calibrated_zeros_saved = True\n print(\"Phase\", phase, \"~ Calibration saved as:\", f_name, \"!\")\n\n\n def print_debugger(self):\n \"\"\"\n Prints debugger similar to Masterboard code. \n\n :return: None.\n :rtype: None.\n \"\"\"\n if ((self.counter % 100) == 0): # Display state of the system once every 100 iterations of the main loop\n # print(chr(27) + \"[2J\")\n # To read IMU data in Python use robot_if.imu_data_accelerometer(i), robot_if.imu_data_gyroscope(i)\n # or robot_if.imu_data_attitude(i) with i = 0, 1 or 2\n self.robot_if.PrintIMU()\n self.robot_if.PrintADC()\n self.robot_if.PrintMotors()\n self.robot_if.PrintMotorDrivers()\n self.robot_if.PrintStats()\n print(self.cur)\n if self.debug_mode:\n print('--- DEBUG ---')\n print('Active Joint: ', self.debug_joint.joint, ' position offset: ', self.debug_joint.joint_position[self.debug_joint.joint])\n #sys.stdout.flush() # for Python 2, use print( .... , flush=True) for Python 3\n print(flush=True)\n print(self.current_time)","repo_name":"nayan-pradhan/solo-6dof-motion-platform","sub_path":"src/control/solo_ctrl.py","file_name":"solo_ctrl.py","file_ext":"py","file_size_in_byte":57174,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"67"} +{"seq_id":"27881033998","text":"input_ = '''class: 1-3 or 5-7\nrow: 6-11 or 33-44\nseat: 13-40 or 45-50\n\nyour ticket:\n7,1,14\n\nnearby tickets:\n7,3,47\n40,4,50\n55,2,20\n38,6,12\n'''\n\nlocation = __file__\ninput_ = open(location.replace('.py', '_input.txt')).read()\n\n\nrulesDict = {}\nrules, myTicket, nearbyTickets = input_.split('\\n\\n')\n\nmyTicket = myTicket.split('your ticket:\\n')[-1]\nmyTicket = [int(x) for x in myTicket.split(',')]\nprint(myTicket)\n\n\nvalidRanges = []\n\nrules = rules.splitlines()\nfor r in rules:\n field, values = r.split(': ', 1)\n values = values.split(' or ', 1)\n\n rulesDict[field] = []\n for v in values:\n\n min_, max_ = v.split('-')\n range_ = range(int(min_), int(max_)+1)\n rulesDict[field].append(range_)\n validRanges.append(range_)\n\n\nnearbyTickets = nearbyTickets.splitlines()[1:]\nnearbyTickets = [x.split(',') for x in nearbyTickets]\nvalidTickets = []\n\ntally = 0\nfor t in nearbyTickets:\n invalidTicket = False\n for number in t:\n n = int(number)\n\n invalid = True\n for r in validRanges:\n if n in r:\n invalid = False\n\n if invalid:\n invalidTicket = True\n tally += n\n if not invalidTicket:\n validTickets.append(t)\n\nprint(tally)\n\n\ndef get_values_of_fields(tickets):\n fieldsCount = len(tickets[0])\n ranges = [[] for _ in range(fieldsCount)]\n for i in range(fieldsCount):\n for t in tickets:\n ranges[i].append(int(t[i]))\n return ranges\n\n\ndef get_matching_fields(numbers):\n unmatchedFields = []\n matchedFields = []\n for field, ranges in rulesDict.items():\n valid = True\n for n in numbers:\n if n not in ranges[0] and n not in ranges[1]:\n #if n in ranges[0] or n in ranges[1]:\n 
unmatchedFields.append(field)\n valid = False\n break\n if valid:\n matchedFields.append(field)\n return list(set(matchedFields))\n\n\ndef eliminate_fields():\n count = 0\n locatedFields = {}\n fieldRanges = get_values_of_fields(validTickets)\n potentialFields = [get_matching_fields(r) for r in fieldRanges]\n numOfFields = len(rulesDict.keys())\n while count < numOfFields:\n for i, x in enumerate(potentialFields):\n if len(x) == 1:\n print(f'Field {x} is entry {i}.')\n # Store the field name and the column\n locatedFields[x[0]] = i\n count += 1\n\n # remove the located field from all other columns\n for j in range(len(potentialFields)):\n potentialFields[j] = list(set(potentialFields[j]).difference(locatedFields.keys()))\n\n return locatedFields\n\n\ndef parse_my_ticket():\n value = 1\n fields = eliminate_fields()\n for field, index in fields.items():\n if field.startswith('departure'):\n value *= myTicket[index]\n return value\n\n\nprint(parse_my_ticket())","repo_name":"techartorg/Advent_of_Code_2020","sub_path":"rob_kovach/puzzle_16.py","file_name":"puzzle_16.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"5236796776","text":"lines = list(map(lambda x: x.strip(), open(\"input\").readlines()))\n\n# part 1\nhp, dp = 0, 0\nfor line in lines:\n d, i = line.split()\n i = int(i)\n \n if d == \"forward\":\n hp += i\n if d == \"down\":\n dp += i\n if d == \"up\":\n dp -= i\n \nprint(hp*dp)\n\n\n# part 2\nhp, dp, aim = 0, 0, 0\nfor line in lines:\n d, i = line.split()\n i = int(i)\n \n if d == \"forward\":\n hp += i\n dp += aim*i\n if d == \"down\":\n aim += i\n if d == \"up\":\n aim -= i\n \nprint(hp*dp)\n","repo_name":"mastercake10/AdventOfCode2021","sub_path":"day2/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"17050475298","text":"from datetime import datetime\nfrom typing import Optional, List\n\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import Session\n\nfrom app import models\n\n\ndef get_users(db: Session, skip: int = 0, limit: int = 100):\n return db.query(models.User).offset(skip).limit(limit).all()\n\n\ndef check_password_of_user(db: Session, phone: str, hashed_password: str):\n return db.query(models.User).filter_by(phone=phone, hashed_password=hashed_password).first()\n\n\ndef update_password(db: Session, user_id: int, hashed_password: str):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n if current_user is not None:\n current_user.updated_at = datetime.now()\n current_user.hashed_password = hashed_password\n db.commit()\n db.refresh(current_user)\n return current_user\n\n\ndef create_new_user(db: Session, payload_of_user: dict):\n new_user = models.User(**payload_of_user)\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n return new_user\n\n\ndef get_user_by_phone(db: Session, phone: str):\n return db.query(models.User).filter(models.User.phone == phone).first()\n\n\ndef get_user_by_id(db: Session, user_id: int):\n return db.query(models.User).filter(models.User.id == user_id).first()\n\n\ndef update_user_info(db: Session, user_id: int, fullname: str, address: str, dob, email):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n if current_user is not None:\n current_user.fullname = fullname\n current_user.address = 
address\n\n if dob is not None:\n current_user.dob = dob\n if email is not None:\n current_user.email = email\n\n db.commit()\n db.refresh(current_user)\n return current_user\n\n\ndef count_user(db: Session, phone):\n if phone is not None:\n return db.query(models.User).filter(models.User.phone == phone).count()\n else:\n return db.query(models.User).count()\n\n\ndef get_list_user(db: Session, phone, skip, limit):\n if phone is not None:\n return db.query(models.User).filter(models.User.phone == phone).offset(skip).limit(limit).all()\n else:\n return db.query(models.User).offset(skip).limit(limit).all()\n\n\ndef lock_user(db: Session, user_id, is_active):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n if current_user is not None:\n current_user.is_active = is_active\n db.commit()\n db.refresh(current_user)\n return current_user\n\n\ndef user_update_location(db: Session, user_id: int, location: str):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n if current_user is not None:\n current_user.address = location\n db.commit()\n db.refresh(current_user)\n return current_user\n\n\ndef count_favorite_staff_list(db: Session, user_id: int, is_favorite: Optional[bool] = None):\n if is_favorite is not None:\n return db.query(models.FavoriteStaffOfUser).filter(models.FavoriteStaffOfUser.user_id == user_id,\n models.FavoriteStaffOfUser.is_favorite == is_favorite).count()\n else:\n return db.query(models.FavoriteStaffOfUser).filter(models.FavoriteStaffOfUser.user_id == user_id).count()\n\n\ndef get_favorite_staff_list(db: Session, user_id: int, is_favorite: Optional[bool] = None, limit: int = 20,\n skip: int = 0):\n if is_favorite is not None:\n return db.query(models.FavoriteStaffOfUser).filter(models.FavoriteStaffOfUser.user_id == user_id,\n models.FavoriteStaffOfUser.is_favorite == is_favorite).limit(\n limit).offset(skip).all()\n else:\n return db.query(models.FavoriteStaffOfUser).filter(models.FavoriteStaffOfUser.user_id == user_id).limit(\n limit).offset(skip).all()\n\n\ndef get_staff_list_info(db: Session, staff_list_id: List):\n staff_list = jsonable_encoder(db.query(models.Staff.id,\n models.Staff.fullname,\n models.Staff.phone,\n models.Staff.address,\n models.Staff.district_code,\n models.Staff.province_code,\n models.Staff.avatar_img,\n models.Staff.join_from_date,\n models.Staff.working_count,\n models.Staff.vote_count,\n models.Staff.vote_average_score).filter(\n models.Staff.id.in_(staff_list_id)).all())\n\n for staff in staff_list:\n district_code = staff['district_code']\n province_code = staff['province_code']\n district_fullname = db.query(models.LocationDistrict.fullname).filter(\n models.LocationDistrict.code == district_code).first()\n province_fullname = db.query(models.LocationProvince.fullname).filter(\n models.LocationProvince.code == province_code).first()\n staff['address'] = staff['address'] + \", \" + district_fullname.fullname + \", \" + province_fullname.fullname\n staff.pop('district_code')\n staff.pop('province_code')\n return staff_list\n\n\ndef set_favorite_staff(db: Session, data: dict):\n current_favorite_staff = db.query(models.FavoriteStaffOfUser).filter(\n models.FavoriteStaffOfUser.user_id == data['user_id'],\n models.FavoriteStaffOfUser.staff_id == data['staff_id']).first()\n if current_favorite_staff is None:\n db_favorite_staff = models.FavoriteStaffOfUser(**data)\n db.add(db_favorite_staff)\n db.commit()\n db.refresh(db_favorite_staff)\n return db_favorite_staff\n else:\n for key, value 
in data.items():\n setattr(current_favorite_staff, key, value)\n current_favorite_staff.updated_at = datetime.now()\n db.commit()\n db.refresh(current_favorite_staff)\n return current_favorite_staff\n\n\ndef check_staff_in_favorite_staff_list(db: Session, user_id: int, staff_id: int):\n # Check staff is already in favorite staff list\n # if not, pass\n # if yes, delete it from favorite staff list\n checking = db.query(models.FavoriteStaffOfUser).filter_by(user_id=user_id, staff_id=staff_id).first()\n if checking is None:\n pass\n else:\n db.query(models.FavoriteStaffOfUser).filter_by(user_id=user_id, staff_id=staff_id).delete()\n db.commit()\n db.refresh(checking)\n\n\ndef get_user_device_info(db: Session, user_id: int, device_info: Optional[str] = None):\n if device_info:\n return db.query(models.UserDevices).filter(models.UserDevices.user_id == user_id,\n models.UserDevices.device_info == device_info).all()\n return db.query(models.UserDevices).filter(models.UserDevices.user_id == user_id).all()\n\n\ndef get_user_device_info_by_token(db: Session, user_id: int, device_info: str, token: str):\n return db.query(models.UserDevices).filter(models.UserDevices.user_id == user_id,\n models.UserDevices.device_info == device_info,\n models.UserDevices.FCM_token == token).first()\n\n\ndef update_user_device_info(db: Session, user_id: int, device_info: any):\n info = device_info.device_info\n\n current_device = db.query(models.UserDevices).filter(models.UserDevices.user_id == user_id,\n models.UserDevices.device_info == info).first()\n\n if current_device is None:\n db_device = models.UserDevices(user_id=user_id, is_active=True, **device_info.dict())\n db.add(db_device)\n db.commit()\n db.refresh(db_device)\n return db_device\n\n else:\n current_device.FCM_token = device_info.FCM_token\n current_device.updated_at = datetime.now()\n db.commit()\n db.refresh(current_device)\n return current_device\n\n\ndef get_total_accumulated_points(db: Session, user_id: int):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n return current_user\n\n\ndef update_user_avatar(db: Session, user_id: int, avatar: str):\n current_user = db.query(models.User).filter(models.User.id == user_id).first()\n\n if current_user is not None:\n current_user.avatar = avatar\n db.commit()\n db.refresh(current_user)\n return current_user\n else:\n return None\n\n\ndef add_banned_staff(db: Session, staff_id: int, user_id: int):\n # Check if staff is already banned\n checking = db.query(models.StaffBannedByUser).filter(models.StaffBannedByUser.staff_id == staff_id,\n models.StaffBannedByUser.user_id == user_id).first()\n if checking is None:\n db_banned_staff = models.StaffBannedByUser(staff_id=staff_id, user_id=user_id)\n db.add(db_banned_staff)\n db.commit()\n db.refresh(db_banned_staff)\n return \"success\"\n else:\n return None\n\n\ndef remove_banned_staff(db: Session, staff_id: int, user_id: int):\n # Checking staff not in banned list\n checking = db.query(models.StaffBannedByUser).filter(models.StaffBannedByUser.staff_id == staff_id,\n models.StaffBannedByUser.user_id == user_id).first()\n if checking is None:\n return None\n else:\n db.query(models.StaffBannedByUser).filter(models.StaffBannedByUser.staff_id == staff_id,\n models.StaffBannedByUser.user_id == user_id).delete()\n db.commit()\n return \"success\"\n\n\ndef count_staff_banned(db: Session, user_id: int):\n return db.query(models.StaffBannedByUser).filter_by(user_id=user_id).count()\n\n\ndef get_list_staff_banned(db: Session, user_id: int, 
limit: int = 20, skip: int = 0):\n list_staff_banned_id = db.query(models.StaffBannedByUser.staff_id).filter(\n models.StaffBannedByUser.user_id == user_id).offset(skip).limit(limit).all()\n staff_list_id = list(map(lambda x: x[0], list_staff_banned_id))\n list_staff = db.query(models.Staff).filter(models.Staff.id.in_(staff_list_id)).all()\n return list_staff\n\n\ndef check_favorite_existed(db: Session, user_id: int, data: dict):\n data = {\n **data,\n \"user_id\": user_id\n }\n query = db.query(models.UserFavorite).filter_by(**data).first()\n return bool(query)\n\n\ndef add_new_favorite(db: Session, user_id: int, data: dict):\n # Check favorite existed\n checking = check_favorite_existed(db, user_id, data)\n\n if checking is True:\n return None\n\n try:\n db_favorite = models.UserFavorite(user_id=user_id, **data)\n db.add(db_favorite)\n db.commit()\n db.refresh(db_favorite)\n return \"success\"\n except Exception as e:\n return \"Error: Could not add the new favorite item!\"\n\n\ndef remove_favorite(db: Session, user_id: int, data: dict):\n # Check favorite existed\n checking = check_favorite_existed(db, user_id, data)\n\n if checking is False: # If not existed\n return None\n\n try:\n db.query(models.UserFavorite).filter_by(user_id=user_id, **data).delete()\n db.commit()\n return \"success\"\n except Exception as e:\n return \"Error: Could not delete this favorite item!\"\n\n\ndef count_favorite(db: Session, user_id: int, favorite_type: str):\n if favorite_type not in ['product', 'combo', 'store']:\n return None\n\n if favorite_type == 'product':\n return db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.product_id.isnot(None)).count()\n elif favorite_type == 'combo':\n return db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.combo_id.isnot(None)).count()\n elif favorite_type == 'store':\n return db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.store_id.isnot(None)).count()\n\n\ndef get_list_favorite(db: Session, user_id: int, favor_type: str, limit: int = 20, skip: int = 0):\n if favor_type == 'product':\n list_favor = db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.product_id.isnot(None)).order_by(\n desc(models.UserFavorite.created_at)).offset(skip).limit(limit).all()\n id_list = list(map(lambda x: x.product_id, list_favor))\n\n return db.query(models.Product).filter(models.Product.id.in_(id_list)).all()\n\n elif favor_type == 'combo':\n list_favor = db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.combo_id.isnot(None)).order_by(\n desc(models.UserFavorite.created_at)).offset(skip).limit(limit).all()\n\n id_list = list(map(lambda x: x.combo_id, list_favor))\n\n return db.query(models.Combo).filter(models.Combo.id.in_(id_list)).all()\n\n elif favor_type == 'store':\n list_favor = db.query(models.UserFavorite).filter(models.UserFavorite.user_id == user_id,\n models.UserFavorite.store_id.isnot(None)).order_by(\n desc(models.UserFavorite.created_at)).offset(skip).limit(limit).all()\n\n id_list = list(map(lambda x: x.store_id, list_favor))\n\n return db.query(models.Store).filter(models.Store.id.in_(id_list)).all()\n\n\ndef get_point_of_user(db: Session, user_id: int) -> int:\n return db.query(models.User.accumulated_points).filter(models.User.id == user_id).first()[0]\n\n\ndef decrease_accumulated_point(db: Session, user_id: int, amount: int):\n 
user = db.query(models.User).filter(models.User.id == user_id).first()\n if not user:\n return None\n user.accumulated_points -= amount\n db.commit()\n db.refresh(user)\n return user.accumulated_points\n","repo_name":"nhanlethanh1198/backend_with_sso","sub_path":"app/repositories/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":14432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22640978421","text":"__author__ = 'uiandwe'\n\n\nclass util():\n\n    def multiply1536(self, x, y):\n        if x.bit_length() <= 1536 or y.bit_length() <= 1536: # Base case\n            return x * y\n\n        else:\n            n = max(x.bit_length(), y.bit_length())\n            half = (n + 32) // 64 * 32\n            mask = (1 << half) - 1\n            xlow = x & mask\n            ylow = y & mask\n            xhigh = x >> half\n            yhigh = y >> half\n\n            a = self.multiply1536(xhigh, yhigh)\n            b = self.multiply1536(xlow + xhigh, ylow + yhigh)\n            c = self.multiply1536(xlow, ylow)\n            d = b - a - c\n            return (((a << half) + d) << half) + c\n\n    def Karatsuba(self, number1, number2, N):\n\n        if N == 1:\n            return number1*number2\n\n        if N % 2 == 1:\n            N += 1\n\n        exp = N//2\n        pw = pow(10, exp)\n\n        num1, num2 = number1 // pw, number1 % pw\n        num3, num4 = number2 // pw, number2 % pw\n\n        U = self.Karatsuba(num1, num3, exp)\n        V = self.Karatsuba(num2, num4, exp)\n\n        N1 = num1-num2\n        N2 = num3-num4\n\n\n        W = self.Karatsuba(N1, N2, exp)\n        Z = U+V-W\n\n        result = pow(10, N) * U + pow(10, N // 2) * Z + V\n\n        return result\n\n\n\n\nif __name__ == '__main__':\n    u = util()\n    print(999*99)\n    num = u.Karatsuba(999, 99, 3)\n    print(num)","repo_name":"uiandwe/algorism","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31288518871","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 13 13:48:53 2017\n\n@author: KevinMorgan\n\"\"\"\n\nfrom __future__ import print_function\nfrom datetime import date\nimport pandas.io.sql as psql\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport AcadiaHelperFunctions as ahf\n\n\n# today\ndt = date.today()\n# dt = date(2017, 7, 28)\n\n# setup sqlalchemy's connection to database\nstring_driver = 'mysql+mysqlconnector://root:Acadia@localhost/AcadiaRisk'\nengine = create_engine(string_driver)\ncnx = engine.connect()\n\n# get holidays list of list and flatten with the \"sum\" trick\n# (note: not efficient for very large datasets)\nstring_query = \"\"\"Select HolidayDate from holiday where Exchange = 3\"\"\"\nholidays = sum(psql.read_sql_query(string_query, cnx).values.tolist(), [])\n\n# get tickermappings into dictionary from dataframe\nstring_query = \"\"\"select Ticker, tickerId\nfrom tickermappings\norder by Ticker;\"\"\"\ndf_tickermappings = psql.read_sql_query(string_query, cnx)\ndict_tickermappings = df_tickermappings.set_index('Ticker').to_dict()\n\n# create simple CP dictionary\ndict_cp = {'call': 'C', 'put': 'P'}\n\n# previous business day\ndt_1 = ahf.date_by_subtracting_business_days(dt, 1, holidays)\n\n# path & filename of molecule pnl file\npath = \"C:/Users/KevinMorgan/Downloads/\"\nfilename = \"pnl-\" + dt_1.strftime(\"%Y-%m-%d\") + \".csv\"\n\n# read pnl summary into dataframe\ndf_pnl = pd.read_csv(path+filename)\n\n\n# create new sliced dataframe for new trades only\ndf_tradeblotter = \\\n    df_pnl.loc[df_pnl['explanation'] == 'New Position',\n               ['id', 'primary_product_code', 'trade_date',\n                'book', 'counterparty', 'right', 'strike', 'quantity',\n                
'contract_start', 'price', 'url']]\n\n# load entire trade blotter from molecule\n# df_tradeblotter = \\\n# df_pnl.loc[:,\n# ['id', 'primary_product_code', 'trade_date',\n# 'book', 'counterparty', 'right', 'strike', 'quantity',\n# 'contract_start', 'price', 'url']]\n\n# create a unique id for multiple legged trades.\n# Required so that a primary key set can be created for mysql database table\nlegid = [0] * df_tradeblotter.shape[0]\nlegid[0] = 1\nfor i in range(1, df_tradeblotter.shape[0]):\n if (df_tradeblotter['id'].iloc[i] == df_tradeblotter['id'].iloc[i - 1]):\n legid[i] = legid[i - 1] + 1\n else:\n legid[i] = 1\n\n# append new id to trade_blotter\ndf_tradeblotter['legid'] = pd.Series(legid, index=df_tradeblotter.index)\n\n# inplace replace the primary_product_code from string to integer\n# through tickermappings dictionary\n# this is done because the database is setup to take integers as 'foreign key'\n# into tickermappings table\ndf_tradeblotter['primary_product_code'].\\\n replace(dict_tickermappings['tickerId'], inplace=True)\n\n# change call/put to C/P\ndf_tradeblotter['right'].replace(dict_cp, inplace=True)\n\n\n# rename columns to match database\ndf_tradeblotter.columns = ['TradeId', 'Ticker', 'TradeDate',\n 'FCMAccount', 'CounterParty', 'CP', 'Strike',\n 'Quantity', 'ContractStart', 'TradePrice',\n 'MoleculeURL', 'LegId']\n\n\n# insert data into trade_blotter\ndf_tradeblotter.to_sql('trade_blotter',\n con=cnx, if_exists='append', index=False)\n\n# Close database connection\ncnx.close()\n","repo_name":"kmorgan9339/RiskProgramming","sub_path":"Acadia Molecule PnL Summary.py","file_name":"Acadia Molecule PnL Summary.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29046780324","text":"from mymodule import pyidea\r\nfrom mymodule import pydsa\r\nfrom mymodule import pyEC\r\nimport hashlib\r\n\r\n\r\ndef idea_ofb(key, IV, bytesarr):\r\n o = IV\r\n for i in range(0, len(bytesarr), 8):\r\n my_IDEA = pyidea.IDEA(key)\r\n o = my_IDEA.encrypt(o)\r\n r = min(len(bytesarr) - i, 8)\r\n t = o >> (64 - 8*r)\r\n txt = int.from_bytes(bytesarr[i:i + r], 'big')\r\n txt = txt^t\r\n bytesarr[i:i + r] = bytearray(txt.to_bytes(r, 'big'))\r\n \r\n return bytesarr\r\n\r\n# The symmetric IDEA key definition:\r\nkey = 0x006400c8012c019001f4025802bc0320\r\n\r\n\r\n# El Gamal key delivery:\r\nprint(\"\\nAlice encrypts her symmetric IDEA key using Bob's public key...\\n\")\r\nelgamal = pyEC.ElGamal()\r\nelgamal.curve = pyEC.P256\r\nC1, C2 = elgamal.encrypt(key.to_bytes(16, 'big'), pyEC.publickey)\r\nprint(\"Key before and after encryption:\", hex(key), \"--->\\n\", \"\\nC1 =\", \"(\",hex(C1.x), \",\", hex(C1.y), \")\", \"\\n\\nC2 =\", \"(\", hex(C2.x), \",\", hex(C2.y), \")\")\r\n\r\n\r\nIV = 0xEEA37DEE36032EB1\r\n\r\n\r\nprint(\"\\n\\nReading Alice's sound file...\")\r\nplainbytes = bytearray()\r\nfile = open(\"sound.bin\", \"rb\")\r\n\r\nbyte = file.read(1)\r\nwhile byte:\r\n plainbytes += byte\r\n byte = file.read(1)\r\nfile.close()\r\n\r\ndsa_key = pydsa.dsa_key\r\n\r\nm = hashlib.sha1()\r\nm.update(str(plainbytes).encode('utf-8'))\r\nmessage = int(\"0x\" + m.hexdigest(), 0)\r\n\r\nprint(\"Alice signs her file using DSA signature with her private key...\")\r\nr, s = pydsa.dsa_sign(dsa_key[\"Q\"], dsa_key[\"P\"], dsa_key[\"G\"], dsa_key[\"priv\"], message)\r\nprint(\"Signature output = (r,s) = (\",hex(r),\",\",hex(s), \")\")\r\nplainbytes = plainbytes + r.to_bytes(20, 'big') + s.to_bytes(20, 
'big')\r\nprint(\"Sound file with signature concatenated by the end =\", hex(int.from_bytes(plainbytes, 'big')))\r\n\r\n# IDEA - OFB encryption\r\nprint(\"\\n\\nAlice encrypts the entire plaintext sequence...\")\r\ncipherbytes = idea_ofb(key, IV, plainbytes)\r\nprint(\"Ciphertext =\", hex(int.from_bytes(cipherbytes, 'big')))\r\n\r\n# IDEA - OFB decryption Bob\r\nprint(\"\\n\\n\\nBob decrypts the encrypted symmetric key Alice sent him, using his private El-Gamal key...\")\r\nM = elgamal.decrypt(pyEC.privatekey, C1, C2)\r\nassert key == int.from_bytes(M, 'big')\r\n\r\nprint(\"\\nBob decrypts the ciphertext file Alice sent him, using the symmetric key\")\r\nplainbytes = idea_ofb(key, IV, cipherbytes)\r\nR = int.from_bytes(plainbytes[len(plainbytes) - 40:len(plainbytes) - 20], 'big')\r\nS = int.from_bytes(plainbytes[len(plainbytes) - 20:len(plainbytes)], 'big')\r\nm = hashlib.sha1()\r\nm.update(str(plainbytes[0:len(plainbytes) - 40]).encode('utf-8'))\r\nmessage = int(\"0x\" + m.hexdigest(), 0)\r\nprint(\"Bob verifies the file was sent from Alice using her public DSA key\")\r\nassert pydsa.dsa_verify(R, S, dsa_key[\"G\"], dsa_key[\"P\"], dsa_key[\"Q\"], dsa_key[\"pub\"], message)\r\nprint(\"File was valid\")\r\nprint(\"\\nFile after decryption:\", hex(int.from_bytes(plainbytes[0:len(plainbytes) - 40], 'big')))\r\n\r\n\r\n","repo_name":"MohamedSawaed/security-app---cryptography","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4322850453","text":"'''\nGiven an integer n, add a dot (\".\") as the thousands separator and return it in string format.\n\n \n\nExample 1:\n\nInput: n = 987\nOutput: \"987\"\nExample 2:\n\nInput: n = 1234\nOutput: \"1.234\"\nExample 3:\n\nInput: n = 123456789\nOutput: \"123.456.789\"\nExample 4:\n\nInput: n = 0\nOutput: \"0\"\n \n\nConstraints:\n\n0 <= n < 2^31\n\n'''\nclass Solution:\n def thousandSeparator(self, n: int) -> str:\n a,b=str(n),[]\n while a:\n b.append(a[-3:])\n b.append('.')\n a=a[:-3]\n b.pop()\n return ''.join(b[::-1])\n ","repo_name":"shreyansh-tyagi/leetcode-problem","sub_path":"thousand separator.py","file_name":"thousand separator.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"10352907781","text":"import datetime\nfrom typing import Optional\n\nfrom core import WireMessageFeature\nfrom core.messages.message import Message\nfrom core.messages.did_doc import DIDDoc\n\n\nclass BasicMessage(WireMessageFeature):\n \"\"\"https://github.com/hyperledger/aries-rfcs/tree/master/features/0095-basic-message\"\"\"\n\n FAMILY_NAME = \"basicmessage\"\n VERSION = \"1.0\"\n FAMILY = \"did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/\" + FAMILY_NAME + \"/\" + VERSION + \"/\"\n\n MESSAGE = FAMILY + \"message\"\n\n @classmethod\n def endorsement(cls, msg: Message) -> bool:\n return False\n\n async def handle(cls, agent_name: str, wire_message: bytes, my_label: str=None, my_endpoint: str=None):\n return None\n\n @staticmethod\n def build(content: str) -> Message:\n sent_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(' ')\n return Message({\n '@type': BasicMessage.MESSAGE,\n '~l10n': {'locale': 'en'},\n 'sent_time': sent_time,\n 'content': content\n })\n\n @staticmethod\n def validate(msg: Message):\n msg.check_for_attrs(\n [\n ('@type', BasicMessage.MESSAGE),\n '~l10n',\n 'sent_time',\n 
'content',\n ]\n )\n\n Message.check_for_attrs_in_message(\n [\n ('locale', 'en')\n ],\n msg['~l10n']\n )\n\n @staticmethod\n def extract_did(msg: Message, key: str) -> str:\n return msg[key].get(DIDDoc.DID, None) or msg[key].get(DIDDoc.VCX_DID)\n\n @staticmethod\n def extract_did_doc(msg: Message, key: str) -> dict:\n return msg.get(key, {}).get(DIDDoc.DID_DOC, {}) or msg.get(key, {}).get(DIDDoc.VCX_DID_DOC, {})\n\n @staticmethod\n def extract_verkey_endpoint(msg: Message, key: str) -> (Optional, Optional):\n \"\"\"\n Extract verkey and endpoint that will be used to send message back to the sender of this message. Might return None.\n \"\"\"\n did_doc = BasicMessage.extract_did_doc(msg, key)\n vks = did_doc.get('publicKey')\n vk = vks[0].get('publicKeyBase58') if vks and isinstance(vks, list) and len(vks) > 0 else None\n endpoints = msg.get(key, {}).get(DIDDoc.DID_DOC, {}).get('service')\n endpoint = endpoints[0].get('serviceEndpoint') if endpoints and isinstance(endpoints, list) and len(\n endpoints) > 0 else None\n return vk, endpoint\n\n @staticmethod\n def extract_their_info(msg: Message, key: str):\n \"\"\"\n Extract the other participant's DID, verkey and endpoint\n :param msg:\n :param key: attribute for extracting\n :return: Return a 4-tuple of (DID, verkey, endpoint, routingKeys)\n \"\"\"\n their_did = BasicMessage.extract_did(msg, key)\n did_doc = BasicMessage.extract_did_doc(msg, key)\n service = DIDDoc.extract_service(did_doc)\n their_endpoint = service['serviceEndpoint']\n public_keys = did_doc['publicKey']\n\n def get_key(controller_: str, id_: str):\n for k in public_keys:\n if k['controller'] == controller_ and k[\"id\"] == id_:\n return k['publicKeyBase58']\n return None\n\n def extract_key(name: str):\n if \"#\" in name:\n controller_, id_ = name.split('#')\n return get_key(controller_, id_)\n else:\n return name\n\n their_vk = extract_key(service[\"recipientKeys\"][0])\n\n routing_keys = []\n for rk in service.get(\"routingKeys\", []):\n routing_keys.append(extract_key(rk))\n\n return their_did, their_vk, their_endpoint, routing_keys\n","repo_name":"Sirius-social/Indy-Agent","sub_path":"app/core/aries_rfcs/features/feature_0095_basic_message/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"657066542","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns=[\n    url(r'^trending_add/$', views.trending_add, name='trending_add'),\n    url(r'^trending_edit/(?P<id>\d+)$', views.trending_edit, name='trending_edit'),\n    url(r'^trending_delete/(?P<id>\d+)/$', views.trending_delete, name='trending_delete'),\n\n\n]","repo_name":"sonali254/NewsWebsite","sub_path":"trending/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4614699309","text":"from typing import List, Tuple, Dict, Any\n\n\nclass Monkey:\n    symbol_to_inv = {\n        \"+\": \"-\",\n        \"-\": \"+\",\n        \"*\": \"/\",\n        \"/\": \"*\",\n        \"=\": \"=\",\n    }\n\n    def __init__(self, name: str, left_name: str, right_name: str, op_symbol: str):\n        self.name: str = name\n        if (\n            op_symbol in \"+-*/\"\n        ):  # Monkeys which depend on other monkeys have a reference to their math operation.\n            self.operation_symbol = op_symbol\n            self.resolved = False\n            self.val = None\n        else:  # Monkeys which shout their number start resolved and have no ref to a math op.\n            
self.operation_symbol = None\n self.resolved = True\n self.val = int(op_symbol)\n\n self.left_name: str = left_name\n self.right_name: str = right_name\n self.left = None\n self.right = None\n self.depth = -1\n\n def op(self, a, b, inv: bool = False):\n symbol = (\n self.symbol_to_inv[self.operation_symbol] if inv else self.operation_symbol\n )\n if symbol == \"+\":\n return a + b\n elif symbol == \"-\":\n return a - b\n elif symbol == \"*\":\n return a * b\n elif symbol == \"/\":\n return a / b\n elif symbol == \"=\":\n return a\n raise ValueError\n\n def inv_op(self, a, b):\n return self.op(a, b, inv=True)\n\n def add_children(self, all_monkeys: Dict[str, Any]) -> None:\n \"\"\"\n Adds references to other monkeys which this monkey depends on.\n :param all_monkeys: Dict of monkey names to Monkey objects.\n :return:\n \"\"\"\n if not self.left_name:\n return\n self.left = all_monkeys[self.left_name]\n self.right = all_monkeys[self.right_name]\n\n def ready_to_resolve(self) -> bool:\n \"\"\"\n Whether the monkey is ready to resolve given the statuses of its children.\n Resolution is assigning a value to the monkey based on its children and operation.\n Monkeys without children start out resolved.\n If the monkey has children and both children are resolved, it's ready to resolve.\n :return: Whether the monkey is ready to resolve.\n \"\"\"\n\n if self.left is None:\n return False\n return not self.resolved and self.left.resolved and self.right.resolved\n\n def resolve(self) -> None:\n \"\"\"\n Assign a val to this monkey based on its operation and the values of the monkeys it depends on.\n If it depends on no monkeys, its operation just returns a number.\n :return:\n \"\"\"\n self.resolved = True\n self.val = self.op(self.left.val, self.right.val)\n\n def assign_depth(self, depth: int):\n \"\"\"For troubleshooting purposes.\"\"\"\n self.depth = depth\n if self.left is not None:\n self.left.assign_depth(depth + 1)\n self.right.assign_depth(depth + 1)\n\n\ndef set_up_monkeys() -> Tuple[List[str], Dict[str, Monkey]]:\n \"\"\"\n Returns a list of monkey names and a dict of Monkey objects arranged in a tree structure.\n Monkeys are assigned children, an operation, and an inverse operation.\n Keys in the dict are monkey names, values are Monkey objects.\n Items in the list are monkey names, sorted by depth, leaves to root.\n :return:\n \"\"\"\n monkeys = {}\n with open(f\"../inputs/day_21.txt\", \"r\") as input_file:\n for line in input_file.readlines():\n name, operation = line.strip().split(\": \")\n args = operation.split(\" \")\n if len(args) == 1:\n op_symbol = args[0]\n left_name, right_name = None, None\n else:\n left_name, op_symbol, right_name = args\n monkey = Monkey(\n name=name,\n left_name=left_name,\n right_name=right_name,\n op_symbol=op_symbol,\n )\n monkeys[name] = monkey\n for name, monkey in monkeys.items():\n monkey.add_children(monkeys)\n monkeys[\"root\"].assign_depth(0)\n # list of names in descending order of depth allows for resolution of monkey values in order\n names: List[str] = [\n monkey.name\n for monkey in sorted(\n list(monkeys.values()), key=lambda x: x.depth, reverse=True\n )\n ]\n return names, monkeys\n\n\ndef part_1():\n names, monkeys = set_up_monkeys()\n # resolve all monkeys\n for name in names:\n if monkeys[name].ready_to_resolve():\n monkeys[name].resolve()\n print(monkeys[\"root\"].val)\n\n\ndef part_2():\n names, monkeys = set_up_monkeys()\n # set humn to imaginary unit\n monkeys[\"humn\"].val = 1j\n for name in names:\n if 
monkeys[name].ready_to_resolve():\n            monkeys[name].resolve()\n    root = monkeys[\"root\"]\n    # treat imaginary unit as an algebraic variable and solve for it\n    unresolved = root.left.val if root.left.val.imag != 0 else root.right.val\n    resolved = root.left.val if unresolved == root.right.val else root.right.val\n    print((resolved - unresolved.real) / unresolved.imag)\n\n\nif __name__ == \"__main__\":\n    part_1()\n    part_2()\n","repo_name":"mheidal/python-advent-of-code-2022","sub_path":"solutions/day_21.py","file_name":"day_21.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14736106756","text":"# kkbox api\nimport requests\nimport json\n# access token\ndef get_access_token():\n    url = 'https://account.kkbox.com/oauth2/token' # API URL\n    # headers\n    headers = {\n        \"Host\":\"account.kkbox.com\",\n        \"Content-Type\":\"application/x-www-form-urlencoded\"\n    }\n    # parameters\n    data = {\n        \"grant_type\": \"client_credentials\",\n        \"client_id\": \"7a374022c0170e6e7fdf4a9372ea404a\",\n        \"client_secret\": \"4461cc97a1f4db563a64be9dac4a35db\"\n    }\n    \n    access_token = requests.post(url , headers=headers , data=data)\n    return access_token.json()[\"access_token\"]\n\n# get_access_token()\n\n# query the charts\ndef get_charts():\n    # get the access token\n    access_token = get_access_token()\n    # charts API URL\n    url = \"https://api.kkbox.com/v1.1/charts\"\n    # headers\n    headers = {\n        \"accept\" :\"application/json\",\n        \"authorization\": \"Bearer \" + access_token\n    }\n    # parameters\n    params = {\n        \"territory\" : \"TW\"\n    }\n    response = requests.get(url , headers=headers , params=params)\n    result = response.json()['data']\n\n    data = []\n    for item in result:\n        print(item['id'],item['title'])\n        data.append({\n            'id' : item['id'],\n            'title' : item['title'],\n            'url' : item['url']\n        })\n    with open('data/charts.json' , 'w' , encoding='utf8') as f:\n        f.write(json.dumps(data , indent=4 , ensure_ascii=False))\n# get_charts()\n\n# get the track list of a given chart\ndef get_charts_tracks(chart_id):\n    access_token = get_access_token()\n    url = \"https://api.kkbox.com/v1.1/charts/\" + chart_id + \"/tracks\"\n    # headers\n    headers = {\n        'accept': \"application/json\",\n        'authorization': \"Bearer \" + access_token\n    }\n    # parameters\n    params = {\n        \"territory\" : \"TW\"\n    }\n    response = requests.get(url , headers=headers , params=params)\n    result = response.json()['data']\n    data = []\n    for item in result:\n        # print(item['name'],item['album']['artist']['name'])\n        data.append({\n            'name' : item['name'],\n            'url' : item['url'],\n            'artist' : item['album']['artist']['name']\n        })\n    return data\n# get_charts_tracks('LZPhK2EyYzN15dU-PT')\n\n# save each chart's tracks to its own JSON file\ndef store_charts_tracks():\n    with open('data/charts.json' , 'r') as f:\n        charts = json.load(f)\n    for item in charts:\n        data = get_charts_tracks(item['id'])\n        if item['title'] == '獨立/另類單曲週榜':\n            item['title'] = '獨立_另類單曲週榜'\n        elif item['title'] == '有聲書 / 相聲單曲週榜':\n            item['title'] = '有聲書_相聲單曲週榜'\n        with open('data/%s.json'%(item['title']) , 'w' , encoding='utf8') as f:\n            f.write(json.dumps(data,indent=4,ensure_ascii=False))\n\n\nstore_charts_tracks()","repo_name":"Hakuonn/python3","sub_path":"final_report/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7779185720","text":"#!/usr/bin/env python3\n\n## Author: Bruno Novais\n## Email brusilva@cisco.com\n\nimport asyncssh, asyncio\nimport sys, time, argparse, os\nfrom datetime import datetime\nfrom lxml import etree\n\nclass 
SmartFormatter(argparse.HelpFormatter):\n\n    def _split_lines(self, text, width):\n        if text.startswith('R|'):\n            return text[2:].splitlines()\n        # this is the RawTextHelpFormatter._split_lines\n        return argparse.HelpFormatter._split_lines(self, text, width)\n\ndef openFile(logfile):\n    '''\n    this function opens logfile and returns the object\n    :param logfile:\n    :return:\n    '''\n    f = open(logfile, 'a+')\n    return f\n\ndef averageThroughput(sizeOfNetconfQuery, timeDelta_seconds):\n    '''\n    Receives size of a netconf query response and divides by the time it took.\n    We then return a formatted string in kbps.\n    :param sizeOfNetconfQuery:\n    :param timeDelta_seconds:\n    :return: str\n    '''\n    return '%.2f kbps' % (((sizeOfNetconfQuery/timeDelta_seconds)*8)/1000)\n\ndef querySize(var) -> int:\n    '''\n    Calculates size of a list by writing to a file, reading size, then removing file.\n    :param var:\n    :return:\n    '''\n    filename = '/tmp/query' + str(datetime.now().microsecond)\n    with open(filename, 'w') as q:\n        q.write('\\n'.join(var))\n\n    statsq = os.stat(filename)\n    os.remove(filename)\n    return statsq.st_size\n\ndef buildXML(request) -> str:\n    '''\n    This function builds the XML request using etree library.\n    We loop through 'request' list to create the hierarchy\n\n    input: List\n    return: XML String (netconfQuery)\n    '''\n    global header\n    global end\n    sizeOfRequest = len(request)\n    # NETCONF 1.0 RPC framing; \"]]>]]>\" is the end-of-message delimiter\n    header='''\n    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n    <rpc message-id=\"101\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n    '''\n    end='''\n    </rpc>]]>]]>\n    '''\n\n    # First create the standard XML part\n    get = etree.Element('get')\n    filter = etree.Element('filter')\n    Operational = etree.Element('Operational')\n    filter.append(Operational)\n    get.append(filter)\n\n    # Now the below loop through the list to build the rest that is variable.\n    c = 0\n    subTree = request[c]\n    subElement = etree.Element(subTree)\n    Operational.append(subElement)\n\n    # First the netconfQuery is set to the headers and end and in between the tag of the 1st value of the list 'request'\n    netconfQuery = header + etree.tostring(get).decode() + end\n\n    # Then we will verify if the size of request is 1. If it is, nothing else needs to be done so we return netconfQuery\n    if sizeOfRequest == 1:\n        return netconfQuery\n    else:\n        # If it isn't, means we have deeper layers, so we go and loop through the list creating the elements\n        # and adding up to the root Element. 
Eventually we finish it up and assign this new tree to netconfQuery\n # and return it.\n c = 1\n while c < sizeOfRequest:\n # if the value is an int, we make it text, not an element, so need a try block.\n try:\n var = int(request[c]) + 1\n # assign subElement to 2 parent levels as next element will be under that.\n subElement = subTree.getparent().getparent()\n subTree.text = request[c]\n except ValueError:\n # If not int, just create next subtree and make it child of subelement\n subTreeStr = request[c]\n subTree = etree.Element(subTreeStr)\n subElement.append(subTree)\n subElement = subTree\n finally:\n c += 1\n\n netconfQuery = header + etree.tostring(get).decode() + end\n\n return netconfQuery\n\nasync def run_client(netconfQuery, count, sleep, ip, user, password, file):\n '''\n this function creates a connection to an ios-xr box, runs netconf format,\n and loops through 'count' times running the netconf query specified in 'netconfQuery'\n\n :param netconfQuery:\n :param count:\n :param sleep:\n :return: None\n '''\n con, client = await asyncssh.create_connection(None, host =ip, username=user, password=password)\n stdin, stdout, stderr = await con.open_session(command='netconf format')\n time.sleep(1) # the magical sleep to wait for netconf hello exchange\n countTemp = count\n var = []\n\n # loop until count = 0, and sleep between interactions\n while count > 0:\n if count != countTemp:\n print('# zZz for {}s'.format(sleep*60))\n time.sleep(sleep*60)\n\n print('# Enough zZz! Sending query\\n')\n timeBefore = datetime.today()\n sending_log = '{},{},'.format(' '.join(operation), str(timeBefore))\n file.write(sending_log)\n file.flush()\n stdin.write(netconfQuery + '\\n') # write netconfQuery to stdin\n doItOnce = 0\n async for line in stdout:\n if doItOnce == 0:\n timeFirstLine = datetime.today()\n doItOnce += 1\n var.append(line)\n if '/rpc-reply' in line:\n break\n\n\n sizeOfNetconfQuery = querySize(var)\n timeAfter = datetime.today()\n timeDelta = timeAfter - timeFirstLine\n timeDelta_seconds = float(timeDelta.total_seconds())\n file.write('{},{},{},{} bytes,{}\\n'.format(str(timeFirstLine)\n , str(timeAfter)\n , str(timeDelta)\n , sizeOfNetconfQuery\n , averageThroughput(sizeOfNetconfQuery, timeDelta_seconds)))\n file.flush()\n var = []\n count -= 1\n\nif __name__ == \"__main__\":\n\n #### Argparse block ####\n helpOperation='R|Operation List. 
Example:\\n\\n \"SystemMonitoring\"\\n \"RSVP InterfaceSummaryTable\"'\n parser = argparse.ArgumentParser(formatter_class=SmartFormatter)\n parser.add_argument(\"operation\", type=str, help=helpOperation)\n parser.add_argument(\"--count\", '-c', type=int, default=1, help=\"How many times to run query\")\n parser.add_argument(\"--sleep\", '-s', type=float, default=1, help=\"How many minutes to wait before next query\")\n parser.add_argument(\"--ip\", '-i', type=str, help=\"Host\")\n parser.add_argument(\"--user\", '-u', type=str, help=\"Username\")\n parser.add_argument(\"--password\", '-p', type=str, help=\"Password\")\n parser.add_argument(\"--filename\", '-f', type=str, help=\"Filename for logs\")\n arguments = parser.parse_args()\n #### End of Argparse block ####\n\n # Assigning variables\n operation = [s for s in arguments.operation.split(' ')]\n count = arguments.count\n sleep = arguments.sleep\n ip = arguments.ip\n user = arguments.user\n password = arguments.password\n logfile = arguments.filename\n\n file = openFile(logfile)\n\n # Running buildXML to build the query based on the list 'operation'\n netconfQuery = buildXML(operation)\n\n #### Main block ####\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(run_client(netconfQuery, count, sleep, ip, user, password, file))\n except (OSError, asyncssh.Error) as exc:\n sys.exit('SSH connection failed: ' + str(exc))\n finally:\n loop.close()\n file.close()\n #### End of Main block ####\n","repo_name":"brunoonovais/netconf-iosxr","sub_path":"asyncssh_netconf.py","file_name":"asyncssh_netconf.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70694375895","text":"import gym\n\nimport RL\nfrom RL.agents import (BasicStatsRecordingAgent, DDPGActAgent, # noqa: F401\n DDPGTrainAgent, EnvRenderingAgent,\n ExperienceBufferAgent, ForceExploitControlAgent,\n MatplotlibPlotAgent, ModelLoaderSaverAgent,\n ParamNoiseAgent, ParamsCopyAgent, PygletLoopAgent,\n RandomPlayAgent, RewardScalingAgent, SeedingAgent,\n StatsLoggingAgent, TensorboardAgent, TensorFlowAgent)\nfrom RL.common.atari_wrappers import wrap_atari\nfrom RL.common.utils import need_conv_net\nfrom RL.contexts import DDPGContext\nfrom RL.models.ddpg_model import DDPGModel\n\nc = DDPGContext()\n\n\ndef make(id):\n env = gym.make(id) # type: gym.Env\n if need_conv_net(env.observation_space):\n env = wrap_atari(env, episode_life=c.atari_episode_life, clip_rewards=c.atari_clip_rewards, framestack_k=c.atari_framestack_k, frameskip_k=c.atari_frameskip_k, noop_max=c.atari_noop_max)\n return env\n\n\nc.set_envs([make(c.env_id) for i in range(c.num_envs_to_make)])\n\nr = RL.Runner(c, \"runner\")\n\n# basics:\nr.register_agent(TensorFlowAgent(c, \"TensorFlowAgent\"))\nr.register_agent(SeedingAgent(c, \"SeedingAgent\"))\nr.register_agent(RewardScalingAgent(c, \"RewardScalingAgent\"))\n\n# core algo\nr.register_agent(ForceExploitControlAgent(c, \"ExploitControlAgent\"))\nr.register_agent(RandomPlayAgent(c, \"MinimumExperienceAgent\", play_for_steps=c.minimum_experience))\nddpg_act_agent = r.register_agent(DDPGActAgent(c, \"DDPGActAgent\"))\nr.register_agent(ModelLoaderSaverAgent(c, \"LoaderSaverAgent\", ddpg_act_agent.model.params))\nif not c.eval_mode:\n r.register_agent(ParamNoiseAgent(c, \"ParamNoiseAgent\", DDPGModel(c, \"ParamNoiseAgent/noisy_model\", num_critics=c.num_critics), ddpg_act_agent.model))\n exp_buff_agent = r.register_agent(ExperienceBufferAgent(c, 
\"ExperienceBufferAgent\"))\n ddpg_train_agent = r.register_agent(DDPGTrainAgent(c, \"DDPGTrainAgent\", ddpg_act_agent, exp_buff_agent))\n r.register_agent(ParamsCopyAgent(c, \"TargetNetUpdateAgent\", ddpg_act_agent.model.params, ddpg_train_agent.target_model.params, c.target_network_update_every, c.target_network_update_tau))\n\n# rendering and visualizations:\nif c.render:\n r.register_agent(EnvRenderingAgent(c, \"RenderingAgent\"))\nr.register_agent(PygletLoopAgent(c, \"PygletLoopAgent\"))\n\n# stats and graphs:\nr.register_agent(BasicStatsRecordingAgent(c, \"StatsRecordingAgent\", frameskip=c.atari_frameskip_k if need_conv_net(c.envs[0].observation_space) else 1))\nfor env_id_no in range(c.num_envs):\n keys = list(filter(lambda k: k.startswith('Env-' + str(env_id_no)), RL.stats.stats_dict.keys()))\n r.register_agent(StatsLoggingAgent(c, \"Env-{0}-StatsLoggingAgent\".format(env_id_no), keys))\n r.register_agent(TensorboardAgent(c, \"Env-{0}-TensorboardAgent\".format(env_id_no), keys, 'Env-{0} Total Frames'.format(env_id_no)))\nr.register_agent(MatplotlibPlotAgent(c, 'RPE', [(RL.stats.get('Env-0 Episode ID'), RL.stats.get('Env-0 Episode Reward'))], ['b-'], xlabel='Episode ID', ylabel='Reward', legend='RPE', auto_save=True, smoothing=c.matplotlib_smoothing))\n\n# algo specific stats and graphs:\nmisc_keys = ['Critic Loss', 'Actor Loss', 'Total Updates', \"Average Actor Critic Q\", 'Exploration Divergence', 'Exploration Sigma']\nr.register_agent(StatsLoggingAgent(c, 'Misc-StatsLoggingAgent', misc_keys))\nr.register_agent(TensorboardAgent(c, 'Misc-TensorboardAgent', misc_keys, 'Env-0 Total Frames', log_every_episode=-1, log_every_step=100))\n\nr.run()\n","repo_name":"bhatiaabhinav/RL","sub_path":"RL/algorithms/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34088262731","text":"import os\nimport argparse\nimport bibtexparser\nimport yaml\nfrom tqdm import tqdm\n\npaper_dir = 'papers'\noutput_dir = './_data/pub.yml'\n\nbooktitle_series_map = {\n \"International Conference on Learning Representations\": \"ICLR\",\n \"IEEE Transactions on Robotics\": \"TOR\",\n \"Conference on Robot Learning\": \"CoRL\",\n \"Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition\": \"CVPR\",\n \"Proceedings of the IEEE conference on Computer Vision and Pattern Recognition\": \"CVPR\",\n \"Proceedings of the European Conference on Computer Vision (ECCV) Workshops\": \"ECCV\",\n \"European conference on computer vision\": \"ECCV\",\n \"Robotics: Science and Systems\": \"RSS\",\n \"Advances in Neural Information Processing Systems\": \"NIPS\",\n \"Proceedings of the IEEE International Conference on Computer Vision\": \"ICCV\",\n \"Proc. ACM Interact. Mob. 
Wearable Ubiquitous Technol.\": \"IMWUT\",\n \"IEEE Communications Magazine\": \"CommMag\",\n \"IEEE Transactions on Vehicular Technology\": \"TVT\",\n \"The Journal of Defense Modeling and Simulation\": \"JDMS\",\n}\n\ndef add_new_articles(bibdbs):\n\n print(\"Adding {} articles\".format(len(bibdbs)))\n\n yml = open(output_dir, 'w')\n output_lines = []\n bibdbs = sorted(bibdbs, key=lambda pub: pub['year'], reverse=True)\n for i in range(len(bibdbs)):\n bibdb = bibdbs[i]\n # print(bibdb)\n conf = \"\"\n if 'series' in bibdb:\n conf = bibdb['series']\n elif 'booktitle' in bibdb:\n conf = bibdb['booktitle']\n elif 'journal' in bibdb:\n conf = bibdb['journal']\n elif 'archiveprefix' in bibdb:\n conf = bibdb['archiveprefix']\n\n # print(conf)\n\n for conf_name in booktitle_series_map:\n if conf.lower() == conf_name.lower():\n conf = booktitle_series_map[conf_name]\n\n # conf = conf.split(' ')[0]\n conf = conf.split('\\'')[0].strip()\n\n bibdb['conf'] = conf\n\n # print(bibdb['author'])\n bibdb = bibtexparser.customization.author(bibdb)\n # print(bibdb['author'])\n author_field = \"\"\n prev_author_field = bibdb['author']\n for j in range(len(prev_author_field)-1):\n author = prev_author_field[j]\n author_field += author.split(', ')[1] + \" \" + author.split(',')[0] + \", \"\n author_field += prev_author_field[-1].split(', ')[1] + \" \" + prev_author_field[-1].split(',')[0]\n bibdb['author'] = author_field\n # print(bibdb['author'])\n # output = yaml.dump(bibdb, default_flow_style=False)\n # print(output)\n # output_lines.append(\"-\\n\")\n # output_lines.append(output)\n\n f = open(output_dir, \"w\")\n # f.writelines(output_lines)\n yaml.dump(bibdbs, f, default_flow_style=False)\n f.close()\n\n\n\n\ndef add_new_articles_from_tex_file(tex_fp):\n bibfile = open(tex_fp)\n bibdb = bibtexparser.load(bibfile)\n add_new_articles(bibdb.entries)\n bibfile.close()\n\ndef add_new_articles_from_tex_string(tex_string):\n bibdb = bibtexparser.loads(tex_string)\n add_new_articles(bibdb.entries)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--bibtex_fp', default=\"tmp.bib\", help='text from bibtex file')\n parser.add_argument('--bibtex', default=None, help='text from bibtex file')\n parser.add_argument('--bibname', default=None, help='bib name, default is none to reuse name from bibtxt')\n arguments = parser.parse_args()\n\n bibdb = None\n if arguments.bibtex is not None:\n add_new_articles_from_tex_string(arguments.bibtex)\n elif arguments.bibtex_fp is not None:\n add_new_articles_from_tex_file(arguments.bibtex_fp)\n else:\n print(\"Invalid bibtex text or bib file!\")\n print(\"Entering dummy text file for texting\")\n bibtex = \"@inproceedings{sener2018active,\" \\\n \"title={Active Learning for Convolutional Neural Networks: A Core-Set Approach},\" \\\n \"author={Ozan Sener and Silvio Savarese},\" \\\n \"booktitle={International Conference on Learning Representations},\" \\\n \"year={2018},\" \\\n \"url={https://openreview.net/forum?id=H1aIuk-RW},}\"\n bibdb = bibtexparser.loads(bibtex)\n add_new_articles(bibdb.entries)","repo_name":"hangqiu/hangqiu.github.io","sub_path":"assets/bib2yml.py","file_name":"bib2yml.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73453295893","text":"#\r\n# ev2.py: ev1 with the following modifications:\r\n# - self-adaptive mutation\r\n# - stochastic arithmetic crossover\r\n# - restructured code for better use of OO\r\n#\r\n# 
Note: EV2 still suffers from many of the weaknesses of EV1,\r\n#       most particularly in the parent/survivor selection processes\r\n#\r\n# To run: python ev2.py --input ev2_example.cfg\r\n#         python ev2.py --input my_params.cfg\r\n#\r\n#\r\n\r\nimport optparse\r\nimport sys\r\nimport yaml\r\nimport math\r\nfrom random import Random\r\n\r\n\r\n#EV2 Config class \r\nclass EV2_Config:\r\n    \"\"\"\r\n    EV2 configuration class\r\n    \"\"\"\r\n    # class variables\r\n    sectionName='EV2'\r\n    options={'populationSize': (int,True),\r\n             'generationCount': (int,True),\r\n             'randomSeed': (int,True),\r\n             'minLimit': (float,True),\r\n             'maxLimit': (float,True)}\r\n    \r\n    #constructor\r\n    def __init__(self, inFileName):\r\n        #read YAML config and get EV2 section; an explicit SafeLoader is\r\n        #required by PyYAML >= 6 and avoids executing arbitrary YAML tags\r\n        infile=open(inFileName,'r')\r\n        ymlcfg=yaml.load(infile,Loader=yaml.SafeLoader)\r\n        infile.close()\r\n        eccfg=ymlcfg.get(self.sectionName,None)\r\n        if eccfg is None: raise Exception('Missing {} section in cfg file'.format(self.sectionName))\r\n         \r\n        #iterate over options\r\n        for opt in self.options:\r\n            if opt in eccfg:\r\n                optval=eccfg[opt]\r\n                \r\n                #verify parameter type\r\n                if type(optval) != self.options[opt][0]:\r\n                    raise Exception('Parameter \"{}\" has wrong type'.format(opt))\r\n                 \r\n                #create attributes on the fly\r\n                setattr(self,opt,optval)\r\n            else:\r\n                if self.options[opt][1]:\r\n                    raise Exception('Missing mandatory parameter \"{}\"'.format(opt))\r\n                else:\r\n                    setattr(self,opt,None)\r\n     \r\n    #string representation for class data    \r\n    def __str__(self):\r\n        return str(yaml.dump(self.__dict__,default_flow_style=False))\r\n        \r\n\r\n#Simple 1-D fitness function example: 1-D Rastrigin function\r\n#   \r\ndef fitnessFunc(x):\r\n    return -10.0-(0.04*x)**2+10.0*math.cos(0.04*math.pi*x)\r\n\r\n\r\n#Find index of worst individual in population\r\ndef findWorstIndex(l):\r\n    minval=l[0].fit\r\n    imin=0\r\n    for i in range(len(l)):\r\n        if l[i].fit < minval:\r\n            minval=l[i].fit\r\n            imin=i\r\n    return imin\r\n\r\n\r\n#Print some useful stats to screen\r\ndef printStats(pop,gen):\r\n    print('Generation:',gen)\r\n    avgval=0\r\n    maxval=pop[0].fit \r\n    sigma=pop[0].sigma\r\n    for ind in pop:\r\n        avgval+=ind.fit\r\n        if ind.fit > maxval:\r\n            maxval=ind.fit\r\n            sigma=ind.sigma\r\n        print(str(ind.x)+'\\t'+str(ind.fit)+'\\t'+str(ind.sigma))\r\n\r\n    print('Max fitness',maxval)\r\n    print('Sigma',sigma)\r\n    print('Avg fitness',avgval/len(pop))\r\n    print('')\r\n\r\n\r\n#A simple Individual class\r\nclass Individual:\r\n    minSigma=1e-100\r\n    maxSigma=1\r\n    #Note, the learning rate is typically tau=A*1/sqrt(problem_size)\r\n    # where A is a user-chosen scaling factor (optional) and problem_size\r\n    # for real and integer vector problems is usually the vector-length.\r\n    # In our case here, the vector length is 1, so we choose to use a learningRate=1\r\n    learningRate=1\r\n    minLimit=None\r\n    maxLimit=None\r\n    cfg=None\r\n    prng=None\r\n    fitFunc=None\r\n\r\n    def __init__(self,randomInit=True):\r\n        if randomInit:\r\n            self.x=self.prng.uniform(self.minLimit,self.maxLimit)\r\n            self.fit=self.__class__.fitFunc(self.x)\r\n            self.sigma=self.prng.uniform(0.1,0.9) #use \"normalized\" sigma\r\n        else:\r\n            self.x=0\r\n            self.fit=0\r\n            self.sigma=self.minSigma\r\n        \r\n    def crossover(self, other):\r\n        child=Individual(randomInit=False)\r\n        alpha=self.prng.random()\r\n        child.x=self.x*alpha+other.x*(1-alpha)\r\n        child.sigma=self.sigma*alpha+other.sigma*(1-alpha)\r\n        child.fit=None\r\n        \r\n        return child\r\n    \r\n    def mutate(self):\r\n        self.sigma=self.sigma*math.exp(self.learningRate*self.prng.normalvariate(0,1))\r\n        if self.sigma < 
self.minSigma: self.sigma=self.minSigma\r\n if self.sigma > self.maxSigma: self.sigma=self.maxSigma\r\n\r\n self.x=self.x+(self.maxLimit-self.minLimit)*self.sigma*self.prng.normalvariate(0,1)\r\n \r\n def evaluateFitness(self):\r\n self.fit=self.__class__.fitFunc(self.x)\r\n\r\n\r\n#EV2: EV1 with self-adaptive mutation & stochastic crossover\r\n# \r\ndef ev2(cfg):\r\n #start random number generator\r\n prng=Random()\r\n prng.seed(cfg.randomSeed)\r\n\r\n #set Individual static params: min/maxLimit, fitnessFunc, & prng\r\n Individual.minLimit=cfg.minLimit\r\n Individual.maxLimit=cfg.maxLimit\r\n Individual.fitFunc=fitnessFunc\r\n Individual.prng=prng\r\n \r\n #random initialization of population\r\n population=[]\r\n for i in range(cfg.populationSize):\r\n ind=Individual()\r\n ind.evaluateFitness()\r\n population.append(ind)\r\n \r\n #print stats \r\n printStats(population,0)\r\n\r\n #evolution main loop\r\n for i in range(cfg.generationCount):\r\n #randomly select two parents\r\n parents=prng.sample(population,2)\r\n\r\n #recombine\r\n child=parents[0].crossover(parents[1])\r\n \r\n #random mutation\r\n child.mutate()\r\n \r\n #update child's fitness value\r\n child.evaluateFitness() \r\n \r\n #survivor selection: replace worst\r\n iworst=findWorstIndex(population)\r\n if child.fit > population[iworst].fit:\r\n population[iworst]=child\r\n \r\n #print stats \r\n printStats(population,i+1)\r\n \r\n \r\n#\r\n# Main entry point\r\n#\r\ndef main(argv=None):\r\n if argv is None:\r\n argv = sys.argv\r\n \r\n try:\r\n #\r\n # get command-line options\r\n #\r\n parser = optparse.OptionParser()\r\n parser.add_option(\"-i\", \"--input\", action=\"store\", dest=\"inputFileName\", help=\"input filename\", default=None)\r\n parser.add_option(\"-q\", \"--quiet\", action=\"store_true\", dest=\"quietMode\", help=\"quiet mode\", default=False)\r\n parser.add_option(\"-d\", \"--debug\", action=\"store_true\", dest=\"debugMode\", help=\"debug mode\", default=False)\r\n (options, args) = parser.parse_args(argv)\r\n \r\n #validate options\r\n if options.inputFileName is None:\r\n raise Exception(\"Must specify input file name using -i or --input option.\")\r\n \r\n #Get EV2 config params\r\n cfg=EV2_Config(options.inputFileName)\r\n \r\n #print config params\r\n print(cfg)\r\n \r\n #run EV2\r\n ev2(cfg)\r\n \r\n if not options.quietMode: \r\n print('EV2 Completed!') \r\n \r\n except Exception as info:\r\n if 'options' in vars() and options.debugMode:\r\n from traceback import print_exc\r\n print_exc()\r\n else:\r\n print(info)\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n","repo_name":"kalmanlukelin/Training-Feedforward-Neural-Networks-Using-Island-model-Genetic-Algorithm","sub_path":"EV_course_material/EV2/ev2.py","file_name":"ev2.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"32812589336","text":"import logging\r\nimport asyncio\r\nimport os\r\nimport sys\r\nimport aiofiles\r\nimport yaml\r\n\r\n\r\nclass Config:\r\n def __init__(self):\r\n self.headers = {\r\n \"authority\": \"api.bilibili.com\",\r\n \"method\": \"GET\",\r\n \"scheme\": \"https\",\r\n \"accept\": \"*/*\",\r\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.6\",\r\n \"cookie\": \"\",\r\n \"dnt\": \"1\",\r\n \"origin\": \"https://www.bilibili.com\",\r\n \"sec-ch-ua\": '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"101\", \"Microsoft Edge\";v=\"101\"',\r\n \"sec-fetch-dest\": \"empty\",\r\n \"sec-fetch-mode\": 
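Individual.mutate() above implements the classic log-normal self-adaptation: sigma is perturbed multiplicatively first, then the object variable is perturbed using the new sigma scaled by the search range. The same scheme as a standalone function; the bounds and tau here are illustrative defaults, not values from the config:

```python
import math
import random

def self_adaptive_mutate(x, sigma, lo=-100.0, hi=100.0, tau=1.0,
                         min_sigma=1e-100, max_sigma=1.0, prng=random):
    # perturb the "normalized" step size log-normally, then clamp it
    sigma *= math.exp(tau * prng.normalvariate(0, 1))
    sigma = min(max(sigma, min_sigma), max_sigma)
    # perturb the object variable with the range-scaled step size
    x += (hi - lo) * sigma * prng.normalvariate(0, 1)
    return x, sigma

print(self_adaptive_mutate(0.0, 0.5))
```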
\"cors\",\r\n \"sec-fetch-site\": \"same-site\",\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\r\n \"Chrome/101.0.4951.64 Safari/537.36 Edg/101.0.1210.47 \",\r\n \"Connection\": \"close\"\r\n }\r\n self.url = \"https://api.bilibili.com/x/web-interface/nav\"\r\n self.url1 = \"https://api.bilibili.com/x/relation/modify\"\r\n self.url2 = \"https://api.bilibili.com/x/polymer/web-dynamic/v1/feed/all\"\r\n self.url3 = \"https://api.bilibili.com/x/web-interface/coin/add\"\r\n self.url4 = \"https://api.bilibili.com/x/web-interface/share/add\"\r\n self.url5 = \"https://api.bilibili.com/x/web-interface/archive/related\"\r\n self.url6 = \"https://api.bilibili.com/x/click-interface/web/heartbeat\"\r\n self.url7 = \"https://api.live.bilibili.com/room/v1/Area/getList\"\r\n self.url8 = \"https://api.live.bilibili.com/xlive/web-ucenter/v1/sign/DoSign\"\r\n self.url_re = \"https://api.bilibili.com/x/relation?fid=%s\"\r\n self.url9 = \"https://api.bilibili.com/x/relation/tags\"\r\n self.url10 = \"https://api.bilibili.com/x/relation/tag\"\r\n self.url11 = \"https://api.bilibili.com/x/space/arc/search?mid=\"\r\n self.url_all = \"https://api.live.bilibili.com/xlive/web-interface/v1/second/getList?platform=web\" \\\r\n \"&parent_area_id=%s&area_id=%s&page=%s \"\r\n self.url_check = \"https://api.live.bilibili.com/xlive/lottery-interface/v1/Anchor/Check?roomid=%s\"\r\n self.url_tx = \"https://api.live.bilibili.com/xlive/lottery-interface/v1/Anchor/Join\"\r\n self.url_relationship = \"https://api.bilibili.com/x/relation/tags/moveUsers\"\r\n self.url_group = \"https://api.bilibili.com/x/relation/tags\"\r\n self.create_url = \"https://api.bilibili.com/x/relation/tag/create\"\r\n self.prize = \"https://api.live.bilibili.com/xlive/lottery-interface/v1/Anchor/AwardRecord\"\r\n self.send = \"https://api.live.bilibili.com/msg/send\"\r\n self.clockin_url = \"https://manga.bilibili.com/twirp/activity.v1.Activity/ClockIn\"\r\n self.vip_url = \"https://api.bilibili.com/x/vip/privilege/my\"\r\n self.ua_list = [\r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\r\n \"Chrome/101.0.4951.64 Safari/537.36 Edg/101.0.1210.47 \",\r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.53 \"\r\n \"Safari/537.36 Edg/103.0.1264.37\",\r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 \"\r\n \"Safari/537.36 \"\r\n ]\r\n self.config_dir = \"./config.yaml\"\r\n logging.basicConfig(level=logging.INFO, format='%(message)s')\r\n self.logger = logging.getLogger(__name__)\r\n\r\n async def create_config(self):\r\n data = {\r\n 'user1': {\r\n 'cookie': '',\r\n 'refresh_token': '',\r\n 'refresh_csrf': '',\r\n 'coin': 0,\r\n 'spider_page': 50,\r\n 'spider_thread': 7, # 线程数不可超出7\r\n 'blacklist': [], # 此处填写uid,格式为uid1,uid2,uid3,uid4,uid5,uid6,uid\r\n 'whitelist': [], # 白名单处填写uid,格式为uid1,uid2,uid,中奖天选后防止取关会自动添加上\r\n 'DesignateUp': [], # 设定要投币的uid,格式为uid1,uid2,\r\n 'proxylist': [] # 代理列表格式字符串,格式为\"ip:port\" 不写就默认不代理\r\n }\r\n }\r\n\r\n comments = {\r\n 'user1': {\r\n 'cookie': '用户登录B站时使用的cookie',\r\n 'refresh_token': '用户登录B站时使用的refresh_token',\r\n 'refresh_csrf': '无需理会refresh_csrf,由我处理',\r\n 'coin': '是否开启B站自动投币功能,0表示关闭,1表示开启,如果你要投币两个我还没适配哦',\r\n 'spider_page': 'B站天选时爬取的页数',\r\n 'spider_thread': '线程数不可超出7',\r\n 'blacklist': '此处填写uid,格式为uid1,uid2,uid3,uid4,uid5,uid6,uid7',\r\n 'whitelist': '白名单处填写uid,格式为uid1,uid2,uid,中奖天选后防止取关会自动添加上',\r\n 'DesignateUp': 
'设定要投币的uid可以是你喜欢的up,格式为uid1,uid2,',\r\n 'proxylist': '代理列表格式字符串,格式为\"ip:port\" 不写就默认不代理'\r\n }\r\n }\r\n async with aiofiles.open(self.config_dir, mode='w', encoding='utf-8') as f:\r\n for key, value in data.items():\r\n await f.write(key + ':\\n')\r\n for k, v in value.items():\r\n await f.write(' ' + k + ': ' + str(v) + ' # ' + comments[key][k] + '\\n')\r\n self.logger.info('config.yaml is created,please go to Modify Configuration File')\r\n\r\n async def check_config(self):\r\n datalist = ['cookie','refresh_token','refresh_csrf', 'coin', 'spider_page', 'spider_thread', 'blacklist', 'whitelist', 'DesignateUp',\r\n 'proxylist']\r\n with open(self.config_dir, 'r', encoding='utf-8') as f:\r\n # 检查配置文件是否正确\r\n data = yaml.safe_load(f)\r\n if data is not None:\r\n for key, value in data.items():\r\n # 处理配置数据\r\n # print(k, v)\r\n not_found = set(datalist) - set(data[key].keys())\r\n if not_found:\r\n self.logger.info(f'{key}以下键未在 config.yaml 中找到:{not_found},自动添加默认值')\r\n for k1 in not_found:\r\n if k1 == 'cookie':\r\n data[key][k1] = ''\r\n elif k1 == 'refresh_token':\r\n data[key][k1] = ''\r\n elif k1 == 'refresh_csrf':\r\n data[key][k1] = ''\r\n elif k1 == 'drop':\r\n data[key][k1] = 0\r\n elif k1 == 'spider_page':\r\n data[key][k1] = 50\r\n elif k1 == 'spider_thread':\r\n data[key][k1] = 7\r\n else:\r\n data[key][k1] = []\r\n # 在config.yaml中添加键值对\r\n await self.correct_config(data)\r\n return data\r\n else:\r\n self.logger.info('config.yaml 不正确。请检查文件是否正确。')\r\n return None\r\n\r\n async def correct_config(self, data):\r\n with open(self.config_dir, 'w', encoding='utf-8') as f:\r\n f.write(yaml.dump(data, default_flow_style=False))\r\n self.logger.info('config.yaml,修改完成')\r\n\r\n async def insert_data(self, key, value):\r\n with open(self.config_dir, 'r') as file:\r\n data = yaml.safe_load(file)\r\n\r\n # 增加或修改值\r\n for k,v in data.items():\r\n for k1,v1 in v.items():\r\n if k1 == key and v1 != value:\r\n data[k][k1] = value\r\n # 写入 YAML 文件\r\n with open(self.config_dir, 'w') as file:\r\n yaml.dump(data, file)\r\n self.logger.info('更新配置文件完成')\r\n\r\n async def back_k_data(self, key, v):\r\n with open(self.config_dir, 'r') as file:\r\n data = yaml.safe_load(file)\r\n return data[key][v]\r\n\r\n async def start(self):\r\n # 检查配置文件是否存在\r\n if not os.path.exists(self.config_dir):\r\n self.logger.info('config.yaml 不存在,开始创建')\r\n await self.create_config()\r\n # sys.exit(1)\r\n else:\r\n data = await self.check_config()\r\n if data is not None:\r\n return data\r\n \r\n\r\n\r\n# bilibili daily\r\n\r\nif __name__ == '__main__':\r\n con = Config()\r\n asyncio.run(con.start())\r\n \r\n","repo_name":"wangquanfugui233/Bilibili_Python","sub_path":"Basic/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"67"} +{"seq_id":"11049389204","text":"\r\nimport pandas as pd\r\nimport io\r\nimport os\r\nimport posixpath\r\nimport re\r\nimport boto3\r\nimport yaml\r\n\r\nwith open('Monthly_Report.yaml') as yaml_file:\r\n conf = yaml.load(yaml_file, Loader=yaml.Loader)\r\n\r\nsession = boto3.Session(profile_name=conf['profile'], region_name=conf['aws_region'])\r\ns3 = session.client('s3', verify=conf['ssl_cert'])\r\nathena = session.client('athena', verify=conf['ssl_cert'])\r\n\r\n# These set environment variables for this session only\r\ncredentials = session.get_credentials()\r\nos.environ['AWS_ACCESS_KEY_ID'] = credentials.access_key\r\nos.environ['AWS_SECRET_ACCESS_KEY'] = 
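check_config() above follows a common pattern: load the YAML, fill in any missing keys from a defaults table, and write the repaired file back. A compact sketch of that pattern; the file name and default values here are illustrative, not taken from the class:

```python
import yaml

DEFAULTS = {'cookie': '', 'coin': 0, 'spider_page': 50, 'blacklist': []}

def load_with_defaults(path='config.yaml'):
    with open(path, encoding='utf-8') as f:
        data = yaml.safe_load(f) or {}
    for user_cfg in data.values():
        for key, default in DEFAULTS.items():
            user_cfg.setdefault(key, default)   # only fills missing keys
    with open(path, 'w', encoding='utf-8') as f:
        yaml.dump(data, f, default_flow_style=False, allow_unicode=True)
    return data
```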
credentials.secret_key\r\nos.environ['AWS_DEFAULT_REGION'] = conf['aws_region']\r\n\r\n# ---------------------------\r\n\r\ndef s3read_using(FUN, Bucket, Key, **kwargs):\r\n    with io.BytesIO() as f:\r\n        s3.download_fileobj(Bucket=Bucket, Key=Key, Fileobj=f)\r\n        f.seek(0)\r\n        df = FUN(f, **kwargs)\r\n    return df\r\n\r\n\r\ndef s3_write_parquet(df, Bucket, Key, **kwargs):\r\n    for col in df.dtypes[df.dtypes=='datetime64[ns]'].index.values:\r\n        df[col] = df[col].dt.round(freq='ms') # parquet doesn't support ns timestamps\r\n    with io.BytesIO() as f:\r\n        df.to_parquet(f, **kwargs)\r\n        f.seek(0)\r\n        s3.put_object(Bucket=Bucket, Key=Key, Body=f.getvalue())\r\n\r\n\r\ndef s3_write_excel(df, Bucket, Key, **kwargs):\r\n    with io.BytesIO() as f:\r\n        df.to_excel(f, **kwargs)\r\n        f.seek(0)\r\n        s3.put_object(Bucket=Bucket, Key=Key, Body=f.getvalue())\r\n\r\n\r\ndef s3_write_feather(df, Bucket, Key, **kwargs):\r\n    with io.BytesIO() as f:\r\n        df.to_feather(f, **kwargs)\r\n        s3.put_object(Bucket=Bucket, Key=Key, Body=f.getvalue())\r\n\r\n\r\ndef s3_write_csv(df, Bucket, Key, **kwargs):\r\n    with io.StringIO() as f:\r\n        df.to_csv(f, **kwargs)\r\n        f.seek(0)\r\n        s3.put_object(Bucket=Bucket, Key=Key, Body=f.getvalue())\r\n\r\n\r\ndef s3_read_parquet(Bucket, Key, **kwargs):\r\n    return s3read_using(pd.read_parquet, Bucket, Key, **kwargs)\r\n\r\n\r\ndef s3_read_excel(Bucket, Key, **kwargs):\r\n    return s3read_using(pd.read_excel, Bucket, Key, **kwargs)\r\n\r\n\r\ndef s3_read_feather(Bucket, Key, **kwargs):\r\n    return s3read_using(pd.read_feather, Bucket, Key, **kwargs)\r\n\r\n\r\n# Downloads into a binary buffer; pandas handles the decoding\r\ndef s3_read_csv(Bucket, Key, **kwargs):\r\n    with io.BytesIO() as f:\r\n        s3.download_fileobj(Bucket=Bucket, Key=Key, Fileobj=f)\r\n        f.seek(0)\r\n        df = pd.read_csv(f, **kwargs)\r\n    return df\r\n\r\n\r\ns3_list_objects = s3.list_objects_v2\r\n\r\ndef s3_read_parquet_hive(bucket, key):\r\n    if 'Contents' in s3_list_objects(Bucket = bucket, Prefix = key):\r\n        date_ = re.search(r'\\d{4}-\\d{2}-\\d{2}', key).group(0)\r\n        df = (s3_read_parquet(Bucket=bucket, Key=key)\r\n              .assign(Date = lambda x: pd.to_datetime(date_, format='%Y-%m-%d'))\r\n              .rename(columns = {'Timestamp': 'TimeStamp'}))\r\n    else:\r\n        df = pd.DataFrame()\r\n\r\n    return df\r\n\r\n\r\ndef get_keys(s3, bucket, prefix, callback=lambda x: x):\r\n    response = s3.list_objects_v2(\r\n        Bucket=bucket,\r\n        Prefix=prefix)\r\n    if 'Contents' in response.keys():\r\n        for cont in response['Contents']:\r\n            try:\r\n                yield callback(cont['Key'])\r\n            except Exception:\r\n                pass\r\n\r\n    while 'NextContinuationToken' in response.keys():\r\n        response = s3.list_objects_v2(\r\n            Bucket=bucket,\r\n            Prefix=prefix,\r\n            ContinuationToken=response['NextContinuationToken'])\r\n        for cont in response['Contents']:\r\n            try:\r\n                yield callback(cont['Key'])\r\n            except Exception:\r\n                pass\r\n\r\n\r\ndef get_signalids(date_, conf, path='detections'):\r\n    date_str = date_.strftime('%Y-%m-%d')\r\n    bucket = conf['bucket']\r\n    key_prefix = conf['key_prefix'] or ''\r\n    prefix = posixpath.join(key_prefix, path, f'date={date_str}')\r\n    keys = get_keys(s3, bucket, prefix, callback = lambda k: re.search(r'(?<=_)\\d+(?=_)', k).group())\r\n    return keys\r\n\r\n\r\ndef get_det_config(date_, conf):\r\n    '''\r\n    date_ [Timestamp]\r\n    conf [dict]\r\n    '''\r\n\r\n    def read_det_config(s3, bucket, key):\r\n        dc = s3_read_feather(Bucket=bucket, Key=key)\r\n        dc.loc[dc.DetectionTypeDesc.isna(), 'DetectionTypeDesc'] = '[]'\r\n        return dc\r\n\r\n    date_str = date_.strftime('%Y-%m-%d')\r\n\r\n    bucket = conf['bucket']\r\n\r\n    bd = 
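Every helper above follows one shape: serialize the DataFrame into an in-memory buffer, then hand the whole buffer to S3 (and the reverse on reads). The round trip in isolation; the boto3 calls are real, while the bucket and key are placeholders:

```python
import io
import boto3
import pandas as pd

s3 = boto3.client('s3')

def put_parquet(df, bucket, key):
    with io.BytesIO() as buf:
        df.to_parquet(buf)
        buf.seek(0)
        s3.put_object(Bucket=bucket, Key=key, Body=buf.getvalue())

def get_parquet(bucket, key):
    with io.BytesIO() as buf:
        s3.download_fileobj(Bucket=bucket, Key=key, Fileobj=buf)
        buf.seek(0)                 # rewind before pandas reads the bytes
        return pd.read_parquet(buf)
```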
s3_read_parquet(Bucket=bucket, Key=f'mark/bad_detectors/date={date_str}/bad_detectors_{date_str}.parquet')\r\n bd.Detector = bd.Detector.astype('int64')\r\n\r\n dc_prefix = f'atspm_det_config_good/date={date_str}'\r\n dc_keys = get_keys(s3, bucket, dc_prefix)\r\n\r\n dc = pd.concat(list(map(lambda k: read_det_config(s3, bucket, k), dc_keys)))\r\n\r\n df = pd.merge(dc, bd, how='outer', on=['SignalID','Detector']).fillna(value={'Good_Day': 1})\r\n df = df.loc[df.Good_Day==1].drop(columns=['Good_Day'])\r\n\r\n return df\r\n","repo_name":"atops/VDOT-Flexdashboard-Report","sub_path":"s3io.py","file_name":"s3io.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38644695087","text":"with open(\"inputs/day10.txt\") as file:\n data = []\n for line in file:\n line = line.split()\n if len(line) == 2:\n data.append((line[0], int(line[1])))\n else:\n data.append((line[0], None))\n\n# Part 1 & 2 ===\npart1 = 0\nscreen = [[\" \"]*40 for _ in range(6)]\ncycle, register = 0, 1\ni = 0\nis_executing = False\nwhile True:\n cycle += 1\n opcode, value = data[i]\n\n if (cycle - 20) % 40 == 0:\n part1 += cycle * register\n\n if (cycle-1) % 40 in range(register-1, register+2):\n screen[(cycle-1)//40][(cycle-1) % 40] = \"#\"\n\n if not is_executing:\n if opcode == \"addx\":\n is_executing = True\n else: # noop\n i += 1\n else:\n is_executing = not is_executing\n register += data[i][1]\n i += 1\n\n if i + 1 >= len(data):\n break\n\n\nprint(\"Part 1:\", part1)\nprint(\"Part 2:\")\n[print(\"\".join(line)) for line in screen]\n","repo_name":"0xVector/AdventOfCode2022","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38835068942","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 22 10:08:23 2022\n\n@author: Lenovo1\n\"\"\"\n\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.ticker import PercentFormatter\nimport numpy as np\nimport logging\n#from task12345 import list_problemChosen\n\n#from proble import * #引用模块中的函数\nimport backFunc as back\nfrom code.showAnswerResult import match_answer\n\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\nlogging.basicConfig(filename='C:/Users/HP/Documents/Python/streamlit+backFunc(1)/ExamSystem.log', level=logging.INFO, format=LOG_FORMAT)\n\n\n#global list_problemChosen\n#########################################################\n#定义多页面函数\nclass MultiPages:\n def __init__(self):\n self.titles_list = []\n self.func_dict = {}\n\n def add_TitleAndFunc(self, title, func):\n if title not in self.titles_list:\n self.titles_list.append(title)\n self.func_dict[title] = func\n#如果选择了‘basic python选项’,函数内的title则为‘basic python’\n def run(self):\n title = st.sidebar.radio(\n 'Go To',\n self.titles_list,\n format_func=lambda title: str(title))#str函数可能是多余的\n func_dict=self.func_dict\n func_name=func_dict.get(title)\n eval(func_name)()\n##############################################################\n \nglobal setChosen#setChosen表示当前选择的题库\n\nsetChosen='BasicPython'\n#定义页面 \n#定义页面1-登录页面\n\n#########################################################更新(xhp\ndef logo_detect(Username, password):\n user_table = pd.read_excel('user_all.xlsx',sheet_name='Users')\n temp_table = pd.read_excel('user_temp.xlsx',sheet_name='Temp')\n all_username = 
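The day-10 CPU in the record above interleaves cycle counting with a two-phase addx; an equivalent formulation expands the program into one register delta per cycle, which makes the "value during cycle c" bookkeeping explicit. The mini program below is the three-instruction example from the puzzle statement:

```python
def expand(program):
    deltas = []
    for op, val in program:
        deltas.append(0)        # noop, and the first cycle of addx
        if op == "addx":
            deltas.append(val)  # addx lands at the end of its second cycle
    return deltas

x, signal = 1, 0
for cycle, d in enumerate(expand([("noop", None), ("addx", 3), ("addx", -5)]), 1):
    if (cycle - 20) % 40 == 0:
        signal += cycle * x     # x is the value DURING this cycle
    x += d                      # the delta takes effect after the cycle
print(x)  # -1 after the five cycles, matching the puzzle walkthrough
```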
list(user_table['name'])\n if Username in all_username:\n if password == str(user_table.loc[all_username.index(Username), 'password']):\n st.success('login successfully')\n temp_table.loc[0, 'name'] = Username\n user_table.loc[all_username.index(Username), 'Login'] = 2\n temp_table.to_excel('user_temp.xlsx', index=False,sheet_name='Temp')\n user_table.to_excel('user_all.xlsx', index=False,sheet_name='Users')#改变user——all用户log状态\n else:\n st.error('password error')\n else:\n st.error('The Username does not exist')\n\ndef page_login():\n st.title('Page Login')\n input_Username=st.text_input('Username',max_chars=20).strip()\n input_password = st.text_input('Password', type='password',max_chars=20).strip()\n if st.button('sign in'):\n logo_detect(input_Username,input_password)\n logging.info(f'user [{input_Username}] login')\n\n##############################################################更新(xhp\n\n\n#定义页面2-题库答题页面:#从外部传入一个setChosen函数\ndef page_problem(setChosen):\n \n #定义各类题型的题目展示函数\n #problem为一个object\n #定义单选题的题目展示函数\n def SingleChoice_show(problemObject,key_type,setChosen,i):\n str_i=str(i)\n key_str=setChosen+key_type+str_i\n problem_show=problemObject.problem#problem_show为要展示的题目\n logging.info(f'[SingleChoice]: {problemObject.problem}')\n # (f'question[{fillBlanks.problemid}]: {fillBlanks.problem}.')\n choice_default='Please choose your answer'\n choice_A='A. '+str(problemObject.A)\n choice_B='B. '+str(problemObject.B)\n choice_C='C. '+str(problemObject.C)\n choice_D='D. '+str(problemObject.D)\n st.text(problem_show)\n choice_list=[choice_default,choice_A,choice_B,choice_C,choice_D]\n answer_chosen=st.radio('Single Choice',choice_list,key=key_str)\n \n #获取用户选择的答案\n answer_result='null'\n if answer_chosen==choice_default:\n answer_result='null'\n elif answer_chosen==choice_A:\n answer_result='A'\n elif answer_chosen==choice_B:\n answer_result='B'\n elif answer_chosen==choice_C:\n answer_result='C'\n elif answer_chosen==choice_D:\n answer_result='D'\n \n return answer_result\n #定义判断题的题目展示函数\n def Judge_show(problemObject,key_type,setChosen,i):\n str_i=str(i)\n key_str=setChosen+key_type+str_i\n problem_show=problemObject.problem#problem_show为要展示的题目\n logging.info(f'[Judge]: {problem_show}')\n choice_default='Please choose your answer'\n choice_A='A. '+str(problemObject.A)\n choice_B='B. '+str(problemObject.B)\n st.text(problem_show)\n choice_list=[choice_default,choice_A,choice_B]\n answer_chosen=st.radio('Judge',choice_list,key=key_str)\n \n #获取用户选择的答案\n answer_result='null'\n if answer_chosen==choice_default:\n answer_result='null'\n elif answer_chosen==choice_A:\n answer_result='A'\n elif answer_chosen==choice_B:\n answer_result='B'\n \n return answer_result\n \n \n #定义多选题的展示函数\n def MultipleChoice_show(problemObject,key_type,setChosen,i):\n str_i=str(i)\n key_str=setChosen+key_type+str_i\n problem_show=problemObject.problem#problem_show为要展示的题目\n logging.info(f'[MultipleChoice]: {problem_show}')\n choice_A='A. '+str(problemObject.A)\n choice_B='B. '+str(problemObject.B)\n choice_C='C. '+str(problemObject.C)\n choice_D='D. 
'+str(problemObject.D)\n st.text(problem_show)\n choice_list=[choice_A,choice_B,choice_C,choice_D]\n answer_chosen=st.multiselect('Multiple Choice',choice_list,key=key_str)\n \n #获取用户选择的答案\n answer_result=''\n answer_result_list=[]\n for i in answer_chosen:\n i_list=list(i)\n answer_result_list.append(i[0])\n \n answer_result_list=sorted(answer_result_list)\n answer_result=''.join(answer_result_list)\n return answer_result\n \n #定义填空题的展示函数\n def FillBlanks_show(problemObject,key_type,setChosen,i):\n str_i=str(i)\n key_str=setChosen+key_type+str_i\n problem_show=problemObject.problem#problem_show为要展示的题目\n logging.info(f'[FillBlanks]: {problem_show}')\n st.text(problem_show) \n answer_chosen=st.text_input('Fill Blanks', value=\"\",key=key_str)\n \n #获取用户选择的答案\n answer_result='null'\n answer_result=answer_chosen\n return answer_result\n \n #主函数:题目展示的主函数\n #st.title('Page Problem')\n answer_result_list={}\n title_str=str(setChosen)\n st.title(title_str)\n type_dict={1:'SingleChoice',2:'Judge',3:'MultipleChoice',4:'FillBlanks',5:'CodeProblem'}\n list_problemChosen,list_problemid= back.problemChoose(setChosen)\n for i in range(0,5):\n j=i+1\n str_subheader='Question'+str(j) \n st.subheader(str_subheader)\n \n problemObject=list_problemChosen[i]\n if problemObject.problemTypeNum==1:\n answer_result=SingleChoice_show(problemObject,'SingleChoice',setChosen,i)\n answer_result_list[list_problemid[i]]=answer_result\n elif problemObject.problemTypeNum==2:\n answer_result=Judge_show(problemObject,'Judge',setChosen,i)\n answer_result_list[list_problemid[i]]=answer_result\n elif problemObject.problemTypeNum==3:\n answer_result=MultipleChoice_show(problemObject,'MultipleChoice',setChosen,i)\n answer_result_list[list_problemid[i]]=answer_result\n elif problemObject.problemTypeNum==4:\n answer_result=FillBlanks_show(problemObject,'FillBlanks',setChosen,i)\n answer_result_list[list_problemid[i]]=answer_result\n st.text(answer_result_list)\n\n def write_user_result(right, wrong):\n df_user_all = pd.read_excel('user_all.xlsx', sheet_name='Users')\n df_user_now = pd.read_excel('user_temp.xlsx', sheet_name='Temp')\n user_name = df_user_now.loc[0, 'name']\n all_username = list(df_user_all['name'])\n index_num = all_username.index(user_name)\n\n comma = ','\n df_user_all.loc[index_num, 'right'] =str(df_user_all.loc[index_num, 'right'])+ comma + str(right).strip('{}').replace(' ','')\n df_user_all.loc[index_num, 'wrong'] =str(df_user_all.loc[index_num, 'wrong'])+ comma + str(wrong).strip('{}').replace(' ','')\n df_user_all.loc[index_num, 'right']= df_user_all.loc[index_num, 'right'].strip('nan,')\n df_user_all.loc[index_num, 'wrong']= df_user_all.loc[index_num, 'wrong'].strip('nan,')\n df_user_all.to_excel('user_all.xlsx',sheet_name='Users')\n\n #定义提交函数 \n def submitFunc(answer_result_list):\n \n df_user_all=pd.read_excel('user_all.xlsx',sheet_name='Users')\n df_user_now=pd.read_excel('user_temp.xlsx',sheet_name='Temp')\n user_name=df_user_now.loc[0,'name']\n right,wrong = match_answer(user_name,answer_result_list)\n\n indexlist_user=df_user_all.loc[df_user_all.name==user_name].index.tolist()#获取该user所在的行,返回一个列表\n index_user=indexlist_user[0]\n df_user_all.loc[index_user,setChosen]=3\n df_user_all.to_excel('user_all.xlsx',sheet_name='Users',index=False)\n st.write(str(right))\n write_user_result(right, wrong)\n\n\n if st.button('Submit'):\n submitFunc(answer_result_list)\n\n\n#定义页面3-提示用户已登录,请选择题库进行作答页面:\ndef page_already_login():\n st.title('Page Already Login')\n pass\n\n#定义页面4:提示用户未登录,请进行登录页面:\ndef 
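MultipleChoice_show() above recovers the answer key from st.multiselect by taking the first character of each selected option and sorting. Reduced to its essence (the option strings are illustrative):

```python
def choices_to_answer(selected):
    # each option string starts with its letter, e.g. "C. spam"
    return ''.join(sorted(opt[0] for opt in selected))

print(choices_to_answer(['C. spam', 'A. eggs']))  # -> AC
```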
page_not_login():\n st.title('Page Not Login')\n pass\n\n#定义页面5:提示用户,该题库已作答\ndef page_already_answer():\n st.title('Page Already Answer')\n pass\n\n#定义页面6:分析报告页面\ndef page_analyze():\n st.title('Page Analyze')\n temp_table = pd.read_excel('user_temp.xlsx', sheet_name='Temp')\n username = list(temp_table['name'])[0]\n plt.rcParams['font.sans-serif'] = ['SimHei']\n\n # 用来正常显示中文标签\n st.write(\"根据答题结果分析报告如下:\")\n\n\n def draw_columnars(list1, list2, type_list, colors, title):\n data = [list1, list2]\n x = range(len(list1))\n width = 0.35\n fig = plt.figure()\n # 将bottom_y元素都初始化为0\n bottom_y = np.zeros(len(type_list))\n data = np.array(data)\n # 按列计算计算每组柱子的总和,为计算百分比做准备\n sums = np.sum(data, axis=0)\n for i, color in zip(data, colors):\n # 计算每个柱子的高度,即百分比\n y = i / sums\n pl = plt.bar(x, y, width, bottom=bottom_y, color=color)\n plt.bar_label(pl, label_type=\"center\", color=\"k\")\n # 计算bottom参数的位置\n bottom_y = y + bottom_y\n # 生成legend\n legend_labels = [\"正确\", \"错误\"]\n patches = [mpatches.Patch(color=colors[h], label=\"{:s}\".format(legend_labels[h])) for h in\n range(len(legend_labels))]\n ax = plt.gca()\n ax.legend(handles=patches, ncol=1, bbox_to_anchor=(1, 1), borderaxespad=2)\n plt.xticks(x, type_list)\n # 纵轴设置为百分比\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.title(title, fontdict={'size': 15})\n return fig\n\n def draw_columnar1(right_list, wrong_list):\n type_list = ['BasicPython', 'LoopAndLogic', 'Function', 'Virtualization', 'NumpAndPandas']\n list1 = [0, 0, 0, 0, 0]\n list2 = [0, 0, 0, 0, 0]\n for i in right_list:\n temp = i.split(':')\n list1[int(temp[0][0:1]) - 1] += 1\n for i in wrong_list:\n temp = i.split(':')\n list2[int(temp[0][0:1]) - 1] += 1\n colors = ['#FFE4C4', '#B0C4DE']\n title = \"各题型情况分析\"\n fig = draw_columnars(list1, list2, type_list, colors, title)\n st.pyplot(fig)\n max_value = max(list1)\n temp = []\n for i in range(len(list1)):\n if list1[i] == max_value: temp.append(type_list[i])\n print(temp)\n st.subheader(\"您擅长的题型是:\" + \"、\".join(temp))\n\n def draw_columnar2(right_list, wrong_list):\n type_list = ['单选题', '判断题', '多选题', '填空题']\n list1 = [0, 0, 0, 0]\n list2 = [0, 0, 0, 0]\n for i in right_list:\n temp = i.split(':')\n list1[int(temp[1]) - 1] += 1\n for i in wrong_list:\n temp = i.split(':')\n list2[int(temp[1]) - 1] += 1\n colors = ['#AFEEEE', '#F08080']\n title = \"各题型情况分析\"\n fig = draw_columnars(list1, list2, type_list, colors, title)\n st.pyplot(fig)\n max_value = max(list1)\n temp = []\n for i in range(len(list1)):\n if list1[i] == max_value: temp.append(type_list[i])\n print(temp)\n st.subheader(\"您擅长的题型是:\" + \"、\".join(temp))\n\n def read_excel(username):\n user_table = pd.read_excel('user_all.xlsx', sheet_name='Users')\n all_username = list(user_table['name'])\n right_str = user_table.loc[all_username.index(username), 'right']\n wrong_str = user_table.loc[all_username.index(username), 'wrong']\n right_list = right_str.split(',')\n wrong_list = wrong_str.split(',')\n draw_columnar1(right_list, wrong_list)\n draw_columnar2(right_list, wrong_list)\n draw_pie(right_list, wrong_list)\n\n def draw_pie(right_list, wrong_list):\n labels = [\"正确\", \"错误\"]\n colors = [\"#40E0D0\", \"#3CB371\"]\n sizes = [len(right_list), len(wrong_list)]\n fig = plt.figure()\n plt.pie(sizes, labels=labels, autopct='%1.2f%%',\n startangle=90, colors=colors, textprops={'color': \"white\",\n \"fontsize\": \"16\"}) # '%1.1f':指小数点后保留一位有效数值;'%1.2f%%'保留两位小数点,增加百分号(%);startangle=90则从y轴正方向画起\n plt.axis('equal') # 该行代码使饼图长宽相等\n plt.title('答题总情况占比', 
fontdict={'size': 15})\n plt.legend(loc=\"upper right\", fontsize=10, bbox_to_anchor=(1.1, 1.05), borderaxespad=0.3) # 添加图例\n st.pyplot(fig)\n\n read_excel(username)\n\n\n\n#####################################################################定义页面7:用户退出登录页面:\ndef page_logout():\n user_table = pd.read_excel('user_all.xlsx', sheet_name='Users')\n temp_table = pd.read_excel('user_temp.xlsx', sheet_name='Temp')\n Username=list(temp_table['name'])[0]\n all_username = list(user_table['name'])\n st.title('Page Log Out')\n if st.button('log out'):\n user_table.loc[all_username.index(Username), 'Login'] = 1\n user_table.to_excel('user_all.xlsx',sheet_name='Users')\n\n\n#############################################################################更新(xhp\n#定义页面8:用户登录后的login界面\ndef page_Welcome():\n temp_table = pd.read_excel('user_temp.xlsx',sheet_name='Temp')\n username=str(temp_table.loc[0,'name'])\n st.title(username)\n st.title(\"Welcome to Python World \")\n\n#################################################################\n#定义不同题库的答题页面函数\n\ndef page_problem_BasicPython():\n setChosen='BasicPython'\n page_problem(setChosen)\n pass\n\ndef page_problem_LoopAndLogic():\n setChosen='LoopAndLogic'\n page_problem(setChosen)\n pass\n\ndef page_problem_Function():\n setChosen='Function'\n page_problem(setChosen)\n pass\n\ndef page_problem_Virtualization():\n setChosen='Virtualization'\n page_problem(setChosen)\n pass\n\ndef page_problem_NumpAndPandas():\n setChosen='NumpAndPandas'\n page_problem(setChosen)\n pass\n\n\n\n\n\n\n\n\n\n\n############################################################\n#定义函数:生成pagestate_dict的函数\n\ndef create_pagestate_dict():\n state_dict={}\n df_user_all=pd.read_excel('user_all.xlsx',sheet_name='Users')\n df_user_now=pd.read_excel('user_temp.xlsx',sheet_name='Temp')\n key_list=[\n 'Login',\n 'BasicPython',\n 'LoopAndLogic',\n 'Function',\n 'Virtualization',\n 'NumpAndPandas',\n 'Analysis',\n 'LogOut'\n ]\n user_name=df_user_now.loc[0,'name']\n indexlist_user=df_user_all.loc[df_user_all.name==user_name].index.tolist()#获取该user所在的行,返回一个列表\n index_user=indexlist_user[0]\n for key_name in key_list:\n state_dict[key_name]=df_user_all.loc[index_user,key_name]\n pagestate_dict=state_dict\n return pagestate_dict\n\n\n#定义函数:页面选择函数\ndef pageChooseFunc():\n LOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\n logging.basicConfig(filename='ExamSystem.log', level=logging.INFO, format=LOG_FORMAT)\n global pagestate_dict\n #pagestate_dict={\n # 'Login':1,\n # 'BasicPython':2,\n # 'LoopAndLogic':2,\n # 'Function':2,\n # 'Virtualization':2,\n # 'NumpAndPandas':2,\n # 'Analysis':2,\n # 'LogOut':2\n # }\n pagestate_dict = create_pagestate_dict()\n \n ###############################################\n #定义页面选择函数\n #对于登录界面,定义页面选择函数\n def login_page(loginState):\n if loginState==1:\n func='page_login'\n elif loginState== 2:\n func = 'page_Welcome'\n return func\n \n #对于BasicPython题库界面,定义页面选择函数:\n def BasicPython_page(Logo_state,BasicPythonState):\n if Logo_state==1:\n func='page_not_login'\n elif BasicPythonState==2:\n func='page_problem_BasicPython'\n #setChosen='BasicPython'\n elif BasicPythonState==3:\n func='page_already_answer'\n return func\n \n #对于LoopAndLogic题库界面,定义页面选择函数:\n def LoopAndLogic_page(Logo_state,LoopAndLogicState):\n if Logo_state==1:\n func='page_not_login'\n elif LoopAndLogicState==2:\n func='page_problem_LoopAndLogic'\n #setChosen='LoopAndLogic'\n elif LoopAndLogicState==3:\n func='page_already_answer'\n return func\n \n def Function_page(Logo_state,FunctionState):\n if 
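draw_columnars() above builds percentage-stacked bars by normalizing each column and accumulating the `bottom` offset. A stripped-down, self-contained version of the same matplotlib pattern with made-up counts:

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

right = np.array([4, 2, 5])
wrong = np.array([1, 3, 0])
totals = right + wrong
x = np.arange(len(right))

fig, ax = plt.subplots()
p1 = ax.bar(x, right / totals, 0.35, label="correct")
p2 = ax.bar(x, wrong / totals, 0.35, bottom=right / totals, label="wrong")
ax.bar_label(p1, label_type="center", fmt="%.2f")
ax.yaxis.set_major_formatter(PercentFormatter(1))   # 0..1 shown as 0%..100%
ax.set_xticks(x, ["single", "judge", "multi"])
ax.legend()
plt.show()
```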
Logo_state==1:\n func='page_not_login'\n elif FunctionState==2:\n func='page_problem_Function'\n #setChosen='Function'\n elif FunctionState==3:\n func='page_already_answer'\n return func\n \n def Virtualization_page(Logo_state,VirtualizationState):\n if Logo_state==1:\n func='page_not_login'\n elif VirtualizationState==2:\n func='page_problem_Virtualization'\n #setChosen='Virtualization'\n elif VirtualizationState==3:\n func='page_already_answer'\n return func\n \n def NumpAndPandas_page(Logo_state,NumpAndPandasState):\n if Logo_state==1:\n func='page_not_login'\n elif NumpAndPandasState==2:\n func='page_problem_NumpAndPandas'\n #setChosen='NumpAndPandas'\n elif NumpAndPandasState==3:\n func='page_already_answer'\n return func\n \n def Analysis_page(Logo_state,AnalysisState):\n if Logo_state==1:\n func='page_not_login'\n elif AnalysisState==2:\n func='page_analyze'\n return func\n #####################################这里修改logout和login都有一个参数控制\n def LogOut_page(loginState):\n if loginState==1:\n func='page_not_login'\n elif loginState==2:\n func='page_logout'\n return func\n \n ##############################################\n \n #定义一个字典,保存各界面的func\n pagefunc_dict={}\n\n def ChooseFunc_login(pagefunc_dict):\n interface='Login'#interface为界面的名称\n login_func=login_page(pagestate_dict[interface])\n pagefunc_dict[interface]=login_func\n return pagefunc_dict\n\n def ChooseFunc_BasicPython(pagefunc_dict):\n interface = 'BasicPython' # interface为界面的名称\n BasicPython_func = BasicPython_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['BasicPython'] = BasicPython_func\n return pagefunc_dict\n\n def ChooseFunc_LoopAndLogic(pagefunc_dict):\n interface = 'LoopAndLogic' # interface为界面的名称\n LoopAndLogic_func = LoopAndLogic_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['LoopAndLogic'] = LoopAndLogic_func\n return pagefunc_dict\n\n def ChooseFunc_Function(pagefunc_dict):\n interface = 'Function' # interface为界面的名称\n Function_func = Function_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['Function'] = Function_func\n return pagefunc_dict\n\n def ChooseFunc_Virtualization(pagefunc_dict):\n interface = 'Virtualization' # interface为界面的名称\n Virtualization_func = Virtualization_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['Virtualization'] = Virtualization_func\n return pagefunc_dict\n\n def ChooseFunc_NumpAndPandas(pagefunc_dict):\n interface = 'NumpAndPandas' # interface为界面的名称\n NumpAndPandas_func = NumpAndPandas_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['NumpAndPandas'] = NumpAndPandas_func\n return pagefunc_dict\n\n def ChooseFunc_Analysis(pagefunc_dict):\n interface = 'Analysis' # interface为界面的名称\n Analysis_func = Analysis_page(pagestate_dict['Login'],pagestate_dict[interface])\n pagefunc_dict['Analysis'] = Analysis_func\n return pagefunc_dict\n \n def ChooseFunc_LogOut(pagefunc_dict):\n interface='LogOut'#interface为界面的名称\n LogOut_func=LogOut_page(pagestate_dict['Login'])\n pagefunc_dict['LogOut']=LogOut_func\n return pagefunc_dict\n \n pagefunc_dict=ChooseFunc_login(pagefunc_dict)\n pagefunc_dict=ChooseFunc_BasicPython(pagefunc_dict)\n pagefunc_dict=ChooseFunc_LoopAndLogic(pagefunc_dict)\n pagefunc_dict=ChooseFunc_Function(pagefunc_dict)\n pagefunc_dict=ChooseFunc_Virtualization(pagefunc_dict)\n pagefunc_dict=ChooseFunc_NumpAndPandas(pagefunc_dict)\n pagefunc_dict=ChooseFunc_Analysis(pagefunc_dict)\n pagefunc_dict=ChooseFunc_LogOut(pagefunc_dict)\n \n return pagefunc_dict\n\n 
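The page dispatch above stores function *names* and calls eval(func_name)(); storing the callables themselves removes the eval. A hedged sketch of the same sidebar-radio dispatch (st.sidebar.radio is the real Streamlit API; the page functions are whatever the app registers):

```python
import streamlit as st

class MultiPages:
    def __init__(self):
        self.pages = {}                       # title -> page callable

    def add_page(self, title, func):
        self.pages.setdefault(title, func)

    def run(self):
        title = st.sidebar.radio('Go To', list(self.pages))
        self.pages[title]()                   # call the function directly
```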
######################################################################### \n#streamlit运行的主函数\npagefunc_dict=pageChooseFunc() \nsystem=MultiPages()\n\ndef add_page(title_name):\n func_name=pagefunc_dict[title_name]\n system.add_TitleAndFunc(title_name,func_name)\n \nadd_page('Login')\nadd_page('BasicPython')\nadd_page('LoopAndLogic')\nadd_page('Function')\nadd_page('Virtualization')\nadd_page('NumpAndPandas')\nadd_page('Analysis')\nadd_page('LogOut')\n\nsystem.run() \n \n \n ","repo_name":"cuhksz-PythonKiller/ExamSystem","sub_path":"ExamSystem/streamlitMain.py","file_name":"streamlitMain.py","file_ext":"py","file_size_in_byte":23346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35175668254","text":"from datetime import date\n\nclass Donkey:\n\n def __init__(self, name, species):\n self.name = name\n self.species = species\n self.date_added = date.today()\n self.walking = True\n\ndaisy = Donkey(\"Daisy\", \"donkey\")\n\nprint(daisy)\n\n","repo_name":"chesney-hardin/petting-zoo","sub_path":"walking/donkey.py","file_name":"donkey.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42140729237","text":"\r\ndef convert(a):\r\n sentence = a.split('. ')\r\n b= \"\"\r\n for i in sentence:\r\n b = b + \". \" + i.capitalize()\r\n return b\r\n #. Hello. Hi\r\ndef main():\r\n #\"hello. hi\r\n sentence = input(\"Enter a sentence: \")\r\n print(convert(sentence)[2::])\r\n #Hello. Hi\r\n\r\nmain()","repo_name":"vuhoangviet198/HW","sub_path":"HW/HW2/Problem4.py","file_name":"Problem4.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70779972054","text":"import torch\nimport torch.nn.functional as F\n\n\n@torch.jit.script\ndef calculate_distances(p0: torch.Tensor, p1: torch.Tensor) -> torch.Tensor:\n # ReLU prevents negative numbers in sqrt\n Dij = torch.sqrt(F.relu(torch.sum((p0 - p1) ** 2, -1)))\n return Dij\n\n\ndef calculate_torsions(p0: torch.Tensor, p1: torch.Tensor, p2: torch.Tensor, p3: torch.Tensor) -> torch.Tensor:\n b0 = -1.0 * (p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n if p0.dim() == 1:\n b1 /= b1.norm()\n else:\n b1 /= b1.norm(dim=1)[:, None]\n\n v = b0 - torch.sum(b0 * b1, dim=-1, keepdim=True) * b1\n w = b2 - torch.sum(b2 * b1, dim=-1, keepdim=True) * b1\n\n x = torch.sum(v * w, dim=-1)\n y = torch.sum(torch.cross(b1, v) * w, dim=-1)\n\n return torch.atan2(y, x)\n\n\n# %%\nif __name__ == '__main__':\n # %%\n coords = torch.tensor([[10.396, 18.691, 19.127],\n [9.902, 18.231, 20.266],\n [8.736, 17.274, 20.226],\n [7.471, 18.048, 19.846]])\n coords2 = torch.tensor([[7.471, 18.048, 19.846],\n [6.67, 17.583, 18.852],\n [5.494, 18.412, 18.503],\n [4.59, 18.735, 19.711]])\n\n print(calculate_torsions(*coords))\n print(calculate_torsions(*coords2))\n # %%\n # calculate_torsions(*coords[:, None, :])\n a = torch.cat((coords, coords2), 1).reshape(4, -1, 3)\n print(calculate_torsions(*a))\n","repo_name":"akirasosa/mobile-semantic-segmentation","sub_path":"src/mylib/torch/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":708,"dataset":"github-code","pt":"67"} +{"seq_id":"19164005543","text":"from Globals import ErrManager, ST_G\nfrom Parser_M import Parser\nfrom ASTWalker import ASTWalker, CodeGenerator\nfrom AssemblyGenerator import 
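calculate_torsions() in the record above obtains a signed dihedral from atan2 of two vectors projected off the central bond. The same derivation for a single quadruple of points in NumPy, reusing the first set of coordinates from that file's __main__ block (the torch version prints the matching value):

```python
import numpy as np

def dihedral(p0, p1, p2, p3):
    b0, b1, b2 = -(p1 - p0), p2 - p1, p3 - p2
    b1 = b1 / np.linalg.norm(b1)
    v = b0 - np.dot(b0, b1) * b1          # b0 projected off the b1 axis
    w = b2 - np.dot(b2, b1) * b1          # b2 projected off the b1 axis
    return np.arctan2(np.dot(np.cross(b1, v), w), np.dot(v, w))

coords = np.array([[10.396, 18.691, 19.127], [9.902, 18.231, 20.266],
                   [8.736, 17.274, 20.226], [7.471, 18.048, 19.846]])
print(dihedral(*coords))
```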
AssemblyGenerator\nimport sys\n\ndef driver():\n\n    s = \"\"\n    SourceCodeFile = \"\"\n    OutputFile = \"\"\n\n    #find input file\n    for i, arg in enumerate(sys.argv):\n        if arg == \"-i\":\n            SourceCodeFile = sys.argv[i+1]\n        if arg == \"-o\":\n            OutputFile = sys.argv[i+1]\n\n    ST_G.SourceFile = SourceCodeFile\n\n\n    # Construct parser\n    P = Parser(SourceFile=SourceCodeFile, DebugArgs = sys.argv)\n    P.BuildParser()\n\n    AST = P.RunParser()\n\n    # Run parser in try except block to enable compilation\n    # termination under various circumstances\n    # try:\n    #     AST = P.RunParser()\n    #     if ErrManager.HasErrors():\n    #         raise Exception()\n    # except Exception as e:\n    #     print(e)\n    #     print(\"\\n[Compilation Stopped]\\nThe Following Errors Were Found:\\n\")\n    #     ErrManager.PrintErrors()\n    #     return\n\n    ST_G.ClearSymbolTable()\n\n    AW = ASTWalker(AST)\n    AW.PrintASTHelper(AW.AST)\n\n    ICG = CodeGenerator(AST, \"intermediate.3AC\")\n\n    ICG.PrettyPrint3AC()\n\n    Assembly = AssemblyGenerator(ThreeAC = ICG.Output , Filename=OutputFile)\n\n\ndriver()\n","repo_name":"cscully-allison/C_Compiler","sub_path":"Parser/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} {"seq_id":"486840510","text":"\"\"\"\r\n    Starting at the top of the triangle below and moving down\r\nto adjacent numbers on the row below,\r\nthe maximum total from top to bottom is 23.\r\n\r\n   3\r\n  7 4\r\n 2 4 6\r\n8 5 9 3\r\n\r\n    That is, 3 + 7 + 4 + 9 = 23.\r\n\r\n    Find the maximum total of a path from the top\r\nto the bottom of the triangle below:\r\n\r\n\r\n    Note: since there are only 16384 possible routes from the top\r\nto the bottom of this triangle, the problem can be solved\r\nby checking every route.\r\nHowever, the similar Problem 67 with a triangle of one hundred rows\r\ncannot be solved by brute force and requires a cleverer approach! 
;o)\r\n\"\"\"\r\n\r\npyramid = '''\r\n75\r\n95 64\r\n17 47 82\r\n18 35 87 10\r\n20 04 82 47 65\r\n19 01 23 75 03 34\r\n88 02 77 73 07 63 67\r\n99 65 04 28 06 16 70 92\r\n41 41 26 56 83 40 80 70 33\r\n41 48 72 33 47 32 37 16 94 29\r\n53 71 44 65 25 43 91 52 97 51 14\r\n70 11 33 28 77 73 17 78 39 68 17 57\r\n91 71 52 38 17 14 91 43 58 50 27 29 48\r\n63 66 04 68 89 53 67 30 73 16 69 87 40 31\r\n04 62 98 27 23 09 70 98 73 93 38 53 60 04 23\r\n'''\r\n\r\n# Build a list (matrix) from the pyramid string\r\npyramid = pyramid.split('\\n')\r\npyramid = list(filter(None, pyramid))\r\nfor i in range(len(pyramid)):\r\n    if len(pyramid[i]) < len(pyramid[-1]):\r\n        while len(pyramid[i]) < len(pyramid[-1]):\r\n            pyramid[i] += ' 00'\r\n    \r\n    pyramid[i] = pyramid[i].split(' ')\r\n    for j in range(len(pyramid[i])):\r\n        pyramid[i][j] = int(pyramid[i][j])\r\n\r\n\r\ndef max_path_sum(tri, m):\r\n    # loop for the bottom-up calculation\r\n    for i in range(m - 1, -1, -1):\r\n        for j in range(i + 1):\r\n            # for each element, look at the two elements just below it\r\n            # (below and below-right) and add the larger one to it\r\n            if tri[i + 1][j] > tri[i + 1][j + 1]:\r\n                tri[i][j] += tri[i + 1][j]\r\n            else:\r\n                tri[i][j] += tri[i + 1][j + 1]\r\n\r\n    # return the top element, which now holds the maximum sum\r\n    return tri[0][0]\r\n\r\n\r\n# Driver code to test the function above\r\nprint(max_path_sum(pyramid, len(pyramid) - 1))\r\n","repo_name":"Tyferse/Python3_public","sub_path":"ProjecEuler2021/Tasks 11-20/Task18.py","file_name":"Task18.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} {"seq_id":"15013322092","text":"\"\"\"\nWord Search\n\nGiven a 2D board and a word, find if the word exists in the grid.\n\nThe word can be constructed from letters of sequentially adjacent cell, where \"adjacent\" cells are those horizontally or vertically neighboring. 
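max_path_sum() above is the standard bottom-up triangle reduction. Run on the small example triangle from the docstring, each pass folds a row into the one above it, leaving the best path sum at the apex:

```python
tri = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(len(tri) - 2, -1, -1):       # from the second-last row up
    for j in range(i + 1):
        tri[i][j] += max(tri[i + 1][j], tri[i + 1][j + 1])
print(tri[0][0])  # 23, i.e. 3 + 7 + 4 + 9
```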
The same letter cell may not be used more than once.\n\nExample:\n\nboard =\n[\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']\n]\n\nGiven word = \"ABCCED\", return true.\nGiven word = \"SEE\", return true.\nGiven word = \"ABCB\", return false.\n\n \n\nConstraints:\n\n board and word consists only of lowercase and uppercase English letters.\n 1 <= board.length <= 200\n 1 <= board[i].length <= 200\n 1 <= word.length <= 10^3\n\n\"\"\"\n\n# DFS\nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n if not board or not board[0]:\n return False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == word[0]:\n if self.dfs(board, i, j, word):\n return True\n return False\n \n def dfs(self, board, i, j, word):\n if len(word) == 1:\n return True\n temp = board[i][j]\n board[i][j] = '#'\n for x, y in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if 0<=i+x None:\n history_dict = history.history\n for metric in metrics:\n train_metric = history_dict[metric]\n val_metric = history_dict['val_{}'.format(metric)]\n epochs = range(1, len(train_metric) + 1)\n\n plt.figure()\n plt.plot(epochs, train_metric, label='Training {}'.format(metric))\n plt.plot(epochs, val_metric, label='Validation {}'.format(metric))\n plt.xlabel('Epochs')\n plt.ylabel(metric)\n plt.legend()\n plt.show()\n","repo_name":"nex3z/keras-exercises","sub_path":"deep-learning-with-python/sequence/imdb/plot_util.py","file_name":"plot_util.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23112632259","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport re\nimport glob\nimport xml.etree.ElementTree as ET\n\nfrom rdkit import Chem\n\nimport RDKitUtil\nimport MiscUtil\n\n__all__ = [\"CalculateTorsionAngleDifference\", \"DoesSMARTSContainsMappedAtoms\", \"DoesSMARTSContainValidSubClassMappedAtoms\", \"DoesSMARTSContainValidTorsionRuleMappedAtoms\", \"GetGenericHierarchyClassElementNode\", \"IdentifyRotatableBondsForTorsionLibraryMatch\", \"IsSpecificHierarchyClass\", \"ListTorsionLibraryInfo\", \"RemoveLastHierarchyClassElementNodeFromTracking\", \"RemoveLastHierarchySubClassElementNodeFromTracking\", \"RetrieveTorsionLibraryInfo\", \"SetupHierarchyClassAndSubClassNamesForRotatableBond\", \"SetupHierarchySubClassElementPatternMol\", \"SetupTorsionRuleElementPatternMol\", \"SetupTorsionLibraryInfoForMatchingRotatableBonds\", \"TrackHierarchyClassElementNode\", \"TrackHierarchySubClassElementNode\"]\n\ndef RetrieveTorsionLibraryInfo(TorsionLibraryFilePath, Quiet = True):\n \"\"\"Retrieve torsion library information.\n \n Arguments:\n TorsionLibraryFilePath (str): Torsion library XML file path.\n\n Returns:\n object: An object returned by xml.etree.ElementTree.parse function.\n\n Notes:\n The XML file is parsed using xml.etree.ElementTree.parse function and\n object created by the parse function is simply returned.\n\n \"\"\"\n \n if not Quiet:\n MiscUtil.PrintInfo(\"\\nRetrieving data from torsion library file %s...\" % TorsionLibraryFilePath)\n\n try:\n TorsionLibElementTree = ET.parse(TorsionLibraryFilePath)\n except Exception as ErrMsg:\n MiscUtil.PrintError(\"Failed to parse torsion library file: %s\" % ErrMsg)\n\n return TorsionLibElementTree\n\ndef ListTorsionLibraryInfo(TorsionLibElementTree):\n \"\"\"List torsion library information using XML tree object. 
The following\n information is listed:\n \n Summary:\n \n Total number of HierarchyClass nodes: \n Total number of HierarchyClassSubClass nodes: ; HierarchySubClass nodes: ;\n TorsionRule nodes: \n ... ... ...\n \n Arguments:\n TorsionLibElementTree (object): XML tree object.\n\n Returns:\n Nothing.\n\n \"\"\"\n HierarchyClassesInfo = {}\n HierarchyClassesInfo[\"HierarchyClassNames\"] = []\n HierarchyClassesInfo[\"HierarchySubClassCount\"] = {}\n HierarchyClassesInfo[\"TorsionRuleCount\"] = {}\n\n HierarchyClassCount, HierarchySubClassCount, TorsionRuleCount = [0] * 3\n\n for HierarchyClassNode in TorsionLibElementTree.findall(\"hierarchyClass\"):\n HierarchyClassCount += 1\n HierarchyClassName = HierarchyClassNode.get(\"name\")\n if HierarchyClassName in HierarchyClassesInfo[\"HierarchyClassNames\"]:\n MiscUtil.PrintWarning(\"Hierarchy class name, %s, already exists...\" % HierarchyClassName)\n HierarchyClassesInfo[\"HierarchyClassNames\"].append(HierarchyClassName)\n\n SubClassCount = 0\n for HierarchySubClassNode in HierarchyClassNode.iter(\"hierarchySubClass\"):\n SubClassCount += 1\n HierarchyClassesInfo[\"HierarchySubClassCount\"][HierarchyClassName] = SubClassCount\n HierarchySubClassCount += SubClassCount\n\n RuleCount = 0\n for TorsionRuleNode in HierarchyClassNode.iter(\"torsionRule\"):\n RuleCount += 1\n \n HierarchyClassesInfo[\"TorsionRuleCount\"][HierarchyClassName] = RuleCount\n TorsionRuleCount += RuleCount\n\n MiscUtil.PrintInfo(\"\\nTotal number of HierarchyClass nodes: %s\" % HierarchyClassCount)\n MiscUtil.PrintInfo(\"Total number of HierarchyClassSubClass nodes: %s\" % HierarchySubClassCount)\n MiscUtil.PrintInfo(\"Total number of TorsionRule nodes: %s\" % TorsionRuleCount)\n\n # List info for each hierarchyClass...\n MiscUtil.PrintInfo(\"\")\n \n # Generic class first...\n GenericClassName = \"GG\"\n if GenericClassName in HierarchyClassesInfo[\"HierarchyClassNames\"]:\n MiscUtil.PrintInfo(\"HierarchyClass: %s; HierarchySubClass nodes: %s; TorsionRule nodes: %s\" % (GenericClassName, HierarchyClassesInfo[\"HierarchySubClassCount\"][GenericClassName], HierarchyClassesInfo[\"TorsionRuleCount\"][GenericClassName]))\n \n for HierarchyClassName in sorted(HierarchyClassesInfo[\"HierarchyClassNames\"]):\n if HierarchyClassName == GenericClassName:\n continue\n MiscUtil.PrintInfo(\"HierarchyClass: %s; HierarchySubClass nodes: %s; TorsionRule nodes: %s\" % (HierarchyClassName, HierarchyClassesInfo[\"HierarchySubClassCount\"][HierarchyClassName], HierarchyClassesInfo[\"TorsionRuleCount\"][HierarchyClassName]))\n\ndef SetupTorsionLibraryInfoForMatchingRotatableBonds(TorsionLibraryInfo):\n \"\"\"Setup torsion library information for matching rotatable bonds. 
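ListTorsionLibraryInfo() above leans on two ElementTree traversal modes: findall() for direct children and iter() for all descendants of a tag. The distinction in miniature, with a made-up XML snippet shaped like the torsion library:

```python
import xml.etree.ElementTree as ET

xml = """<library>
  <hierarchyClass name="CO">
    <hierarchySubClass name="ester" smarts="[CX3](=O)O"/>
    <torsionRule smarts="[*:1][CX3:2](=O)[O:3][*:4]"/>
  </hierarchyClass>
</library>"""

root = ET.fromstring(xml)
for cls in root.findall("hierarchyClass"):                 # direct children
    n_sub = sum(1 for _ in cls.iter("hierarchySubClass"))  # all descendants
    n_rule = sum(1 for _ in cls.iter("torsionRule"))
    print(cls.get("name"), n_sub, n_rule)                  # CO 1 1
```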
The\n following information is initialized and updated in torsion library\n dictionary for matching rotatable bonds:\n \n TorsionLibraryInfo[\"GenericClass\"] = None\n TorsionLibraryInfo[\"GenericClassElementNode\"] = None\n \n TorsionLibraryInfo[\"SpecificClasses\"] = {}\n TorsionLibraryInfo[\"SpecificClasses\"][\"Names\"] = []\n TorsionLibraryInfo[\"SpecificClasses\"][\"ElementNode\"] = {}\n \n TorsionLibraryInfo[\"HierarchyClassNodes\"] = []\n TorsionLibraryInfo[\"HierarchySubClassNodes\"] = []\n \n TorsionLibraryInfo[\"DataCache\"] = {}\n TorsionLibraryInfo[\"DataCache\"][\"SubClassPatternMol\"] = {}\n \n TorsionLibraryInfo[\"DataCache\"][\"TorsionRulePatternMol\"] = {}\n TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleAnglesInfo\"] = {}\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing root node for\n torsion library element tree.\n\n Returns:\n Nonthing. The torsion library information dictionary is updated.\n\n \"\"\"\n _SetupTorsionLibraryHierarchyClassesInfoForMatchingRotatableBonds(TorsionLibraryInfo)\n _SetupTorsionLibraryDataCacheInfoForMatchingRotatableBonds(TorsionLibraryInfo)\n \ndef _SetupTorsionLibraryHierarchyClassesInfoForMatchingRotatableBonds(TorsionLibraryInfo):\n \"\"\"Setup hierarchy classes information for generic and specific classes.\"\"\"\n \n RootElementNode = TorsionLibraryInfo[\"TorsionLibElementTree\"]\n \n TorsionLibraryInfo[\"GenericClass\"] = None\n TorsionLibraryInfo[\"GenericClassElementNode\"] = None\n \n TorsionLibraryInfo[\"SpecificClasses\"] = {}\n TorsionLibraryInfo[\"SpecificClasses\"][\"Names\"] = []\n TorsionLibraryInfo[\"SpecificClasses\"][\"ElementNode\"] = {}\n\n # Class name stacks for tracking names during processing of torsion rules..\n TorsionLibraryInfo[\"HierarchyClassNodes\"] = []\n TorsionLibraryInfo[\"HierarchySubClassNodes\"] = []\n\n ElementNames = []\n for ElementNode in RootElementNode.findall(\"hierarchyClass\"):\n ElementName = ElementNode.get(\"name\")\n if ElementName in ElementNames:\n MiscUtil.PrintWarning(\"Hierarchy class name, %s, already exists. Ignoring duplicate name...\" % ElementName)\n continue\n\n ElementNames.append(ElementName)\n \n if re.match(\"^GG$\", ElementName, re.I):\n TorsionLibraryInfo[\"GenericClass\"] = ElementName\n TorsionLibraryInfo[\"GenericClassElementNode\"] = ElementNode\n else:\n TorsionLibraryInfo[\"SpecificClasses\"][\"Names\"].append(ElementName)\n TorsionLibraryInfo[\"SpecificClasses\"][\"ElementNode\"][ElementName] = ElementNode\n\ndef _SetupTorsionLibraryDataCacheInfoForMatchingRotatableBonds(TorsionLibraryInfo):\n \"\"\"Setup information for caching molecules for hierarchy subclass and torsion rule patterns.\"\"\"\n \n TorsionLibElementTree = TorsionLibraryInfo[\"TorsionLibElementTree\"]\n\n # Initialize data cache for pattern molecules corresponding to SMARTS patterns for\n # hierarchy subclasses and torsion rules. 
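SetupHierarchySubClassElementPatternMol() above memoizes Chem.MolFromSmarts by node ID so each SMARTS pattern is compiled only once per library. The caching idea reduced to a generic helper (MolFromSmarts is the real RDKit call; the cache keying is the illustrative part):

```python
from rdkit import Chem

_pattern_cache = {}

def get_pattern_mol(node_id, smarts):
    if node_id not in _pattern_cache:
        # MolFromSmarts returns None for an invalid pattern; cache that too
        _pattern_cache[node_id] = Chem.MolFromSmarts(smarts)
    return _pattern_cache[node_id]
```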
The pattern mols are generated and cached\n # later.\n TorsionLibraryInfo[\"DataCache\"] = {}\n TorsionLibraryInfo[\"DataCache\"][\"SubClassPatternMol\"] = {}\n \n TorsionLibraryInfo[\"DataCache\"][\"TorsionRulePatternMol\"] = {}\n TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleLonePairMapNumber\"] = {}\n TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleAnglesInfo\"] = {}\n \n HierarchyClassID, HierarchySubClassID, TorsionRuleID = [0] * 3\n \n for HierarchyClassNode in TorsionLibElementTree.findall(\"hierarchyClass\"):\n HierarchyClassID += 1\n \n for HierarchySubClassNode in HierarchyClassNode.iter(\"hierarchySubClass\"):\n HierarchySubClassID += 1\n # Add unique ID to node...\n HierarchySubClassNode.set(\"NodeID\", HierarchySubClassID)\n \n for TorsionRuleNode in HierarchyClassNode.iter(\"torsionRule\"):\n TorsionRuleID += 1\n # Add unique ID to node...\n TorsionRuleNode.set(\"NodeID\", TorsionRuleID)\n\ndef IdentifyRotatableBondsForTorsionLibraryMatch(TorsionLibraryInfo, Mol, RotBondsPatternMol):\n \"\"\"Identify rotatable bonds in a molecule for torsion library match.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n Mol (object): RDKit molecule object.\n RotBondsPatternMol (object): RDKit molecule object for SMARTS pattern\n corresponding to rotatable bonds.\n\n Returns:\n bool: True - Rotatable bonds present in molecule; Otherwise, false.\n None or dict: None - For no rotatable bonds in molecule; otherwise, a\n dictionary containing the following informations for rotatable bonds\n matched to RotBondsPatternMol:\n \n RotBondsInfo[\"IDs\"] = []\n RotBondsInfo[\"AtomIndices\"] = {}\n RotBondsInfo[\"HierarchyClass\"] = {}\n\n \"\"\"\n # Match rotatable bonds...\n RotBondsMatches = RDKitUtil.FilterSubstructureMatchesByAtomMapNumbers(Mol, RotBondsPatternMol, Mol.GetSubstructMatches(RotBondsPatternMol, useChirality = False))\n\n # Check and filter rotatable bond matches...\n RotBondsMatches = _FilterRotatableBondMatches(Mol, RotBondsMatches)\n\n if not len(RotBondsMatches):\n return False, None\n\n # Initialize rotatable bonds info...\n RotBondsInfo = {}\n RotBondsInfo[\"IDs\"] = []\n RotBondsInfo[\"AtomIndices\"] = {}\n RotBondsInfo[\"HierarchyClass\"] = {}\n \n # Setup rotatable bonds info...\n ID = 0\n for RotBondAtomIndices in RotBondsMatches:\n ID += 1\n\n RotBondAtoms = [Mol.GetAtomWithIdx(RotBondAtomIndices[0]), Mol.GetAtomWithIdx(RotBondAtomIndices[1])]\n RotBondAtomSymbols = [RotBondAtoms[0].GetSymbol(), RotBondAtoms[1].GetSymbol()]\n \n ClassID = \"%s%s\" % (RotBondAtomSymbols[0], RotBondAtomSymbols[1])\n if ClassID not in TorsionLibraryInfo[\"SpecificClasses\"][\"Names\"]:\n ReverseClassID = \"%s%s\" % (RotBondAtomSymbols[1], RotBondAtomSymbols[0])\n if ReverseClassID in TorsionLibraryInfo[\"SpecificClasses\"][\"Names\"]:\n ClassID = ReverseClassID\n # Reverse atom indices and related information...\n RotBondAtomIndices = list(reversed(RotBondAtomIndices))\n RotBondAtoms = list(reversed(RotBondAtoms))\n RotBondAtomSymbols = list(reversed(RotBondAtomSymbols))\n \n # Track information...\n RotBondsInfo[\"IDs\"].append(ID)\n RotBondsInfo[\"AtomIndices\"][ID] = RotBondAtomIndices\n RotBondsInfo[\"HierarchyClass\"][ID] = ClassID\n \n return True, RotBondsInfo\n\ndef _FilterRotatableBondMatches(Mol, RotBondsMatches):\n \"\"\"Filter rotatable bond matches to ensure that each rotatable bond atom\n is attached to at least two heavy atoms. 
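_FilterRotatableBondMatches() below keeps a bond only when both end atoms have more than one heavy neighbor, so torsion SMARTS cannot end up anchored on terminal hydrogens. A hedged sketch of that test on a small molecule (the RDKit calls are real; the molecule and bond indices are illustrative):

```python
from rdkit import Chem

def num_heavy_neighbors(atom):
    return sum(1 for nbr in atom.GetNeighbors() if nbr.GetAtomicNum() > 1)

mol = Chem.MolFromSmiles('CCOC')   # ethyl methyl ether
i, j = 1, 2                        # the central C-O bond
keep = all(num_heavy_neighbors(mol.GetAtomWithIdx(k)) > 1 for k in (i, j))
print(keep)  # True: both atoms have two heavy neighbors
```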
Otherwise, the torsion rules might match\n hydrogens.\"\"\"\n \n FilteredRotBondMatches = []\n \n # Go over rotatable bonds...\n for RotBondMatch in RotBondsMatches:\n SkipRotBondMatch = False\n for AtomIndex in RotBondMatch:\n Atom = Mol.GetAtomWithIdx(AtomIndex)\n HeavyAtomNeighborCount = RDKitUtil.GetNumHeavyAtomNeighbors(Atom)\n \n if HeavyAtomNeighborCount <= 1:\n SkipRotBondMatch = True\n break\n \n if not SkipRotBondMatch:\n FilteredRotBondMatches.append(RotBondMatch)\n\n return FilteredRotBondMatches\n\ndef SetupHierarchySubClassElementPatternMol(TorsionLibraryInfo, ElementNode):\n \"\"\"Setup pattern molecule for SMARTS pattern in hierarchy subclass element.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n ElementNode (object): A hierarchy sub class element node being matched\n in torsion library XML tree.\n\n Returns:\n object: RDKit molecule object corresponding to SMARTS pattern for\n hierarchy sub class element node.\n\n \"\"\"\n # Check data cache...\n SubClassNodeID = ElementNode.get(\"NodeID\")\n if SubClassNodeID in TorsionLibraryInfo[\"DataCache\"][\"SubClassPatternMol\"]:\n return(TorsionLibraryInfo[\"DataCache\"][\"SubClassPatternMol\"][SubClassNodeID])\n\n # Setup and track pattern mol...\n SubClassSMARTSPattern = ElementNode.get(\"smarts\")\n SubClassPatternMol = Chem.MolFromSmarts(SubClassSMARTSPattern)\n \n if SubClassPatternMol is None:\n MiscUtil.PrintWarning(\"Ignoring hierarchical subclass, %s, containing invalid SMARTS pattern %s\" % (ElementNode.get(\"name\"), SubClassSMARTSPattern))\n \n if not DoesSMARTSContainValidSubClassMappedAtoms(SubClassSMARTSPattern):\n SubClassPatternMol = None\n MiscUtil.PrintWarning(\"Ignoring hierarchical subclass, %s, containing invalid map atom numbers in SMARTS pattern %s\" % (ElementNode.get(\"name\"), SubClassSMARTSPattern))\n \n TorsionLibraryInfo[\"DataCache\"][\"SubClassPatternMol\"][SubClassNodeID] = SubClassPatternMol\n \n return SubClassPatternMol\n\ndef SetupTorsionRuleElementPatternMol(TorsionLibraryInfo, ElementNode, TorsionRuleNodeID, TorsionSMARTSPattern):\n \"\"\"Setup pattern molecule for SMARTS pattern in torsion rule element.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n ElementNode (object): A torsion rule element node being matched in\n torsion library XML tree.\n TorsionRuleNodeID (int): Torsion rule element node ID.\n TorsionSMARTSPattern (str): SMARTS pattern for torsion rule element node.\n\n Returns:\n object: RDKit molecule object corresponding to SMARTS pattern for\n torsion rule element node.\n\n \"\"\"\n\n if TorsionRuleNodeID in TorsionLibraryInfo[\"DataCache\"][\"TorsionRulePatternMol\"]:\n return (TorsionLibraryInfo[\"DataCache\"][\"TorsionRulePatternMol\"][TorsionRuleNodeID])\n \n TorsionPatternMol = Chem.MolFromSmarts(TorsionSMARTSPattern)\n if TorsionPatternMol is None:\n MiscUtil.PrintWarning(\"Ignoring torsion rule element containing invalid SMARTS pattern %s\" % TorsionSMARTSPattern)\n \n if not DoesSMARTSContainValidTorsionRuleMappedAtoms(TorsionSMARTSPattern):\n TorsionPatternMol = None\n MiscUtil.PrintWarning(\"Ignoring torsion rule element containing invalid map atom numbers in SMARTS pattern %s\" % TorsionSMARTSPattern)\n \n TorsionLibraryInfo[\"DataCache\"][\"TorsionRulePatternMol\"][TorsionRuleNodeID] = TorsionPatternMol\n \n return TorsionPatternMol\n
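# Hedged illustration, not part of the original library: both setup functions
# above compile a SMARTS pattern once, validate its atom map numbers, and then
# cache the result, caching None for invalid patterns. The SMARTS below is a
# made-up example of what the torsion rule validator expects.
def _ExampleTorsionRulePatternCheck():
    """Illustrative only; the SMARTS pattern here is hypothetical."""
    TorsionSMARTS = "[CX4:1][CX4:2]!@[CX4:3][CX4:4]"
    # A valid torsion rule pattern compiles and carries exactly the four
    # map numbers :1 through :4 identifying the torsion atoms...
    assert Chem.MolFromSmarts(TorsionSMARTS) is not None
    assert DoesSMARTSContainValidTorsionRuleMappedAtoms(TorsionSMARTS)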
\ndef SetupHierarchyClassAndSubClassNamesForRotatableBond(TorsionLibraryInfo):\n \"\"\" Setup hierarchy class and subclass names for a rotatable bond matched to\n a torsion rule element node.\n\n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n\n Returns:\n str: Hierarchy class name at the level of torsion rule element node.\n str: A forward slash delimited string containing hierarchy sub class names\n at the level of torsion rule element node.\n\n \"\"\"\n HierarchyClassName, HierarchyClassSubName = [\"None\"] * 2\n\n # Setup hierarchy class name...\n if len(TorsionLibraryInfo[\"HierarchyClassNodes\"]):\n HierarchyClassElementNode = TorsionLibraryInfo[\"HierarchyClassNodes\"][-1]\n HierarchyClassName = HierarchyClassElementNode.get(\"name\")\n if len(HierarchyClassName) == 0:\n HierarchyClassName = \"None\"\n \n # Setup hierarchy subclass names...\n if len(TorsionLibraryInfo[\"HierarchySubClassNodes\"]):\n HierarchySubClassNames = []\n for ElementNode in TorsionLibraryInfo[\"HierarchySubClassNodes\"]:\n Name = ElementNode.get(\"name\")\n if len(Name) == 0:\n Name = \"None\"\n HierarchySubClassNames.append(Name)\n \n HierarchyClassSubName = \"/\".join(HierarchySubClassNames)\n \n # Replace spaces by underscores in class and subclass names...\n if HierarchyClassName is not None:\n if \" \" in HierarchyClassName:\n HierarchyClassName = HierarchyClassName.replace(\" \", \"_\")\n \n if HierarchyClassSubName is not None:\n if \" \" in HierarchyClassSubName:\n HierarchyClassSubName = HierarchyClassSubName.replace(\" \", \"_\")\n \n return (HierarchyClassName, HierarchyClassSubName)\n\ndef SetupTorsionRuleAnglesInfo(TorsionLibraryInfo, TorsionRuleElementNode):\n \"\"\"Setup torsion angles and energy info for matching a torsion rule.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n TorsionRuleElementNode (object): A torsion rule element node being\n matched in torsion library XML tree.\n\n Returns:\n dict: A dictionary containing the following information for torsion rule\n being matched to a rotatable bond:\n \n RuleAnglesInfo = {}\n \n RuleAnglesInfo[\"IDs\"] = []\n RuleAnglesInfo[\"Value\"] = {}\n RuleAnglesInfo[\"Score\"] = {}\n RuleAnglesInfo[\"Tolerance1\"] = {}\n RuleAnglesInfo[\"Tolerance2\"] = {}\n \n RuleAnglesInfo[\"ValuesList\"] = []\n RuleAnglesInfo[\"ValuesIn360RangeList\"] = []\n RuleAnglesInfo[\"Tolerances1List\"] = []\n RuleAnglesInfo[\"Tolerances2List\"] = []\n \n # Strain energy calculations...\n RuleAnglesInfo[\"EnergyMethod\"] = None\n RuleAnglesInfo[\"EnergyMethodExact\"] = None\n RuleAnglesInfo[\"EnergyMethodApproximate\"] = None\n \n # For approximate strain energy calculation...\n RuleAnglesInfo[\"Beta1\"] = {}\n RuleAnglesInfo[\"Beta2\"] = {}\n RuleAnglesInfo[\"Theta0\"] = {}\n \n # For exact strain energy calculation...\n RuleAnglesInfo[\"HistogramEnergy\"] = []\n RuleAnglesInfo[\"HistogramEnergyLowerBound\"] = []\n RuleAnglesInfo[\"HistogramEnergyUpperBound\"] = []\n \n \"\"\"\n # Check data cache...\n TorsionRuleNodeID = TorsionRuleElementNode.get(\"NodeID\")\n if TorsionRuleNodeID in TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleAnglesInfo\"]:\n return TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleAnglesInfo\"][TorsionRuleNodeID]\n \n # Initialize rule angles info...\n RuleAnglesInfo = {}\n \n RuleAnglesInfo[\"IDs\"] = []\n RuleAnglesInfo[\"Value\"] = {}\n RuleAnglesInfo[\"Score\"] = {}\n RuleAnglesInfo[\"Tolerance1\"] = {}\n RuleAnglesInfo[\"Tolerance2\"] = {}\n \n RuleAnglesInfo[\"ValuesList\"] = []\n 
RuleAnglesInfo[\"ValuesIn360RangeList\"] = []\n RuleAnglesInfo[\"Tolerances1List\"] = []\n RuleAnglesInfo[\"Tolerances2List\"] = []\n\n # Strain energy calculations...\n RuleAnglesInfo[\"EnergyMethod\"] = None\n RuleAnglesInfo[\"EnergyMethodExact\"] = None\n RuleAnglesInfo[\"EnergyMethodApproximate\"] = None\n\n # For approximate strain energy calculation....\n RuleAnglesInfo[\"Beta1\"] = {}\n RuleAnglesInfo[\"Beta2\"] = {}\n RuleAnglesInfo[\"Theta0\"] = {}\n \n # For exact strain energy calculation...\n RuleAnglesInfo[\"HistogramEnergy\"] = []\n RuleAnglesInfo[\"HistogramEnergyLowerBound\"] = []\n RuleAnglesInfo[\"HistogramEnergyUpperBound\"] = []\n\n # Setup strain energy calculation information...\n EnergyMethod, EnergyMethodExact, EnergyMethodApproximate = [None] * 3\n EnergyMethod = TorsionRuleElementNode.get(\"method\")\n if EnergyMethod is not None:\n EnergyMethodExact = True if re.match(\"^exact$\", EnergyMethod, re.I) else False\n EnergyMethodApproximate = True if re.match(\"^approximate$\", EnergyMethod, re.I) else False\n \n RuleAnglesInfo[\"EnergyMethod\"] = EnergyMethod\n RuleAnglesInfo[\"EnergyMethodExact\"] = EnergyMethodExact\n RuleAnglesInfo[\"EnergyMethodApproximate\"] = EnergyMethodApproximate\n \n # Setup angles information....\n AngleID = 0\n for AngleListElementNode in TorsionRuleElementNode.findall(\"angleList\"):\n for AngleNode in AngleListElementNode.iter(\"angle\"):\n AngleID += 1\n Value = float(AngleNode.get(\"value\"))\n Tolerance1 = float(AngleNode.get(\"tolerance1\"))\n Tolerance2 = float(AngleNode.get(\"tolerance2\"))\n Score = float(AngleNode.get(\"score\"))\n\n # Track values...\n RuleAnglesInfo[\"IDs\"].append(AngleID)\n RuleAnglesInfo[\"Value\"][AngleID] = Value\n RuleAnglesInfo[\"Score\"][AngleID] = Score\n RuleAnglesInfo[\"Tolerance1\"][AngleID] = Tolerance1\n RuleAnglesInfo[\"Tolerance2\"][AngleID] = Tolerance2\n\n RuleAnglesInfo[\"ValuesList\"].append(Value)\n RuleAnglesInfo[\"Tolerances1List\"].append(Tolerance1)\n RuleAnglesInfo[\"Tolerances2List\"].append(Tolerance2)\n\n # Map value to 0 to 360 range...\n MappedValue = Value + 360 if Value < 0 else Value\n RuleAnglesInfo[\"ValuesIn360RangeList\"].append(MappedValue)\n\n # Approximate strain energy calculation information...\n if EnergyMethodApproximate:\n Beta1 = float(AngleNode.get(\"beta_1\"))\n Beta2 = float(AngleNode.get(\"beta_2\"))\n Theta0 = float(AngleNode.get(\"theta_0\"))\n \n RuleAnglesInfo[\"Beta1\"][AngleID] = Beta1\n RuleAnglesInfo[\"Beta2\"][AngleID] = Beta2\n RuleAnglesInfo[\"Theta0\"][AngleID] = Theta0\n \n # Exact energy method information...\n if EnergyMethodExact:\n for HistogramBinNode in TorsionRuleElementNode.find(\"histogram_converted\").iter(\"bin\"):\n Energy = float(HistogramBinNode.get(\"energy\"))\n Lower = float(HistogramBinNode.get(\"lower\"))\n Upper = float(HistogramBinNode.get(\"upper\"))\n \n RuleAnglesInfo[\"HistogramEnergy\"].append(Energy)\n RuleAnglesInfo[\"HistogramEnergyLowerBound\"].append(Lower)\n RuleAnglesInfo[\"HistogramEnergyUpperBound\"].append(Upper)\n \n if len(RuleAnglesInfo[\"IDs\"]) == 0:\n RuleInfo = None\n \n # Cache data...\n TorsionLibraryInfo[\"DataCache\"][\"TorsionRuleAnglesInfo\"][TorsionRuleNodeID] = RuleAnglesInfo\n\n return RuleAnglesInfo\n\ndef DoesSMARTSContainValidSubClassMappedAtoms(SMARTS):\n \"\"\"Check for the presence of two central mapped atoms in SMARTS pattern.\n A valid SMARTS pattern must contain only two mapped atoms corresponding\n to map atom numbers ':2' and ':3'.\n \n Arguments:\n SMARTS (str): SMARTS pattern for 
sub class in torsion library XML tree.\n\n Returns:\n bool: True - A valid pattern; Otherwise, false.\n\n \"\"\"\n MatchedMappedAtoms = re.findall(\":[0-9]\", SMARTS, re.I)\n if len(MatchedMappedAtoms) < 2 or len(MatchedMappedAtoms) > 4:\n return False\n\n # Check for the presence of two central atom map numbers for a torsion...\n for MapAtomNum in [\":2\", \":3\"]:\n if MapAtomNum not in MatchedMappedAtoms:\n return False\n \n return True\n\ndef DoesSMARTSContainValidTorsionRuleMappedAtoms(SMARTS):\n \"\"\"Check for the presence of four mapped atoms in a SMARTS pattern.\n A valid SMARTS pattern must contain only four mapped atoms corresponding\n to map atom numbers ':1', ':2', ':3' and ':4'.\n \n Arguments:\n SMARTS (str): SMARTS pattern for torsion rule in torsion library XML\n tree.\n\n Returns:\n bool: True - A valid pattern; Otherwise, false.\n\n \"\"\"\n MatchedMappedAtoms = re.findall(\":[0-9]\", SMARTS, re.I)\n if len(MatchedMappedAtoms) != 4:\n return False\n\n # Check for the presence of four atom map numbers for a torsion...\n for MapAtomNum in [\":1\", \":2\", \":3\", \":4\"]:\n if MapAtomNum not in MatchedMappedAtoms:\n return False\n \n return True\n\ndef DoesSMARTSContainsMappedAtoms(SMARTS, MappedAtomNumsList):\n \"\"\"Check for the presence of specified mapped atoms in SMARTS pattern.\n The mapped atom numbers in the list are specified as ':1', ':2', ':3' etc.\n \n Arguments:\n SMARTS (str): SMARTS pattern in torsion library XML tree.\n MappedAtomNumsList (list): Mapped atom numbers as \":1\", \":2\" etc.\n\n Returns:\n bool: True - All mapped atoms present in pattern; Otherwise, false.\n\n \"\"\"\n MatchedMappedAtoms = re.findall(\":[0-9]\", SMARTS, re.I)\n if len(MatchedMappedAtoms) == 0:\n return False\n\n # Check for the presence of specified mapped atoms in pattern...\n for MapAtomNum in MappedAtomNumsList:\n if MapAtomNum not in MatchedMappedAtoms:\n return False\n \n return True\n\ndef IsSpecificHierarchyClass(TorsionLibraryInfo, HierarchyClass):\n \"\"\"Check whether it's a specific hierarchy class.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n HierarchyClass (str): Hierarchy class name.\n\n Returns:\n bool: True - A valid hierarchy class name; Otherwise, false.\n\n \"\"\"\n return True if HierarchyClass in TorsionLibraryInfo[\"SpecificClasses\"][\"ElementNode\"] else False\n\ndef GetGenericHierarchyClassElementNode(TorsionLibraryInfo):\n \"\"\"Get generic hierarchy class element node.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n\n Returns:\n object: Generic hierarchy class element node in torsion library XML\n tree.\n\n \"\"\"\n return TorsionLibraryInfo[\"GenericClassElementNode\"]\n\ndef TrackHierarchyClassElementNode(TorsionLibraryInfo, ElementNode):\n \"\"\"Track hierarchy class element node using a stack.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n ElementNode (object): Hierarchy class element node in torsion library\n XML tree. \n\n Returns:\n Nothing.
The torsion library info is updated.\n\n \"\"\"\n TorsionLibraryInfo[\"HierarchyClassNodes\"].append(ElementNode)\n\ndef RemoveLastHierarchyClassElementNodeFromTracking(TorsionLibraryInfo):\n \"\"\"Remove last hierarchy class element node from tracking by removing it\n from a stack.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n\n Returns:\n Nothing. The torsion library info is updated.\n\n \"\"\"\n TorsionLibraryInfo[\"HierarchyClassNodes\"].pop()\n\ndef TrackHierarchySubClassElementNode(TorsionLibraryInfo, ElementNode):\n \"\"\"Track hierarchy sub class element node using a stack.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n ElementNode (object): Hierarchy sub class element node in torsion\n library XML tree. \n\n Returns:\n Nothing. The torsion library info is updated.\n\n \"\"\"\n TorsionLibraryInfo[\"HierarchySubClassNodes\"].append(ElementNode)\n\ndef RemoveLastHierarchySubClassElementNodeFromTracking(TorsionLibraryInfo):\n \"\"\"Remove last hierarchy sub class element node from tracking by removing it\n from a stack.\n \n Arguments:\n TorsionLibraryInfo (dict): A dictionary containing information for\n matching rotatable bonds.\n\n Returns:\n Nothing. The torsion library info is updated.\n\n \"\"\"\n TorsionLibraryInfo[\"HierarchySubClassNodes\"].pop()\n\ndef CalculateTorsionAngleDifference(TorsionAngle1, TorsionAngle2):\n \"\"\"Calculate torsion angle difference in the range from 0 to 180.\n \n Arguments:\n TorsionAngle1 (float): First torsion angle.\n TorsionAngle2 (float): Second torsion angle.\n\n Returns:\n float: Difference between first and second torsion angle.\n\n \"\"\"\n\n # Map angles to 0 to 360 range...\n if TorsionAngle1 < 0:\n TorsionAngle1 = TorsionAngle1 + 360\n if TorsionAngle2 < 0:\n TorsionAngle2 = TorsionAngle2 + 360\n\n # Calculate the angle difference and map it into the 0 to 180 range...\n TorsionAngleDiff = abs(TorsionAngle1 - TorsionAngle2)\n if TorsionAngleDiff > 180.0:\n TorsionAngleDiff = abs(TorsionAngleDiff - 360)\n\n return TorsionAngleDiff\n","repo_name":"UnixJunkie/mayachemtools","sub_path":"lib/Python/TorsionLibraryUtil.py","file_name":"TorsionLibraryUtil.py","file_ext":"py","file_size_in_byte":29282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
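The wraparound branch in CalculateTorsionAngleDifference at the end of the record above is easiest to check with angles on either side of the ±180 seam. A minimal standalone restatement for illustration; the plain-Python function below is a sketch, not part of the record:

def torsion_angle_difference(angle1, angle2):
    # Map both angles into the 0 to 360 range, as the library function does.
    if angle1 < 0:
        angle1 += 360
    if angle2 < 0:
        angle2 += 360
    diff = abs(angle1 - angle2)
    # Fold differences above 180 back, so -170 vs 170 gives 20, not 340.
    return abs(diff - 360) if diff > 180.0 else diff

assert torsion_angle_difference(-170, 170) == 20
assert torsion_angle_difference(10, 350) == 20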
+{"seq_id":"37973591203","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport hashlib\r\n\r\nimport appier\r\n\r\nfrom . import base\r\n\r\nclass Entity(base.CrosslineBase):\r\n    \"\"\"\r\n    Top level class that describes an element that can be\r\n    identified under the system as an entity.\r\n\r\n    Can be used for operations of control, authentication, etc.\r\n    \"\"\"\r\n\r\n    identifier = appier.field(\r\n        index = \"hashed\",\r\n        default = True,\r\n        immutable = True\r\n    )\r\n\r\n    key = appier.field(\r\n        index = \"hashed\",\r\n        immutable = True\r\n    )\r\n\r\n    @classmethod\r\n    def validate(cls):\r\n        return super(Entity, cls).validate() + [\r\n            appier.not_null(\"identifier\"),\r\n            appier.not_empty(\"identifier\"),\r\n            appier.is_lower(\"identifier\"),\r\n            appier.string_gt(\"identifier\", 3),\r\n            appier.string_lt(\"identifier\", 64),\r\n            appier.not_duplicate(\"identifier\", cls._name()),\r\n        ]\r\n\r\n    @classmethod\r\n    def list_names(cls):\r\n        return [\"identifier\", \"key\", \"app\"]\r\n\r\n    @classmethod\r\n    def get_by_id(cls, identifier, app = None):\r\n        return cls.get(identifier = identifier, app = app)\r\n\r\n    @classmethod\r\n    @appier.operation(\r\n        name = \"Verify\",\r\n        parameters = (\r\n            (\"Identifier\", \"identifier\", str),\r\n            (\"Key\", \"key\", str),\r\n            (\"App\", \"app\", str, None)\r\n        )\r\n    )\r\n    def verify_g(cls, identifier, key, app = None):\r\n        entity = cls.get(identifier = identifier, app = app)\r\n        appier.verify(\r\n            entity.key == key,\r\n            message = \"Mismatch in key\"\r\n        )\r\n\r\n    @classmethod\r\n    @appier.operation(\r\n        name = \"Create\",\r\n        parameters = ((\"Identifier\", \"identifier\", str),),\r\n        factory = True\r\n    )\r\n    def create_s(cls, identifier):\r\n        entity = cls(identifier = identifier)\r\n        entity.save()\r\n        return entity\r\n\r\n    @classmethod\r\n    def _plural(cls):\r\n        return \"Entities\"\r\n\r\n    def pre_create(self):\r\n        base.CrosslineBase.pre_create(self)\r\n        self.key = self.secret(hash = hashlib.md5)\r\n\r\n    @appier.operation(\r\n        name = \"Set PicaPonto.pt\",\r\n        parameters = (\r\n            (\"Code\", \"code\", int),\r\n            (\"Secret\", \"secret\", int)\r\n        )\r\n    )\r\n    def set_pica_s(self, code, secret):\r\n        self.meta[\"pica:codigo\"] = code\r\n        self.meta[\"pica:senha\"] = secret\r\n        self.save()\r\n\r\n    @appier.view(name = \"Enter Actions\")\r\n    def enter_actions_v(self, *args, **kwargs):\r\n        from .actions import enter\r\n        kwargs[\"sort\"] = kwargs.get(\"sort\", [(\"timestamp\", -1)])\r\n        kwargs.update(entity = self.identifier)\r\n        return appier.lazy_dict(\r\n            model = enter.EnterAction,\r\n            kwargs = kwargs,\r\n            entities = appier.lazy(lambda: enter.EnterAction.find(*args, **kwargs)),\r\n            page = appier.lazy(lambda: enter.EnterAction.paginate(*args, **kwargs))\r\n        )\r\n","repo_name":"hivesolutions/crossline","sub_path":"src/crossline/models/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10720789633","text":"class Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        start, end = 0, len(matrix) * len(matrix[0]) - 1\n        while start <= end:\n            mid = start + (end - start) // 2\n            val = matrix[mid // len(matrix[0])][mid % len(matrix[0])]\n            if val == target:\n                return True\n            elif val > target:\n                end = mid - 1\n            else:\n                start = mid + 1\n        return False","repo_name":"gkamboj/LeetCode","sub_path":"0074-search-a-2d-matrix/74-search-a-2d-matrix.py","file_name":"74-search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
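The searchMatrix record above rests on one index trick: a row-major matrix whose sorted rows chain together behaves as a single sorted list, so a flat index mid maps to row mid // cols and column mid % cols. A minimal sketch of that mapping; the function name and sample matrix are illustrative, not from the dataset:

def search_matrix(matrix, target):
    rows, cols = len(matrix), len(matrix[0])
    start, end = 0, rows * cols - 1
    while start <= end:
        mid = start + (end - start) // 2
        value = matrix[mid // cols][mid % cols]  # flat index -> (row, column)
        if value == target:
            return True
        if value > target:
            end = mid - 1
        else:
            start = mid + 1
    return False

assert search_matrix([[1, 3, 5, 7], [10, 11, 16, 20]], 16) is True
assert search_matrix([[1, 3, 5, 7], [10, 11, 16, 20]], 13) is False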
+{"seq_id":"22996615390","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def printList(self):\n while (self):\n print(self.val)\n self = self.next\n\nclass Solution(object):\n # Time Complexity : O(N)\n # Space Complexity : O(1)\n def mergeTwoLists(self, l1, l2):\n prehead = ListNode(-1)\n prev = prehead\n while l1 and l2:\n if l1.val <= l2.val:\n prev.next = l1\n l1 = l1.next\n else:\n prev.next = l2\n l2=l2.next\n prev = prev.next\n\n prev.next = l1 if l1 else l2\n return prehead.next\n\nl1 = ListNode(1)\nl1.next = ListNode(2)\nl1.next.next = ListNode(4)\n\nl2 = ListNode(1)\nl2.next = ListNode(3)\nl2.next.next = ListNode(4)\n\nX = Solution()\nanswer1 = X.mergeTwoLists(l1, l2)\nanswer1.printList()\n","repo_name":"anugrah18/Leetcode_solutions","sub_path":"Linked List/21-mergedTwoSortedList.py","file_name":"21-mergedTwoSortedList.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9960719307","text":"from __future__ import annotations\n\nfrom typing import Any, List, Optional\n\nimport sqlite3\n\nfrom .datastore import DataStore, RaiseType\nfrom .record import Record\n\n\nclass SQLite(DataStore):\n \"\"\"Data store backed in SQLite 3\"\"\"\n\n conn: sqlite3.Connection\n cursor: sqlite3.Cursor\n\n def __init__(self, file_name: str):\n \"\"\"Sets up the data store\"\"\"\n\n super().__init__()\n\n self.conn = sqlite3.connect(file_name)\n self.conn.row_factory = sqlite3.Row\n self.cursor = self.conn.cursor()\n self.cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS hopes (\n name text UNIQUE,\n added_by text,\n added_from text,\n added timestamp DEFAULT CURRENT_TIMESTAMP,\n approved bool\n )\n \"\"\"\n )\n self.conn.commit()\n\n def _write_append(self, record: Record) -> Optional[bool]:\n \"\"\"Append a record to the underlying data store this type implements.\n\n This function may be a no-op method, in which case it MUST return None.\n Otherwise, it should return if the write succeeded.\n\n Values passed to this function SHOULD NOT exist in the store already,\n so the implement does not need to consider de-duplication.\n \"\"\"\n\n self.cursor.execute(\n \"INSERT OR IGNORE INTO hopes VALUES (?,?,?,?,?)\",\n (record.name, record.added_by, record.added_from, record.added, record.approved),\n )\n self.conn.commit()\n\n return True\n\n def random(self) -> Record:\n \"\"\"Selects a random element from this store.\"\"\"\n\n self.cursor.execute(\n \"SELECT * FROM hopes WHERE approved = true ORDER BY RANDOM() LIMIT 1\"\n )\n\n record = Record(**self.cursor.fetchone())\n self.seen.add(record.name)\n\n return record\n\n def __len__(self) -> int:\n self.cursor.execute(\"SELECT COUNT(0) FROM hopes WHERE approved == true\")\n\n return int(self.cursor.fetchone()[0])\n\n def _write_list(self, _: Optional[List[Record]]) -> Optional[bool]:\n return None\n\n def __exit__(\n self, exception_type: RaiseType, message: Any, traceback: Any\n ) -> Optional[bool]:\n self.conn.commit()\n self.conn.close()\n\n return super().__exit__(exception_type, message, traceback)\n","repo_name":"javajawa/eorzeas-only-hope","sub_path":"src/eorzea/storage/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11009232881","text":"import sqlalchemy as sql\n\nfrom deeplearning.ml4pl.graphs.unlabelled 
import unlabelled_graph_database\nfrom deeplearning.ml4pl.testing import (\n random_unlabelled_graph_database_generator,\n)\nfrom deeplearning.ml4pl.testing import testing_databases\nfrom labm8.py import decorators\nfrom labm8.py import test\n\n\nFLAGS = test.FLAGS\n\n\n@decorators.loop_for(seconds=2)\n@test.Parametrize(\"node_x_dimensionality\", (1, 2))\n@test.Parametrize(\"node_y_dimensionality\", (0, 1, 2))\n@test.Parametrize(\"graph_x_dimensionality\", (0, 1, 2))\n@test.Parametrize(\"graph_y_dimensionality\", (0, 1, 2))\ndef test_CreateRandomProgramGraph(\n node_x_dimensionality: int,\n node_y_dimensionality: int,\n graph_x_dimensionality: int,\n graph_y_dimensionality: int,\n):\n \"\"\"Black-box test of generator properties.\"\"\"\n program_graph = random_unlabelled_graph_database_generator.CreateRandomProgramGraph(\n node_x_dimensionality=node_x_dimensionality,\n node_y_dimensionality=node_y_dimensionality,\n graph_x_dimensionality=graph_x_dimensionality,\n graph_y_dimensionality=graph_y_dimensionality,\n )\n assert program_graph.node_x_dimensionality == node_x_dimensionality\n assert program_graph.node_y_dimensionality == node_y_dimensionality\n assert program_graph.graph_x_dimensionality == graph_x_dimensionality\n assert program_graph.graph_y_dimensionality == graph_y_dimensionality\n\n\n@test.Fixture(\n scope=\"function\",\n params=testing_databases.GetDatabaseUrls(),\n namer=testing_databases.DatabaseUrlNamer(\"db\"),\n)\ndef db(request) -> unlabelled_graph_database.Database:\n \"\"\"A test fixture which yields an empty graph proto database.\"\"\"\n yield from testing_databases.YieldDatabase(\n unlabelled_graph_database.Database, request.param\n )\n\n\n@test.Fixture(scope=\"function\", params=(1, 1000, 5000))\ndef proto_count(request) -> int:\n \"\"\"Test fixture to enumerate proto counts.\"\"\"\n return request.param\n\n\n@test.Fixture(scope=\"function\", params=(1, 3))\ndef node_x_dimensionality(request) -> int:\n \"\"\"Test fixture to enumerate node feature dimensionalities.\"\"\"\n return request.param\n\n\n@test.Fixture(scope=\"function\", params=(0, 3))\ndef node_y_dimensionality(request) -> int:\n \"\"\"Test fixture to enumerate node label dimensionalities.\"\"\"\n return request.param\n\n\n@test.Fixture(scope=\"function\", params=(0, 3))\ndef graph_x_dimensionality(request) -> int:\n \"\"\"Test fixture to enumerate graph feature dimensionalities.\"\"\"\n return request.param\n\n\n@test.Fixture(scope=\"function\", params=(0, 3))\ndef graph_y_dimensionality(request) -> int:\n \"\"\"Test fixture to enumerate graph label dimensionalities.\"\"\"\n return request.param\n\n\n@test.Fixture(scope=\"function\", params=(0, 2))\ndef split_count(request) -> int:\n \"\"\"Test fixture to enumerate split counts.\"\"\"\n return request.param\n\n\ndef test_PopulateDatabaseWithRandomProgramGraphs(\n db: unlabelled_graph_database.Database,\n proto_count: int,\n node_x_dimensionality: int,\n node_y_dimensionality: int,\n graph_x_dimensionality: int,\n graph_y_dimensionality: int,\n split_count: int,\n):\n \"\"\"Test populating databases.\"\"\"\n random_unlabelled_graph_database_generator.PopulateDatabaseWithRandomProgramGraphs(\n db=db,\n proto_count=proto_count,\n node_x_dimensionality=node_x_dimensionality,\n node_y_dimensionality=node_y_dimensionality,\n graph_x_dimensionality=graph_x_dimensionality,\n graph_y_dimensionality=graph_y_dimensionality,\n split_count=split_count,\n )\n with db.Session() as session:\n assert (\n session.query(\n 
sql.func.count(unlabelled_graph_database.ProgramGraph.ir_id)\n      ).scalar()\n      == proto_count\n    )\n\n    assert (\n      session.query(\n        sql.func.min(\n          unlabelled_graph_database.ProgramGraph.node_x_dimensionality\n        )\n      ).scalar()\n      == node_x_dimensionality\n    )\n\n    assert (\n      session.query(\n        sql.func.min(\n          unlabelled_graph_database.ProgramGraph.node_y_dimensionality\n        )\n      ).scalar()\n      == node_y_dimensionality\n    )\n\n    assert (\n      session.query(\n        sql.func.min(\n          unlabelled_graph_database.ProgramGraph.graph_x_dimensionality\n        )\n      ).scalar()\n      == graph_x_dimensionality\n    )\n\n    assert (\n      session.query(\n        sql.func.min(\n          unlabelled_graph_database.ProgramGraph.graph_y_dimensionality\n        )\n      ).scalar()\n      == graph_y_dimensionality\n    )\n\n\ndef test_benchmark_CreateRandomProgramGraph(benchmark):\n  \"\"\"Benchmark graph tuple generation.\"\"\"\n  benchmark(random_unlabelled_graph_database_generator.CreateRandomProgramGraph)\n\n\nif __name__ == \"__main__\":\n  test.Main()\n","repo_name":"ChrisCummins/phd","sub_path":"deeplearning/ml4pl/testing/random_unlabelled_graph_database_generator_test.py","file_name":"random_unlabelled_graph_database_generator_test.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"19322538070","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import ObjectProperty\nimport socket\nimport json\nimport threading\n\n\nTCP_IP = 'cheellex.com'\nTCP_PORT = 5010\nBUFFER_SIZE = 1024\nMESSAGE = json.dumps((123, 0, 0))\n\n\n\nclass SclMain(BoxLayout):\n    r = ObjectProperty(None)\n    g = ObjectProperty(None)\n    b = ObjectProperty(None)\n\n    def send_rgb(self):\n        t = threading.Thread(\n            target=self.send_rgb_to_tcp,\n            args=())\n        t.daemon = True\n        t.start()\n\n    def send_rgb_to_tcp(self):\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.connect((TCP_IP, TCP_PORT))\n\n        try:\n            s.send(\n                json.dumps((\n                    int(self.r.value),\n                    int(self.g.value),\n                    int(self.b.value),\n                )))\n        except ValueError:\n            pass\n        finally:\n            s.close()\n\n\nclass SclApp(App):\n    def build(self):\n        return SclMain()\n\n\nif __name__ == '__main__':\n    SclApp().run()\n","repo_name":"WnP/flask_kivy_async","sub_path":"kivy_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
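The Kivy client above writes each slider state to a raw TCP socket as a bare JSON-encoded (r, g, b) tuple. For context, a hypothetical receiving end could decode it as sketched below; the host, port, and buffer size simply mirror the client's constants and are assumptions, not part of the dataset:

import json
import socket

def receive_rgb(host="0.0.0.0", port=5010, buffer_size=1024):
    # Accept one connection and decode a single JSON payload, matching the
    # client's json.dumps((r, g, b)) message format.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((host, port))
        server.listen(1)
        conn, _addr = server.accept()
        with conn:
            payload = conn.recv(buffer_size)
    r, g, b = json.loads(payload)
    return r, g, b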
+{"seq_id":"16370101525","text":"import redis\r\n\r\n\r\n# Take a list of strings and return them joined into a single '%'-separated string\r\ndef makestr(wordlist):\r\n    enstr = ''\r\n    for word in wordlist:\r\n        enstr += word\r\n        enstr += \"%\"\r\n    return enstr[:-1]\r\n\r\n\r\n# Fetch the value stored under the given key from Redis\r\ndef Get_str_from_redis(key, host='127.0.0.1', port='6379', db=0):\r\n    red = redis.Redis(host=host, port=port, db=db)\r\n    return red.get(key).decode('utf-8')\r\n\r\n\r\n# Global options object; defines the browser and the translation API URLs\r\nclass General_Option():\r\n    option_dic={}\r\n    def __init__(self):\r\n        opr=redis.Redis(host='127.0.0.1',db=5)\r\n        keys=opr.keys()\r\n        self.option_dic={}\r\n        for key in keys:\r\n            value=opr.get(key)\r\n            self.option_dic[key.decode('utf-8')]=value.decode('utf-8')\r\n        # Called without a domain for translation; with a domain for scraping\r\n        if 'domain' in self.option_dic.keys():\r\n            self.domain = self.option_dic['domain']\r\n        # The following are required options; if the corresponding key-value pairs exist in db4,\r\n        # initialization reads them directly; otherwise the default APIs are used\r\n        if \"scrap_api\" in self.option_dic.keys():\r\n            self.scrap_api = self.option_dic['scrap_api']\r\n        else:\r\n            self.scrap_api=\"bing\"\r\n            opr.set(\"scrap_api\",\"bing\")\r\n        if \"translate_api\" in self.option_dic.keys():\r\n            self.translate_api = self.option_dic['translate_api']\r\n        else:\r\n            self.translate_api=\"baidu\"\r\n            opr.set(\"translate_api\",\"baidu\")\r\n        if \"label_api\" in self.option_dic.keys():\r\n            self.label_api=self.option_dic['label_api']\r\n        else:\r\n            self.label_api='baidu'\r\n            opr.set('label_api',\"baidu\")\r\n    # Returns a dict of parameters, including the URL to scrape and the class names of the elements to search for\r\n    def Get_scrap_dic(self):\r\n        retdic = {}\r\n        if self.scrap_api == 'bing':\r\n            retdic['url'] = r'https://cn.bing.com/search?q=%2B' + self.domain + r'&ensearch=1'\r\n            retdic['stop_element'] = 'b_footerItems_icp' # element type whose presence ends the wait for page loading\r\n            retdic['scrap_element'] = 'b_caption' # element type the program scrapes\r\n\r\n            return retdic\r\n\r\n        if self.scrap_api == 'baidu':\r\n            retdic['url']=r'https://www.baidu.com/s?&wd=inurl%3A'+self.domain\r\n            retdic['stop_element']='pc'\r\n            retdic['scrap_element']='c-abstract c-abstract-en'\r\n\r\n            return retdic\r\n        # To add another API, just add an if-else branch here that returns a dict with these three key element types\r\n\r\n        else:\r\n            print('Invalid option passed in; please check the API option settings')\r\n            exit(1)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    print(Get_str_from_redis(key='https://www.csdn.net/', db=1))\r\n","repo_name":"yangliu322/domain","sub_path":"general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30554329103","text":"#write a function to calculate the interest of a bank account\n#parameters:\n#   - Balance: the current balance of the account\n#   - month: the number of the month\n#       if month < 6, the interest rate is 0.5%\n#       if month >=6, the interest rate is 0.8%\n# return the new balance (old balance + interest )\n\n\n\nbalance = float(input('Enter the balance: ')) \nmonth = int(input('Enter the month: '))\ndef interest_rate(balance):\n    if month < 6:\n        new_balance = balance * (0.5 /100) * month\n    if month >=6:\n        new_balance = balance * (0.8 /100) * month\n    new_balance = new_balance + balance\n    return new_balance\nprint(\"New Balance: \", interest_rate(balance))\n\n\n\n\n\nbalance = float(input('Enter the balance: '))\nmonth = int(input('Enter the month: '))\n\ndef interest_rate(balance):\n    new_balance = balance * rate * month\n    new_balance = new_balance + balance\n    return new_balance\nif month < 6: \n    rate = 0.5 / 100\nelse: rate = 0.8 / 100\nprint(\"New Balance: \", interest_rate(balance))","repo_name":"hoangvhgch220975/VHHoang","sub_path":"week5/ex_on_class2.py","file_name":"ex_on_class2.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
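With the rates from the exercise statement above (0.5% per month below six months, 0.8% from six months on), the simple-interest arithmetic can be checked as follows; the function and sample values are illustrative only:

def new_balance(balance, month):
    # Simple interest: balance * rate * month, added back onto the balance.
    rate = 0.5 / 100 if month < 6 else 0.8 / 100
    return balance + balance * rate * month

assert abs(new_balance(1000.0, 3) - 1015.0) < 1e-9   # 1000 * 0.005 * 3 = 15
assert abs(new_balance(1000.0, 6) - 1048.0) < 1e-9   # 1000 * 0.008 * 6 = 48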
this","repo_name":"Llyme/ADVDISC-MP","sub_path":"assets/api/SenPy/kouhai/TextBox.py","file_name":"TextBox.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23907137208","text":"#Aluno: Deivison rodrigues jordao\r\n#juntar dois vetores intercalando seus indices\r\n\r\n#Definicao dos vetores:\r\n\r\nvetor1 = [None] * 5\r\n\r\nvetor2 = [None] * 5\r\n\r\n#Preenchimento\r\nfor i in range(0,5):\r\n vetor1[i] = (input(\"Digite um numero para o vetor1: \"))\r\n\r\nfor i in range(0,5):\r\n vetor2[i] = (input(\"Digite um numero para o vetor2: \"))\r\n\r\nvetor3 = [vetor1[0],vetor2[0],vetor1[1],vetor2[1],vetor1[2],vetor2[2],vetor1[3],vetor2[3],vetor1[4],vetor2[4]]\r\n\r\n#Saida\r\nprint()\r\nprint(vetor3)","repo_name":"deivisongithub/intro-a-programacao","sub_path":"lista 7/Questão1.py","file_name":"Questão1.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13575503647","text":"#!/usr/bin/env python\n\"\"\"\nThis script show how one can build on TAMASIS and use home-made\nestimation tools.\n\"\"\"\n# Imports\n# --------\nimport os # to handle files\nimport numpy as np # numerical computations\nfrom copy import copy # copy of objects\nfrom tamasis import * # import everything from tamasis\nimport lo # my Linear Operators\n\n# loading data\n# -------------\nfilename = os.path.join(os.getenv('HOME'), 'data', 'pacs', 'frames_blue.fits')\n# define a PacsObservation from data fits file\n# this is level 1 data in HIPE format\npacs = PacsObservation(filename=filename, policy_bad_detector=\"keep\")\n# this method actually loads the data in memory as a Tod\n# which is an ndarray subclass\ntod = pacs.get_tod()\n\n# Generating acquisition model\n# -----------------------------\n# projector\nprojection = Projection(pacs, # the PacsObseravtion object\n resolution=3.2, # resolution of the sky map in arcsec\n oversampling=False)\n# backprojection\nbackmap = projection.T(tod)\n# coverage map\ncoverage = projection.T(np.ones(tod.shape))\n# naive map\nnaive = backmap / coverage\n# mask according to coverage (everything that is covered by less than 10.)\nmask = Masking(coverage < 10.)\n# The model is the masking of the sky map then the projection\n# This is basically matrix multiplication\nmodel = projection * mask\n\n# Performing inversion\n# ---------------------\n\n# with TAMASIS\nx_tm = mapper_rls(tod, model, hyper=1e-1, tol=1e-10, maxiter=100)\n\n# with lo routines\n# transform to lo\nH = lo.aslinearoperator(model * mask)\n# smoothness priors\nDs = [lo.diff(backmap.shape, axis=axis) for axis in (0, 1)]\n# inversion\ny = tod.ravel() # requires 1d input\nx_lo = lo.acg(H, y, Ds, 1e-1 * np.ones(3), tol=1e-10, maxiter=100)\nx_lo.resize(backmap.shape) # output is 1d so need reshaping\n\n# with sparsity assumptions (using Huber algorithm)\nx_h = lo.hacg(H, y, Ds, 1e1 * np.ones(3),\n np.asarray((None, 1e-6, 1e-6, 1e-6)),\n x0=x_lo.flatten(), tol=1e-7, maxiter=200)\nx_h.resize(backmap.shape)\n","repo_name":"nbarbey/csh","sub_path":"tests/test_tamasis_huber.py","file_name":"test_tamasis_huber.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"7710520775","text":"from flask import request\nfrom flask_restful import abort, Resource\nfrom marshmallow.exceptions import ValidationError\n\nfrom .schema import 
UserSchema\nfrom app.models.user import User\n\n\nclass UsersResource(Resource):\n\n schema = UserSchema\n\n def post(self):\n try:\n self.schema().validate(request.get_json())\n except ValidationError:\n raise\n\n props = request.get_json()['data']['attributes']\n\n existing = User.query.filter(User.email == props['email']).count()\n\n if existing > 0:\n abort(409, message=\"{} exists already\".format(props['email']))\n\n if 'password' not in props:\n abort(400, message=\"no password provided\")\n\n user = User.create(**props)\n\n return self.schema().dump(user)\n\n def get(self, user_id=None):\n if user_id is not None:\n return self._get_single(user_id)\n else:\n return self._get_list()\n\n def _get_single(self, user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n\n if user is None:\n abort(404)\n\n return self.schema().dump(user)\n\n def _get_list(self):\n\n page = int(request.args.get('p', 1))\n\n query = User.create_active_users_query()\n\n result = query.paginate(page, per_page=10, error_out=False)\n\n return self.schema().dump(result.items, many=True)\n\n def put(self, user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n\n if user is None:\n abort(404)\n\n try:\n self.schema().validate(request.get_json())\n except ValidationError:\n raise\n\n props = request.get_json()['data']['attributes']\n\n user.update(**props)\n user.save()\n\n return self.schema().dump(user)\n\n def delete(self, user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n\n if user is None:\n abort(404)\n\n user.delete()\n user.save()\n\n return {}\n","repo_name":"kamikaz1k/cravelist-api","sub_path":"app/resources/users/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41606366119","text":"import functools\nimport os\nimport warnings\n\nimport embodied\n\nfrom . 
import dmc\n\n\nclass LocoNav(embodied.Env):\n\n DEFAULT_CAMERAS = dict(\n ant_trivial=4,\n ant_umaze=4,\n )\n\n def __init__(self, name, repeat=1, size=(64, 64), camera=-1):\n # TODO: This env variable is necessary when running on a headless GPU but\n # breaks when running on a CPU machine.\n if 'MUJOCO_GL' not in os.environ:\n os.environ['MUJOCO_GL'] = 'egl'\n from dm_control import composer\n from dm_control.locomotion.props import target_sphere\n from dm_control.locomotion.tasks import random_goal_maze\n if camera == -1:\n camera = self.DEFAULT_CAMERAS.get(name, 0)\n walker, arena = name.split('_')\n walker = self._make_walker(walker)\n arena = self._make_arena(arena)\n target = target_sphere.TargetSphere(radius=1.0, height_above_ground=0.0)\n task = random_goal_maze.RepeatSingleGoalMaze(\n walker=walker, maze_arena=arena, target=target, max_repeats=1,\n randomize_spawn_rotation=False, target_reward_scale=1.,\n physics_timestep=0.005, control_timestep=0.02)\n def after_step(self, physics, random_state):\n super(random_goal_maze.RepeatSingleGoalMaze, self).after_step(\n physics, random_state)\n self._rewarded_this_step = self._target.activated\n self._targets_obtained = int(self._target.activated)\n task.after_step = functools.partial(after_step, task)\n env = composer.Environment(\n time_limit=30, task=task, random_state=None,\n strip_singleton_obs_buffer_dim=True)\n self._env = dmc.DMC(env, repeat, size, camera)\n\n @property\n def obs_space(self):\n return self._env.obs_space\n\n @property\n def act_space(self):\n return self._env.act_space\n\n def step(self, action):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', '.*is a deprecated alias for.*')\n return self._env.step(action)\n\n def _make_walker(self, name):\n from dm_control.locomotion.walkers import ant\n if name == 'ant':\n return ant.Ant()\n # observable_options={'egocentric_camera': {'enabled': False}})\n else:\n raise NotImplementedError(name)\n\n def _make_arena(self, name):\n import labmaze\n from dm_control.locomotion.arenas import mazes\n if name == 'umaze':\n maze = labmaze.FixedMazeWithRandomGoals(\n entity_layer=UMAZE, num_spawns=1, num_objects=1,\n random_state=None)\n arena = mazes.MazeWithTargets(\n maze, xy_scale=1.2, z_height=2.0, aesthetic='default', name='maze')\n return arena\n elif name == 'trivial':\n maze = labmaze.FixedMazeWithRandomGoals(\n entity_layer=TRIVIAL, num_spawns=1, num_objects=1,\n random_state=None)\n arena = mazes.MazeWithTargets(\n maze, xy_scale=1.2, z_height=2.0, aesthetic='default', name='maze')\n return arena\n else:\n raise NotImplementedError(name)\n\n\nTRIVIAL = \"\"\"\n***********\n* *\n* *\n* *\n******* *\n******* *\n******* *\n* *\n* P G *\n* *\n***********\n\"\"\"[1:]\n\n\nUMAZE = \"\"\"\n***********\n* *\n* G *\n* *\n******* *\n******* *\n******* *\n* *\n* P *\n* *\n***********\n\"\"\"[1:]\n","repo_name":"danijar/daydreamer","sub_path":"embodied/envs/loconav.py","file_name":"loconav.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"67"} +{"seq_id":"41875559876","text":"from datetime import datetime\nfrom django.http import FileResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom User.models import LyfUser\nfrom .models import DiaryEntry\nfrom .serializers import DiaryEntrySerializer\nfrom .utils import 
DiaryGenerator, TxtGenerator\n\nimport io\n\n\n# Create your views here.\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getAllDiaries(request, userId):\n entries = DiaryEntry.objects.getEntries(userId)\n data = [entry.as_dict for entry in entries]\n return Response(data)\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getAllPDFs(request, userId):\n diary = DiaryEntry.objects.getEntries(userId)\n\n file_buffer = io.BytesIO()\n file_buffer = DiaryGenerator.generateDiary(diary=diary, file_buffer=file_buffer)\n file_buffer.seek(0)\n\n return FileResponse(file_buffer, as_attachment=True, filename=f\"{diary[0]._user.username}_diary.pdf\")\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getAllTXTs(request, userId):\n diary = DiaryEntry.objects.getEntries(userId)\n\n file_buffer = io.BytesIO()\n file_buffer = TxtGenerator.generateDiaryTxt(diary, file_buffer)\n file_buffer.seek(0)\n\n return FileResponse(file_buffer, as_attachment=True, filename=f\"{diary[0]._user.username}_diary.txt\")\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getEntrybyId(request, userId, entryId):\n entry = DiaryEntry.objects.get_entry_by_id(entryId)\n data = entry.as_dict\n\n return Response(data)\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getPDFbyEntryId(request, userId, entryId, entryId2):\n entry = DiaryEntry.objects.get_entry_by_id(entryId)\n data = entry.as_dict\n\n file_buffer = io.BytesIO()\n file_buffer = DiaryGenerator.generateEntry(entry=entry, file_buffer=file_buffer)\n file_buffer.seek(0)\n\n file_buffers = io.BytesIO()\n TxtGenerator.generateEntryTxt(entry, file_buffers)\n\n return FileResponse(file_buffer, as_attachment=True, filename=f\"{data['_title']}.pdf\")\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef getTXTbyEntryId(request, userId, entryId, entryId2):\n entry = DiaryEntry.objects.get_entry_by_id(entryId)\n data = entry.as_dict\n\n file_buffer = io.BytesIO()\n file_buffer = TxtGenerator.generateEntryTxt(entry, file_buffer)\n file_buffer.seek(0)\n\n return FileResponse(file_buffer, as_attachment=True, filename=f\"{data['_title']}.txt\")\n\n\n@api_view([\"POST\"])\n@permission_classes([IsAuthenticated])\ndef createEntry(request, userId):\n data = request.data\n print(request.data)\n\n try:\n entry = DiaryEntry.objects.create(\n _user=LyfUser.objects.get_user_by_id(userId),\n _title=data['_title'],\n _description=data['_description'],\n _is_private=True if data['_is_private'] == 'true' else False,\n _created_on=datetime.fromisoformat(data['_createdAt']),\n _audioLink=data['_audioLink'] if data['_audioLink'] != \"null\" else None,\n _imageLinks=data['_imageLinks'] if data['_imageLinks'] != \"null\" else None,\n )\n\n return Response(\"E_CREATE_SUCCESS\", status=status.HTTP_200_OK)\n except Exception as e:\n return Response(str(e), status=status.HTTP_403_FORBIDDEN)\n\n\n@api_view([\"PUT\"])\n@permission_classes([IsAuthenticated])\ndef updateEntry(request, userId, entryId):\n data = request.data\n\n corrected_data = {\n '_user': userId,\n '_title': data[\"_title\"],\n '_description': data[\"_description\"],\n '_is_private': data[\"_is_private\"],\n '_created_on': data[\"_createdAt\"],\n '_audioLink': data[\"_audioLink\"],\n '_imageLinks': list(data['_imageLinks'][1:-1].replace(\",\", \"\").split(\" \")) if data[\n '_imageLinks'] != \"null\" else None,\n }\n print(corrected_data)\n\n entry = DiaryEntry.objects.get_entry_by_id(entryId)\n serializer = 
DiaryEntrySerializer(entry, data=corrected_data)\n if serializer.is_valid():\n serializer.save()\n return Response(\"E_PUT_SUCCESS\", status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_401_UNAUTHORIZED)\n\n\n@api_view([\"DELETE\"])\n@permission_classes([IsAuthenticated])\ndef deleteEntry(request, userId, entryId):\n entry = DiaryEntry.objects.get_entry_by_id(entryId)\n try:\n entry.delete()\n return Response(\"E_DEL_SUCCESS\", status=status.HTTP_200_OK)\n except Exception as e:\n return Response(str(e), status=status.HTTP_401_UNAUTHORIZED)\n","repo_name":"just-ary27/Lyf","sub_path":"lyfRest/diary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"71963506452","text":"\"\"\"\"\"\"\nimport socketserver\nimport threading\nfrom functools import partial\n\n'''\n#---------------------------------------------------------------------------------\n# Copyright (C) 2020, CELIS TTI BU.\n# The copyright to the computer program(s) herein is the property of TTI NORTE SL\n#\n# The program(s) may be used and/or copied only with the written permission\n# of CELIS TTI BU or in accordance with the terms and conditions stipulated in the\n# agreement/contract under which the program(s) have been supplied.\n#\n# This library provides a collection of functions for controlling the Keysight VSA 89601B that runs on a MXA platform.\n# These functions are aimed to simplify the high level test procedures where this instrument is involved within SGMA.\n#\n#Chiller's simulator\n# URL: $HeadURL: https://192.168.1.10:8443/svn/projects/trunk/escan/simulators/chiller/main.py $\n# Last commit author: $Author: pmonsalvete $\n# Revision: $Revision: 39 $\n# Date: $Date: 2022-11-17 11:23:08 -0100 (Thu, 17 Nov 2022) $\n# Module ID\n# $Id: main.py 39 2022-11-17 12:23:08Z pmonsalvete $\n#---------------------------------------------------------------------------------\n'''\n\nfrom libs.comunication import SoilMoistureTCPHandler\nfrom libs.files import readJson\nfrom libs.kafka_libs import read_last_message_tp, get_consumer, get_producer, send_msg\nfrom kafka import TopicPartition\n\n\nclass SoilMoisture:\n def __init__(self, file_cfg=None):\n self.soil_moisture = 30\n self.state = \"OFF\"\n self.init_kafka(file_cfg)\n self.create_dict_functions()\n\n def init_kafka(self, file_cfg):\n sim_data = readJson(file_cfg)\n self.topic = sim_data[\"kafka\"][\"topic\"]\n self.consumer = get_consumer([self.topic])\n self.num_partition = sim_data[\"kafka\"][\"partitions\"][\"soil_moisture\"]\n self.tp_soil_moisture = TopicPartition(self.topic, sim_data[\"kafka\"][\"partitions\"][\"soil_moisture\"])\n producer = get_producer()\n send_msg(producer, self.topic, self.soil_moisture, self.num_partition)\n\n def get_soil_moisture(self, **kwargs):\n self.soil_moisture = read_last_message_tp(self.consumer, self.tp_soil_moisture)\n return self.soil_moisture\n\n def set_state(self, **kwargs):\n self.state = kwargs[\"data\"]\n\n def get_state(self, **kwargs):\n return self.state\n\n def create_dict_functions(self):\n self.dict_functions_beam = {\n \"GET_MOIST\": partial(self.get_soil_moisture),\n \"GET_STATE\": partial(self.get_state), \"SET_STATE\": partial(self.set_state, data=None)}\n\n\n\nif __name__ == \"__main__\":\n server_data = readJson('config/soil_moisture_config.json')\n ip_address = server_data['server']['ip']\n port = server_data['server']['port']\n try:\n server = 
socketserver.TCPServer((ip_address, port), SoilMoistureTCPHandler)\n server.soil_moist = SoilMoisture(file_cfg='config/soil_moisture_config.json')\n server.allow_reuse_address = True\n print(\"Server created in IP {}\\tPORT {}\".format(ip_address, port))\n print(\"Server is waiting...\")\n server.serve_forever()\n while True:\n pass\n except KeyboardInterrupt:\n server.shutdown()\n print(\"Server closed\")\n except ConnectionResetError:\n print(\"Polling closed\")\n","repo_name":"atilioDambrosio/GreenHouse","sub_path":"simulators/simulators/soil_moisture/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23813560253","text":"import unittest \nfrom unittest import TestCase\nfrom unittest.mock import patch\nfrom peewee import Model, CharField, ForeignKeyField, DecimalField, BooleanField, Database, Check, IntegrityError, SqliteDatabase\n\nfrom database.config import db_path\ntest_db_path = 'test_trips.db'\ndb_path = test_db_path\n\nfrom database.model import Park, SavedTrip, Trip\nfrom database import database\n\nclass TestDatabase(TestCase):\n\n\n def setUp(self):\n # Remove existing data from test database and recreate tables\n self.db = SqliteDatabase(db_path)\n self.db.drop_tables([Park, Trip, SavedTrip])\n self.db.create_tables([Park, Trip, SavedTrip])\n\n\n def addTestParkData(self):\n Park.create(park_id = \"abcd\", park_name = \"park name 1\", park_city = \"park city 1\", park_state = \"park state 1\", park_description = \"park description 1\",\n latitude = 12.3456, longitude = 98.7654)\n Park.create(park_id = \"dcba\", park_name = \"park name 2\", park_city = \"park city 2\", park_state = \"park state 2\", park_description = \"park description 2\",\n latitude = 12.3556, longitude = 98.7554)\n Park.create(park_id = \"sdfs\", park_name = \"park name 3\", park_city = \"park city 3\", park_state = \"park state 1\", park_description = \"park description 3\",\n latitude = 12.3416, longitude = 98.7614)\n\n\n def addTestTripData(self):\n pass\n\n\n def test_save_parks_list(self):\n expected_parks_list = []\n park1 = Park(park_id = \"abcd\", park_name = \"park name 1\", park_city = \"park city 1\", park_state = \"park state 1\", park_description = \"park description 1\",\n latitude = 12.3456, longitude = 98.7654)\n park2 = Park(park_id = \"dcba\", park_name = \"park name 2\", park_city = \"park city 2\", park_state = \"park state 2\", park_description = \"park description 2\",\n latitude = 12.3556, longitude = 98.7554)\n park3 = Park(park_id = \"sdfs\", park_name = \"park name 3\", park_city = \"park city 3\", park_state = \"park state 3\", park_description = \"park description 3\",\n latitude = 12.3416, longitude = 98.7614)\n expected_parks_list.append(park1)\n expected_parks_list.append(park2)\n expected_parks_list.append(park3)\n\n database.save_parks_list(expected_parks_list)\n parks = Park.select().execute()\n parks_list = []\n for p in parks:\n parks_list.append(p)\n self.assertEqual(expected_parks_list, parks_list)\n\n\n def test_get_parks_by_state(self):\n self.addTestParkData()\n parks_list = []\n parks = database.get_parks_by_state(\"park state 1\")\n for p in parks:\n parks_list.append(p)\n self.assertEqual(len(parks_list), 2)\n\n\n def test_save_trip(self):\n park = Park.create(park_id = \"abcd\", park_name = \"park name 1\", park_city = \"park city 1\", park_state = \"park state 1\", park_description = \"park description 1\",\n latitude = 12.3456, 
longitude = 98.7654)\n        trip = SavedTrip(month = 5, park = park, image_1 = \"lsdkfjd\", image_2 = \"kdldk\", image_3 = \"sldkfj\", image_4 = \"sldkdl\", precipitation = 24, \n            avg_temp = 12.3, max_temp = 56.9, min_temp = 0.45)\n        database.save_trip(trip)\n        database_trip = SavedTrip.get_or_none(SavedTrip.image_1 == \"lsdkfjd\")\n        self.assertIsNotNone(database_trip)\n\n\n    def test_save_duplicate_trip(self):\n        park = Park.create(park_id = \"abcd\", park_name = \"park name 1\", park_city = \"park city 1\", park_state = \"park state 1\", park_description = \"park description 1\",\n            latitude = 12.3456, longitude = 98.7654)\n        trip = Trip(month = 5, park = park, image_1 = \"lsdkfjd\", image_2 = \"kdldk\", image_3 = \"sldkfj\", image_4 = \"sldkdl\", precipitation = 24, \n            avg_temp = 12.3, max_temp = 56.9, min_temp = 0.45)\n        database.save_trip(trip)\n        duplicate_trip = Trip(month = 5, park = park, image_1 = \"lsdkfjd\", image_2 = \"kdldk\", image_3 = \"sldkfj\", image_4 = \"sldkdl\", precipitation = 24, \n            avg_temp = 12.3, max_temp = 56.9, min_temp = 0.45)\n        database.save_trip(duplicate_trip)\n        saved_trips = SavedTrip.select().execute()\n        list_of_saved_trips = []\n        for t in saved_trips:\n            list_of_saved_trips.append(t)\n\n        self.assertEqual(len(list_of_saved_trips), 1)\n\n\n    def test_get_park_by_code(self):\n        Park.create(park_id = \"abcd\", park_name = \"park name 1\", park_city = \"park city 1\", park_state = \"park state 1\", park_description = \"park description 1\",\n            latitude = 12.3456, longitude = 98.7654)\n        park = database.get_park_by_code(\"abcd\")\n        self.assertIsNotNone(park)\n\n\n    def test_convert_month(self):\n        integer_month = 1\n        expected_month = \"JANUARY\"\n        actual_month = database._convert_month(integer_month)\n        self.assertEqual(expected_month, actual_month)\n\n\n\n","repo_name":"KelseyStiff/National-Parks-Trip-Planner","sub_path":"tests/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"27974489837","text":"from sklearn.model_selection import train_test_split #for splitting the data\nfrom sklearn.metrics import accuracy_score #for accuracy_score\nfrom sklearn.metrics import confusion_matrix #for the confusion matrix plots below\nfrom sklearn.model_selection import KFold #for K-fold cross validation\nfrom sklearn.model_selection import cross_val_score #score evaluation\nfrom sklearn.model_selection import cross_val_predict #prediction\nimport pandas as pd #for the model comparison table\nimport seaborn as sns #for plotting confusion matrices\nimport matplotlib.pyplot as plt #for plot titles\n\nX_train,X_test,y_train,y_test = train_test_split(x_data,y_data,test_size=0.3,random_state=42)\n\n# Logistic Regression\nfrom sklearn.linear_model import LogisticRegression \n\nmodel = LogisticRegression()\n#model.fit(X_train,y_train)\n#prediction_lr=model.predict(X_test)\n#print('The accuracy of the LR is', accuracy_score(prediction_lr,y_test))\nkfold = KFold(n_splits=10, random_state=22)\nresult_lr=cross_val_score(model,X_train,y_train,cv=kfold,scoring='accuracy') #if we have a small dataset we can perform CV on the entire dataset and not only on the training dataset\nprint('The cross validated score for LR is:', result_lr.mean())\ny_pred = cross_val_predict(model,X_test,y_test,cv=kfold)\n\n# Random Forests\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier(criterion='gini', n_estimators=700,\n                             min_samples_split=10,min_samples_leaf=1,\n                             max_features='auto',oob_score=True,\n                             random_state=1,n_jobs=-1)\nmodel.fit(X_train,y_train)\nprediction_rm=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the 
Random Forest Classifier is',round(accuracy_score(prediction_rm,y_test)*100,2))\n# plotting and metrics imports used below\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts; random_state requires shuffle=True\nresult_rm=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for Random Forest Classifier is:',round(result_rm.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\n# Support Vector Machines\nfrom sklearn.svm import SVC, LinearSVC\n\nmodel = SVC()\nmodel.fit(X_train,y_train)\nprediction_svm=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the Support Vector Machines Classifier is',round(accuracy_score(prediction_svm,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_svm=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for Support Vector Machines Classifier is:',round(result_svm.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\n##knn\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nmodel = KNeighborsClassifier(n_neighbors = 4)\nmodel.fit(X_train,y_train)\nprediction_knn=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the K Nearest Neighbors Classifier is',round(accuracy_score(prediction_knn,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_knn=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for K Nearest Neighbors Classifier is:',round(result_knn.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\n# Gaussian Naive Bayes\nfrom sklearn.naive_bayes import GaussianNB\nmodel= GaussianNB()\nmodel.fit(X_train,y_train)\nprediction_gnb=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the Gaussian Naive Bayes Classifier is',round(accuracy_score(prediction_gnb,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_gnb=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for Gaussian Naive Bayes classifier is:',round(result_gnb.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\n# Decision Tree\nfrom sklearn.tree import DecisionTreeClassifier\nmodel= DecisionTreeClassifier(criterion='gini', \n                             min_samples_split=10,min_samples_leaf=1,\n                             max_features='auto')\nmodel.fit(X_train,y_train)\nprediction_tree=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the DecisionTree Classifier 
is',round(accuracy_score(prediction_tree,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_tree=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for Decision Tree classifier is:',round(result_tree.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\nfrom sklearn.ensemble import AdaBoostClassifier\nmodel= AdaBoostClassifier()\nmodel.fit(X_train,y_train)\nprediction_adb=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the AdaBoostClassifier is',round(accuracy_score(prediction_adb,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_adb=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for AdaBoostClassifier is:',round(result_adb.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nmodel= LinearDiscriminantAnalysis()\nmodel.fit(X_train,y_train)\nprediction_lda=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the LinearDiscriminantAnalysis is',round(accuracy_score(prediction_lda,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_lda=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for LinearDiscriminantAnalysis is:',round(result_lda.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nmodel= GradientBoostingClassifier()\nmodel.fit(X_train,y_train)\nprediction_gbc=model.predict(X_test)\nprint('--------------The Accuracy of the model----------------------------')\nprint('The accuracy of the Gradient Boosting Classifier is',round(accuracy_score(prediction_gbc,y_test)*100,2))\nkfold = KFold(n_splits=10, shuffle=True, random_state=22) # k=10, split the data into 10 equal parts\nresult_gbc=cross_val_score(model,all_features,Targeted_feature,cv=10,scoring='accuracy')\nprint('The cross validated score for Gradient Boosting Classifier is:',round(result_gbc.mean()*100,2))\ny_pred = cross_val_predict(model,all_features,Targeted_feature,cv=10)\nsns.heatmap(confusion_matrix(Targeted_feature,y_pred),annot=True,fmt='3.0f',cmap=\"summer\")\nplt.title('Confusion_matrix', y=1.05, size=15)\n\nmodels = pd.DataFrame({\n    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \n              'Random Forest', 'Naive Bayes', 'AdaBoostClassifier', \n              'Gradient Boosting', 'Linear Discriminant Analysis', \n              'Decision Tree'],\n    'Score': [result_svm.mean(), result_knn.mean(), result_lr.mean(), \n              result_rm.mean(), result_gnb.mean(), result_adb.mean(), \n              result_gbc.mean(), result_lda.mean(), result_tree.mean()]})\nmodels.sort_values(by='Score',ascending=False)\n\ntrain_X = traindf.drop(\"Survived\", 
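The classifier blocks above all repeat the same fit/score/cross-validate boilerplate. A self-contained sketch of the same evaluation driven by a loop; `make_classification` stands in for the script's externally defined `all_features`/`Targeted_feature`, and `shuffle=True` is passed because recent scikit-learn rejects a seeded `KFold` without it:

from sklearn.datasets import make_classification
from sklearn.model_selection import KFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=22)  # random_state is only meaningful with shuffle=True

for name, clf in [("Logistic Regression", LogisticRegression(max_iter=1000)),
                  ("Gaussian NB", GaussianNB()),
                  ("Decision Tree", DecisionTreeClassifier(random_state=0))]:
    scores = cross_val_score(clf, X, y, cv=kfold, scoring="accuracy")
    print(f"{name}: {scores.mean() * 100:.2f} +/- {scores.std() * 100:.2f}")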
axis=1)\ntrain_Y=traindf[\"Survived\"]\ntest_X = testdf.drop(\"PassengerId\", axis=1).copy()\ntrain_X.shape, train_Y.shape, test_X.shape\n\n# Gradient boosting tunning\nimport xgboost as xgb\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import GradientBoostingClassifier\nmodel = GradientBoostingClassifier()\nparam_grid = {'loss' : [\"deviance\"],\n 'n_estimators' : [100,200,300,400],\n 'learning_rate': [0.1, 0.05, 0.01,0.001],\n 'max_depth': [4, 8],\n 'min_samples_leaf': [100,150],\n 'max_features': [0.3, 0.2,0.1] \n }\n\nmodelf = GridSearchCV(model,param_grid = param_grid, cv=kfold, scoring=\"accuracy\", n_jobs= 4, verbose = 1)\n\nmodelf.fit(train_X,train_Y)\n\n# Best score\nmodelf.best_score_\n\n# Best Estimator\nmodelf.best_estimator_\n\n# Random Forest Classifier Parameters tunning \nmodel = RandomForestClassifier()\nn_estim=range(100,1000,100)\n\n## Search grid for optimal parameters\nparam_grid = {\"n_estimators\" :n_estim}\n\n\nmodel_rf = GridSearchCV(model,param_grid = param_grid, cv=5, scoring=\"accuracy\", n_jobs= 4, verbose = 1)\n\nmodel_rf.fit(train_X,train_Y)\n\n\n\n# Best score\nprint(model_rf.best_score_)\n\n#best estimator\nmodel_rf.best_estimator_\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nmodel =LinearDiscriminantAnalysis()\nparam_grid = {'tol':[0.001,0.01,.1,.2]}\n\nmodell = GridSearchCV(model,param_grid = param_grid, cv=5, scoring=\"accuracy\", n_jobs= 4, verbose = 1)\n\nmodell.fit(train_X,train_Y)\n\n# Best score\nprint(modell.best_score_)\n\n# Best Estimator\nmodell.best_estimator_\n\nmodel= SVC()\nparam_grid = {'kernel': ['rbf','linear'], \n 'gamma': [ 0.001, 0.01, 0.1, 1],\n 'C': [1, 10, 50, 100,200,300, 1000]}\n\nmodelsvm = GridSearchCV(model,param_grid = param_grid, cv=5, scoring=\"accuracy\", n_jobs= 4, verbose = 1)\n\nmodelsvm.fit(train_X,train_Y)\n\nprint(modelsvm.best_estimator_)\n\n# Best score\nprint(modelsvm.best_score_)\n\n# Random Forests\nfrom sklearn.ensemble import RandomForestClassifier\nrandom_forest = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=None, max_features='auto', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=400, n_jobs=1,\n oob_score=False, random_state=None, verbose=0,\n warm_start=False)\nrandom_forest.fit(train_X, train_Y)\nY_pred_rf = random_forest.predict(test_X)\nrandom_forest.score(train_X,train_Y)\nacc_random_forest = round(random_forest.score(train_X, train_Y) * 100, 2)\n\nprint(\"Important features\")\npd.Series(random_forest.feature_importances_,train_X.columns).sort_values(ascending=True).plot.barh(width=0.8)\nprint('__'*30)\nprint(acc_random_forest)\n\n","repo_name":"TadejGr/ML_Models","sub_path":"ml_models.py","file_name":"ml_models.py","file_ext":"py","file_size_in_byte":11425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21658899846","text":"\n\ndef priority(char):\n if ord(char) > ord(\"Z\"):\n prio = ord(char) - 96\n else:\n prio = ord(char) - 64 + 26\n # print(char, end=\" \")\n # print(prio)\n return prio\n\n\ndef task1():\n prios = []\n for line in Lines:\n line = line.strip()\n length = len(line)\n comp1 = line[0:int(length / 2)]\n comp2 = line[int(length / 2):int(length)]\n\n for char in comp1:\n if char in comp2:\n prios.append(priority(char))\n break\n\n print('Task 1: ' + str(sum(prios)))\n\n\ndef task2():\n prios = []\n for x in 
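The tuning section above applies the same `GridSearchCV` recipe to four estimators. A compact, runnable sketch of that recipe on toy data; the grid values here are chosen for speed and are not taken from the script:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=200, n_features=8, random_state=0)
param_grid = {"n_estimators": [50, 100], "max_depth": [None, 4]}

search = GridSearchCV(RandomForestClassifier(random_state=0),
                      param_grid=param_grid, cv=5,
                      scoring="accuracy", n_jobs=-1, verbose=0)
search.fit(X, y)
print(search.best_score_)    # best mean CV accuracy
print(search.best_estimator_)  # refit model with the winning parameters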
range(0, len(Lines), 3):\n        for char in Lines[x].strip():\n            if char in Lines[x+1] and char in Lines[x+2]:\n                prios.append(priority(char))\n                break\n\n    print('Task 2: ' + str(sum(prios)))\n\n\nif __name__ == '__main__':\n    file = open('input.txt', 'r')\n    Lines = file.readlines()\n\n    task1()\n    task2()\n","repo_name":"floonym/adventofcode","sub_path":"03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74901319252","text":"import numpy as np\nimport pandas as pd\nimport os\nimport time\nfrom gensim.models import word2vec\nimport parameter\nimport lookup\n\n# train the word embeddings and save them\ndef toWordEmbeddings():\n    #--------------------------------train word embeddings---------------------------------\n    print(\"training word embeddings.....\")\n    sentences = word2vec.Text8Corpus(parameter.IN_FILE)\n    model = word2vec.Word2Vec(\n        sentences=sentences,\n        size=parameter.VECTOR_DIM,          # word vector dimensionality\n        window=parameter.WINDOW_SIZE,       # window size\n        min_count=parameter.MIN_COUNT,      # words with a frequency below this value are ignored\n        sg=parameter.TYPE,                  # sg==0->cbow; sg==1->skip-gram\n        hs=parameter.HS,                    # use hierarchical softmax\n        negative=parameter.NEGATIVE,        # use negative sampling\n        sorted_vocab=parameter.SORT,        # sort the vocabulary by word frequency, high to low\n    )\n    # save embeddings file\n    if not os.path.exists(parameter.OUT_FOLDER):\n        os.mkdir(path=parameter.OUT_FOLDER)\n    model.wv.save_word2vec_format(parameter.OUT_FOLDER+\"word_vec.txt\", binary=False)\n\n    # ----------------------------------generate the .csv file with the word<->id lookup table-------------------------\n    print(\"generating lookup table.....\")\n    if os.path.exists(parameter.OUT_FOLDER+\"word_vec.txt\"):\n        lookup.generate(inFile=parameter.OUT_FOLDER+\"word_vec.txt\",outFile=parameter.OUT_FOLDER+\"words_ids.csv\")\n    else:\n        print(\"there is no embeddings file\")\n\n\nif __name__ ==\"__main__\":\n    begin_time=time.time()\n    toWordEmbeddings()\n    end_time=time.time()\n    print(\"ALL DONE!\")\n    print(\"Spend \",(end_time-begin_time)/60,\" mins\")\n","repo_name":"XierHacker/SmallTools","sub_path":"word2vec/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22436105327","text":"class Solution(object):\n    def climbStairs(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # sum C(n-i, i) over i = number of 2-steps; fenmu/fenzi is a falling factorial over i!\n        result = 1\n        for i in range(1, n//2+1):\n            fenzi = 1\n            fenmu = 1\n            for j in range(1, i+1):\n                fenzi = fenzi*j\n            for k in range(n-2*i+1, n-i+1):\n                fenmu = fenmu*k\n            result = fenmu // fenzi + result\n        return result\n","repo_name":"hzyhzzh/LeetCode","sub_path":"LeetCode70.py","file_name":"LeetCode70.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7569503047","text":"######################################################\n#\n# PyRAI2MD test neural network\n#\n# Author Jingbai Li\n# Oct 11 2021\n#\n######################################################\n\nimport os, sys, shutil, json, subprocess\n\ndef TestNN():\n    \"\"\" neural network test\n\n    1. energy grad nac training and prediction\n    2. 
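The `climbStairs` solution above sums binomial coefficients C(n-i, i) over the number of 2-steps i. The same counts satisfy the Fibonacci recurrence, which gives a cheap cross-check of the closed form:

from math import comb

def climb_stairs_dp(n):
    # f(n) = f(n-1) + f(n-2) with f(1) = 1, f(2) = 2 (last move is a 1-step or a 2-step)
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a

# cross-check against the binomial-sum formula used above: sum over i of C(n-i, i)
for n in range(1, 11):
    assert climb_stairs_dp(n) == sum(comb(n - i, i) for i in range(n // 2 + 1))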
energy grad soc training and prediction\n\n \"\"\"\n pyrai2mddir = os.environ['PYRAI2MD']\n testdir = '%s/neural_network' % (os.getcwd())\n record = {\n 'egn' : 'FileNotFound',\n 'egs' : 'FileNotFound',\n 'permute' : 'FileNotFound',\n 'invd' : 'FileNotFound',\n 'egn_train' : 'FileNotFound',\n 'egn_predict' : 'FileNotFound',\n 'egs_train' : 'FileNotFound',\n 'egs_predict' : 'FileNotFound',\n }\n\n filepath = '%s/TEST/neural_network/train_data/egn.json' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egn'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/egs.json' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egs'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/allpath' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['permute'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/invd' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['invd'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/egn_train' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egn_train'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/egn_predict' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egn_predict'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/egs_train' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egs_train'] = filepath\n\n filepath = '%s/TEST/neural_network/train_data/egs_predict' % (pyrai2mddir)\n if os.path.exists(filepath):\n record['egs_predict'] = filepath\n\n summary = \"\"\"\n *---------------------------------------------------*\n | |\n | Neural Network Test Calculation |\n | |\n *---------------------------------------------------*\n\n Check files and settings:\n-------------------------------------------------------\n\"\"\"\n for key, location in record.items():\n summary += ' %-10s %s\\n' % (key, location)\n\n for key, location in record.items():\n if location == 'FileNotFound':\n summary += '\\n Test files are incomplete, please download it again, skip test\\n\\n'\n return summary, 'FAILED(test file unavailable)'\n if location == 'VariableNotFound':\n summary += '\\n Environment variables are not set, cannot find program, skip test\\n\\n'\n return summary, 'FAILED(enviroment variable missing)'\n\n CopyInput(record, testdir)\n\n summary += \"\"\"\n Copy files:\n %-10s --> %s/egn.json\n %-10s --> %s/egs.json\n %-10s --> %s/allpath\n %-10s --> %s/invd\n %-10s --> %s/egn_train\n %-10s --> %s/egn_predict\n %-10s --> %s/egs_train\n %-10s --> %s/egs_predict\n\n Run MOLCAS CASSCF:\n\"\"\" % ('egn', testdir,\n 'egs', testdir,\n 'permute', testdir,\n 'invd', testdir,\n 'egn_train', testdir,\n 'egn_predict', testdir,\n 'egs_train', testdir,\n 'egs_predict', testdir)\n\n results, code = RunNN(record, testdir, pyrai2mddir)\n \n summary += \"\"\"\n-------------------------------------------------------\n Neural Networks OUTPUT\n-------------------------------------------------------\n%s\n-------------------------------------------------------\n\"\"\" % (results) \n return summary, code\n \ndef CopyInput(record, testdir):\n if os.path.exists(testdir) == False:\n os.makedirs(testdir)\n\n shutil.copy2(record['egn'], '%s/egn.json' % (testdir))\n shutil.copy2(record['egs'], '%s/egs.json' % (testdir))\n shutil.copy2(record['permute'], '%s/allpath' % (testdir))\n shutil.copy2(record['invd'], '%s/invd' % (testdir))\n shutil.copy2(record['egn_train'], '%s/egn_train' % (testdir))\n shutil.copy2(record['egn_predict'], '%s/egn_predict' % (testdir))\n shutil.copy2(record['egs_train'], 
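The eight existence checks above differ only in the file name and the record key, so they collapse into one loop over a key-to-file mapping. A sketch under the same assumptions (the `PYRAI2MD` environment variable and the `TEST/neural_network/train_data` layout):

import os

pyrai2mddir = os.environ['PYRAI2MD']
files = {
    'egn'        : 'egn.json',
    'egs'        : 'egs.json',
    'permute'    : 'allpath',
    'invd'       : 'invd',
    'egn_train'  : 'egn_train',
    'egn_predict': 'egn_predict',
    'egs_train'  : 'egs_train',
    'egs_predict': 'egs_predict',
}
record = {}
for key, name in files.items():
    filepath = '%s/TEST/neural_network/train_data/%s' % (pyrai2mddir, name)
    record[key] = filepath if os.path.exists(filepath) else 'FileNotFound'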
'%s/egs_train' % (testdir))\n shutil.copy2(record['egs_predict'], '%s/egs_predict' % (testdir))\n\ndef Collect(testdir, title):\n with open('%s/NN-%s.log' % (testdir, title), 'r') as logfile:\n log = logfile.read().splitlines()\n for n, line in enumerate(log):\n if \"\"\" Number of atoms:\"\"\" in line:\n results = log[n - 1:]\n break\n results = '\\n'.join(results) + '\\n'\n\n return results\n\ndef Check(testdir):\n with open('%s/max_abs_dev.txt' % (testdir), 'r') as logfile:\n log = logfile.read().splitlines()\n\n results = \"\"\"%s\n ...\n%s\n\"\"\" % ( '\\n'.join(log[:10]), '\\n'.join(log[-10:]))\n\n return results\n\ndef RunNN(record, testdir, pyrai2mddir):\n maindir = os.getcwd()\n results = ''\n\n os.chdir(testdir)\n subprocess.run('python3 %s/pyrai2md.py egn_train > stdout_egn' % (pyrai2mddir), shell = True)\n os.chdir(maindir)\n tmp = Collect(testdir, 'egn')\n results += tmp\n\n if len(tmp.splitlines()) < 10:\n code = 'FAILED(egn training runtime error)'\n return results, code\n else:\n results += ' egn training done, entering egn prediction...\\n'\n\n os.chdir(testdir)\n subprocess.run('python3 %s/pyrai2md.py egn_predict >> stdout_egn' % (pyrai2mddir), shell = True)\n os.chdir(maindir)\n tmp = Check(testdir)\n results += tmp\n\n if len(tmp.splitlines()) < 10:\n code = 'FAILED(egn prediction runtime error)'\n return results, code\n else:\n results += ' egn prediction done, entering egs training...\\n'\n\n os.chdir(testdir)\n subprocess.run('python3 %s/pyrai2md.py egs_train > stdout_egs' % (pyrai2mddir), shell = True)\n os.chdir(maindir)\n tmp = Collect(testdir, 'egs')\n results += tmp\n\n if len(tmp.splitlines()) < 10:\n code = 'FAILED(egn training runtime error)'\n return results, code\n else:\n results += ' egs training done, entering egs prediction...\\n'\n\n os.chdir(testdir)\n subprocess.run('python3 %s/pyrai2md.py egs_predict >> stdout_egs' % (pyrai2mddir), shell = True)\n os.chdir(maindir)\n tmp = Check(testdir)\n results += tmp\n\n if len(tmp.splitlines()) < 10:\n code = 'FAILED(egs prediction runtime error)'\n return results, code\n else:\n code = 'PASSED'\n results += ' egs prediction done\\n'\n\n return results, code\n","repo_name":"lopez-lab/PyRAI2MD","sub_path":"TEST/neural_network/test_nn.py","file_name":"test_nn.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"6108324496","text":"import scipy.io as sio \nimport numpy as np\nimport torch\nimport os\nfrom torch.nn import functional as F\nfrom torch import autograd\nfrom torch.autograd import Variable\nimport nibabel as nib\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import dataloader\nfrom sklearn.metrics import roc_curve, auc\n\nfrom mri_pet_dataset_test import TestDataset\nfrom gan_models import * \nfrom densenet import *\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\ncuda = torch.cuda.is_available()\n\n# initial for recurrence\nseed = 23\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\ntorch.backends.cudnn.deterministic = True\n\n\nSAVE_PATH = './Generated_and_Real_PET_Results'\n\nWORKERS = 0\nBATCH_SIZE = 1\n\ndataset = TestDataset()\ndata_loader_all = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=WORKERS)\n\n\nG = Generator_Pyconv357_Attention().cuda()\n# AD vs 
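The seeding block near the top of this script is the usual recipe for best-effort PyTorch reproducibility. Packaged as a helper it can be shared between the training and test scripts; a sketch using the same calls, plus `cudnn.benchmark = False`, which the script leaves at its default:

import random
import numpy as np
import torch

def set_seed(seed=23):
    # Best-effort determinism; some CUDA kernels can still be nondeterministic.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(23)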
CN\nG.load_state_dict(torch.load('./Generator_save/Generator_Pyconv357_Attention_with_Task_induced_Discriminator(Main)/'))\n# pMCI vs sMCI\n# ...\n\nT = Task_induced_Discriminator().cuda()\n# AD vs CN\nT.load_state_dict(torch.load('./Task_Induced_Discriminator_save/PET/10_TLoss0.1841_TrainACC0.9271_TestACC0.8875_TestSEN0.8958_TestSPE0.8811_TestAUC0.9518_F1S0.8745.pth')) # Can also use others retrained classification models. \n# pMCI vs sMCI\n# ...\n\n\n\n##################################################################################\n# Test\n##################################################################################\n# G.eval()\n# T.eval()\nTP = 0\nFP = 0\nFN = 0\nTN = 0\nlabels = []\nscores = []\niteration = 0 \nfor val_test_data in data_loader_all:\n iteration += 1\n val_test_imgs = val_test_data[0]\n val_test_labels = val_test_data[1]\n val_test_labels_ = Variable(val_test_labels).cuda()\n val_test_data_batch_size = val_test_imgs.size()[0]\n fname = val_test_data[2][0].split('.')[0]\n\n # Complete dataset with MRI and PET \n mri_images = val_test_imgs[:, 0, :, :, :].view(val_test_data_batch_size, 1, 76, 94, 76)\n mri_images = Variable(mri_images.cuda(), requires_grad=False)\n pet_images = val_test_imgs[:, 1, :, :, :].view(val_test_data_batch_size, 1, 76, 94, 76)\n pet_images = Variable(pet_images.cuda(), requires_grad=False)\n\n # Incomplete dataset with only MRI \n # mri_images = val_test_imgs.view(val_test_data_batch_size, 1, 76, 94, 76)\n # mri_images = Variable(mri_images.cuda(), requires_grad=False)\n\n x_fake = G(mri_images)\n result_c_ = T(x_fake)\n\n out_c = F.softmax(result_c_, dim=1)\n score = out_c[0][1].data.cpu().item()\n score = round(score, 4)\n scores.append(score)\n _, predicted__ = torch.max(out_c.data, 1)\n PREDICTED = predicted__.data.cpu().numpy()\n REAL = val_test_labels_.data.cpu().numpy()\n labels.append(REAL)\n\n if PREDICTED == 1 and REAL == 1:\n TP += 1\n elif PREDICTED == 1 and REAL == 0:\n FP += 1\n elif PREDICTED == 0 and REAL == 1:\n FN += 1 \n elif PREDICTED == 0 and REAL == 0:\n TN += 1\n else:\n continue\n\n fake_data = np.squeeze(x_fake.data.cpu().numpy())\n real_data = np.squeeze(pet_images.data.cpu().numpy())\n \n # Save as ||.nii.gz|| format.\n # file_name1 = os.path.join(SAVE_PATH,'{}_fake.nii.gz'.format(fname))\n # generated_pet = nib.Nifti1Image(fake_data, np.eye(4)) \n # nib.save(generated_pet, file_name1)\n # file_name2 = os.path.join(SAVE_PATH,'{}_pet.nii.gz'.format(fname))\n # real_pet = nib.Nifti1Image(real_data, np.eye(4)) \n # nib.save(real_pet, file_name2)\n\n\n # Save as ||.mat|| format.\n file_name1 = os.path.join(SAVE_PATH,'{}_fake.mat'.format(fname))\n sio.savemat(file_name1, {'data':fake_data})\n file_name2 = os.path.join(SAVE_PATH,'{}_pet.mat'.format(fname))\n sio.savemat(file_name2, {'data':real_data})\n\n\ntest_acc = (TP + TN)/((TP + TN + FP + FN) +0.00001)\ntest_sen = TP/((TP + FN)+0.00001)\ntest_spe = TN/((FP + TN)+0.00001)\n\nfpr, tpr, thresholds = roc_curve(labels, scores)\nroc_auc = auc(fpr, tpr)\n\nprint(\n 'Test_ACC:{:.4f} {}/{}'.format(round(test_acc, 4), (TP + TN), (TP + TN + FP + FN)),\n 'Test_SEN:{:.4f} {}/{}'.format(round(test_sen, 4), TP , (TP + FN)),\n 'Test_SPE:{:.4f} {}/{}'.format(round(test_spe, 4), TN, (FP + TN)),\n 'Test_AUC:{:.4f}'.format(round(roc_auc, 4) ),\n )\n","repo_name":"xiaoxingxingkz/TPA-GAN","sub_path":"test_TPA_GAN.py","file_name":"test_TPA_GAN.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"67"} 
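The manual TP/FP/FN/TN tally above can be cross-checked with scikit-learn, which is already imported for the ROC curve. A sketch on stand-in labels and scores (the 0.5 threshold mirrors `torch.max` over a two-class softmax):

import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score

labels = np.array([0, 0, 1, 1, 1, 0])               # stand-in ground truth
scores = np.array([0.2, 0.6, 0.8, 0.4, 0.9, 0.1])   # stand-in class-1 softmax scores
preds = (scores >= 0.5).astype(int)

tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
acc = (tp + tn) / (tp + tn + fp + fn)
sen = tp / (tp + fn)   # sensitivity / recall
spe = tn / (tn + fp)   # specificity
auc_value = roc_auc_score(labels, scores)
print(acc, sen, spe, auc_value)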
+{"seq_id":"39684291176","text":"## need an array to store the sequence having the target sum\n## original array is needed\n## sum of array is needed\n## target sum is needed\n## index is needed\n\nclass Solution:\n    def targetSumSubsequence(self,index,sum,target,arr,subsequence):\n        if sum == target:\n            print(subsequence)\n            return \n\n        if index == len(arr):\n            return\n\n        subsequence.append(arr[index])\n        self.targetSumSubsequence(index+1,sum+arr[index],target,arr,subsequence)\n        subsequence.pop()\n        self.targetSumSubsequence(index+1,sum,target,arr,subsequence)\n\n\nif __name__ == \"__main__\":\n    arr = [1,2,3,4,5,6,7,8,9,10]\n    subsequence = []\n    index = 0\n    sum = 0\n    target = 40\n    Solution().targetSumSubsequence(index,sum,target,arr,subsequence)\n    ","repo_name":"akash-aman/DS_Algo","sub_path":"recursion/printing subsequence sum k.py","file_name":"printing subsequence sum k.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35680070234","text":"# Given a 1-indexed array of integers numbers that is already sorted in non-decreasing order, find two numbers such\n# that they add up to a specific target number. Let these two numbers be numbers[index1] and numbers[index2] where\n# 1 <= index1 < index2 < numbers.length.\n#\n# Return the indices of the two numbers, index1 and index2, added by one as an integer array [index1, index2] of\n# length 2.\n#\n# The tests are generated such that there is exactly one solution. You may not use the same element twice.\n#\n# Your solution must use only constant extra space.\n\nclass Solution(object):\n    def twoSum(self, numbers, target):\n        # two pointers - left and right\n        l, r = 0, len(numbers) - 1\n\n        while l < r:\n            sum = numbers[l] + numbers[r]\n\n            if sum > target:\n                r -= 1\n            elif sum < target:\n                l += 1\n            else:\n                return [l + 1, r + 1]\n        return\n\n\nsol = Solution()\nnumbers = [2, 7, 11, 15]\ntarget = 9\nprint(sol.twoSum(numbers, target))\n","repo_name":"cosmin-oros/ADA","sub_path":"Leetcode/TwoPointers/twoSumII.py","file_name":"twoSumII.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10107647171","text":"def calcscore(name, *score, **option):\n    print(name)\n    print(score)\n    print(option)\n\n    total = 0\n    for s in score:\n        total += s\n\n    print(\"Total : \", total)\n    #if(option['avg'] == True):\n    if option.get('avg'): # if the 'avg' key is missing, the default (None) is treated as False\n        print(\"Average : \", total/len(score))\n\ndef main():\n    #calcscore(\"홍길동\", 88, 99, 77, avg=True)\n    #calcscore(\"고길동\", 99, 88, 95, 85)\n\n    hong_score = [88, 99, 77]\n    go_score = [99, 88, 95, 85]\n    option = {\n        'avg' : True,\n        'total' : True\n    }\n    calcscore(\"홍길동\", *hong_score, avg=True)\n    calcscore(\"고길동\", \n        *go_score,   # unpack the list into variadic positional arguments \n        **option     # unpack the dict into keyword arguments \n    )\n\nmain()\n    ","repo_name":"kimsungkwang/python","sub_path":"chapter07/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32992854448","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.core.validators\nimport boardinghouse.base\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        migrations.swappable_dependency(getattr(settings, 
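The take/skip recursion in the subsequence snippet above prints every match; returning counts instead of printing keeps exactly the same base cases. A sketch:

def count_target_subsequences(index, total, target, arr):
    # Same base cases as the printing version: stop as soon as the running sum hits the target.
    if total == target:
        return 1
    if index == len(arr):
        return 0
    take = count_target_subsequences(index + 1, total + arr[index], target, arr)
    skip = count_target_subsequences(index + 1, total, target, arr)
    return take + skip

print(count_target_subsequences(0, 0, 40, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))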
'BOARDINGHOUSE_SCHEMA_MODEL', 'boardinghouse.Schema')),\n ('admin', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Schema',\n fields=[\n ('schema', models.CharField(primary_key=True, serialize=False, max_length=36, validators=[django.core.validators.RegexValidator(regex=b'^[a-z][a-z0-9_]*$', message='May only contain lowercase letters, digits and underscores. Must start with a letter.')], help_text='The internal name of the schema.
May only contain lowercase letters, digits and underscores. Must start with a letter.
May not be changed after creation.', unique=True)),\n ('name', models.CharField(help_text='The display name of the schema.', unique=True, max_length=128)),\n ('is_active', models.BooleanField(default=True, help_text='Use this instead of deleting schemata.')),\n ('users', models.ManyToManyField(help_text='Which users may access data from this schema.', related_name='schemata', to=settings.AUTH_USER_MODEL, blank=True)),\n ],\n options={\n 'swappable': 'BOARDINGHOUSE_SCHEMA_MODEL',\n 'verbose_name_plural': 'schemata',\n },\n bases=(boardinghouse.base.SharedSchemaMixin, models.Model),\n ),\n ]\n","repo_name":"luzfcb/django-boardinghouse","sub_path":"boardinghouse/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24802813130","text":"\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd.variable import Variable\nfrom torchvision import transforms, datasets\nimport torchvision\n\nimport argparse\n\nparser = argparse.ArgumentParser(description = \"MNIST Testing\")\nparser.add_argument('--no-cuda', action = 'store_true', default = False)\n\nARGS = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available() and not ARGS.no_cuda\ndevice = torch.device('cuda' if use_cuda else 'cpu')\nprint(device)\n\n###############################################################\n\n\n# take MNIST data and tranform input values from [0, 255] to [-1, 1]\ndef mnist():\n out_dir = '../dataset'\n train = datasets.MNIST(root = out_dir, train = True, transform = transforms.ToTensor(), download = True)\n test = datasets.MNIST(root = out_dir, train = False, transform = transforms.ToTensor())\n return train, test\n\n\ndef loss_function(recon_x, x, mu, log_var):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD\n\n\ndef generate_image(vae):\n z = torch.randn(64, 2).to(device)\n sample = vae.decoder(z)\n\n torchvision.utils.save_image(sample.view(64, 1, 28, 28), './sample_vae' + '.png')\n\n\n##########################################################\nclass VAE(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.encode_layer = nn.Sequential(\n nn.Linear(28 * 28, 512),\n nn.ReLU(),\n nn.Linear(512, 256),\n nn.ReLU()\n )\n\n self.mu = nn.Linear(256, 2)\n self.log_var = nn.Linear(256, 2)\n\n self.decode_layer = nn.Sequential(\n nn.Linear(2, 256),\n nn.ReLU(),\n nn.Linear(256, 512),\n nn.ReLU(),\n nn.Linear(512, 28 * 28),\n nn.Sigmoid()\n )\n\n\n def encoder(self, x):\n x = self.encode_layer(x)\n return self.mu(x), self.log_var(x)\n\n def decoder(self, x):\n return self.decode_layer(x)\n\n def forward(self, x):\n mu, log_var = self.encoder(x.view(-1, 28*28))\n z = self.sampling(mu, log_var)\n return self.decoder(z), mu, log_var\n\n def sampling(self, mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu) # return z\n\n#####################################################\n\ndef training(vae, optimizer, epoch, train_loader):\n vae.train()\n train_loss = 0\n\n for batch_idx, (data, _) in enumerate(train_loader):\n data = data.to(device)\n optimizer.zero_grad()\n\n recon_batch, mu, log_var = vae(data)\n loss = loss_function(recon_batch, data, mu, log_var)\n\n loss.backward()\n train_loss += loss.item()\n 
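The `RegexValidator` in the migration above enforces `^[a-z][a-z0-9_]*$` on schema names (a lowercase letter, then lowercase letters, digits, or underscores). The rule is easy to probe in isolation with the stdlib `re` module; a sketch (the validator itself uses a bytes pattern, here shown with str):

import re

SCHEMA_NAME = re.compile(r'^[a-z][a-z0-9_]*$')

for name in ('tenant_1', 'Tenant', '1tenant', 'tenant-1', 'ok'):
    print(name, bool(SCHEMA_NAME.match(name)))
# tenant_1 True, Tenant False, 1tenant False, tenant-1 False, ok True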
optimizer.step()\n\n if batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item() / len(data)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))\n\n\n\nif __name__ == '__main__':\n size = 100\n train, test = mnist()\n train_loader = torch.utils.data.DataLoader(train, batch_size = size, shuffle = True)\n test_loader = torch.utils.data.DataLoader(test, batch_size = size, shuffle = False)\n\n vae = VAE().to(device)\n\n optimizer = optim.Adam(vae.parameters(), lr = 0.001)\n\n # training\n for epoch in range(0, 100):\n training(vae, optimizer, epoch, train_loader)\n\n generate_image(vae)\n","repo_name":"xgfelicia/Deep-Learning","sub_path":"vae/vae-vanilla.py","file_name":"vae-vanilla.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34937474283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#FILE = GeometrySearcher.py\n\n##############################################################\n#-----------------...EasyHybrid 3.0...-----------------------#\n#-----------Credits and other information here---------------#\n##############################################################\n\n#==============================================================================\nimport os, sys\n#importing our library functions\nimport commonFunctions\nfrom LogFile import LogFileWriter\n# pDynamo\nfrom pBabel import * \nfrom pCore import * \nfrom pMolecule import * \nfrom pScientific import * \nfrom pScientific.Arrays import * \nfrom pScientific.Geometry3 import * \nfrom pSimulation import *\n#*********************************************************************************\nclass GeometrySearcher:\n '''\n Class to handle with pDynamo methods that search geometries for the system, such as global/local minimuns\n as saddle points and reaction path trajectories. 
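The `sampling` method of the VAE above is the reparameterization trick: z = mu + sigma * eps keeps the stochastic node differentiable with respect to mu and log_var while the noise stays outside the graph. A standalone sketch of that step:

import torch

mu = torch.zeros(4, 2, requires_grad=True)
log_var = torch.zeros(4, 2, requires_grad=True)

std = torch.exp(0.5 * log_var)   # sigma = exp(log_var / 2)
eps = torch.randn_like(std)      # noise sampled outside the autograd graph
z = mu + eps * std               # gradients flow to mu and log_var, not to eps

z.sum().backward()
print(mu.grad.shape, log_var.grad.shape)  # torch.Size([4, 2]) twice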
\n '''\n #.------------------------------------------------------------------------- \n def __init__(self,_system,_baseFolder,_trajName=None):\n '''\n Class constructor.\n '''\n self.molecule = _system\n self.baseName = _baseFolder\n self.optAlg = \"ConjugatedGradient\"\n self.InitCrd3D = Clone(_system.coordinates3)\n self.finalCrd3D = None\n self.massWeighting = False\n self.logFreq = 50 # deafult value for otimizations, must to be changed through the specific class method\n self.trajectoryName = None\n self.savePdb = False\n self.saveFormat = None \n self.rmsGrad = 0.1\n self.maxIt = 500\n self.saveFrequency = 0\n self.DEBUG = False\n if not _trajName == None: self.trajectoryName = os.path.join(_baseFolder,_trajName)\n #=========================================================================\n def ChangeDefaultParameters(self,_parameters):\n '''\n Class method to modify default parameters for the minimization runs\n ''' \n if \"save_pdb\" in _parameters: self.savePdb = _parameters[\"save_pdb\"]\n if \"maxIterations\" in _parameters: self.maxIt = _parameters['maxIterations'] \n if \"log_frequency\" in _parameters: self.logFreq = _parameters[\"log_frequency\"] \n if \"save_format\" in _parameters: self.saveFormat = _parameters[\"save_format\"]\n if \"save_frequency\" in _parameters: self.saveFrequency = _parameters[\"save_frequency\"] \n if \"rmsGradient\" in _parameters: self.rmsGrad = _parameters[\"rmsGradient\"]\n if \"Debug\" in _parameters: self.DEBUG = _parameters[\"Debug\"]\n #======================================================================================\n # Main minimization class method\n def Minimization(self,_optimizer):\n '''\n Execute the minimization routine for search of geometry corresponding to local minima\n '''\n #------------------------------------------------------------------\n self.optAlg = _optimizer \n # run the minimization for the chosen algorithm\n if self.optAlg == \"ConjugatedGradient\": self.RunConjugatedGrad()\n elif self.optAlg == \"SteepestDescent\" : self.RunSteepestDescent()\n elif self.optAlg == \"LFBGS\" : self.RunLFBGS()\n elif self.optAlg == \"QuasiNewton\" : self.RunQuasiNewton()\n elif self.optAlg == \"FIRE\" : self.RunFIREmin()\n self.finalCrd3D = Clone(self.molecule.coordinates3)\n if self.DEBUG:\n self.Print()\n pdbFileA = os.path.join(self.baseName, \"initialCoord_{}.pdb\".format(self.optAlg) )\n pdbFileB = os.path.join(self.baseName, \"finalCoord_{}.pdb\".format(self.optAlg) )\n self.molecule.coordinates3 = Clone(self.InitCrd3D)\n ExportSystem(pdbFileA,self.molecule)\n self.molecule.coordinates3 = Clone(self.finalCrd3D)\n ExportSystem(pdbFileB,self.molecule)\n #=============================================================================\n #Minimizers methods\n def RunConjugatedGrad(self):\n '''\n Class method to apply the conjugated gradient minimizer\n '''\n if self.trajectoryName == None:\n ConjugateGradientMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n else: \n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, log=None )\n ConjugateGradientMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n trajectories = [(trajectory, self.saveFrequency)],\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n\n #=====================================================================================\n def RunSteepestDescent(self):\n '''\n Class method to apply the steepest descent 
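The `Minimization` dispatch above routes an algorithm name to one of five nearly identical `Run*` wrappers. The same routing can be table-driven, with the optional-trajectory logic written once; a sketch that assumes the pDynamo entry points already star-imported by this module:

_MINIMIZERS = {
    "ConjugatedGradient": ConjugateGradientMinimize_SystemGeometry,
    "SteepestDescent"   : SteepestDescentMinimize_SystemGeometry,
    "LFBGS"             : LBFGSMinimize_SystemGeometry,
    "QuasiNewton"       : QuasiNewtonMinimize_SystemGeometry,
    "FIRE"              : FIREMinimize_SystemGeometry,
}

def RunMinimizer(self):
    '''Equivalent of the five Run* methods above, driven by one table.'''
    minimizer = _MINIMIZERS[self.optAlg]
    kwargs = { "logFrequency"         : self.logFreq,
               "maximumIterations"    : self.maxIt,
               "rmsGradientTolerance" : self.rmsGrad }
    if self.trajectoryName is not None:
        trajectory = ExportTrajectory(self.trajectoryName, self.molecule, log=None)
        kwargs["trajectories"] = [(trajectory, self.saveFrequency)]
    minimizer(self.molecule, **kwargs)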
minimizer\n ''' \n if self.trajectoryName == None:\n SteepestDescentMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n else:\n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, log=None )\n SteepestDescentMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n trajectories = [(trajectory, self.saveFrequency)],\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n #============================================================================\n def RunLFBGS(self):\n '''\n Class method to apply the LFBGS minimizer\n ''' \n if self.trajectoryName == None:\n LBFGSMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n else:\n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, log=None )\n LBFGSMinimize_SystemGeometry(self.molecule , \n logFrequency = self.logFreq ,\n trajectories = [(trajectory, self.saveFrequency)],\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad ) \n #=============================================================================\n def RunQuasiNewton(self):\n '''\n Class method to apply the Quaisi-Newton minimizer\n ''' \n if self.trajectoryName == None: \n QuasiNewtonMinimize_SystemGeometry( self.molecule , \n logFrequency = self.logFreq ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n else:\n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, log=None )\n QuasiNewtonMinimize_SystemGeometry( self.molecule , \n logFrequency = self.logFreq ,\n trajectories = [(trajectory, self.saveFrequency)],\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n #==============================================================================\n def RunFIREmin(self):\n '''\n '''\n if self.trajectoryName == None:\n FIREMinimize_SystemGeometry( self.molecule , \n logFrequency = self.logFreq ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n else:\n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, log=None )\n FIREMinimize_SystemGeometry( self.molecule , \n logFrequency = self.logFreq ,\n trajectories = [(trajectory, self.saveFrequency)],\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad ) \n #=============================================================================\n # Reaction path searchers\n def NudgedElasticBand(self,_parameters):\n '''\n Nudget Elastic Band procedure to estimate a reaction path\n '''\n #-------------------------------------------------------------------------\n rmdGIS = 1\n springCF = 500.0\n fixedTerminal = False\n useSpline = False\n spline_tol = 1.5\n if \"spring_constant_force\" in _parameters: springCF = _parameters[\"spring_constant_force\"]\n if \"fixed_terminal_images\" in _parameters: fixedTerminal = _parameters[\"fixed_terminal_images\"]\n if \"RMS_growing_intial_string\" in _parameters: rmsGIS = _parameters[\"RMS_growing_intial_string\"]\n if \"spline_redistribution\" in _parameters: useSpline = _parameters[\"spline_redistribution\"]\n\n self.trajectoryName = os.path.join(self.baseName,self.trajectoryName+\".ptGeo\")\n #Note: is interesting to think in a window were the user select the initial and final coords\n # here we excpect to ibe in pkl probably from a scan or optimization already done using the software\n if \"init_coord\" in 
_parameters: self.InitCrd3D = ImportCoordinates3( _parameters[\"init_coord\"], log=None )\n if \"final_coord\" in _parameters: self.finalCrd3D = ImportCoordinates3( _parameters[\"final_coord\"], log=None )\n trajectory = None\n #-----------------------------------------------------------------------------------------\n if not \"traj_source\" in _parameters:\n GrowingStringInitialPath(self.molecule ,\n _parameters[\"traj_bins\"] ,\n self.InitCrd3D ,\n self.finalCrd3D , \n self.trajectoryName ,\n rmsGradientTolerance=rmsGIS )\n trajectory = ExportTrajectory( self.trajectoryName, self.molecule, append=True )\n else:\n self.trajectoryName = _parameters[\"traj_source\"]\n trajectory = ExportTrajectory( _parameters[\"traj_source\"], self.molecule, append=True ) \n #------------------------------------------------------------------------------------------\n ChainOfStatesOptimizePath_SystemGeometry ( self.molecule , \n trajectory ,\n logFrequency = 1 ,\n maximumIterations = self.maxIt ,\n fixedTerminalImages = fixedTerminal ,\n springForceConstant = springCF ,\n splineRedistributionTolerance=spline_tol ,\n forceSplineRedistributionCheckPerIteration=useSpline,\n rmsGradientTolerance = self.rmsGrad )\n #========================================================================================\n def SelfAvoidWalking(self,_parameters):\n '''\n Self-Avoid-Walking procedure to estimate a reaction path\n ''' \n self.trajectoryName = self.baseName + \"SAW.ptGeo\"\n self.traj = ExportTrajectory( self.trajectoryName, self.molecule, append=True ) \n ExpandByLinearInterpolation( _parameters[\"traj_source\"], self.trajectoryName, self.molecule, _parameters[\"traj_bins\"])\n Gamma = 100.0\n Rho = 2.0\n Kappa = 5000.0\n if \"gamma\" in _parameters: Gamma = _parameters[\"gamma\"]\n if \"rho\" in _parameters: Rho = _parameters[\"rho\"]\n if \"kappa\" in _parameters: Kappa = _parameters[\"kappa\"]\n SAWOptimize_SystemGeometry ( self.molecule, self.traj, gamma=Gamma, kappa=Kappa )\n #========================================================================================\n def SteepestDescentPathSearch(self,_parameters):\n '''\n '''\n massW = True\n funcStep = 2.0\n pathStep = 0.025 \n\n if \"mass_weighting\" in _parameters: massw = _parameters[\"mass_weighting\"]\n if \"function_step\" in _parameters: funcStep = _parameters[\"function_step\"]\n if \"path_step\" in _parameters: pathStep = _parameters[\"path_step\"]\n\n self.molecule.coordinates3 = _parameters[\"saddle_conformation\"]\n self.trajectoryName = self.baseName + \".steepPath.ptGeo\"\n self.traj = ExportTrajectory( self.trajectoryName, self.molecule )\n SteepestDescentPath_SystemGeometry( self.molecule ,\n functionStep = funcStep ,\n logFrequency = self.logFrequency ,\n maximumIterations = self.maxIt ,\n pathStep = pathStep ,\n saveFrequency = self.save_frequency ,\n trajectory = self.traj ,\n useMassWeighting = massW )\n\n #========================================================================================\n def BakerSaddleOptimizer(self,_parameters):\n '''\n Class method to search saddle-points transition structure\n '''\n\n self.InitCrd3D = ImportCoordinates3(_parameters[\"saddle_coord\"] )\n self.molecule.coordinates3 = Clone(self.InitCrd3D)\n BakerSaddleOptimize_SystemGeometry( self.molecule ,\n logFrequency = 1 ,\n maximumIterations = self.maxIt ,\n rmsGradientTolerance = self.rmsGrad )\n\n self.finalCrd3D = Clone(self.molecule.coordinates3)\n Pickle(self.baseName+\"_BakerOpt.pkl\",self.finalCrd3D)\n if savePdb: \n 
ExportSystem(self.baseName+\"_BakerOpt.pdb\",self.finalCrd3D)\n savePdb = False\n\n #=========================================================================================\n def CalculateRMS(self):\n '''\n Calculate the root mean square of deviation of the final coordinate found with the first set given.\n '''\n masses = Array.FromIterable ( [ atom.mass for atom in self.molecule.atoms ] )\n self.InitCrd3D.Superimpose ( self.finalCrd3D, weights = masses )\n rms = self.InitCrd3D.RootMeanSquareDeviation ( self.finalCrd3D, weights = masses )\n print(\"Root Mean Sqaute of Deviation of the optimized structure from the initial: {}\".format(rms))\n #===========================================================================================\n def Finalize(self):\n '''\n Finaluze the Geometry searcher procedures, save structures and/or trajectories\n '''\n self.CalculateRMS()\n #----------------------------------------------------------------------\n #Save structures and/or trajectories\n if self.savePdb:\n pdbFile = self.baseName + \"opt_{}.pdb\".format(self.optAlg)\n i = 0;\n while os.path.exists(pdbFile):\n pdbFile = self.baseName + \"_#{}_opt_{}.pdb\".format(i,self.optAlg)\n i += 1\n ExportSystem(pdbFile,self.molecule)\n #----------------------------------------------------------------------\n if self.saveFormat == \".dcd\" or self.saveFormat == \".mdcrd\":\n if self.saveFormat != self.trajectoryName:\n traj_save = os.path.splitext(self.trajectoryName)[0] + self.saveFormat\n Duplicate(self.trajectoryName,traj_save,self.molecule)\n #===========================================================================================\n def Print(self):\n '''\n Print to screen basic info for the simulation. \n ''' \n print( \"Geometry Searcher working trajectory folder:{}\".format(self.trajectoryName) )\n print( \"RMS gradient tolerance: {}\".format(self.rmsGrad) )\n print( \"Optimization Algorithm: {}\".format(self.optAlg) )\n print( \"Maximum number of maxIterations: {}\".format(self.maxIt) )\n \n#================================================================================================#\n#======================================END OF THE FILE===========================================#\n#================================================================================================#\n","repo_name":"ferbachega/EasyHybrid3_old","sub_path":"easyhybrid/pDynamoMethods/GeometrySearcher.py","file_name":"GeometrySearcher.py","file_ext":"py","file_size_in_byte":19354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"2183665607","text":"import requests #used for communication over http\r\nimport shutil, json, time \r\n\r\n\r\n#gets a list of all of the content on the sd card\r\n#in the JSON format, and processes the data to get the name of the latest taken photo\r\ndef getLatestPictureName():\r\n r = requests.get('http://10.5.5.9:8080/gp/gpMediaList')\r\n data = r.json()\r\n\r\n index_of_latest = 0\r\n value_of_latest = 0\r\n\r\n for index,x in enumerate(data['media'][0]['fs']):\r\n if x['n'][:4] == \"GOPR\" :\r\n if int(x['n'][4:8]) > value_of_latest:\r\n value_of_latest = int(x['n'][4:8])\r\n index_of_latest = index\r\n\r\n picture = data['media'][0]['fs'][index_of_latest]['n']\r\n return picture\r\n\r\n\r\n#sends commands over http to the camera server\r\ndef takePicture():\r\n r = requests.get('http://10.5.5.9/camera/CM?t=goprohero&p=%01') #changes the mode of the camera to photo mode\r\n time.sleep(2) #sleeps for 2 seconds, in order to allow 
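`Finalize` above probes for a free PDB file name by appending `_#i` until `os.path.exists` fails. Extracted as a helper the loop looks like this (a sketch with a hypothetical `unique_path` name, not part of the module):

import os

def unique_path(base, suffix):
    # First try base+suffix; then base_#0+suffix, base_#1+suffix, ... as in Finalize above.
    candidate = base + suffix
    i = 0
    while os.path.exists(candidate):
        candidate = "%s_#%d%s" % (base, i, suffix)
        i += 1
    return candidate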
the processing of the command on the side of the camera\r\n    \r\n    p = requests.get('http://10.5.5.9/bacpac/SH?t=goprohero&p=%01') #activates the shutter, thus taking a picture\r\n    time.sleep(2)\r\n\r\n    #checks whether either of the http requests failed, code 200 denotes successful completion\r\n    if p.status_code != 200 or r.status_code != 200:\r\n        print(\"ERROR, http request failed!\")\r\n\r\n#takes a picture name as a parameter, then downloads said picture saving it in the desired directory\r\ndef downloadPicture(picture_name):\r\n    url = 'http://10.5.5.9:8080/videos/DCIM/100GOPRO/' + picture_name\r\n    response = requests.get(url, stream=True)\r\n    \r\n    path = \"C:\\\\Users\\\\nedel\\\\Desktop\\\\PSR 'Veljko' Mk.Ia\\\\Files\\\\Pictures\\\\AcquisitionedPhotos\\\\\" + picture_name\r\n    \r\n    with open(path, 'wb') as out_file: #both the directory and the file name can be changed\r\n        shutil.copyfileobj(response.raw, out_file)\r\n    del response\r\n\r\n#function calls all the other functions, allowing for simpler execution\r\ndef executeCamera():\r\n    takePicture()\r\n    picture = getLatestPictureName()\r\n    downloadPicture(picture)\r\n    return str( \"C:\\\\Users\\\\nedel\\\\Desktop\\\\PSR 'Veljko' Mk.Ia\\\\Files\\\\Pictures\\\\AcquisitionedPhotos\\\\\" + picture)\r\n\r\nexecuteCamera()\r\n","repo_name":"NedeljkoTesanovic/PSR-Veljko-Mk.Ia","sub_path":"Python/gopro.py","file_name":"gopro.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27570328096","text":"import os\n\ndef mario_or_bust(fcn):\n    def wrappers(*args, **kwargs):\n        try:\n            results = fcn(*args, **kwargs)\n            os.system('afplay /Users/travis-bumgarner/Programming/smb_stage_clear.wav -v 0.05')\n            return results\n\n        except Exception as e:\n            print(e)\n            os.system('afplay /Users/travis-bumgarner/Programming/smb_gameover.wav -v 0.05')\n    return wrappers\n\n# @mario_or_bust\n# def foo():\n#     notrealfunction()\n# foo()\n","repo_name":"TravisBumgarner/Recycle-Bin","sub_path":"mario-sound-python/python/audio_decorator.py","file_name":"audio_decorator.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10771388459","text":"import os\nimport unittest\nimport requests\nimport simplejson as json\nimport yaml\n\nfrom emuvim.test.api_base_openstack import ApiBaseOpenStack\n\n\nclass testRestApi(ApiBaseOpenStack):\n    \"\"\"\n    Tests to check the REST API endpoints of the emulator.\n    \"\"\"\n\n    def setUp(self):\n        # create network\n        self.createNet(nswitches=3, ndatacenter=2, nhosts=2,\n                       ndockers=0, autolinkswitches=True)\n\n        # setup links\n        self.net.addLink(self.dc[0], self.h[0])\n        self.net.addLink(self.h[1], self.dc[1])\n        self.net.addLink(self.dc[0], self.dc[1])\n\n        # start api\n        self.startApi()\n\n        # start Mininet network\n        self.startNet()\n\n    def testNovaDummy(self):\n        print('->>>>>>> test Nova Dummy Class->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        print(\" \")\n\n        headers = {'Content-type': 'application/json'}\n        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(\n            __file__), \"templates/test_heatapi_template_create_stack.yml\")).read()\n        url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n        requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)),\n                      headers=headers)\n\n        print('->>>>>>> test Nova List Versions ->>>>>>>>>>>>>>>')\n        
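The sound-effect decorator just shown follows the standard wrapper recipe; a generalized sketch with `functools.wraps` (which preserves the wrapped function's name and docstring) and configurable commands. The `afplay` invocations from the snippet are macOS- and machine-specific:

import functools
import os

def play_on_result(success_cmd, failure_cmd):
    def decorator(fcn):
        @functools.wraps(fcn)
        def wrapper(*args, **kwargs):
            try:
                result = fcn(*args, **kwargs)
                os.system(success_cmd)   # e.g. the smb_stage_clear.wav afplay call above
                return result
            except Exception as e:
                print(e)
                os.system(failure_cmd)   # e.g. the smb_gameover.wav afplay call above
        return wrapper
    return decorator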
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/\"\n listapiversionnovaresponse = requests.get(url, headers=headers)\n self.assertEqual(listapiversionnovaresponse.status_code, 200)\n self.assertEqual(json.loads(listapiversionnovaresponse.content)[\n \"versions\"][0][\"id\"], \"v2.1\")\n self.assertEqual(json.loads(listapiversionnovaresponse.content)[\n \"versions\"][0][\"status\"], \"CURRENT\")\n self.assertEqual(json.loads(listapiversionnovaresponse.content)[\n \"versions\"][0][\"version\"], \"2.38\")\n self.assertEqual(json.loads(listapiversionnovaresponse.content)[\n \"versions\"][0][\"min_version\"], \"2.1\")\n self.assertEqual(json.loads(listapiversionnovaresponse.content)[\n \"versions\"][0][\"updated\"], \"2013-07-23T11:33:21Z\")\n print(\" \")\n\n print('->>>>>>> test Nova Version Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla\"\n listapiversion21novaresponse = requests.get(url, headers=headers)\n self.assertEqual(listapiversion21novaresponse.status_code, 200)\n self.assertEqual(json.loads(listapiversion21novaresponse.content)[\n \"version\"][\"id\"], \"v2.1\")\n self.assertEqual(json.loads(listapiversion21novaresponse.content)[\n \"version\"][\"status\"], \"CURRENT\")\n self.assertEqual(json.loads(listapiversion21novaresponse.content)[\n \"version\"][\"version\"], \"2.38\")\n self.assertEqual(json.loads(listapiversion21novaresponse.content)[\n \"version\"][\"min_version\"], \"2.1\")\n self.assertEqual(json.loads(listapiversion21novaresponse.content)[\n \"version\"][\"updated\"], \"2013-07-23T11:33:21Z\")\n print(\" \")\n\n print('->>>>>>> test Nova Version List Server APIs ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers\"\n listserverapisnovaresponse = requests.get(url, headers=headers)\n self.assertEqual(listserverapisnovaresponse.status_code, 200)\n self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[\n \"servers\"][0][\"name\"], \"\")\n print(\" \")\n\n print('->>>>>>> test Nova Delete Server APIs ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/%s\" % (\n json.loads(listserverapisnovaresponse.content)[\"servers\"][0][\"id\"])\n deleteserverapisnovaresponse = requests.delete(url, headers=headers)\n self.assertEqual(deleteserverapisnovaresponse.status_code, 204)\n print(\" \")\n\n print('->>>>>>> test Nova Delete Non-Existing Server APIs ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/non-existing-ix\"\n deleteserverapisnovaresponse = requests.delete(url, headers=headers)\n self.assertEqual(deleteserverapisnovaresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> testNovaVersionListServerAPIs_withPortInformation ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/andPorts\"\n listserverapisnovaresponse = requests.get(url, headers=headers)\n self.assertEqual(listserverapisnovaresponse.status_code, 200)\n self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[\n \"servers\"][0][\"name\"], \"\")\n print(\" \")\n\n print('->>>>>>> test Nova List Flavors 
->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/flavors\"\n listflavorsresponse = requests.get(url, headers=headers)\n self.assertEqual(listflavorsresponse.status_code, 200)\n self.assertIn(json.loads(listflavorsresponse.content)[\"flavors\"][0][\"name\"], [\n \"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n self.assertIn(json.loads(listflavorsresponse.content)[\"flavors\"][1][\"name\"], [\n \"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n self.assertIn(json.loads(listflavorsresponse.content)[\"flavors\"][2][\"name\"], [\n \"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n print(\" \")\n\n print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/flavors\"\n addflavorsresponse = requests.post(url,\n data='{\"flavor\":{\"name\": \"testFlavor\", \"vcpus\": \"test_vcpus\", \"ram\": 1024, \"disk\": 10}}',\n headers=headers)\n self.assertEqual(addflavorsresponse.status_code, 200)\n self.assertIsNotNone(json.loads(\n addflavorsresponse.content)[\"flavor\"][\"id\"])\n self.assertIsNotNone(json.loads(addflavorsresponse.content)[\n \"flavor\"][\"links\"][0]['href'])\n print(\" \")\n\n print('->>>>>>> test Nova List Flavors Detail ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/flavors/detail\"\n listflavorsdetailresponse = requests.get(url, headers=headers)\n self.assertEqual(listflavorsdetailresponse.status_code, 200)\n self.assertIn(json.loads(listflavorsdetailresponse.content)[\n \"flavors\"][0][\"name\"], [\"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n self.assertIn(json.loads(listflavorsdetailresponse.content)[\n \"flavors\"][1][\"name\"], [\"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n self.assertIn(json.loads(listflavorsdetailresponse.content)[\n \"flavors\"][2][\"name\"], [\"m1.nano\", \"m1.tiny\", \"m1.micro\", \"m1.small\"])\n print(\" \")\n\n print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/flavors/detail\"\n addflavorsresponse = requests.post(url,\n data='{\"flavor\":{\"name\": \"testFlavor\", \"vcpus\": \"test_vcpus\", \"ram\": 1024, \"disk\": 10}}',\n headers=headers)\n self.assertEqual(addflavorsresponse.status_code, 200)\n self.assertIsNotNone(json.loads(\n addflavorsresponse.content)[\"flavor\"][\"id\"])\n self.assertIsNotNone(json.loads(addflavorsresponse.content)[\n \"flavor\"][\"links\"][0]['href'])\n print(\" \")\n\n print('->>>>>>> test Nova List Flavor By Id ->>>>>>>>>>>>>>>')\n\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/flavors/%s\" % (\n json.loads(listflavorsdetailresponse.content)[\"flavors\"][0][\"name\"])\n listflavorsbyidresponse = requests.get(url, headers=headers)\n self.assertEqual(listflavorsbyidresponse.status_code, 200)\n self.assertEqual(json.loads(listflavorsbyidresponse.content)[\n \"flavor\"][\"id\"], json.loads(listflavorsdetailresponse.content)[\"flavors\"][0][\"id\"])\n print(\" \")\n\n print('->>>>>>> test Nova List Images ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/images\"\n 
listimagesresponse = requests.get(url, headers=headers)\n        self.assertEqual(listimagesresponse.status_code, 200)\n        print(listimagesresponse.content)\n        print(\" \")\n\n        print('->>>>>>> test Nova List Images Details ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/images/detail\"\n        listimagesdetailsresponse = requests.get(url, headers=headers)\n        self.assertEqual(listimagesdetailsresponse.status_code, 200)\n        self.assertEqual(json.loads(listimagesdetailsresponse.content)[\n            \"images\"][0][\"metadata\"][\"architecture\"], \"x86_64\")\n        print(\" \")\n\n        print('->>>>>>> test Nova List Image By Id ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/images/%s\" % (\n            json.loads(listimagesdetailsresponse.content)[\"images\"][0][\"id\"])\n        listimagebyidresponse = requests.get(url, headers=headers)\n        self.assertEqual(listimagebyidresponse.status_code, 200)\n        self.assertEqual(json.loads(listimagebyidresponse.content)[\n            \"image\"][\"id\"], json.loads(listimagesdetailsresponse.content)[\"images\"][0][\"id\"])\n        print(\" \")\n\n        print('->>>>>>> test Nova List Image By Non-Existent Id ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/images/non_existing_id\"\n        listimagebynonexistingidresponse = requests.get(url, headers=headers)\n        self.assertEqual(listimagebynonexistingidresponse.status_code, 404)\n        print(\" \")\n\n        # find ubuntu id\n        for image in json.loads(listimagesresponse.content)[\"images\"]:\n            if image[\"name\"] == \"ubuntu:trusty\":\n                ubuntu_image_id = image[\"id\"]\n\n        print('->>>>>>> test Nova Create Server Instance ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/servers\"\n        data = '{\"server\": {\"name\": \"X\", \"flavorRef\": \"%s\", \"imageRef\":\"%s\"}}' % (\n            json.loads(listflavorsresponse.content)[\"flavors\"][0][\"id\"], ubuntu_image_id)\n        createserverinstance = requests.post(url, data=data, headers=headers)\n        self.assertEqual(createserverinstance.status_code, 200)\n        self.assertEqual(json.loads(createserverinstance.content)[\n            \"server\"][\"image\"][\"id\"], ubuntu_image_id)\n        print(\" \")\n\n        print('->>>>>>> test Nova Create Server Instance With Already Existing Name ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/servers\"\n        data = '{\"server\": {\"name\": \"X\", \"flavorRef\": \"%s\", \"imageRef\":\"%s\"}}' % (\n            json.loads(listflavorsresponse.content)[\"flavors\"][0][\"id\"], ubuntu_image_id)\n        createserverinstance = requests.post(url, data=data, headers=headers)\n        self.assertEqual(createserverinstance.status_code, 409)\n        print(\" \")\n\n        print('->>>>>>> test Nova Version List Server APIs Detailed ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/detail\"\n        listserverapisdetailedresponse = requests.get(url, headers=headers)\n        self.assertEqual(listserverapisdetailedresponse.status_code, 200)\n        self.assertEqual(json.loads(listserverapisdetailedresponse.content)[\n            \"servers\"][0][\"status\"], \"ACTIVE\")\n        print(\" \")\n\n        print('->>>>>>> test Nova Show Server Details ->>>>>>>>>>>>>>>')\n        
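# [Editor's note - hedged aside, not original code] The \"find ubuntu id\" loop above\n        # leaves ubuntu_image_id undefined (a NameError) if the image is ever missing from\n        # the emulator; a defensive variant would be:\n        #   ubuntu_image_id = next((img[\"id\"] for img in\n        #                           json.loads(listimagesresponse.content)[\"images\"]\n        #                           if img[\"name\"] == \"ubuntu:trusty\"), None)\n        #   self.assertIsNotNone(ubuntu_image_id, \"ubuntu:trusty image not found\")\n        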
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/%s\" % (\n json.loads(listserverapisdetailedresponse.content)[\"servers\"][0][\"id\"])\n listserverdetailsresponse = requests.get(url, headers=headers)\n self.assertEqual(listserverdetailsresponse.status_code, 200)\n self.assertEqual(json.loads(listserverdetailsresponse.content)[\n \"server\"][\"flavor\"][\"links\"][0][\"rel\"], \"bookmark\")\n print(\" \")\n\n print('->>>>>>> test Nova Show Non-Existing Server Details ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/non_existing_server_id\"\n listnonexistingserverdetailsresponse = requests.get(\n url, headers=headers)\n self.assertEqual(listnonexistingserverdetailsresponse.status_code, 404)\n print(\" \")\n\n def testNeutronDummy(self):\n print('->>>>>>> test Neutron Dummy Class->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(\" \")\n\n headers = {'Content-type': 'application/json'}\n test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(\n __file__), \"templates/test_heatapi_template_create_stack.yml\")).read()\n url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n requests.post(url, data=json.dumps(\n yaml.load(test_heatapi_template_create_stack)), headers=headers)\n # test_heatapi_keystone_get_token = open(\"test_heatapi_keystone_get_token.json\").read()\n\n print('->>>>>>> test Neutron List Versions ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/\"\n listapiversionstackresponse = requests.get(url, headers=headers)\n self.assertEqual(listapiversionstackresponse.status_code, 200)\n self.assertEqual(json.loads(listapiversionstackresponse.content)[\n \"versions\"][0][\"id\"], \"v2.0\")\n print(\" \")\n\n print('->>>>>>> test Neutron Show API v2.0 ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0\"\n listapiversionv20response = requests.get(url, headers=headers)\n self.assertEqual(listapiversionv20response.status_code, 200)\n self.assertEqual(json.loads(listapiversionv20response.content)[\n \"resources\"][0][\"name\"], \"subnet\")\n self.assertEqual(json.loads(listapiversionv20response.content)[\n \"resources\"][1][\"name\"], \"network\")\n self.assertEqual(json.loads(listapiversionv20response.content)[\n \"resources\"][2][\"name\"], \"ports\")\n print(\" \")\n\n print('->>>>>>> test Neutron List Networks ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/networks\"\n listnetworksesponse1 = requests.get(url, headers=headers)\n self.assertEqual(listnetworksesponse1.status_code, 200)\n self.assertEqual(json.loads(listnetworksesponse1.content)[\n \"networks\"][0][\"status\"], \"ACTIVE\")\n listNetworksId = json.loads(listnetworksesponse1.content)[\n \"networks\"][0][\"id\"]\n listNetworksName = json.loads(listnetworksesponse1.content)[\n \"networks\"][0][\"name\"]\n listNetworksId2 = json.loads(listnetworksesponse1.content)[\n \"networks\"][1][\"id\"]\n print(\" \")\n\n print('->>>>>>> test Neutron List Non-Existing Networks ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = 
\"http://0.0.0.0:19696/v2.0/networks?name=non_existent_network_name\"\n        listnetworksesponse2 = requests.get(url, headers=headers)\n        self.assertEqual(listnetworksesponse2.status_code, 404)\n        print(\" \")\n\n        print('->>>>>>> test Neutron List Networks By Name ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest\n        url = \"http://0.0.0.0:19696/v2.0/networks?name=\" + listNetworksName\n        listnetworksesponse3 = requests.get(url, headers=headers)\n        self.assertEqual(listnetworksesponse3.status_code, 200)\n        self.assertEqual(json.loads(listnetworksesponse3.content)[\n            \"networks\"][0][\"name\"], listNetworksName)\n        print(\" \")\n\n        print('->>>>>>> test Neutron List Networks By Id ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest\n        url = \"http://0.0.0.0:19696/v2.0/networks?id=\" + listNetworksId\n        listnetworksesponse4 = requests.get(url, headers=headers)\n        self.assertEqual(listnetworksesponse4.status_code, 200)\n        self.assertEqual(json.loads(listnetworksesponse4.content)[\n            \"networks\"][0][\"id\"], listNetworksId)\n        print(\" \")\n\n        print('->>>>>>> test Neutron List Networks By Multiple Ids ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:19696/v2.0/networks?id=\" + listNetworksId + \"&id=\" + \\\n            listNetworksId2  # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest\n        listnetworksesponse5 = requests.get(url, headers=headers)\n        self.assertEqual(listnetworksesponse5.status_code, 200)\n        self.assertEqual(json.loads(listnetworksesponse5.content)[\n            \"networks\"][0][\"id\"], listNetworksId)\n        self.assertEqual(json.loads(listnetworksesponse5.content)[\n            \"networks\"][1][\"id\"], listNetworksId2)\n        print(\" \")\n\n        print('->>>>>>> test Neutron Show Network ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:19696/v2.0/networks/\" + listNetworksId\n        shownetworksesponse = requests.get(url, headers=headers)\n        self.assertEqual(shownetworksesponse.status_code, 200)\n        self.assertEqual(json.loads(shownetworksesponse.content)[\n            \"network\"][\"status\"], \"ACTIVE\")\n        print(\" \")\n\n        print('->>>>>>> test Neutron Show Non-Existent Network ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:19696/v2.0/networks/non_existent_network_id\"\n        shownetworksesponse2 = requests.get(url, headers=headers)\n        self.assertEqual(shownetworksesponse2.status_code, 404)\n        print(\" \")\n\n        print('->>>>>>> test Neutron Create Network ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:19696/v2.0/networks\"\n        createnetworkresponse = requests.post(\n            url, data='{\"network\": {\"name\": \"sample_network\",\"admin_state_up\": true}}', headers=headers)\n        self.assertEqual(createnetworkresponse.status_code, 201)\n        self.assertEqual(json.loads(createnetworkresponse.content)[\n            \"network\"][\"status\"], \"ACTIVE\")\n        print(\" \")\n\n        print('->>>>>>> test Neutron Create Network With Existing Name ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:19696/v2.0/networks\"\n        createnetworkresponsefailure = requests.post(\n            
url, data='{\"network\": {\"name\": \"sample_network\",\"admin_state_up\": true}}', headers=headers)\n self.assertEqual(createnetworkresponsefailure.status_code, 400)\n print(\" \")\n\n print('->>>>>>> test Neutron Update Network ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/networks/%s\" % (\n json.loads(createnetworkresponse.content)[\"network\"][\"id\"])\n updatenetworkresponse = requests.put(\n url, data='{\"network\": {\"status\": \"ACTIVE\", \"admin_state_up\":true, \"tenant_id\":\"abcd123\", \"name\": \"sample_network_new_name\", \"shared\":false}}', headers=headers)\n self.assertEqual(updatenetworkresponse.status_code, 200)\n self.assertEqual(json.loads(updatenetworkresponse.content)[\n \"network\"][\"name\"], \"sample_network_new_name\")\n self.assertEqual(json.loads(updatenetworkresponse.content)[\n \"network\"][\"tenant_id\"], \"abcd123\")\n print(\" \")\n\n print('->>>>>>> test Neutron Update Non-Existing Network ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/networks/non-existing-name123\"\n updatenetworkresponse = requests.put(\n url, data='{\"network\": {\"name\": \"sample_network_new_name\"}}', headers=headers)\n self.assertEqual(updatenetworkresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron List Subnets ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets\"\n listsubnetsresponse = requests.get(url, headers=headers)\n listSubnetName = json.loads(listsubnetsresponse.content)[\n \"subnets\"][0][\"name\"]\n listSubnetId = json.loads(listsubnetsresponse.content)[\n \"subnets\"][0][\"id\"]\n listSubnetId2 = json.loads(listsubnetsresponse.content)[\n \"subnets\"][1][\"id\"]\n self.assertEqual(listsubnetsresponse.status_code, 200)\n self.assertNotIn('None', listSubnetName)\n print(\" \")\n\n print('->>>>>>> test Neutron List Subnets By Name ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets?name=\" + listSubnetName\n listsubnetByNameresponse = requests.get(url, headers=headers)\n self.assertEqual(listsubnetByNameresponse.status_code, 200)\n self.assertNotIn('None', json.loads(\n listsubnetByNameresponse.content)[\"subnets\"][0][\"name\"])\n print(\" \")\n\n print('->>>>>>> test Neutron List Subnets By Id ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets?id=\" + listSubnetId\n listsubnetsbyidresponse = requests.get(url, headers=headers)\n self.assertEqual(listsubnetsbyidresponse.status_code, 200)\n self.assertNotIn(\"None\", json.loads(\n listsubnetsbyidresponse.content)[\"subnets\"][0][\"name\"])\n print(\" \")\n\n print('->>>>>>> test Neutron List Subnets By Multiple Id ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets?id=\" + \\\n listSubnetId + \"&id=\" + listSubnetId2\n listsubnetsbymultipleidsresponse = requests.get(url, headers=headers)\n self.assertEqual(listsubnetsbymultipleidsresponse.status_code, 200)\n self.assertNotIn(\"None\", json.loads(\n listsubnetsbymultipleidsresponse.content)[\"subnets\"][0][\"name\"])\n print(\" \")\n\n print('->>>>>>> test Neutron Show 
Subnet->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets/%s\" % (\n json.loads(listsubnetsresponse.content)[\"subnets\"][0][\"id\"])\n showsubnetsresponse = requests.get(url, headers=headers)\n self.assertEqual(showsubnetsresponse.status_code, 200)\n self.assertNotIn(\"None\", json.loads(\n showsubnetsresponse.content)[\"subnet\"][\"name\"])\n print(\" \")\n\n print('->>>>>>> test Neutron Show Non-Existing Subnet->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets/non-existing-id123\"\n showsubnetsresponse = requests.get(url, headers=headers)\n self.assertEqual(showsubnetsresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Create Subnet ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets\"\n createsubnetdata = '{\"subnet\": {\"name\": \"new_subnet\", \"network_id\": \"%s\",\"ip_version\": 4,\"cidr\": \"10.0.0.1/24\"} }' % (\n json.loads(createnetworkresponse.content)[\"network\"][\"id\"])\n createsubnetresponse = requests.post(\n url, data=createsubnetdata, headers=headers)\n self.assertEqual(createsubnetresponse.status_code, 201)\n self.assertEqual(json.loads(createsubnetresponse.content)[\n \"subnet\"][\"name\"], \"new_subnet\")\n print(\" \")\n\n print('->>>>>>> test Neutron Create Second Subnet ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets\"\n createsubnetdata = '{\"subnet\": {\"name\": \"new_subnet\", \"network_id\": \"%s\",\"ip_version\": 4,\"cidr\": \"10.0.0.1/24\"} }' % (\n json.loads(createnetworkresponse.content)[\"network\"][\"id\"])\n createsubnetfailureresponse = requests.post(\n url, data=createsubnetdata, headers=headers)\n self.assertEqual(createsubnetfailureresponse.status_code, 409)\n print(\" \")\n\n print('->>>>>>> test Neutron Update Subnet ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets/%s\" % (\n json.loads(createsubnetresponse.content)[\"subnet\"][\"id\"])\n updatesubnetdata = '{\"subnet\": {\"name\": \"new_subnet_new_name\", \"network_id\":\"some_id\", \"tenant_id\":\"new_tenant_id\", \"allocation_pools\":\"change_me\", \"gateway_ip\":\"192.168.1.120\", \"ip_version\":4, \"cidr\":\"10.0.0.1/24\", \"id\":\"some_new_id\", \"enable_dhcp\":true} }'\n updatesubnetresponse = requests.put(\n url, data=updatesubnetdata, headers=headers)\n self.assertEqual(updatesubnetresponse.status_code, 200)\n self.assertEqual(json.loads(updatesubnetresponse.content)[\n \"subnet\"][\"name\"], \"new_subnet_new_name\")\n print(\" \")\n\n print('->>>>>>> test Neutron Update Non-Existing Subnet ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/subnets/non-existing-subnet-12345\"\n updatenonexistingsubnetdata = '{\"subnet\": {\"name\": \"new_subnet_new_name\"} }'\n updatenonexistingsubnetresponse = requests.put(\n url, data=updatenonexistingsubnetdata, headers=headers)\n self.assertEqual(updatenonexistingsubnetresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron List Ports ->>>>>>>>>>>>>>>')\n 
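# [Editor's note - illustrative aside, not original test code] The CIDR \"10.0.0.1/24\"\n        # used in the subnet tests above has host bits set; the normalized network form can\n        # be computed with the standard library:\n        #   import ipaddress\n        #   ipaddress.ip_network(\"10.0.0.1/24\", strict=False)  # -> IPv4Network('10.0.0.0/24')\n        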
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports\"\n listportsesponse = requests.get(url, headers=headers)\n self.assertEqual(listportsesponse.status_code, 200)\n self.assertEqual(json.loads(listportsesponse.content)\n [\"ports\"][0][\"status\"], \"ACTIVE\")\n listPortsName = json.loads(listportsesponse.content)[\n \"ports\"][0][\"name\"]\n listPortsId1 = json.loads(listportsesponse.content)[\"ports\"][0][\"id\"]\n listPortsId2 = json.loads(listportsesponse.content)[\"ports\"][1][\"id\"]\n print(\" \")\n\n print('->>>>>>> test Neutron List Ports By Name ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports?name=\" + listPortsName\n listportsbynameesponse = requests.get(url, headers=headers)\n self.assertEqual(listportsbynameesponse.status_code, 200)\n self.assertEqual(json.loads(listportsbynameesponse.content)[\n \"ports\"][0][\"name\"], listPortsName)\n print(\" \")\n\n print('->>>>>>> test Neutron List Ports By Id ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports?id=\" + listPortsId1\n listportsbyidesponse = requests.get(url, headers=headers)\n self.assertEqual(listportsbyidesponse.status_code, 200)\n self.assertEqual(json.loads(listportsbyidesponse.content)[\n \"ports\"][0][\"id\"], listPortsId1)\n print(\" \")\n\n print('->>>>>>> test Neutron List Ports By Multiple Ids ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports?id=\" + \\\n listPortsId1 + \"&id=\" + listPortsId2\n listportsbymultipleidsesponse = requests.get(url, headers=headers)\n self.assertEqual(listportsbymultipleidsesponse.status_code, 200)\n self.assertEqual(json.loads(listportsbymultipleidsesponse.content)[\n \"ports\"][0][\"id\"], listPortsId1)\n print(\" \")\n\n print('->>>>>>> test Neutron List Ports By Device ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n server_url = \"http://0.0.0.0:18774/v2.1/id_bla/servers/firewall1:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest\"\n server_response = requests.get(server_url, headers=headers)\n firewall1_server = json.loads(server_response.content)[\"server\"]\n device_id = firewall1_server[\"id\"]\n url = \"http://0.0.0.0:19696/v2.0/ports?device_id=%s\" % device_id\n list_ports_by_device_id_response = requests.get(url, headers=headers)\n self.assertEqual(list_ports_by_device_id_response.status_code, 200)\n list_ports_by_device_id_ports = json.loads(list_ports_by_device_id_response.content)[\"ports\"]\n\n self.assertTrue(any(list_ports_by_device_id_ports), \"Expected at least one port for device\")\n for port in list_ports_by_device_id_ports:\n self.assertTrue(port[\"name\"].startswith(\"firewall1:\"), \"Expected all ports to belong to firewall1\")\n print(\" \")\n\n print('->>>>>>> test Neutron List Non-Existing Ports ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports?id=non-existing-port-id\"\n listportsbynonexistingidsesponse = requests.get(url, headers=headers)\n self.assertEqual(listportsbynonexistingidsesponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Show Port ->>>>>>>>>>>>>>>')\n 
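# [Editor's note - illustrative aside, not original test code] Instead of concatenating\n        # query strings as above, requests can build the repeated ?id=...&id=... form from a\n        # params dict, repeating the key for every value in a list:\n        #   requests.get(\"http://0.0.0.0:19696/v2.0/ports\",\n        #                params={\"id\": [listPortsId1, listPortsId2]}, headers=headers)\n        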
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports/%s\" % (\n json.loads(listportsesponse.content)[\"ports\"][0][\"id\"])\n showportresponse = requests.get(url, headers=headers)\n self.assertEqual(showportresponse.status_code, 200)\n self.assertEqual(json.loads(showportresponse.content)\n [\"port\"][\"status\"], \"ACTIVE\")\n print(\" \")\n\n print('->>>>>>> test Neutron Show Non-Existing Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports/non-existing-portid123\"\n shownonexistingportresponse = requests.get(url, headers=headers)\n self.assertEqual(shownonexistingportresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Create Port In Non-Existing Network ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports\"\n createnonexistingportdata = '{\"port\": {\"name\": \"new_port\", \"network_id\": \"non-existing-id\"} }'\n createnonexistingnetworkportresponse = requests.post(\n url, data=createnonexistingportdata, headers=headers)\n self.assertEqual(createnonexistingnetworkportresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Create Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports\"\n createportdata = '{\"port\": {\"name\": \"new_port\", \"network_id\": \"%s\", \"admin_state_up\":true, \"device_id\":\"device_id123\", \"device_owner\":\"device_owner123\", \"fixed_ips\":\"change_me\",\"id\":\"new_id1234\", \"mac_address\":\"12:34:56:78:90\", \"status\":\"change_me\", \"tenant_id\":\"tenant_id123\"} }' % (json.loads(createnetworkresponse.content)[\n \"network\"][\"id\"])\n createportresponse = requests.post(\n url, data=createportdata, headers=headers)\n self.assertEqual(createportresponse.status_code, 201)\n print(createportresponse.content)\n createport = json.loads(createportresponse.content)[\"port\"]\n self.assertEqual(createport[\"name\"], \"new_port\")\n print(\" \")\n\n print('->>>>>>> test Neutron Create Port With Existing Name ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports\"\n network_id = json.loads(createnetworkresponse.content)[\"network\"][\"id\"]\n createportwithexistingnamedata = '{\"port\": {\"name\": \"duplicate_port_name\", \"network_id\": \"%s\"} }' % network_id\n createportwithexistingnameresponse1 = requests.post(\n url, data=createportwithexistingnamedata, headers=headers)\n createportwithexistingnameresponse2 = requests.post(\n url, data=createportwithexistingnamedata, headers=headers)\n createportwithexistingname1 = json.loads(createportwithexistingnameresponse1.content)[\"port\"]\n createportwithexistingname2 = json.loads(createportwithexistingnameresponse2.content)[\"port\"]\n self.assertEqual(createportwithexistingnameresponse1.status_code, 201)\n self.assertEqual(createportwithexistingnameresponse2.status_code, 201)\n self.assertEqual(createportwithexistingname1[\"name\"], \"duplicate_port_name\")\n self.assertEqual(createportwithexistingname2[\"name\"], \"duplicate_port_name\")\n self.assertNotEqual(createportwithexistingname1[\"id\"], createportwithexistingname2[\"id\"], \"Duplicate port should have different id\")\n print(\" \")\n\n print('->>>>>>> test 
Neutron Create Port Without Name ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports\"\n createportdatawithoutname = '{\"port\": {\"network_id\": \"%s\"} }' % (\n json.loads(createnetworkresponse.content)[\"network\"][\"id\"])\n createportwithoutnameresponse = requests.post(\n url, data=createportdatawithoutname, headers=headers)\n self.assertEqual(createportwithoutnameresponse.status_code, 201)\n self.assertIn(\"port:cp\", json.loads(\n createportwithoutnameresponse.content)[\"port\"][\"name\"])\n print(\" \")\n\n print('->>>>>>> test Neutron Update Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(json.loads(createportresponse.content)[\"port\"][\"name\"])\n url = \"http://0.0.0.0:19696/v2.0/ports/%s\" % (\n json.loads(createportresponse.content)[\"port\"][\"name\"])\n updateportdata = '{\"port\": {\"name\": \"new_port_new_name\", \"admin_state_up\":true, \"device_id\":\"device_id123\", \"device_owner\":\"device_owner123\", \"fixed_ips\":\"change_me\",\"mac_address\":\"12:34:56:78:90\", \"status\":\"change_me\", \"tenant_id\":\"tenant_id123\", \"network_id\":\"network_id123\"} }'\n updateportresponse = requests.put(\n url, data=updateportdata, headers=headers)\n self.assertEqual(updateportresponse.status_code, 200)\n self.assertEqual(json.loads(updateportresponse.content)[\n \"port\"][\"name\"], \"new_port_new_name\")\n print(\" \")\n\n print('->>>>>>> test Neutron Update Non-Existing Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/ports/non-existing-port-ip\"\n updatenonexistingportdata = '{\"port\": {\"name\": \"new_port_new_name\"} }'\n updatenonexistingportresponse = requests.put(\n url, data=updatenonexistingportdata, headers=headers)\n self.assertEqual(updatenonexistingportresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Delete Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n righturl = \"http://0.0.0.0:19696/v2.0/ports/%s\" % (\n json.loads(createportresponse.content)[\"port\"][\"id\"])\n deleterightportresponse = requests.delete(righturl, headers=headers)\n self.assertEqual(deleterightportresponse.status_code, 204)\n print(\" \")\n\n print('->>>>>>> test Neutron Delete Non-Existing Port ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n wrongurl = \"http://0.0.0.0:19696/v2.0/ports/unknownid\"\n deletewrongportresponse = requests.delete(wrongurl, headers=headers)\n self.assertEqual(deletewrongportresponse.status_code, 404)\n print(\" \")\n\n print('->>>>>>> test Neutron Delete Subnet ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n wrongurl = \"http://0.0.0.0:19696/v2.0/subnets/unknownid\"\n righturl = \"http://0.0.0.0:19696/v2.0/subnets/%s\" % (\n json.loads(updatesubnetresponse.content)[\"subnet\"][\"id\"])\n deletewrongsubnetresponse = requests.delete(wrongurl, headers=headers)\n deleterightsubnetresponse = requests.delete(righturl, headers=headers)\n self.assertEqual(deletewrongsubnetresponse.status_code, 404)\n self.assertEqual(deleterightsubnetresponse.status_code, 204)\n print(\" \")\n\n print('->>>>>>> test Neutron Delete Network ->>>>>>>>>>>>>>>')\n 
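# [Editor's note - hypothetical refactoring sketch, not original code] The\n        # delete-then-404 checks above follow one pattern and could be parameterized:\n        #   for resource in (\"ports\", \"subnets\"):\n        #       resp = requests.delete(\"http://0.0.0.0:19696/v2.0/%s/unknownid\" % resource,\n        #                              headers=headers)\n        #       self.assertEqual(resp.status_code, 404)\n        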
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        righturl = \"http://0.0.0.0:19696/v2.0/networks/%s\" % (\n            json.loads(createnetworkresponse.content)[\"network\"][\"id\"])\n        deleterightnetworkresponse = requests.delete(righturl, headers=headers)\n        self.assertEqual(deleterightnetworkresponse.status_code, 204)\n        print(\" \")\n\n        print('->>>>>>> test Neutron Delete Non-Existing Network ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        wrongurl = \"http://0.0.0.0:19696/v2.0/networks/unknownid\"\n        deletewrongnetworkresponse = requests.delete(wrongurl, headers=headers)\n        self.assertEqual(deletewrongnetworkresponse.status_code, 404)\n        print(\" \")\n\n    def testKeystoneDummy(self):\n        print('->>>>>>> test Keystone Dummy Class->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        print(\" \")\n\n        headers = {'Content-type': 'application/json'}\n        test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(\n            __file__), \"templates/test_heatapi_keystone_get_token.yml\")).read()\n\n        print('->>>>>>> test Keystone List Versions ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:15000/\"\n        listapiversionstackresponse = requests.get(url, headers=headers)\n        self.assertEqual(listapiversionstackresponse.status_code, 200)\n        self.assertEqual(json.loads(listapiversionstackresponse.content)[\n            \"versions\"][\"values\"][0][\"id\"], \"v2.0\")\n        print(\" \")\n\n        print('->>>>>>> test Keystone Show ApiV2 ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:15000/v2.0\"\n        showapiversionstackresponse = requests.get(url, headers=headers)\n        self.assertEqual(showapiversionstackresponse.status_code, 200)\n        self.assertEqual(json.loads(showapiversionstackresponse.content)[\n            \"version\"][\"id\"], \"v2.0\")\n        print(\" \")\n\n        print('->>>>>>> test Keystone Get Token ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:15000/v2.0/tokens\"\n        gettokenstackresponse = requests.post(url, data=json.dumps(\n            yaml.load(test_heatapi_keystone_get_token)), headers=headers)\n        self.assertEqual(gettokenstackresponse.status_code, 200)\n        self.assertEqual(json.loads(gettokenstackresponse.content)[\n            \"access\"][\"user\"][\"name\"], \"tenantName\")\n        print(\" \")\n\n    def testHeatDummy(self):\n        print('->>>>>>> test Heat Dummy Class->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        print(\" \")\n\n        headers = {'Content-type': 'application/json'}\n        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(\n            __file__), \"templates/test_heatapi_template_create_stack.yml\")).read()\n        test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(\n            __file__), \"templates/test_heatapi_template_update_stack.yml\")).read()\n\n        print('->>>>>>> test Heat List API Versions Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/\"\n        listapiversionstackresponse = requests.get(url, headers=headers)\n        self.assertEqual(listapiversionstackresponse.status_code, 200)\n        self.assertEqual(json.loads(listapiversionstackresponse.content)[\n            \"versions\"][0][\"id\"], \"v1.0\")\n        print(\" \")\n\n        print('->>>>>>> test Create Stack 
->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n        createstackresponse = requests.post(url, data=json.dumps(\n            yaml.load(test_heatapi_template_create_stack)), headers=headers)\n        self.assertEqual(createstackresponse.status_code, 201)\n        self.assertNotEqual(json.loads(\n            createstackresponse.content)[\"stack\"][\"id\"], \"\")\n        print(\" \")\n\n        print('->>>>>>> test Create Stack With Existing Name ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n        createstackwithexistingnameresponse = requests.post(\n            url, data='{\"stack_name\" : \"s1\"}', headers=headers)\n        self.assertEqual(createstackwithexistingnameresponse.status_code, 409)\n        print(\" \")\n\n        print('->>>>>>> test Create Stack With Unsupported Version ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n        createstackwithunsupportedversionresponse = requests.post(\n            url, data='{\"stack_name\" : \"stackname123\", \"template\" : {\"heat_template_version\": \"2015-04-29\"}}', headers=headers)\n        self.assertEqual(\n            createstackwithunsupportedversionresponse.status_code, 400)\n        print(\" \")\n\n        print('->>>>>>> test List Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123/stacks\"\n        liststackresponse = requests.get(url, headers=headers)\n        self.assertEqual(liststackresponse.status_code, 200)\n        self.assertEqual(json.loads(liststackresponse.content)[\n            \"stacks\"][0][\"stack_status\"], \"CREATE_COMPLETE\")\n        print(\" \")\n\n        print('->>>>>>> test Show Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s\" % json.loads(\n            createstackresponse.content)['stack']['id']\n        liststackdetailsresponse = requests.get(url, headers=headers)\n        self.assertEqual(liststackdetailsresponse.status_code, 200)\n        self.assertEqual(json.loads(liststackdetailsresponse.content)[\n            \"stack\"][\"stack_status\"], \"CREATE_COMPLETE\")\n        print(\" \")\n\n        print('->>>>>>> test Show Non-Existing Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/non_existing_id123\"\n        listnonexistingstackdetailsresponse = requests.get(\n            url, headers=headers)\n        self.assertEqual(listnonexistingstackdetailsresponse.status_code, 404)\n        print(\" \")\n\n        print('->>>>>>> test Update Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s\" % json.loads(\n            createstackresponse.content)['stack']['id']\n        updatestackresponse = requests.put(url, data=json.dumps(yaml.load(test_heatapi_template_update_stack)),\n                                           headers=headers)\n        self.assertEqual(updatestackresponse.status_code, 202)\n        liststackdetailsresponse = requests.get(url, headers=headers)\n        self.assertEqual(json.loads(liststackdetailsresponse.content)[\n            \"stack\"][\"stack_status\"], \"UPDATE_COMPLETE\")\n        print(\" \")\n\n        print('->>>>>>> test Update Non-Existing Stack ->>>>>>>>>>>>>>>')\n        
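# [Editor's note - hedged aside, not original code] yaml.load() without an explicit\n        # Loader, as used in these Heat tests, is deprecated since PyYAML 5.1 and unsafe on\n        # untrusted input; an equivalent safe call for these templates would be:\n        #   requests.put(url, data=json.dumps(yaml.safe_load(test_heatapi_template_update_stack)),\n        #                headers=headers)\n        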
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/non_existing_id_1234\"\n        updatenonexistingstackresponse = requests.put(\n            url, data={\"non\": \"sense\"}, headers=headers)\n        self.assertEqual(updatenonexistingstackresponse.status_code, 404)\n        print(\" \")\n\n        print('->>>>>>> test Delete Stack ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = \"http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s\" % \\\n            json.loads(createstackresponse.content)['stack']['id']\n        deletestackdetailsresponse = requests.delete(url, headers=headers)\n        self.assertEqual(deletestackdetailsresponse.status_code, 204)\n        print(\" \")\n\n    def testNeutronSFC(self):\n        \"\"\"\n        Tests the Neutron Service Function Chaining implementation. As some functions build on others, a\n        complete environment is created here:\n\n        Ports: p1 - p6\n        Port Pairs: pp1(p1, p2), pp2(p3, p4)\n        Port Pair Groups: ppg1(pp1), ppg2(pp2)\n        Flow Classifiers: fc1\n        Port Chain: pc1(ppg1, fc1)\n\n        Additional objects (pp3, ppg3, fc2, pc2) are created and deleted again\n        while their CRUD operations are tested.\n        \"\"\"\n\n        headers = {'Content-type': 'application/json'}\n\n        print('->>>>>>> Create ports p1 - p6 ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        # Get network id\n        network_resp = requests.get(\n            \"http://0.0.0.0:19696/v2.0/networks?name=default\", headers=headers)\n        self.assertEqual(network_resp.status_code, 200)\n        network_id = json.loads(network_resp.content)[\"networks\"][0][\"id\"]\n\n        port_responses = list(map(lambda name: requests.post(\"http://0.0.0.0:19696/v2.0/ports\",\n                                                             data='{\"port\": {\"name\": \"%s\", \"network_id\": \"%s\"}}' %\n                                                             (name, network_id),\n                                                             headers=headers),\n                                  [\"p1\", \"p2\", \"p3\", \"p4\", \"p5\", \"p6\"]))\n\n        for port in port_responses:\n            self.assertEqual(port.status_code, 201)\n\n        port_ids = list(map(lambda response: json.loads(response.content)[\"port\"][\"id\"], port_responses))\n\n        listflavorsresponse = requests.get(\"http://0.0.0.0:18774/v2.1/id_bla/flavors\", headers=headers)\n        self.assertEqual(listflavorsresponse.status_code, 200)\n        flavors = json.loads(listflavorsresponse.content)[\"flavors\"]\n        m1_tiny_flavor = list(filter(lambda flavor: flavor[\"name\"] == \"m1.tiny\", flavors))[0]\n\n        listimagesdetailsresponse = requests.get(\"http://0.0.0.0:18774/v2.1/id_bla/images/detail\", headers=headers)\n        self.assertEqual(listimagesdetailsresponse.status_code, 200)\n        images = json.loads(listimagesdetailsresponse.content)[\"images\"]\n        ubuntu_image = list(filter(lambda image: image[\"name\"] == \"ubuntu:trusty\", images))[0]\n\n        server_url = \"http://0.0.0.0:18774/v2.1/id_bla/servers\"\n        server_template = \\\n            '{\"server\": {' \\\n            '\"name\": \"%s\",' \\\n            '\"networks\": [{\"port\": \"%s\"}, {\"port\": \"%s\"}],' \\\n            '\"flavorRef\": \"%s\",' \\\n            '\"imageRef\": \"%s\"' \\\n            '}}'\n        server_responses = map(lambda spec: (\n            requests.post(server_url,\n                          data=server_template % (\n                              spec[\"name\"],\n                              spec[\"ingress\"],\n                              spec[\"egress\"],\n                              m1_tiny_flavor[\"id\"],\n                              ubuntu_image[\"id\"]\n                          ),\n                          headers=headers)\n        ), [\n            {\"name\": \"s1\", \"ingress\": \"p1\", \"egress\": \"p2\"},\n            {\"name\": \"s2\", \"ingress\": \"p3\", \"egress\": \"p4\"},\n            {\"name\": \"s3\", \"ingress\": \"p5\", \"egress\": \"p6\"},\n        ])\n        for response in server_responses:\n            self.assertEqual(response.status_code, 200)\n\n        print('->>>>>>> test Neutron SFC Port Pair Create ->>>>>>>>>>>>>>>')\n        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n        url = 
\"http://0.0.0.0:19696/v2.0/sfc/port_pairs\"\n pp1_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp1\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[0], port_ids[1]), headers=headers)\n self.assertEqual(pp1_resp.status_code, 201)\n pp2_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp2\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[2], port_ids[3]), headers=headers)\n self.assertEqual(pp2_resp.status_code, 201)\n pp3_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp3\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[4], port_ids[5]), headers=headers)\n self.assertEqual(pp3_resp.status_code, 201)\n\n pp1_id = json.loads(pp1_resp.content)[\"port_pair\"][\"id\"]\n pp2_id = json.loads(pp2_resp.content)[\"port_pair\"][\"id\"]\n pp3_id = json.loads(pp3_resp.content)[\"port_pair\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Pair Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp3_id\n pp3_update_resp = requests.put(\n url, data='{\"port_pair\": {\"description\": \"port_pair_update\"}}', headers=headers)\n self.assertEqual(pp3_update_resp.status_code, 200)\n self.assertEqual(json.loads(pp3_update_resp.content)[\n \"port_pair\"][\"description\"], \"port_pair_update\")\n\n print('->>>>>>> test Neutron SFC Port Pair Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp3_id\n pp3_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(pp3_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Pair List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs\"\n pp_list_resp = requests.get(url, headers=headers)\n self.assertEqual(pp_list_resp.status_code, 200)\n pp_list = json.loads(pp_list_resp.content)[\"port_pairs\"]\n # only pp1 and pp2 should be left\n self.assertEqual(len(pp_list), 2)\n\n print('->>>>>>> test Neutron SFC Port Pair List filtered by id ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs?id=%s\" % pp_list[0][\"id\"]\n pp_list_filtered_by_id_resp = requests.get(url, headers=headers)\n pp_list_filtered_by_id = json.loads(pp_list_filtered_by_id_resp.content)[\"port_pairs\"]\n self.assertEqual(pp_list_filtered_by_id_resp.status_code, 200)\n self.assertEqual(len(pp_list_filtered_by_id), 1)\n self.assertEqual(pp_list_filtered_by_id[0], pp_list[0])\n\n print('->>>>>>> test Neutron SFC Port Pair Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp2_id\n pp2_show_resp = requests.get(url, headers=headers)\n self.assertEqual(pp2_show_resp.status_code, 200)\n self.assertEqual(json.loads(pp2_show_resp.content)\n [\"port_pair\"][\"name\"], \"pp2\")\n\n print('->>>>>>> test Neutron SFC Port Pair Group Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups\"\n ppg1_resp = requests.post(\n url, data='{\"port_pair_group\": {\"name\": \"ppg1\", \"port_pairs\": [\"%s\"]}}' % (pp1_id), headers=headers)\n self.assertEqual(ppg1_resp.status_code, 201)\n ppg2_resp = requests.post(\n url, 
data='{\"port_pair_group\": {\"name\": \"ppg2\", \"port_pairs\": [\"%s\"]}}' % (pp2_id), headers=headers)\n self.assertEqual(ppg2_resp.status_code, 201)\n ppg3_resp = requests.post(\n url, data='{\"port_pair_group\": {\"name\": \"ppg3\", \"port_pairs\": [\"%s\"]}}' % (pp2_id), headers=headers)\n self.assertEqual(ppg3_resp.status_code, 201)\n\n ppg1_id = json.loads(ppg1_resp.content)[\"port_pair_group\"][\"id\"]\n ppg2_id = json.loads(ppg2_resp.content)[\"port_pair_group\"][\"id\"]\n ppg3_id = json.loads(ppg3_resp.content)[\"port_pair_group\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Pair Group Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg3_id\n ppg3_update_resp = requests.put(\n url, data='{\"port_pair_group\": {\"description\": \"port_pair_group_update\"}}', headers=headers)\n self.assertEqual(ppg3_update_resp.status_code, 200)\n self.assertEqual(json.loads(ppg3_update_resp.content)[\n \"port_pair_group\"][\"description\"], \"port_pair_group_update\")\n\n print('->>>>>>> test Neutron SFC Port Pair Group Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg3_id\n ppg3_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(ppg3_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Pair Group List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups\"\n ppg_list_resp = requests.get(url, headers=headers)\n self.assertEqual(ppg_list_resp.status_code, 200)\n # only ppg1 and ppg2 should be left\n self.assertEqual(\n len(json.loads(ppg_list_resp.content)[\"port_pair_groups\"]), 2)\n\n print('->>>>>>> test Neutron SFC Port Pair Group Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg2_id\n ppg2_show_resp = requests.get(url, headers=headers)\n self.assertEqual(ppg2_show_resp.status_code, 200)\n self.assertEqual(json.loads(ppg2_show_resp.content)[\n \"port_pair_group\"][\"name\"], \"ppg2\")\n\n print('->>>>>>> test Neutron SFC Flow Classifier Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers\"\n fc1_resp = requests.post(\n url, data='{\"flow_classifier\": {\"name\": \"fc1\", \"logical_source_port\": \"p1\", \"source_port_range_min\": 22, \"source_port_range_max\": 4000}}', headers=headers)\n self.assertEqual(fc1_resp.status_code, 201)\n fc2_resp = requests.post(\n url, data='{\"flow_classifier\": {\"name\": \"fc2\", \"logical_source_port\": \"p2\", \"source_port_range_min\": 22, \"source_port_range_max\": 4000}}', headers=headers)\n self.assertEqual(fc2_resp.status_code, 201)\n\n fc1_id = json.loads(fc1_resp.content)[\"flow_classifier\"][\"id\"]\n fc2_id = json.loads(fc2_resp.content)[\"flow_classifier\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Flow Classifier Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc2_id\n fc2_update_resp = requests.put(\n url, data='{\"flow_classifier\": {\"description\": \"flow_classifier_update\"}}', headers=headers)\n 
self.assertEqual(fc2_update_resp.status_code, 200)\n self.assertEqual(json.loads(fc2_update_resp.content)[\n \"flow_classifier\"][\"description\"], \"flow_classifier_update\")\n\n print('->>>>>>> test Neutron SFC Flow Classifier Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc2_id\n fc2_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(fc2_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Flow Classifier List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers\"\n fc_list_resp = requests.get(url, headers=headers)\n self.assertEqual(fc_list_resp.status_code, 200)\n self.assertEqual(len(json.loads(fc_list_resp.content)\n [\"flow_classifiers\"]), 1) # only fc1\n\n print('->>>>>>> test Neutron SFC Flow Classifier Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc1_id\n fc1_show_resp = requests.get(url, headers=headers)\n self.assertEqual(fc1_show_resp.status_code, 200)\n self.assertEqual(json.loads(fc1_show_resp.content)[\n \"flow_classifier\"][\"name\"], \"fc1\")\n\n print('->>>>>>> test Neutron SFC Port Chain Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains\"\n pc1_resp = requests.post(url, data='{\"port_chain\": {\"name\": \"pc1\", \"port_pair_groups\": [\"%s\"], \"flow_classifiers\": [\"%s\"]}}' % (\n ppg1_id, fc1_id), headers=headers)\n self.assertEqual(pc1_resp.status_code, 201)\n pc2_resp = requests.post(url, data='{\"port_chain\": {\"name\": \"pc2\", \"port_pair_groups\": [\"%s\"], \"flow_classifiers\": [\"%s\"]}}' % (\n ppg1_id, fc1_id), headers=headers)\n self.assertEqual(pc2_resp.status_code, 201)\n\n pc1_id = json.loads(pc1_resp.content)[\"port_chain\"][\"id\"]\n pc2_id = json.loads(pc2_resp.content)[\"port_chain\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Chain Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % pc2_id\n pc2_update_resp = requests.put(\n url, data='{\"port_chain\": {\"description\": \"port_chain_update\"}}', headers=headers)\n self.assertEqual(pc2_update_resp.status_code, 200)\n self.assertEqual(json.loads(pc2_update_resp.content)[\n \"port_chain\"][\"description\"], \"port_chain_update\")\n\n print('->>>>>>> test Neutron SFC Port Chain Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % pc2_id\n pc2_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(pc2_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Chain List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains\"\n pc_list_resp = requests.get(url, headers=headers)\n self.assertEqual(pc_list_resp.status_code, 200)\n self.assertEqual(len(json.loads(pc_list_resp.content)\n [\"port_chains\"]), 1) # only pc1\n\n print('->>>>>>> test Neutron SFC Port Chain Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % 
pc1_id\n        pc1_show_resp = requests.get(url, headers=headers)\n        self.assertEqual(pc1_show_resp.status_code, 200)\n        self.assertEqual(json.loads(pc1_show_resp.content)\n                         [\"port_chain\"][\"name\"], \"pc1\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"containernet/vim-emu","sub_path":"src/emuvim/test/unittests/test_openstack.py","file_name":"test_openstack.py","file_ext":"py","file_size_in_byte":64298,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"1518821488","text":"tentativa = 1\n\nwhile tentativa <= 3:\n    senha = input(\"Enter the password: \")\n    if senha == \"senha123\":\n        print(\"Access granted\")\n        break\n    else:\n        print(\"Incorrect password, try again.\")\n        tentativa += 1  # count the failed attempt; without this the loop never terminates\nelse:\n    print(\"You have exceeded the maximum number of attempts.\")","repo_name":"ryanzada7/loop","sub_path":"loop4.py","file_name":"loop4.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4982887681","text":"import numpy as np\r\nimport os\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport csv\r\nfrom visualize import *\r\nimport dsb2018_utils as du\r\nimport imageio\r\n\r\nbase_dir = 'D:/Kaggle/Data_Science_Bowl_2018' if os.name == 'nt' else os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n\r\n\r\ndef visualise_annotations(source_dirs, n = 2, target_colour = None):\r\n\r\n    # Extract filenames for images/masks\r\n    dirs = [os.path.join(dir, f) for dir in source_dirs for f in os.listdir(dir)]\r\n    files = [os.path.join(f, 'images', ''.join((os.path.split(f)[-1], '.png'))) for f in dirs]\r\n    maskfiles = [[os.path.join(f, 'masks', m) for m in os.listdir(os.path.join(f, 'masks'))] if os.path.exists(os.path.join(f, 'masks')) else None for f in dirs]\r\n\r\n    # Reduce to a target colour if requested\r\n    if target_colour is not None:\r\n        from dataset import get_ids\r\n        colour_id, _ = get_ids(files)\r\n        valid_idx = np.argwhere(du.ismember(colour_id, np.array(target_colour))).reshape(-1,)\r\n        files = [files[idx] for idx in valid_idx]\r\n        maskfiles = [maskfiles[idx] for idx in valid_idx]\r\n\r\n    img_list = []\r\n    counter = 0\r\n    for f, m in zip(files, maskfiles):\r\n\r\n        img = load_img(f)\r\n        masks = np.stack([imageio.imread(path) for path in m], axis = -1)\r\n        labels = du.maskrcnn_mask_to_labels(masks)\r\n        # Collect the pair first so no image is dropped at batch boundaries\r\n        img_list.extend([img, image_with_labels(img, labels)])\r\n        counter += 1\r\n\r\n        if counter >= n:\r\n            # Display\r\n            plot_multiple_images(img_list, nrows = n, ncols = 2)\r\n            # Reset\r\n            counter = 0\r\n            img_list = []\r\n\r\n    return\r\n\r\n\r\ndef visualise_mosaic_annotations(source_dirs, n = 2):\r\n\r\n    # Extract filenames for images/masks\r\n    files = [os.path.join(_dir, f) for _dir in source_dirs for f in os.listdir(_dir) if os.path.splitext(f)[-1] != '.npz']\r\n    maskfiles = [os.path.join(_dir, f) for _dir in source_dirs for f in os.listdir(_dir) if os.path.splitext(f)[-1] == '.npz']\r\n\r\n    img_list = []\r\n    counter = 0\r\n    for f, m in zip(files, maskfiles):\r\n\r\n        img = load_img(f)\r\n        masks = np.load(m)\r\n        labels = du.maskrcnn_mask_to_labels(masks['mask_mosaic'])\r\n        # Collect the pair first so no image is dropped at batch boundaries\r\n        img_list.extend([img, image_with_labels(img, labels)])\r\n        counter += 1\r\n\r\n        if counter >= n:\r\n            # Display\r\n            plot_multiple_images(img_list, nrows = n, ncols = 2)\r\n            # Reset\r\n            counter = 0\r\n            img_list = []\r\n\r\n    return\r\n\r\n\r\ndef main():\r\n    #visualise_annotations([os.path.join(base_dir, 'train')])\r\n    
visualise_mosaic_annotations([os.path.join(base_dir, 'train_mosaics')])\r\n    \r\nif __name__ == '__main__':\r\n    main()","repo_name":"bruceyang2012/Kaggle_Shared","sub_path":"Mask_RCNN_DSB2018/DSB2018/visualise_annotations.py","file_name":"visualise_annotations.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69850910614","text":"import os\nimport re\nimport shutil\nfrom datetime import date\n\nfrom docx import Document\nfrom docx.shared import Inches\n\nimport informations\n\nDOCS_PATH = \"static/docs/\"\nDOCS_TEMPLATES_PATH = DOCS_PATH + \"doc_templates/\"\nDOCS_UPLOAD_PATH = \"static/uploads/docs_completed/\"\nFINAL_ZIP_NAME = \"Documente_completate\"\nFINAL_ZIP_NAME_EXT = FINAL_ZIP_NAME + \".zip\"\n\n\ndef fetch_titles_and_number():\n    list_docs = [os.path.splitext(filename)[0] for filename in os.listdir(DOCS_PATH) if\n                 os.path.isfile(os.path.join(DOCS_PATH, filename))]\n    # print(list_docs)\n    return list_docs, len(list_docs)\n\n\ndef docx_replace_regex(doc_obj, regex, replace):\n    for p in doc_obj.paragraphs:\n        if regex.search(p.text):\n            inline = p.runs\n            for i in range(len(inline)):\n                if regex.search(inline[i].text):\n                    text = regex.sub(replace, inline[i].text)\n                    inline[i].text = text\n\n\ndef docx_replace_text_with_image(doc_obj, regex):\n    for p in doc_obj.paragraphs:\n        if regex.search(p.text):\n            inline = p.runs\n            for i in range(len(inline)):\n                if regex.search(inline[i].text):\n                    text = regex.sub(\"\", inline[i].text)\n                    inline[i].text = text\n                    inline[i].add_picture(\"static/uploads/\" + informations.signature_file, width=Inches(1), height=Inches(0.5))\n\n\ndef get_current_date():\n    return date.today().strftime(\"%d.%m.%Y\")\n\n\ndef replace_with_infos(doc_obj, infos):\n    id_char = \"@\"\n    infos[\"domiciliu\"] = infos[\"domiciliu\"].replace(\"\\n\", \" \")\n    for tipInfo in infos.keys():\n        docx_replace_regex(doc_obj, re.compile(id_char+tipInfo), infos[tipInfo])\n    docx_replace_regex(doc_obj, re.compile(id_char+\"dataCurenta\"), get_current_date())\n    docx_replace_text_with_image(doc_obj, re.compile(id_char + \"semnatura\"))\n\n\ndef manage_doc_and_save(doc_name, infos):\n    curr_doc = Document(DOCS_TEMPLATES_PATH + doc_name)\n    replace_with_infos(curr_doc, infos)\n    curr_doc.save(DOCS_UPLOAD_PATH + doc_name)\n\n\ndef make_zip():\n    shutil.make_archive(FINAL_ZIP_NAME, 'zip', DOCS_UPLOAD_PATH)\n    shutil.move(FINAL_ZIP_NAME_EXT, DOCS_UPLOAD_PATH + FINAL_ZIP_NAME_EXT)\n\n\ndef clean_docs_completed():\n    for f in os.listdir(DOCS_UPLOAD_PATH):\n        os.remove(os.path.join(DOCS_UPLOAD_PATH, f))\n\n\ndef manage_documents(docs):\n    for doc_title in docs.values():\n        manage_doc_and_save(doc_title + \".docx\", informations.person_informations)\n\n    make_zip()\n","repo_name":"andrei8888/DocFillerWeb","sub_path":"fetch_docs.py","file_name":"fetch_docs.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19257752110","text":"import json\n\ndata = [\n    {\n        'name': 'apple',\n        'description': 'some apple',\n        'cost': 123\n    },\n    {\n        'name': 'apple',\n        'description': 'some apple',\n        'cost': 123\n    },\n    {\n        'name': 'apple',\n        'description': 'some apple',\n        'cost': 123\n    }\n]\n\n# Simple dump\n# with open('json/data.json', 'w') as file:\n#     json.dump(data, file)\n\n# Dump with indentation\n# with open('json/data.json', 'w') as file:\n#     json.dump(data, file, indent=2)\n\n# Sorting by keys\n# with 
open('json/data.json', 'w') as file:\n#     json.dump(data, file, sort_keys=True, indent=2)\n\n# Reading\n# with open('json/data.json') as file:\n#     print(json.load(file))\n\n# Converting to a string\ndt = json.dumps(data)\n\n# Encoding to bytes\ndt.encode()\n","repo_name":"Spoukster/Python_2","sub_path":"json/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74674536852","text":"# You are also given three integers sr, sc, and newColor. \n# You should perform a flood fill on the image starting from the pixel image[sr][sc].\n\n# To perform a flood fill, consider the starting pixel, \n# plus any pixels connected 4-directionally to the starting pixel of the same color \n# as the starting pixel, plus any pixels connected 4-directionally to those pixels \n# (also with the same color), and so on. Replace the color of all of the \n# aforementioned pixels with newColor.\n\nfrom typing import List\n\nclass Solution:\n    def floodFill(self, nums: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:\n        # nothing to do if the starting pixel already has the target color\n        if nums[sr][sc] == newColor:\n            return nums\n        \n        self.dfs(nums, sr, sc, nums[sr][sc], newColor)\n        return nums\n    \n    def dfs(self, nums, x, y, color, newColor):\n        # stop at the grid borders and at pixels of a different color\n        if x < 0 or y < 0 or x >= len(nums) or y >= len(nums[0]) or nums[x][y] != color:\n            return\n        \n        nums[x][y] = newColor\n        self.dfs(nums, x+1, y, color, newColor)\n        self.dfs(nums, x, y+1, color, newColor) \n        self.dfs(nums, x-1, y, color, newColor) \n        self.dfs(nums, x, y-1, color, newColor) ","repo_name":"Audarya07/Leetcode","sub_path":"FloodFill.py","file_name":"FloodFill.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38848545980","text":"# advent of code day 1\n\n# import helper function\nimport helper\n\n# function for building list of calories\ndef get_calories_list():\n    # read data from txt file\n    data = helper.read_data('../inputs/day1.txt')\n\n    # split string in list by new lines\n    food_list = data.
splitlines()\n\n # set initial values\n elf_calories = 0 # calorie count for current elf\n calories_list = [] # the calories_list to be returned\n\n # loop through items in food_list\n for calories in food_list:\n if calories == '':\n # new elf -> append calories to list and reset to zero\n calories_list.append(elf_calories)\n elf_calories = 0\n else:\n # increase elf_calories by calories for food item\n elf_calories += int(calories)\n\n # return calories_list\n return calories_list\n\n# generate list of calories per elf\ncalories_list = get_calories_list()\n\n# solution part one - calories top 1 elf\nsolution_part_one = max(calories_list) # 70374\n\n# solution part two - sum of calories top 3 elves\nsolution_part_two = sum(sorted(calories_list, reverse=True)[:3]) # 204610\n","repo_name":"AlexanderBauer/Advent_of_Code_2022","sub_path":"python/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"72369505175","text":"from typing import List, Optional, Mapping\nimport abc\n\nimport pandas as pd\nimport numpy as np\nfrom IPython.core.display import display\nfrom sklearn import model_selection\nimport sklearn.metrics\nimport ipywidgets\nfrom typing import Tuple\n\nfrom ..classification_pipe_base import ClassificationPipeBase, Data, Params\n\n\nclass SklearnClassifier(ClassificationPipeBase):\n \"\"\"\n Wrapper for inclusion of sklearn classifiers in the pipeline.\n \"\"\"\n\n input_keys = (\"df\", \"df_metadata\") # type: Tuple[str, ...]\n output_keys = (\"predict\", \"predict_metadata\") # type: Tuple[str, ...]\n\n threshold = None\n\n fit_attributes = [(\"clf\", \"pickle\", \"pickle\")]\n\n def __init__(self, clf, **kwargs):\n super().__init__()\n\n self.clf = clf(**kwargs)\n\n def fit(self, data: Data, params: Params):\n self._set_classification_labels(data[\"df\"], data[\"df_metadata\"])\n X = data[\"df\"][self.X_labels]\n y_true = data[\"df\"][self.y_true_label]\n\n self.clf.fit(X, y_true)\n\n def transform(self, data: Data, params: Params) -> Data:\n self._set_classification_data(data[\"df\"], data[\"df_metadata\"])\n\n self.y_pred_proba = pd.DataFrame(\n self.clf.predict_proba(self.X.values),\n columns=self.y_pred_proba_labels,\n index=self.X.index,\n )\n\n 
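# A \"compute_threshold\" entry in params is expected to be a callable (for\n # example a ThresholdBase subclass such as CostThreshold below) that accepts\n # y_true, y_pred_proba and y_pred_proba_labels keyword arguments and returns\n # a float threshold.\n 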
if params.get(\"compute_threshold\"):\n # compute the threshold in this transform and use it in the next transforms\n self.threshold = params[\"compute_threshold\"](\n y_true=self.y_true,\n y_pred_proba=self.y_pred_proba,\n y_pred_proba_labels=self.y_pred_proba_labels,\n )\n predict_metadata = data[\"df_metadata\"].copy()\n predict_metadata[\"threshold\"] = self.threshold\n\n y_pred = np.zeros(self.X.shape[0]) # type: ignore\n\n y_pred[\n self.y_pred_proba[self.y_pred_proba_labels[1]] >= (self.threshold or 0.5)\n ] = 1\n\n self.y_pred_proba[self.y_pred_label] = y_pred\n\n if self.y_true is not None:\n self.y_pred_proba[self.y_true_label] = self.y_true\n\n return {\"predict\": self.y_pred_proba, \"predict_metadata\": predict_metadata}\n\n\nclass ThresholdBase(abc.ABC):\n \"\"\"\n Base class for strategies that compute a classification threshold from\n predicted probabilities.\n \"\"\"\n\n y_true = None # type: Optional[List]\n y_pred_proba = None # type: Mapping\n y_pred_proba_labels = None # type: List[str]\n\n def set_y(self, y_true, y_pred_proba, y_pred_proba_labels, **kwargs):\n self.y_true = y_true\n self.y_pred_proba = y_pred_proba\n self.y_pred_proba_labels = y_pred_proba_labels\n\n @abc.abstractmethod\n def __call__(self, **kwargs) -> float:\n pass\n\n\nclass PrecisionRecallThreshold(ThresholdBase):\n def __call__(self, **kwargs) -> float:\n self.set_y(\n kwargs[\"y_true\"], kwargs[\"y_pred_proba\"], kwargs[\"y_pred_proba_labels\"]\n )\n\n precision, recall, thresholds = sklearn.metrics.precision_recall_curve(\n self.y_true, self.y_pred_proba[self.y_pred_proba_labels[1]]\n )\n\n # precision_recall_curve returns arrays; return the single threshold\n # that maximises the F1 score so the declared float type holds.\n f1 = 2 * precision * recall / (precision + recall + 1e-12)\n return float(thresholds[np.argmax(f1[:-1])])\n\n\nclass CostThreshold(ThresholdBase):\n def __init__(\n self, costFalseNegative: float = 1.0, costFalsePositive: float = 1.0\n ) -> None:\n self.costFalseNegative = costFalseNegative\n self.costFalsePositive = costFalsePositive\n super().__init__()\n\n def __call__(self, **kwargs) -> float:\n self.set_y(\n kwargs[\"y_true\"], kwargs[\"y_pred_proba\"], kwargs[\"y_pred_proba_labels\"]\n )\n\n tspace = np.linspace(0, 1, 100)\n costs = []\n for i in range(len(tspace) - 1):\n t = tspace[i]\n costs.append(\n self._computeCost(t, self.costFalseNegative, self.costFalsePositive)\n )\n tmin = tspace[costs.index(min(costs))]\n return tmin\n\n def _computeConfusionMatrix(self, threshold: float):\n \"\"\"Make a confusion matrix for the given threshold\"\"\"\n\n y_score = np.zeros(self.y_pred_proba.shape[0]) # type: ignore\n y_score[self.y_pred_proba[self.y_pred_proba_labels[1]] >= threshold] = 1\n return sklearn.metrics.confusion_matrix(self.y_true, y_score)\n\n def _computeCost(\n self, threshold: float, costFalseNegative: float, costFalsePositive: float\n ) -> float:\n cfmat = self._computeConfusionMatrix(threshold)\n # false negative: a defaulter classified as paying\n numFalseNegative = cfmat[1, 0]\n # false positive: a payer classified as defaulting\n numFalsePositive = cfmat[0, 1]\n cost = (\n numFalseNegative * costFalseNegative + numFalsePositive * costFalsePositive\n )\n return cost\n\n\nclass GridSearchCVProgressBar(model_selection.GridSearchCV):\n \"\"\"Monkey patch to have a progress bar during grid search\"\"\"\n\n def _get_param_iterator(self):\n \"\"\"Return ParameterGrid instance for the given param_grid\"\"\"\n\n iterator = super()._get_param_iterator()\n iterator = list(iterator)\n n_candidates = len(iterator)\n\n cv = model_selection._split.check_cv(self.cv, None)\n n_splits = getattr(cv, \"n_splits\", 3)\n max_value = n_candidates * n_splits # count the amount of iterations total\n\n progress_label = ipywidgets.HTML()\n progress_bar = ipywidgets.FloatProgress(\n min=0, max=max_value, description=\"GridSearchCV:\"\n )\n progress_box = ipywidgets.HBox(\n children=[progress_bar, 
progress_label]\n ) # setup a progress label + bar\n\n display(progress_box)\n original_fit = self.estimator.__class__.fit\n\n def fit(*args, **kwargs):\n progress_bar.value += 1 # every time fit is called, increase progress bar by 1\n if (\n progress_bar.value == max_value\n ): # if max value is reached, display finished and turn green\n progress_label.value = \"finished\"\n progress_bar.bar_style = \"success\"\n\n original_fit(*args, **kwargs)\n\n self.estimator.__class__.fit = fit\n\n return iterator\n\n\nclass SklearnGridSearch(SklearnClassifier):\n\n input_keys = (\"df\", \"df_metadata\")\n output_keys = (\"predict\", \"predict_metadata\")\n\n def __init__(self, clf, param_grid, scoring: str = \"roc_auc\") -> None:\n kwargs = {\"estimator\": clf(), \"param_grid\": param_grid, \"scoring\": scoring}\n self.cv_clf = None\n super().__init__(GridSearchCVProgressBar, **kwargs)\n\n def fit(self, data: Data, params: Params):\n super().fit(data, params)\n cv_results = pd.DataFrame(self.clf.cv_results_).sort_values(\n by=\"rank_test_score\"\n )\n display(\"Results of grid search\")\n display(cv_results)\n\n # select the best estimator and store the gridsearch results for optional later inspection\n self.cv_clf = self.clf\n self.clf = self.clf.best_estimator_\n display(\"Best algorithm\")\n display(self.clf)\n\n\nimport tpot\n\n\nclass TPOTClassifier(SklearnClassifier):\n def __init__(self, **kwargs):\n self.clf = tpot.TPOTClassifier(**kwargs)\n\n def fit(self, data: Data, params: Params):\n super().fit(data, params)\n\n # select the best estimator and store the gridsearch results for optional later inspection\n self.cv_clf = self.clf\n self.clf = self.clf.fitted_pipeline_\n display(\"Best algorithm\")\n display(self.clf)\n","repo_name":"mabvanaartrijk/dvb.datascience","sub_path":"dvb/datascience/predictor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5863410829","text":"import enum\n\nimport pytest\n\nfrom keras import backend\nfrom keras import testing\nfrom keras.legacy.saving import json_utils\n\nif backend.backend() == \"tensorflow\":\n import tensorflow as tf\n\n\nclass JsonUtilsTestAllBackends(testing.TestCase):\n def test_encode_decode_tuple(self):\n metadata = {\"key1\": (3, 5), \"key2\": [(1, (3, 4)), (1,)]}\n string = json_utils.Encoder().encode(metadata)\n loaded = json_utils.decode(string)\n\n self.assertEqual(set(loaded.keys()), {\"key1\", \"key2\"})\n self.assertAllEqual(loaded[\"key1\"], (3, 5))\n self.assertAllEqual(loaded[\"key2\"], [(1, (3, 4)), (1,)])\n\n def test_encode_decode_enum(self):\n class Enum(enum.Enum):\n CLASS_A = \"a\"\n CLASS_B = \"b\"\n\n config = {\"key\": Enum.CLASS_A, \"key2\": Enum.CLASS_B}\n string = json_utils.Encoder().encode(config)\n loaded = json_utils.decode(string)\n self.assertAllEqual({\"key\": \"a\", \"key2\": \"b\"}, loaded)\n\n def test_encode_decode_bytes(self):\n b_string = b\"abc\"\n json_string = json_utils.Encoder().encode(b_string)\n loaded = json_utils.decode(json_string)\n self.assertAllEqual(b_string, loaded)\n\n\n@pytest.mark.skipif(\n backend.backend() != \"tensorflow\",\n reason=\"These JSON serialization tests are specific to TF components.\",\n)\nclass JsonUtilsTestTF(testing.TestCase):\n def test_encode_decode_tensor_shape(self):\n metadata = {\n \"key1\": tf.TensorShape(None),\n \"key2\": [tf.TensorShape([None]), tf.TensorShape([3, None, 5])],\n }\n string = 
json_utils.Encoder().encode(metadata)\n loaded = json_utils.decode(string)\n\n self.assertEqual(set(loaded.keys()), {\"key1\", \"key2\"})\n self.assertEqual(loaded[\"key1\"].rank, None)\n self.assertAllEqual(loaded[\"key2\"][0].as_list(), [None])\n self.assertAllEqual(loaded[\"key2\"][1].as_list(), [3, None, 5])\n\n def test_encode_decode_type_spec(self):\n spec = tf.TensorSpec((1, 5), tf.float32)\n string = json_utils.Encoder().encode(spec)\n loaded = json_utils.decode(string)\n self.assertEqual(spec, loaded)\n\n invalid_type_spec = {\n \"class_name\": \"TypeSpec\",\n \"type_spec\": \"Invalid Type\",\n \"serialized\": None,\n }\n string = json_utils.Encoder().encode(invalid_type_spec)\n with self.assertRaisesRegexp(\n ValueError, \"No TypeSpec has been registered\"\n ):\n loaded = json_utils.decode(string)\n\n def test_encode_decode_ragged_tensor(self):\n x = tf.ragged.constant([[1.0, 2.0], [3.0]])\n string = json_utils.Encoder().encode(x)\n loaded = json_utils.decode(string)\n self.assertAllClose(loaded.values, x.values)\n\n def test_encode_decode_extension_type_tensor(self):\n class MaskedTensor(tf.experimental.ExtensionType):\n __name__ = \"MaskedTensor\"\n values: tf.Tensor\n mask: tf.Tensor\n\n x = MaskedTensor(\n values=[[1, 2, 3], [4, 5, 6]],\n mask=[[True, True, False], [True, False, True]],\n )\n string = json_utils.Encoder().encode(x)\n loaded = json_utils.decode(string)\n self.assertAllClose(loaded.values, x.values)\n self.assertAllClose(loaded.mask, x.mask)\n","repo_name":"keras-team/keras","sub_path":"keras/legacy/saving/json_utils_test.py","file_name":"json_utils_test.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":59773,"dataset":"github-code","pt":"67"} +{"seq_id":"7180039468","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nrequests.session\n~~~~~~~~~~~~~~~~\n\nThis module provides a Session object to manage and persist settings across\nrequests (cookies, auth, proxies).\n\n\"\"\"\n\nfrom .defaults import defaults\nfrom .models import Request\nfrom .hooks import dispatch_hook\nfrom .utils import header_expand\nfrom .packages.urllib3.poolmanager import PoolManager\n\n\ndef merge_kwargs(local_kwarg, default_kwarg):\n \"\"\"Merges kwarg dictionaries.\n\n If a local key in the dictionary is set to None, it will be removed.\n \"\"\"\n\n if default_kwarg is None:\n return local_kwarg\n\n if isinstance(local_kwarg, basestring):\n return local_kwarg\n\n if local_kwarg is None:\n return default_kwarg\n\n # Bypass if not a dictionary (e.g. 
timeout)\n if not hasattr(default_kwarg, 'items'):\n return local_kwarg\n\n # Update new values.\n kwargs = default_kwarg.copy()\n kwargs.update(local_kwarg)\n\n # Remove keys that are set to None.\n for (k,v) in local_kwarg.items():\n if v is None:\n del kwargs[k]\n\n return kwargs\n\n\nclass Session(object):\n \"\"\"A Requests session.\"\"\"\n\n __attrs__ = [\n 'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',\n 'params', 'config']\n\n\n def __init__(self,\n headers=None,\n cookies=None,\n auth=None,\n timeout=None,\n proxies=None,\n hooks=None,\n params=None,\n config=None):\n\n self.headers = headers or {}\n self.cookies = cookies or {}\n self.auth = auth\n self.timeout = timeout\n self.proxies = proxies or {}\n self.hooks = hooks or {}\n self.params = params or {}\n self.config = config or {}\n\n for (k, v) in defaults.items():\n self.config.setdefault(k, v)\n\n self.poolmanager = PoolManager(\n num_pools=self.config.get('pool_connections'),\n maxsize=self.config.get('pool_maxsize')\n )\n\n # Set up a CookieJar to be used by default\n self.cookies = {}\n\n # Add passed cookies in.\n if cookies is not None:\n self.cookies.update(cookies)\n\n def __repr__(self):\n return '<requests-client at 0x%x>' % (id(self))\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\n def request(self, method, url,\n params=None,\n data=None,\n headers=None,\n cookies=None,\n files=None,\n auth=None,\n timeout=None,\n allow_redirects=False,\n proxies=None,\n hooks=None,\n return_response=True,\n config=None,\n prefetch=False):\n\n \"\"\"Constructs and sends a :class:`Request <models.Request>`.\n Returns :class:`Response <models.Response>` object.\n\n :param method: method for the new :class:`Request` object.\n :param url: URL for the new :class:`Request` object.\n :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.\n :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.\n :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.\n :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.\n :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.\n :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.\n :param timeout: (optional) Float describing the timeout of the request.\n :param allow_redirects: (optional) Boolean. 
Set to True if POST/PUT/DELETE redirect following is allowed.\n :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.\n :param return_response: (optional) If False, an un-sent Request object will be returned.\n :param config: (optional) A configuration dictionary.\n :param prefetch: (optional) if ``True``, the response content will be immediately downloaded.\n \"\"\"\n\n method = str(method).upper()\n\n # Default empty dicts for dict params.\n cookies = {} if cookies is None else cookies\n data = {} if data is None else data\n files = {} if files is None else files\n headers = {} if headers is None else headers\n params = {} if params is None else params\n\n # Expand header values.\n if headers:\n for k, v in headers.items() or {}:\n headers[k] = header_expand(v)\n\n args = dict(\n method=method,\n url=url,\n data=data,\n params=params,\n headers=headers,\n cookies=cookies,\n files=files,\n auth=auth,\n hooks=hooks,\n timeout=timeout,\n allow_redirects=allow_redirects,\n proxies=proxies,\n config=config,\n _poolmanager=self.poolmanager\n )\n\n # Merge local kwargs with session kwargs.\n for attr in self.__attrs__:\n session_val = getattr(self, attr, None)\n local_val = args.get(attr)\n\n args[attr] = merge_kwargs(local_val, session_val)\n\n # Arguments manipulation hook.\n args = dispatch_hook('args', args['hooks'], args)\n\n # Create the (empty) response.\n r = Request(**args)\n\n # Give the response some context.\n r.session = self\n\n # Don't send if asked nicely.\n if not return_response:\n return r\n\n # Send the HTTP Request.\n r.send(prefetch=prefetch)\n\n # Send any cookies back up to the session.\n self.cookies.update(r.response.cookies)\n\n # Return the response.\n return r.response\n\n\n def get(self, url, **kwargs):\n \"\"\"Sends a GET request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return self.request('get', url, **kwargs)\n\n\n def options(self, url, **kwargs):\n \"\"\"Sends an OPTIONS request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return self.request('options', url, **kwargs)\n\n\n def head(self, url, **kwargs):\n \"\"\"Sends a HEAD request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return self.request('head', url, **kwargs)\n\n\n def post(self, url, data=None, **kwargs):\n \"\"\"Sends a POST request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n return self.request('post', url, data=data, **kwargs)\n\n\n def put(self, url, data=None, **kwargs):\n \"\"\"Sends a PUT request. 
Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n return self.request('put', url, data=data, **kwargs)\n\n\n def patch(self, url, data=None, **kwargs):\n \"\"\"Sends a PATCH request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n return self.request('patch', url, data=data, **kwargs)\n\n\n def delete(self, url, **kwargs):\n \"\"\"Sends a DELETE request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param **kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n return self.request('delete', url, **kwargs)\n\n\ndef session(**kwargs):\n \"\"\"Returns a :class:`Session` for context-management.\"\"\"\n\n return Session(**kwargs)\n","repo_name":"bububa/pyTOP","sub_path":"pyTOP/packages/requests/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"67"} +{"seq_id":"43329087925","text":"from django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom shipanaro.models import Membership\n\n\nclass Command(BaseCommand):\n help = \"Find duplicated memberships through email or identity document\"\n\n def handle(self, *args, **options):\n for m in Membership.objects.all():\n u = m.user\n result = (\n Membership.objects.values_list(\"id\", \"user__email\", \"nid\")\n .filter(\n Q(user__email=u.email) | (Q(nid=m.nid) & Q(nid_type=m.nid_type))\n )\n .filter(user__is_active=True)\n )\n if len(result) != 1:\n for r in result:\n print(\"{},{},{}\".format(r[0], r[1], r[2]))\n","repo_name":"pirates-cat/shipanaro","sub_path":"humans/management/commands/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"25683492300","text":"from aws_cdk import (\r\n aws_iam as iam,\r\n Aws, aws_lambda as _lambda,\r\n Duration\r\n)\r\nimport os\r\nfrom constructs import Construct\r\n\r\n\r\nclass LambdaManualApprove(Construct):\r\n\r\n def __init__(self, scope: Construct, construct_id: str,\r\n project_name: str = None,\r\n emails: list = None,\r\n **kwargs) -> None:\r\n super().__init__(scope, construct_id, **kwargs)\r\n dirname = os.path.dirname(__file__)\r\n self.function = _lambda.Function(self, f'ManualApprovalNotification{project_name}',\r\n runtime=_lambda.Runtime.PYTHON_3_8,\r\n # code=_lambda.Code.asset('CodeCommit_Stack/lambda'),\r\n code=_lambda.Code.from_asset(path=os.path.join(dirname, \"lambda/function\")),\r\n handler='app.lambda_handler',\r\n function_name=f'ManualApproval_{project_name}',\r\n timeout=Duration.seconds(70),\r\n environment={\r\n \"webhook_secret_name\": f\"webhook_{project_name}_channel\"\r\n }\r\n )\r\n\r\n st = iam.PolicyStatement(actions=[\"secretsmanager:GetSecretValue\"],\r\n effect=iam.Effect.ALLOW,\r\n resources=[\r\n f\"arn:aws:secretsmanager:{Aws.REGION}:{Aws.ACCOUNT_ID}:secret:webhook_{project_name}_channel-??????\"]\r\n )\r\n 
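# Grant read access only to this project's Teams webhook secret; the\r\n # \"-??????\" suffix matches the random characters Secrets Manager appends\r\n # to secret names.\r\n 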
self.function.add_to_role_policy(st)\r\n\r\n","repo_name":"velez94/cdkv2_pipeline_multienvironment","sub_path":"src/lib/manual_approval/manual_approval_teams_integration.py","file_name":"manual_approval_teams_integration.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"24496489451","text":"import cv2\nimport time\n# from adafruit_servokit import ServoKit\n# NOTE: aim() drives self.servo, which is only available once ServoKit is wired up.\n\nclass FaceTracking():\n def __init__(self):\n cascPath = \"haarcascade_frontalface_default.xml\"\n self.faceCascade = cv2.CascadeClassifier(cascPath)\n\n self.cap = cv2.VideoCapture(0)\n # sets the fps of the video capturing to 15\n self.cap.set(cv2.CAP_PROP_FPS , 15)\n self.frame_w = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2\n\n\n def detect(self):\n ret, frame = self.cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = self.faceCascade.detectMultiScale(\n gray,\n scaleFactor = 1.1,\n minNeighbors = 5,\n minSize=(30,30)\n )\n\n return faces\n\n def aim(self, face):\n x = face[0]\n # y = face[1]\n w = face[2]\n # h = face[3] \n\n box_mid = 2*x+w\n frame_mid = self.frame_w/2\n threshold = 25\n\n aiming = False\n micro_trajectory = []\n\n while not aiming:\n\n if box_mid > frame_mid + threshold:\n self.servo.throttle = 1.0\n elif box_mid < frame_mid - threshold:\n self.servo.throttle = -1.0\n else:\n self.servo.throttle = 0.0\n aiming = True\n\n micro_trajectory.append((time.time(),self.servo.throttle))\n \n return micro_trajectory\n","repo_name":"jbrosemer/Atlas","sub_path":"faceTracking.py","file_name":"faceTracking.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1052542205","text":"\"\"\"Python CSV Sample codes\n\"\"\"\nimport csv\nfrom collections import OrderedDict\nfrom typing import List\n\nSUBJECTS_LENGTH = 3\n\n\ndef load_csvfile(filepath: str) -> List[OrderedDict]:\n result = []\n with open(file=filepath, mode=\"r\") as csvfp:\n reader = csv.DictReader(csvfp)\n for row in reader:\n result.append(row)\n return result\n\n\ndef save_csvfile(filepath: str, fieldnames: list, records: List[dict]):\n with open(file=filepath, mode=\"w\") as csvfp:\n writer = csv.DictWriter(csvfp, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(records)\n\n\nclass Student:\n name: str\n math: int = 0\n eng: int = 0\n korean: int = 0\n total: int = 0\n avg: float = 0.0\n\n def to_dict(self) -> dict:\n return self.__dict__.copy()\n\n @classmethod\n def fields(cls) -> list:\n fieldnames = list(cls.__annotations__.keys()) # pylint: disable=no-member\n return fieldnames.copy()\n\n def _calculate_total(self):\n self.total = self.math + self.eng + self.korean\n\n def _calculate_avg(self):\n if self.total > 0:\n self.avg = float(self.total) / SUBJECTS_LENGTH\n\n def set_data(self, name: str, math: str, eng: str, korean: str):\n self.name = name\n self.math = int(math)\n self.eng = int(eng)\n self.korean = int(korean)\n self._calculate_total()\n self._calculate_avg()\n\n\ndef main():\n \"\"\"Show basic usage of the CSV package\n Load sample csv file and Save test csv file.\n \"\"\"\n sample_students = load_csvfile(\"sample.csv\")\n\n csv_records = []\n student = Student()\n for member in sample_students:\n student.set_data(**member)\n csv_records.append(student.to_dict())\n\n fieldnames = Student.fields()\n save_csvfile(\n filepath=\"test_result.csv\", fieldnames=fieldnames, records=csv_records\n )\n\n\nif __name__ 
== \"__main__\":\n main()\n","repo_name":"digiry/python-samples","sub_path":"csv/csv_main.py","file_name":"csv_main.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31613656411","text":"def sum_of_digit(diggy):\n \"\"\"\n Create the function sumOfDigits that adds\n individual digits of a number,\n and returns the sum.\n \"\"\"\n packy = []\n stringy = str(diggy)\n for i in stringy:\n inty = int(i)\n packy.append(inty)\n return sum(packy)\n\n\nprint(sum_of_digit(414))\n","repo_name":"ajakaiye33/pythonic_daily_capsules","sub_path":"sum_of_digit.py","file_name":"sum_of_digit.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9188517071","text":"import torch\n\nfrom typing import List, Optional, Tuple\nfrom pytorch3d.renderer.cameras import CamerasBase\nimport pdb\n\n# Volume renderer which integrates color and density along rays\n# according to the equations defined in [Mildenhall et al. 2020]\nclass SphereTracingRenderer(torch.nn.Module):\n def __init__(\n self,\n cfg\n ):\n super().__init__()\n\n self._chunk_size = cfg.chunk_size\n self.near = cfg.near\n self.far = cfg.far\n self.max_iters = cfg.max_iters\n \n def sphere_tracing(\n self,\n implicit_fn,\n origins, # Nx3\n directions, # Nx3\n ):\n '''\n Input:\n implicit_fn: a module that computes a SDF at a query point\n origins: N_rays X 3\n directions: N_rays X 3\n Output:\n points: N_rays X 3 points indicating ray-surface intersections. For rays that do not intersect the surface,\n the point can be arbitrary.\n mask: N_rays X 1 (boolean tensor) denoting which of the input rays intersect the surface.\n '''\n # TODO (Q1): Implement sphere tracing\n # 1) Iteratively update points and distance to the closest surface\n # in order to compute intersection points of rays with the implicit surface\n # 2) Maintain a mask with the same batch dimension as the ray origins,\n # indicating which points hit the surface, and which do not\n # points = origins + directions * self.near\n eps = 1e-5\n t = torch.ones_like(origins[:,0].unsqueeze(-1)) * self.near\n points = origins + t * directions\n\n mask = torch.ones_like(t) > 0\n iteration = 0\n while True:\n iteration+=1\n f_p = implicit_fn.get_distance(points)\n t = t + f_p\n points = origins + t * directions\n mask[t > self.far] = False\n valid = f_p[mask] > eps\n \n if valid.sum() == 0:\n break\n \n if iteration == self.max_iters:\n print('break with maximum iteration!')\n break\n\n return points, mask\n \n\n def forward(\n self,\n sampler,\n implicit_fn,\n ray_bundle,\n ):\n B = ray_bundle.shape[0]\n\n # Process the chunks of rays.\n chunk_outputs = []\n\n for chunk_start in range(0, B, self._chunk_size):\n cur_ray_bundle = ray_bundle[chunk_start:chunk_start+self._chunk_size]\n points, mask = self.sphere_tracing(\n implicit_fn,\n cur_ray_bundle.origins,\n cur_ray_bundle.directions\n )\n mask = mask.repeat(1,3)\n isect_points = points[mask].view(-1, 3)\n\n # Get color from implicit function with intersection points\n isect_color = implicit_fn.get_color(isect_points)\n\n # Return\n color = torch.zeros_like(cur_ray_bundle.origins)\n color[mask] = isect_color.view(-1)\n\n cur_out = {\n 'color': color.view(-1, 3),\n }\n\n chunk_outputs.append(cur_out)\n\n # Concatenate chunk outputs\n out = {\n k: torch.cat(\n [chunk_out[k] for chunk_out in chunk_outputs],\n dim=0\n ) for k in 
chunk_outputs[0].keys()\n }\n\n return out\n\ndef get_device():\n \"\"\"\n Checks if GPU is available and returns device accordingly.\n \"\"\"\n if torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n else:\n device = torch.device(\"cpu\")\n return device\n\ndef sdf_to_density(signed_distance, alpha, beta):\n # TODO (Q3): Convert signed distance to density with alpha, beta parameters\n s = - signed_distance\n PHI = torch.zeros_like(signed_distance)\n PHI[s<=0] = (0.5 * torch.exp(s / beta))[s<=0]\n PHI[s>0] = (1 - 0.5 * torch.exp( - s / beta))[s>0]\n sigma = alpha * PHI\n return sigma\n\ndef sdf_to_density_naive(signed_distance, scale):\n # logistic density distribution\n PHI = scale * torch.exp(-scale * signed_distance) / torch.square(1.0 + torch.exp(-scale* signed_distance))\n return PHI\n\n\n\nclass VolumeSDFRenderer(torch.nn.Module):\n def __init__(\n self,\n cfg\n ):\n super().__init__()\n\n self._chunk_size = cfg.chunk_size\n self._white_background = cfg.white_background if 'white_background' in cfg else False\n self.alpha = cfg.alpha\n self.beta = cfg.beta\n\n def _compute_weights(\n self,\n deltas,\n rays_density: torch.Tensor,\n eps: float = 1e-10\n ):\n # TODO (Q3): Copy code from VolumeRenderer._compute_weights\n # TODO (1.5): Compute transmittance using the equation described in the README\n n_rays, n_sample_per_ray = deltas.shape[0], deltas.shape[1]\n\n multiplier = torch.exp(-(deltas * rays_density))\n \n T = torch.ones(n_rays, n_sample_per_ray,1).to(get_device())\n \n for i in range(1, n_sample_per_ray):\n T[:,i,:] = T[:,i-1,:].clone() * multiplier[:,i-1,:] # clone avoids inplace error\n\n # TODO (1.5): Compute weight used for rendering from transmittance and density\n weights = T * (1-torch.exp(-rays_density*deltas))\n \n return weights\n \n def _aggregate(\n self,\n weights: torch.Tensor,\n rays_color: torch.Tensor\n ):\n # TODO (Q3): Copy code from VolumeRenderer._aggregate\n feature = torch.sum(weights * rays_color, dim=1)\n return feature\n\n def forward(\n self,\n sampler,\n implicit_fn,\n ray_bundle,\n ):\n B = ray_bundle.shape[0]\n\n # Process the chunks of rays.\n chunk_outputs = []\n\n for chunk_start in range(0, B, self._chunk_size):\n cur_ray_bundle = ray_bundle[chunk_start:chunk_start+self._chunk_size]\n\n # Sample points along the ray\n cur_ray_bundle = sampler(cur_ray_bundle)\n n_pts = cur_ray_bundle.sample_shape[1]\n\n # Call implicit function with sample points\n distance, color = implicit_fn.get_distance_color(cur_ray_bundle.sample_points)\n color = color.view(-1, n_pts, 3)\n density = sdf_to_density(distance, self.alpha, self.beta) # TODO (Q3): convert SDF to density\n # NOTE: the naive logistic density below overrides the alpha/beta density above.\n density = sdf_to_density_naive(distance, 150.0)\n # Compute length of each ray segment\n depth_values = cur_ray_bundle.sample_lengths[..., 0]\n deltas = torch.cat(\n (\n depth_values[..., 1:] - depth_values[..., :-1],\n 1e10 * torch.ones_like(depth_values[..., :1]),\n ),\n dim=-1,\n )[..., None]\n\n # Compute aggregation weights\n weights = self._compute_weights(\n deltas.view(-1, n_pts, 1),\n density.view(-1, n_pts, 1)\n ) \n\n # Compute color\n color = self._aggregate(\n weights,\n color.view(-1, n_pts, color.shape[-1])\n )\n\n # Return\n cur_out = {\n 'color': color,\n }\n\n chunk_outputs.append(cur_out)\n\n # Concatenate chunk outputs\n out = {\n k: torch.cat(\n [chunk_out[k] for chunk_out in chunk_outputs],\n dim=0\n ) for k in chunk_outputs[0].keys()\n }\n\n return out\n\n\n
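# sdf_to_density implements a VolSDF-style Laplace-CDF density:\n# sigma(s) = alpha * Phi_beta(-s), with Phi_beta(x) = 0.5 * exp(x / beta) for\n# x <= 0 and 1 - 0.5 * exp(-x / beta) otherwise; sdf_to_density_naive uses a\n# logistic density instead.\n\n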
renderer_dict = {\n 'sphere_tracing': SphereTracingRenderer,\n 'volume_sdf': VolumeSDFRenderer\n}\n\n","repo_name":"afiretony/Learning-for-3D-Vision","sub_path":"assignments/solutions/assignment4/a4/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":7609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"18835610408","text":"from typing import List\n\n\ndef command_args_to_str(command: List[str]):\n command_len = len(command)\n if command_len == 1:\n return \"\"\n start_args_index = 1\n end_args_index = command_len\n args_str = \" \".join(\n command[start_args_index:end_args_index]\n )\n return args_str\n","repo_name":"konnovdev/cpop_bot","sub_path":"tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"28907815004","text":"class Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root:\n return root\n dp = [root]\n while dp:\n for i in range(len(dp)):\n t = dp.pop(0)\n if not t.left:\n continue\n dp.append(t.left)\n dp.append(t.right)\n for j in range(len(dp)-1):\n dp[j].next = dp[j+1]\n return root\n","repo_name":"longhao54/leetcode","sub_path":"normal/116.py","file_name":"116.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"38768747131","text":"from myFunction import set1,set0,get_bit,get_xor,txt_to_bin,bin_to_txt,get_pos_array\r\nimport cv2\r\n\r\npos_array = get_pos_array(\"password\")\r\npa_ln = len(pos_array)\r\n\r\n\r\ntxt_bit = \"\"\r\ntxt_ln = 88 # this is the input text length; set it manually, because there is no end-of-message marker\r\n\r\n\r\n\r\nimage = cv2.imread(\"output.png\")\r\nh,w,ch = image.shape\r\n\r\n\r\n\r\nmsb_p = 8\r\ntxt_b_cntr = 0\r\npss_a_cntr = 0\r\nflag = True\r\n\r\n\r\n\r\nfor x in range(0,w):\r\n for y in range(0,h):\r\n pix_num = image[y,x][0]\r\n if get_bit(pix_num,msb_p) == 1: #------- filtering\r\n \r\n\r\n p = pos_array[pss_a_cntr] #------- get position from pos_array\r\n pss_a_cntr += 1\r\n if pss_a_cntr == pa_ln:\r\n pss_a_cntr = 0\r\n\r\n\r\n im_b = get_bit(pix_num,p+1) #------- get pixel position bit\r\n\r\n im_b0 = get_bit(pix_num,1) #------- get pixel LSB\r\n\r\n\r\n xor_b = get_xor(im_b,im_b0)\r\n\r\n if xor_b == True: #------- retrieve text bit\r\n txt_bit += '1'\r\n else:\r\n txt_bit += '0'\r\n\r\n\r\n\r\n\r\n txt_b_cntr += 1\r\n if txt_b_cntr >= txt_ln:\r\n flag = False\r\n break\r\n\r\n if flag == False:\r\n break\r\n\r\n \r\ntxt = bin_to_txt(txt_bit)\r\n\r\nprint(txt)\r\n","repo_name":"NurAhmadullah/Image-steganography","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"10936691655","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 20:31:14 2018\n\n@author: htshinichi\n\"\"\"\nimport KnnClassifier\nimport matplotlib.pyplot as plt\nclass KnnFunction():\n # Compute the accuracy for each candidate k in the given list\n def K_acc(self,X_train,X_test,y_train,y_test,k_arr):\n self.k_value = k_arr\n self.accu_value=[]\n self.accu_best,self.k_best = 0, 0\n for k in self.k_value:\n model = KnnClassifier.KNN(k)\n y_pred = model.predict(X_test,X_train,y_train)\n accu = model.accuracy(y_test,y_pred)\n if accu > self.accu_best:\n self.k_best = k\n self.accu_best = accu\n self.accu_value.append(accu)\n # Plot the k vs. accuracy curve\n def plot_K_acc(self):\n figure = 
plt.figure(figsize=(8,6))\n ax = figure.add_subplot(1,1,1)\n ax.plot(self.k_value,self.accu_value,color='red',marker='*',lw=1)\n plt.xticks(self.k_value, rotation=0) \n ax.set_xlabel(r\"k_value\")\n ax.set_ylabel(r\"acc\")\n ax.set_title(\"k value and accuracy\")\n plt.show()\n\n","repo_name":"htshinichi/ML_model","sub_path":"KNN/KnnFunction.py","file_name":"KnnFunction.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"70718809174","text":"\"\"\"\nAuthor err1482 : Emerald Rafferty\n\n\"\"\"\n\ndef has_pork(food_info) -> bool:\n for i in food_info['ingredients']:\n if 'pork' in i.lower():\n return True\n if 'bacon' in i.lower():\n return True\n return False\n\n\ndef has_soy(food_info) -> bool:\n for i in food_info['ingredients']:\n if 'soy' in i.lower():\n return True\n return False\n\ndef has_egg(food_info) -> bool:\n for i in food_info['ingredients']:\n if 'egg' in i.lower() and 'veg' not in i.lower():\n return True\n return False\n\n\ndef has_gluten(food_info) -> bool:\n for i in food_info['ingredients']:\n if 'gluten' in i.lower():\n return True\n return False\n\n\ndef is_kosher(food_info) -> bool:\n if 'kshr' in food_info['name'].lower():\n return True\n if has_pork(food_info):\n return False\n return True\n\n\ndef has_nuts(food_info) -> bool:\n for i in food_info['ingredients']:\n if 'tree nuts' in i.lower():\n return True\n if 'peanuts' in i.lower():\n return True\n\n return False\n\n\ndef has_meat(food_info) -> bool:\n products = {'chicken', 'beef', 'pork', 'bacon', 'enzymes', 'meat', 'fish'}\n for i in food_info['ingredients']:\n i = i.lower()\n for j in products:\n if j in i:\n return True\n return False\n\ndef has_dairy(food_info) -> bool:\n products = {'milk', 'cheese', 'contains milk', 'cheddar cheese'}\n\n for i in food_info['ingredients']:\n i = i.lower()\n for j in products:\n if j in i and 'coconut' not in i and 'soy' not in i and 'almond' not in i and 'oat' not in i and 'rice' not in i:\n return True\n return False\n\n\ndef main() -> None:\n print('meh')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jzaia18/TheWegMenu","sub_path":"thewegmenu/utils/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"4046536411","text":"\"\"\"Main module for the collider physics tutorial at TRISEP 2022\"\"\"\n\n# physics\nimport numpy as np\nimport vector\nimport file_handler\nimport cuts\nfrom plotter import plot_histogram\n\n# constants and such\nBRANCH_NAMES = [ # things we'll need from file (use None to get everything)\n 'lep_pt',\n 'lep_eta',\n 'lep_phi',\n 'lep_E',\n 'lep_n',\n 'lep_charge',\n 'lep_type']\nSAMPLE_SIZE = 100000 # use None to get the whole dataset\n\nFILE_NAME = 'DataMuons.root'\nPARTICLE_ID = {\n # \"file_name\": PDG ID,\n \"DataMuons.root\": 13,\n \"DataEgamma.root\": 11,\n}[FILE_NAME]\n\n# open file, get data\ndata = file_handler.open_file(\n FILE_NAME, branches=BRANCH_NAMES, sample_size=SAMPLE_SIZE)\nprint(len(data), 'events to start')\n\n# make some pre-cut histograms\nplot_histogram(\n data['lep_pt'] / 1000, n_bins=25, range_tuple=(0, 100),\n xlabel=\"pT [GeV]\", ylabel=\"counts\", title=\"Lepton pT before cuts\",\n save_name=\"lep_pt_without_cuts.png\")\nplot_histogram(\n data['lep_n'], n_bins=3, range_tuple=(1, 4),\n xlabel=\"lepton number\", ylabel=\"counts\", title=\"Lepton number before cuts\",\n 
save_name=\"lep_n_before_cuts.png\")\n# 2nd one justifies cutting on exactly two -- only lose 85 events\n\n# make cuts (2 leptons, same type opposite sign, pt>25GeV)\ndata = cuts.make_cuts(data, particle_id=PARTICLE_ID)\nprint(len(data), 'events left after cuts')\n\n# get data in pretty variables\nlep_pt = data['lep_pt'] / 1000 # to convert MeV to GeV\nlep_eta = data['lep_eta']\nlep_phi = data['lep_phi']\nlep_E = data['lep_E'] / 1000 # MeV --> GeV\nlep_n = data['lep_n']\n\n# plot combined lepton properties after cuts\nplot_histogram(\n lep_pt, n_bins=25, range_tuple=(0, 100),\n xlabel=\"lepton pT\", ylabel=\"counts\", title=\"Lepton pT after cuts\",\n save_name=\"lep_pt.png\")\nplot_histogram(\n lep_eta, n_bins=25, range_tuple=(0, 4),\n xlabel=\"lepton eta\", ylabel=\"counts\", title=\"Lepton eta after cuts\",\n save_name=\"lep_eta.png\")\nplot_histogram(\n lep_phi, n_bins=25, range_tuple=(0, np.pi),\n xlabel=\"lepton phi\", ylabel=\"counts\", title=\"Lepton phi after cuts\",\n save_name=\"lep_phi.png\")\nplot_histogram(\n lep_E, n_bins=25, range_tuple=(0, 100),\n xlabel=\"lepton E\", ylabel=\"counts\", title=\"Lepton E after cuts\",\n save_name=\"lep_E.png\")\n\n# and individual properties\nplot_histogram(\n lep_pt[:, 0], n_bins=25, range_tuple=(0, 100),\n xlabel=\"leading lepton pT\", ylabel=\"counts\",\n title=\"Leading lepton pT after cuts\",\n save_name=\"lep_0_pt.png\")\nplot_histogram(\n lep_eta[:, 0], n_bins=25, range_tuple=(0, 4),\n xlabel=\"leading lepton eta\", ylabel=\"counts\",\n title=\"Leading lepton eta after cuts\",\n save_name=\"lep_0_eta.png\")\nplot_histogram(\n lep_phi[:, 0], n_bins=25, range_tuple=(0, np.pi),\n xlabel=\"leading lepton phi\", ylabel=\"counts\",\n title=\"Leading lepton phi after cuts\",\n save_name=\"lep_0_phi.png\")\nplot_histogram(\n lep_E[:, 0], n_bins=25, range_tuple=(0, 100),\n xlabel=\"leading lepton E\", ylabel=\"counts\",\n title=\"Leading lepton E after cuts\",\n save_name=\"lep_0_E.png\")\n\nplot_histogram(\n lep_pt[:, 1], n_bins=25, range_tuple=(0, 100),\n xlabel=\"sub-leading lepton pT\", ylabel=\"counts\",\n title=\"sub-leading lepton pT after cuts\",\n save_name=\"lep_1_pt.png\")\nplot_histogram(\n lep_eta[:, 1], n_bins=25, range_tuple=(0, 4),\n xlabel=\"sub-leading lepton eta\", ylabel=\"counts\",\n title=\"sub-leading lepton eta after cuts\",\n save_name=\"lep_1_eta.png\")\nplot_histogram(\n lep_phi[:, 1], n_bins=25, range_tuple=(0, np.pi),\n xlabel=\"sub-leading lepton phi\", ylabel=\"counts\",\n title=\"sub-leading lepton phi after cuts\",\n save_name=\"lep_1_phi.png\")\nplot_histogram(\n lep_E[:, 1], n_bins=25, range_tuple=(0, 100),\n xlabel=\"sub-leading lepton E\", ylabel=\"counts\",\n title=\"sub-leading lepton E after cuts\",\n save_name=\"lep_1_E.png\")\n\n# make vectors, construct Z candidates\nlepton_vectors = vector.zip({\n 'pt': lep_pt, 'eta': lep_eta, 'phi': lep_phi, 'E': lep_E})\nlead_lepton = lepton_vectors[:, 0]\nnext_lepton = lepton_vectors[:, 1]\nz_candidate = lead_lepton + next_lepton\n\n# plot reconstructed Z properties\nplot_histogram(\n z_candidate.mass, n_bins=100, range_tuple=(50, 150),\n xlabel=\"reconstructed Z mass\", ylabel=\"counts\",\n title=\"reconstructed Z mass\",\n save_name=\"reco_z_mass.png\")\nplot_histogram(\n z_candidate.pt, n_bins=25, range_tuple=(0, 100),\n xlabel=\"reconstructed Z pt\", ylabel=\"counts\",\n title=\"reconstructed Z pt\",\n save_name=\"reco_z_pt.png\")\nplot_histogram(\n z_candidate.eta, n_bins=25, range_tuple=(0, 4),\n xlabel=\"reconstructed Z eta\", ylabel=\"counts\",\n 
title=\"reconstructed Z eta\",\n save_name=\"reco_z_eta.png\")\nplot_histogram(\n z_candidate.phi, n_bins=25, range_tuple=(0, np.pi),\n xlabel=\"reconstructed Z phi\", ylabel=\"counts\",\n title=\"reconstructed Z phi\",\n save_name=\"reco_z_phi.png\")\nplot_histogram(\n z_candidate.E, n_bins=25, range_tuple=(0, 100),\n xlabel=\"reconstructed Z E\", ylabel=\"counts\",\n title=\"reconstructed Z E\",\n save_name=\"reco_z_E.png\")\n","repo_name":"callum-mccracken/TRISEP_2022","sub_path":"collider/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7857618187","text":"import os\nimport loompy\n\nloom_file = snakemake.input['loom']\ngenes_out = snakemake.output['genes']\n\nos.makedirs(os.path.dirname(genes_out), exist_ok=True)\n\nwith loompy.connect(loom_file, 'r') as ds:\n ens_id = ds.ra['EnsID'][:]\n\ngenes = list(ens_id)\n\nwith open(genes_out, 'w') as fout:\n for g in genes:\n fout.write(g + \"\\n\")\n","repo_name":"deto/Hotspot_Analysis","sub_path":"pipelineScripts/select_all.py","file_name":"select_all.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29977143871","text":"import re\nfrom collections import defaultdict\n\n\ndef get_new_dictionary(input_dict_name, output_dict_name):\n resultLines = []\n file1 = open(input_dict_name, \"r\")\n file2 = open(output_dict_name, \"w\")\n resultDic = defaultdict(list)\n for line in file1.readlines()[1:]:\n line = line.strip()\n words = re.split(r\"[, -]\", line)\n words = list(filter(lambda x: x != '', words))\n key = words[0]\n for w in words[1:]:\n resultDic[w].append(key)\n for k in sorted(resultDic.keys()):\n translations = \", \".join(sorted(resultDic[k]))\n line = f\"{k} - {translations}\"\n resultLines.append(line)\n resultLines.insert(0, f'{len(resultLines)}')\n file2.writelines(map(lambda x: x + \"\\n\", resultLines))\n file2.close()\n file1.close()\n","repo_name":"be-y-a/smth","sub_path":"OZON/ML/HOMEWORKS/week1_get_new_dictionary.py","file_name":"week1_get_new_dictionary.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25656215558","text":"import pygame\nimport os\nfrom utils import read_csv\n\n\nclass saw():\n # TODO : What is saw anyway?\n img = [pygame.transform.scale(pygame.image.load(os.path.join('png', 'SAW0.png')), (64, 64)),\n pygame.transform.scale(pygame.image.load(os.path.join('png', 'SAW1.png')), (64, 64)),\n pygame.transform.scale(pygame.image.load(os.path.join('png', 'SAW2.png')), (64, 64)),\n pygame.transform.scale(pygame.image.load(os.path.join('png', 'SAW3.png')), (64, 64))]\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.hitbox = (x, y, width, height)\n self.count = 0\n self.safespot = x\n\n def draw(self, screen):\n if self.count >= 8:\n self.count = 0\n screen.blit(self.img[self.count // 2], (self.x, self.y))\n self.count += 1\n\n def update_position(self, change):\n if change == -45:\n # TODO Wtf do you mean with this? 
safespot = x in init()?\n # TODO God's sake this is under construction bro.\n self.x = self.safespot\n else:\n self.x += change\n\n\nclass Tile():\n def __init__(self, image, x, y):\n self.image = pygame.transform.scale(pygame.image.load(image), (32, 32))\n self.rect = self.image.get_rect()\n self.rect.x, self.rect.y = x, y\n\n def draw(self, surface):\n surface.blit(self.image, (self.rect.x, self.rect.y))\n\n def collide(self, hitbox):\n print('Player Hit-box: ', hitbox)\n print('Tile Rectangle: ', self.rect)\n print()\n if self.rect.x <= hitbox[0] + hitbox[2] <= self.rect.x + 32:\n if hitbox[1] + hitbox[3] > self.rect.y:\n if hitbox[1] < 32 + self.rect.y:\n return True\n return False\n\n\nclass TileMap():\n def __init__(self, filename):\n self.tile_size = 32\n self.start_x, self.start_y = 0, 0\n self.tiles = self.load_tiles(filename)\n self.map_surface = pygame.Surface((self.map_w, self.map_h))\n self.map_surface.set_colorkey((0, 0, 0))\n #self.load_map()\n\n def draw_map(self, surface, move_check, screen_scroller):\n for tile in self.tiles:\n if move_check:\n tile.rect.x += screen_scroller\n surface.blit(tile.image, (tile.rect.x, tile.rect.y + 558))\n\n #def draw_map(self, surface, bgX):\n #surface.blit(self.map_surface, (bgX, 558))\n\n #def load_map(self):\n #for tile in self.tiles:\n #tile.draw(self.map_surface)\n\n def load_tiles(self, filename):\n tiles = []\n tile_path = 'Tile (1).png'\n tile_map = read_csv(filename)\n\n for y, row_map in enumerate(tile_map):\n # Move to next row\n for x, tile in enumerate(row_map):\n # Move to next tile in current row\n if tile == '-1':\n self.start_x = x * self.tile_size\n self.start_y = y * self.tile_size\n else:\n if tile == '1' or tile == '2':\n tile_path = 'Tile (2).png'\n elif tile == '0':\n tile_path = 'Tile (1).png'\n elif tile == '3' or tile == '6':\n tile_path = 'Tile (4).png'\n elif tile == '4' or tile == '7' or tile == '8':\n tile_path = 'Tile (5).png'\n tiles.append(Tile(os.path.join('Tiles', tile_path), x * self.tile_size, y * self.tile_size))\n\n # Store the size of the tile map\n self.map_w, self.map_h = len(tile_map[-1]) * self.tile_size, len(tile_map) * self.tile_size\n return tiles\n","repo_name":"guy1998/CodeZ","sub_path":"game_obj.py","file_name":"game_obj.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38398880203","text":"# -*- coding: utf-8 -*-\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.layers as pd\n\ndict_size = 30000\nsource_dict_dim = target_dict_dim = dict_size\nhidden_dim = 32\nword_dim = 32\nbatch_size = 2\nmax_length = 8\ntopk_size = 50\nbeam_size = 2\n\nis_sparse = True\ndecoder_size = hidden_dim\nmodel_save_dir = \"machine_translation.inference.model\"\n\n\ndef encoder():\n src_word_id = pd.data(\n name=\"src_word_id\", shape=[1], dtype='int64', lod_level=1)\n src_embedding = pd.embedding(\n input=src_word_id,\n size=[dict_size, word_dim],\n dtype='float32',\n is_sparse=is_sparse,\n param_attr=fluid.ParamAttr(name='vemb'))\n\n fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh')\n lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4)\n encoder_out = pd.sequence_last_step(input=lstm_hidden0)\n return encoder_out\n\n\ndef train_decoder(context):\n trg_language_word = pd.data(\n name=\"target_language_word\", shape=[1], dtype='int64', lod_level=1)\n trg_embedding = pd.embedding(\n input=trg_language_word,\n size=[dict_size, word_dim],\n dtype='float32',\n 
is_sparse=is_sparse,\n param_attr=fluid.ParamAttr(name='vemb'))\n\n rnn = pd.DynamicRNN()\n with rnn.block():\n current_word = rnn.step_input(trg_embedding)\n pre_state = rnn.memory(init=context, need_reorder=True)\n current_state = pd.fc(\n input=[current_word, pre_state], size=decoder_size, act='tanh')\n\n current_score = pd.fc(\n input=current_state, size=target_dict_dim, act='softmax')\n rnn.update_memory(pre_state, current_state)\n rnn.output(current_score)\n\n return rnn()\n\n\ndef train_program():\n context = encoder()\n rnn_out = train_decoder(context)\n label = pd.data(\n name=\"target_language_next_word\", shape=[1], dtype='int64', lod_level=1)\n cost = pd.cross_entropy(input=rnn_out, label=label)\n avg_cost = pd.mean(cost)\n return avg_cost\n\n\ndef optimizer_func():\n return fluid.optimizer.Adagrad(\n learning_rate=1e-4,\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=0.1))\n\n\ndef train(use_cuda):\n EPOCH_NUM = 1\n\n if use_cuda and not fluid.core.is_compiled_with_cuda():\n return\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.wmt14.train(dict_size), buf_size=1000),\n batch_size=batch_size)\n\n feed_order = [\n 'src_word_id', 'target_language_word', 'target_language_next_word'\n ]\n\n def event_handler(event):\n if isinstance(event, fluid.EndStepEvent):\n if event.step % 10 == 0:\n print('pass_id=' + str(event.epoch) + ' batch=' + str(\n event.step) + ' loss=' + str(event.metrics[0]))\n\n if isinstance(event, fluid.EndEpochEvent):\n trainer.save_params(model_save_dir)\n\n trainer = fluid.Trainer(\n train_func=train_program, place=place, optimizer_func=optimizer_func)\n\n trainer.train(\n reader=train_reader,\n num_epochs=EPOCH_NUM,\n event_handler=event_handler,\n feed_order=feed_order)\n\n\ndef main(use_cuda):\n train(use_cuda)\n\n\nif __name__ == '__main__':\n use_cuda = False # set to True if training with GPU\n main(use_cuda)","repo_name":"yyHaker/PaddlePaddle_study","sub_path":"src/learn/seq2seq/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"23685233873","text":"import csv\nimport json\nfrom collections import defaultdict\nfrom collections import Counter\nimport fce_api as fd\nimport pandas\nimport numpy as np\n\nDEBUG = False\n\n\"\"\" Computes the Fleiss' Kappa value as described in (Fleiss, 1971) \"\"\"\ndef compute_kappa(mat):\n \"\"\" Computes the Kappa value\n @param n Number of ratings per subject (number of human raters)\n @param mat Matrix[subjects][categories]\n @return The Kappa value \"\"\"\n n = check_each_line_count(mat) # PRE : every line count must be equal to n\n N = len(mat)\n k = len(mat[0])\n\n if DEBUG:\n print(n, 'raters.')\n print(N, 'subjects.')\n print(k, 'categories.')\n\n # Computing p[]\n p = [0.0] * k\n for j in range(k):\n p[j] = 0.0\n for i in range(N):\n p[j] += mat[i][j]\n p[j] /= N * n\n if DEBUG: print('p =', p)\n\n # Computing P[]\n P = [0.0] * N\n for i in range(N):\n P[i] = 0.0\n for j in range(k):\n P[i] += mat[i][j] * mat[i][j]\n P[i] = (P[i] - n) / (n * (n - 1))\n if DEBUG: print('P =', P)\n\n # Computing Pbar\n Pbar = sum(P) / N\n if DEBUG:\n print('Pbar =', Pbar)\n print('Sum P =', sum(P))\n\n # Computing PbarE\n PbarE = 0.0\n for pj in p:\n PbarE += pj * pj\n if DEBUG: print('PbarE =', PbarE)\n\n kappa = (Pbar - PbarE) / (1 - PbarE)\n if DEBUG: print('kappa =', kappa)\n\n 
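# (Pbar - PbarE) / (1 - PbarE) is the agreement observed beyond chance,\n # normalised by the maximum agreement attainable beyond chance.\n 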
return kappa\n\n\ndef compute_free_kappa(mat):\n \"\"\" Computes the Kappa value\n @param n Number of ratings per subject (number of human raters)\n @param mat Matrix[subjects][categories]\n @return The Kappa value \"\"\"\n n = check_each_line_count(mat) # PRE : every line count must be equal to n\n N = len(mat)\n k = len(mat[0])\n\n if DEBUG:\n print(n, 'raters.')\n print(N, 'subjects.')\n print(k, 'categories.')\n\n # Computing Po\n sum_of_squares = 0\n for j in range(k):\n for i in range(N):\n sum_of_squares += mat[i][j] * mat[i][j]\n p_o = ((sum_of_squares - N * n) / ((N * n) * (n - 1)))\n if DEBUG: print('P_o =', p_o)\n\n # Computing p_e\n p_e = 1.0 / k\n\n kappa = (p_o - p_e) / (1.0 - p_e)\n if DEBUG: print('kappa =', kappa)\n\n return kappa\n\ndef compute_kraemer_kappa(mat):\n \"\"\" Computes the Kappa value\n @param n Number of ratings per subject (number of human raters)\n @param mat Matrix[subjects][categories]\n @return The Kappa value \"\"\"\n n = check_each_line_count(mat) # PRE : every line count must be equal to n\n N = len(mat)\n k = len(mat[0])\n\n if DEBUG:\n print(n, 'raters.')\n print(N, 'subjects.')\n print(k, 'categories.')\n\n # Computing p[]\n p = [0.0] * k\n for j in range(k):\n p[j] = 0.0\n for i in range(N):\n p[j] += mat[i][j]\n p[j] /= N * n\n if DEBUG: print('p =', p)\n\n # Computing P[]\n P = [0.0] * N\n for i in range(N):\n P[i] = 0.0\n for j in range(k):\n P[i] += mat[i][j] * mat[i][j]\n P[i] = (P[i] - n) / (n * (n - 1))\n if DEBUG: print('P =', P)\n\n # Computing Pbar\n Pbar = sum(P) / N\n if DEBUG:\n print('Pbar =', Pbar)\n print('Sum P =', sum(P))\n\n # Computing PbarE\n PbarE = 0.0\n for pj in p:\n PbarE += pj * pj\n if DEBUG: print('PbarE =', PbarE)\n\n kappa = (Pbar - PbarE) / (1 - PbarE) + (1 - Pbar)/ ((N * n) * (1 - PbarE))\n if DEBUG: print('kappa =', kappa)\n return kappa\n\n\ndef check_each_line_count(mat):\n \"\"\" Assert that each line has a constant number of ratings\n @param mat The matrix checked\n @return The number of ratings\n @throws AssertionError If lines contain different number of ratings \"\"\"\n n = sum(mat[0])\n\n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n\n\n\ndef extract_from_result(result, key):\n \"\"\" Returns the chosen ids from an answer\n :param result: the result from one answer\n :return: the values of the chosen key from the result tokens \"\"\"\n values = []\n for token in result:\n values.append(token[key])\n return values\n\ndef extract_results(filename, value='id'):\n result_dict = defaultdict(list)\n with open(filename) as file:\n reader = csv.DictReader(file)\n for row in reader:\n #print(row['Input.sentence'], row['Answer.chosenWord'])\n answer_json = json.loads(row['Answer.chosenWord'])\n result_dict[row['Input.sentence']] += extract_from_result(answer_json, value)\n return result_dict\n\ndef extract_agreement_table(filename, number_of_annotators, value='id'):\n result_dict = defaultdict(list)\n table_rows = np.full([500, number_of_annotators], 0)\n with open(filename) as file:\n reader = csv.DictReader(file)\n for i,row in enumerate(reader):\n answer_json = json.loads(row['Answer.chosenWord'])\n selection = extract_from_result(answer_json, value)\n for id in selection:\n key = row['Input.sentence'] + str(id)\n if result_dict[key] == []:\n result_dict[key] = [row['Input.sentence']] + [id] + [0] * number_of_annotators\n result_dict[key][2 + i % number_of_annotators] += 1\n return result_dict\n\n
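# Each row built above has the form [sentence, token_id, c_1, ..., c_n], where\n# c_i counts how often annotator slot i selected that token.\n\n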
def test_extract_results():\n hundred_dict = extract_results('100_sentence_3_batch_results.csv')\n assert len(list(hundred_dict.keys())) == 100\n\ndef generate_matrix(result_dict, num_judges=3, num_classes=2):\n values_sets = [set(l) for l in result_dict.values()]\n set_lengths = [len(list(s)) for s in values_sets]\n num_events = sum(set_lengths)\n agreement_matrix = np.zeros([num_events, num_classes], dtype=np.int64)\n events_counter = 0\n for key in result_dict.keys():\n ids = Counter(result_dict[key])\n for key in ids.keys():\n agreement_matrix[events_counter, 1] = ids[key]\n agreement_matrix[events_counter, 0] = num_judges - ids[key]\n events_counter += 1\n return agreement_matrix\n\n\ndef compute_agreement_dict(mat):\n agreement_dict = {}\n for row in mat:\n row_str = '[' + str(row[0]) + ',' + str(row[1]) + ']'\n agreement_dict.setdefault(row_str, 'no-value')\n if agreement_dict[row_str] == 'no-value':\n agreement_dict[row_str] = 1\n else:\n agreement_dict[row_str] += 1\n return agreement_dict\n\ndef naive_agreement(result_dict):\n matched = 0\n total = 0\n for key in result_dict.keys():\n ids = Counter(result_dict[key])\n for key in ids.keys():\n if (ids[key] == 3):\n matched += 3\n else:\n matched += 2\n total += 3\n ratio = matched / total\n return ratio\n\ndef record_fleiss_matrix(mat):\n with open('fleiss_mat.csv', 'w+') as csv_file:\n field_names = ['not_chosen', 'chosen']\n dict_writer = csv.DictWriter(csv_file, fieldnames=field_names)\n dict_writer.writeheader()\n for event in mat:\n dict_writer.writerow({field_names[0]: event[0],\n field_names[1]: event[1]})\n\ndef test_agreement_table(agreement_table, number_of_annotators):\n for event in agreement_table.values():\n if (sum(event[2:]) < 1 or sum(event[2:]) > number_of_annotators):\n print('Invalid agreement table')\n\ndef record_agreement_table(agreement_table):\n length = max([len(x) for x in agreement_table.values()])\n with open('agreement_table.csv', 'w+') as csv_file:\n field_names = ['sentence', 'id'] + [str(x) for x in list(range(1, length - 1))]\n dict_writer = csv.DictWriter(csv_file, fieldnames=field_names)\n dict_writer.writeheader()\n for event in agreement_table.values():\n row_dict = {}\n for i in range(len(event)):\n row_dict[field_names[i]] = event[i]\n dict_writer.writerow(row_dict)\n\n# TO DO: confusion matrix is better!!\ndef correct_count(result_dict):\n correct = 0\n no_error_correct = 0\n no_error_sentences = 0\n correct_spot = 0\n found = 0\n missed_spans = 0\n total_spans = 0\n counted_as_no_err = 0\n fce_data = fd.extract_data('fce_train.gold.max.rasp.old_cat.m2')\n for key in result_dict.keys():\n if -2 in result_dict[key]:\n counted_as_no_err += 1\n for sentence in fce_data:\n spans_selected = []\n if key == sentence[0][1:]:\n found += 1\n if len(sentence[1]) < 1:\n no_error_sentences += 1\n for start in result_dict[key]:\n if start == -2 and len(sentence[1]) == 0:\n correct += 1\n no_error_correct += 1\n else:\n for i, span in enumerate(sentence[1]):\n if int(start) >= span[0] and int(start) < span[1]:\n correct_spot += 1\n correct += 1\n spans_selected.append(i)\n missed_spans += len(sentence[1]) - len(list(set(spans_selected)))\n total_spans += len(sentence[1])\n print('No error match: ', no_error_correct)\n print('No error sentences: ', no_error_sentences)\n print('Has no error answer: ', counted_as_no_err)\n print('Error match: ', correct_spot)\n print('Error missed: ', missed_spans)\n print('Found sentences: ', found)\n print('Total spans: ', total_spans)\n return correct\n\n
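# A start value of -2 is the sentinel meaning \"no error in this sentence\".\n\n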
in result_dict.keys():\n        no_errors += len([x for x in result_dict[key] if x == -2])\n        errors += len([x for x in result_dict[key] if x != -2])\n    return errors, no_errors\n\nif __name__ == \"__main__\":\n    \"\"\" Example on this Wikipedia article data set \"\"\"\n    sentences_ids = extract_results('100_sentence_3_batch_results.csv')\n    fleiss_matrix = generate_matrix(sentences_ids)\n\n    free_kappa = compute_free_kappa(fleiss_matrix[0:3])\n    print('free kappa: ' + str(free_kappa))\n    fleiss_kappa = compute_kappa(fleiss_matrix)\n    print('kappa: ' + str(fleiss_kappa))\n    kraemer_kappa = compute_kraemer_kappa(fleiss_matrix)\n    print('kraemer kappa: ' + str(kraemer_kappa))\n    naive_ratio = naive_agreement(sentences_ids)\n    print('naive ratio: ' + str(naive_ratio))\n    agreement_classes = compute_agreement_dict(fleiss_matrix)\n    print(agreement_classes)\n    record_fless_matrix(fleiss_matrix)\n\n    mat = \\\n        [\n            [0, 0, 0, 0, 14],\n            [0, 2, 6, 4, 2],\n            [0, 0, 3, 5, 6],\n            [0, 3, 9, 2, 0],\n            [2, 2, 8, 1, 1],\n            [7, 7, 0, 0, 0],\n            [3, 2, 6, 3, 0],\n            [2, 5, 3, 2, 2],\n            [6, 5, 2, 1, 0],\n            [0, 2, 2, 3, 7]\n        ]\n    #print('fleiss kappa: ' + str(compute_kappa(mat)))\n\n    start_result_dict = extract_results('100_sentence_3_batch_results.csv', value='start')\n    c_count = correct_count(start_result_dict)\n    print('correct: ' + str(c_count))\n    err, no_err = error_no_error(sentences_ids)\n    print('error: ', err)\n    print('no-error: ', no_err)\n    err, no_err = error_no_error(start_result_dict)\n    print('error: ', err)\n    print('no-error: ', no_err)\n    agreement_table = extract_agreement_table('100_sentence_3_batch_results.csv', 3,)\n    for key in agreement_table.keys():\n        print(key + str(agreement_table[key]))\n    test_agreement_table(agreement_table, 3)\n    expert_column = []\n    fce_data = fd.extract_data('fce_train.gold.max.rasp.old_cat.m2')\n    for event in agreement_table.values():\n        for sentence in fce_data:\n            # if the sentence is matched\n            if event[0] == sentence[0][1:]:\n                errors_num = len(sentence[1])\n                if event[1] == -2:\n                    # if the sentence has no error and the annotator also marked no error (a true negative)\n                    if errors_num < 1:\n                        event.append(1)\n                    # if the sentence has an error but the annotator said there are no errors (a false negative)\n                    else:\n                        event.append(0)\n                # if the sentence does not have an error but the annotator said it has an error (a false positive)\n                elif errors_num < 1:\n                    event.append(0)\n                else:\n                    for i in range(errors_num):\n                        # if the annotated error fits in an error span\n                        if int(sentence[1][i][0]) <= event[1] // 2 and event[1] // 2 < int(sentence[1][i][1]):\n                            event.append(1)\n                            break\n                        # if there are no more errors.\n                        if i == errors_num - 1:\n                            event.append(0)\n    print(len(expert_column))\n    record_agreement_table(agreement_table)\n
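    # Added self-check sketch (not part of the original script): exercises the kappa\n    # helpers above on a toy 5-subject / 2-category matrix with n = 3 ratings per\n    # subject; the numbers are illustrative only.\n    toy_mat = [[3, 0], [2, 1], [0, 3], [1, 2], [3, 0]]\n    print('toy free kappa: ' + str(compute_free_kappa(toy_mat)))\n    print('toy kraemer kappa: ' + str(compute_kraemer_kappa(toy_mat)))\n","repo_name":"Alex92rus/ErrorDetectionProject","sub_path":"old_version/inter-annotator_agreement.py","file_name":"inter-annotator_agreement.py","file_ext":"py","file_size_in_byte":12943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"10220898722","text":"#!/usr/bin/python3\r\n# -*- encoding: utf-8 -*-\r\n# Author: Vozec\r\n# CTFlagger\r\n\r\nfrom flask import render_template\r\nfrom os import path\r\nfrom random import choice\r\nimport html\r\nimport json\r\n\r\nfrom utils.utils_func import ConvertResult,Timestamp2date,Size,find\r\n\r\ndef Result_manager(result,CONFIG):\r\n\tconfig \t = ConvertResult(result)\r\n\tres_scan = json.loads(config['result'])\r\n\tcontent = Get_Content(res_scan,config,CONFIG)\r\n\r\n\treturn 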
render_template('result.html',\r\n\t\tfilename=config['filename'].replace('_original',''),\r\n\t\tsize=config['size'],\r\n\t\tmagic=config['magic'],\r\n\t\tupload_count=config['upload_count'],\r\n\t\tfirst_up=Timestamp2date(config['first_up']),\r\n\t\tlast_up=Timestamp2date(config['last_up']),\r\n\t\tpasswords=str(config['all_password']),\r\n\t\tflags=str(config['flag'])\r\n\t).replace(r'7e84437f35fa24b76c7898ca87f636d0',content)\r\n\r\ndef Get_Content(results,config,CONFIG):\r\n\tcontent = ''\r\n\tmodels = Load_all_model()\r\n\tfor module,res in results.items():\r\n\t\tif (type(res['path']) == str):\r\n\t\t\tcontent += HTML_1(models,module,res,config,CONFIG)\t\r\n\t\telse:\r\n\t\t\tcontent += HTML_2(models,module,res,config,CONFIG)\t\r\n\treturn content\r\n\r\n\r\ndef HTML_2(models,module,res,config,CONFIG):\r\n\tdef preview_possible(res):\r\n\t\ttxt = []\r\n\t\tfor x in res['path']:\r\n\t\t\tfor ext in ['json','txt']:\r\n\t\t\t\tif x.endswith(ext):\r\n\t\t\t\t\ttxt.append(x)\r\n\t\t\t\t\tbreak\r\n\t\treturn len(txt) == len(res['content'])\r\n\tcontent = ''\r\n\tpreview_id = 0\r\n\tpreview_on = preview_possible(res)\r\n\tfor i in range(len(res['path'])):\r\n\t\tfile = res['path'][i]\r\n\t\tif not preview_on:\r\n\t\t\tcontent += render_template(models['multi_nopreview'],\r\n\t\t\t\tcontent='%s %s'%(Name(file),Get_size(file,config,CONFIG)),\r\n\t\t\t\tlink=file,\r\n\t\t\t\tid = Get_id())\r\n\t\telse:\r\n\t\t\tcnt = res['content'][preview_id] if preview_id < len(res['content']) else ''\r\n\r\n\t\t\ttemplate = models['multi_preview_text']\r\n\t\t\tif isimage(file):\r\n\t\t\t\ttemplate = models['multi_preview_img']\r\n\t\t\telif cnt != '':\r\n\t\t\t\ttemplate = models['multi_preview_text']\r\n\t\t\t\tpreview_id += 1\r\n\r\n\t\t\tcontent += render_template(template,\r\n\t\t\t\t\tcontent='%s %s'%(Name(file),Get_size(file,config,CONFIG)),\r\n\t\t\t\t\tlink=file,\r\n\t\t\t\t\tid = Get_id(),\r\n\t\t\t\t\tpreview=html.escape(cnt))\r\n\r\n\treturn render_template('result/multi_base.html',\r\n\t\tmodule=module,\r\n\t\tcontent='%s files'%len(res['path']),\r\n\t\tid = Get_id(),\r\n\t).replace(r'7e84437f35fa24b76c7898ca87f636d0',content)\r\n\r\ndef HTML_1(models,module,res,config,CONFIG):\r\n\ttemplate = models['simple_nopreview']\r\n\tif isimage(res['path']):\r\n\t\ttemplate = models['simple_preview_img']\r\n\telif res['content'] != '':\r\n\t\ttemplate = models['simple_preview_text']\r\n\r\n\treturn render_template(template,\r\n\t\t\tmodule=module,\r\n\t\t\tcontent='%s %s'%(Name(res['path']),Get_size(res['path'],config,CONFIG)),\r\n\t\t\tlink=res['path'],\r\n\t\t\tid = Get_id(),\r\n\t\t\tpreview=html.escape(res['content']))\r\n\r\ndef Load_all_model():\r\n\treturn {\r\n\t\t'simple_nopreview' : 'result/simple_nopreview.html',\r\n\t\t'simple_preview_text' : 'result/simple_preview_text.html',\r\n\t\t'simple_preview_img' : 'result/simple_preview_img.html',\r\n\t\t'multi_nopreview' : 'result/multi_nopreview.html',\r\n\t\t'multi_preview_img' : 'result/multi_preview_img.html',\r\n\t\t'multi_preview_text' : 'result/multi_preview_text.html',\r\n\t}\r\n\r\ndef Get_id():\r\n\treturn ''.join([choice('012345789') for _ in range(64)])\r\n\r\ndef isimage(file):\r\n\tfor x in ['png','jpeg','jpg','bmp','gif']:\r\n\t\tif file.endswith(x):\r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef Name(file):\r\n\treturn path.basename(file)\r\n\r\ndef Path(file):\r\n\treturn path.dirname(file)\r\n\r\ndef Get_size(file,config,CONFIG):\r\n\tdirectory = '%s/%s'%(CONFIG['dwnl_dir'],config['hash'])\r\n\tfound_dir,found_file = 
find(Name(file),directory)\r\n\tif found_file != '':\r\n\t\treturn '(%s)'%Size('%s/%s'%(found_dir,found_file))\r\n\treturn ''\r\n","repo_name":"Vozec/CTFileScan-WEB","sub_path":"app/utils/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"22868154303","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 16 16:56:47 2018\r\n\r\n@author: A\r\n\"\"\"\r\n\r\n##estimation\r\n##ARI, NMI: use these when there is a reference clustering to compare against\r\nimport mglearn\r\nfrom sklearn.metrics.cluster import adjusted_rand_score\r\nfrom sklearn.datasets import make_moons\r\n\r\nX, y = make_moons(n_samples=200, noise=0.05, random_state=0)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nscaler.fit(X)\r\nX_scaled = scaler.transform(X)\r\n\r\nimport matplotlib.pyplot as plt\r\nfig, axes = plt.subplots(1,4,figsize=(15,3),subplot_kw={\"xticks\":(),\"yticks\":()})\r\n# build the list of algorithms\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn.cluster import DBSCAN\r\nalgorithms = [KMeans(n_clusters=2),AgglomerativeClustering(n_clusters=2),DBSCAN()]\r\n\r\n# assign clusters at random\r\nimport numpy as np\r\nrandom_state = np.random.RandomState(seed=0)\r\nrandom_clusters = random_state.randint(low=0, high=2, size=len(X))\r\n\r\naxes[0].scatter(X_scaled[:,0],X_scaled[:,1],c=random_clusters, cmap=mglearn.cm3,\r\n                s=60, edgecolors=\"black\")\r\naxes[0].set_title(\"random assign - ARI : {:.2f}\".format(\r\n        adjusted_rand_score(y,random_clusters)))\r\n\r\nfor ax, algorithm in zip(axes[1:],algorithms):\r\n    clusters = algorithm.fit_predict(X_scaled)\r\n    ax.scatter(X_scaled[:,0],X_scaled[:,1],c=clusters,\r\n               cmap=mglearn.cm3,s=60,edgecolors=\"black\")\r\n    ax.set_title(\"{} - ARI : {:.2f}\".format(algorithm.__class__.__name__,\r\n                 adjusted_rand_score(y, clusters)))\r\n\r\n# use adjusted_rand_score or normalized_mutual_info_score here, not accuracy\r\nfrom sklearn.metrics import accuracy_score\r\nclusters1 = [0,0,1,1,0]\r\nclusters2 = [1,1,0,0,1]\r\nprint(\"accuracy : {:.2f}\".format(accuracy_score(clusters1, clusters2)))\r\nprint(\"ARI : {:.2f}\".format(adjusted_rand_score(clusters1, clusters2)))\r\n\r\n
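# Added sketch (not in the original): NMI is the other permutation-invariant\r\n# metric named above; like ARI, it is unaffected by relabeling the clusters.\r\nfrom sklearn.metrics import normalized_mutual_info_score\r\nprint(\"NMI : {:.2f}\".format(normalized_mutual_info_score(clusters1, clusters2)))\r\n\r\n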
## silhouette coefficient: usable when there is no reference clustering to compare\r\n## against, but it becomes unreliable as the cluster shapes grow more complex\r\nfrom sklearn.metrics.cluster import silhouette_score\r\nX, y = make_moons(n_samples=200, noise=0.05, random_state=0)\r\nscaler = StandardScaler()\r\nscaler.fit(X)\r\nX_scaled = scaler.transform(X)\r\n\r\nfig, axes = plt.subplots(1, 4, figsize=(15,3), subplot_kw={\"xticks\":(),\"yticks\":()})\r\nrandom_state = np.random.RandomState(seed=0)\r\nrandom_clusters = random_state.randint(low=0, high=2, size=len(X))\r\n\r\naxes[0].scatter(X_scaled[:,0],X_scaled[:,1],c=random_clusters, cmap=mglearn.cm3,\r\n                s=60, edgecolors=\"black\")\r\naxes[0].set_title(\"random assign - silhouette : {:.2f}\".format(\r\n        silhouette_score(X_scaled,random_clusters)))\r\n\r\nfor ax, algorithm in zip(axes[1:],algorithms):\r\n    clusters = algorithm.fit_predict(X_scaled)\r\n    ax.scatter(X_scaled[:,0],X_scaled[:,1],c=clusters,\r\n               cmap=mglearn.cm3,s=60,edgecolors=\"black\")\r\n    ax.set_title(\"{} - silhouette : {:.2f}\".format(algorithm.__class__.__name__,\r\n                 silhouette_score(X_scaled, clusters)))\r\n    \r\n## find the principal components of the face data and transform it\r\n#pca (manual)\r\nfrom sklearn.decomposition import PCA\r\npca = PCA(n_components=100, whiten = True, random_state=0)\r\n\r\nfrom sklearn.datasets import fetch_lfw_people\r\npeople = fetch_lfw_people(min_faces_per_person=20,resize=0.7)\r\nimage_shape = people.images[0].shape\r\n\r\nmask = np.zeros(people.target.shape, dtype=bool)\r\nfor target in np.unique(people.target):# target frequencies differ, so take a flat 50 per target\r\n    mask[np.where(people.target == target)[0][:50]] = 1\r\n\r\nX_people = people.data[mask]\r\ny_people = people.target[mask]\r\n\r\nX_people = X_people / 255.\r\n\r\npca.fit_transform(X_people)\r\nX_pca = pca.transform(X_people)\r\n\r\n#dbscan\r\ndbscan = DBSCAN()\r\nlabels = dbscan.fit_predict(X_pca)\r\nprint(\"eigen label : {}\".format(np.unique(labels)))# result: -1, meaning everything is labeled noise
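\r\n\r\n# Added note (not in the original): eps and min_samples jointly set DBSCAN's\r\n# density threshold - raising eps or lowering min_samples admits sparser clusters.\r\n# The parameter sweeps below probe both knobs on the PCA embedding.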
\r\n\r\n# adjust min_samples\r\ndbscan = DBSCAN(min_samples=3)\r\nlabels = dbscan.fit_predict(X_pca)\r\nprint(\"eigen label : {}\".format(np.unique(labels)))\r\n\r\n# adjust eps\r\ndbscan = DBSCAN(min_samples=3, eps=15)\r\nlabels = dbscan.fit_predict(X_pca)\r\nprint(\"eigen label : {}\".format(np.unique(labels)))\r\n\r\n# counts per label\r\n# bincount cannot handle negatives, so shift the labels by +1 first\r\nprint(\"label count : {}\".format(np.bincount(labels+1))) # 32 noise points\r\n\r\nnoise = X_people[labels==-1]\r\nfig, axes = plt.subplots(3, 9, subplot_kw = {\"xticks\":(),\"yticks\":()},figsize=(12,4))\r\nfor image, ax in zip(noise, axes.ravel()):\r\n    ax.imshow(image.reshape(image_shape),vmin=0, vmax=1)\r\n    \r\n# cluster breakdown per eps value\r\nfor eps in [1, 3, 5, 7, 9, 11, 13]:\r\n    print(\"eps = {}\".format(eps))\r\n    dbscan = DBSCAN(min_samples=3, eps = eps)\r\n    labels = dbscan.fit_predict(X_pca)\r\n    print(\"label count : {}\".format(len(np.unique(labels))))\r\n    print(\"label size : {}\".format(np.bincount(labels+1)))# eps 7 stands out\r\n    \r\n#eps=7\r\ndbscan = DBSCAN(min_samples=3, eps=7)\r\nlabels = dbscan.fit_predict(X_pca)\r\n\r\nfor cluster in range(max(labels)+1):\r\n    mask = labels == cluster\r\n    n_images = np.sum(mask)\r\n    fig, axes = plt.subplots(1, n_images, figsize=(n_images * 1.5, 4),\r\n                             subplot_kw = {\"xticks\":(),\"yticks\":()})\r\n    for image, label, ax in zip(X_people[mask],y_people[mask],axes):\r\n        ax.imshow(image.reshape(image_shape),vmin=0,vmax=1)\r\n        ax.set_title(people.target_names[label].split()[-1])\r\n\r\n# kmeans (automatic)\r\nkm = KMeans(n_clusters=10, random_state=0)\r\nlabels_km = km.fit_predict(X_pca)\r\nprint(\"kmeans cluster size : {}\".format(np.bincount(labels_km)))\r\n\r\n# kmeans ran on the PCA components, so use pca.inverse_transform to view the centers\r\nfig, axes = plt.subplots(2, 5, figsize=(12, 4), subplot_kw = {\"xticks\":(),\"yticks\":()})\r\nfor center, ax in zip(km.cluster_centers_, axes.ravel()):\r\n    ax.imshow(pca.inverse_transform(center).reshape(image_shape),vmin=0,vmax=1)\r\n    \r\n# example\r\nmglearn.plots.plot_kmeans_faces(km, pca, X_pca, X_people, y_people, people.target_names)\r\n\r\n# hierarchical clustering\r\nagglomerative = AgglomerativeClustering(n_clusters=10)\r\nlabels_agg = agglomerative.fit_predict(X_pca)\r\nprint(\"cluster size : {}\".format(np.bincount(labels_agg)))\r\n# compare hierarchical clustering with kmeans\r\nprint(\"ARI : {}\".format(adjusted_rand_score(labels_agg, labels_km)))# almost no overlap\r\n\r\n# dendrogram\r\nfrom scipy.cluster.hierarchy import dendrogram, ward\r\nlinkage_array = ward(X_pca)\r\nplt.figure(figsize=(20,5))\r\n# p: number of clusters, no_labels: hide the labels\r\ndendrogram(linkage_array, p=7, truncate_mode=\"level\", no_labels=True)\r\nplt.xlabel(\"sample No\")\r\nplt.ylabel(\"cluster distance\")\r\nax = plt.gca()\r\nbounds = ax.get_xbound()\r\nax.plot(bounds, [36,36], \"--\",c=\"k\")\r\n\r\n# plot the 10 clusters\r\n# each row is one cluster; the number on the left is its size.\r\nn_clusters = 10\r\nfor cluster in range(n_clusters):\r\n    mask = labels_agg == cluster\r\n    fig, axes = plt.subplots(1, 10, figsize=(15, 8),\r\n                             subplot_kw = {\"xticks\":(),\"yticks\":()})\r\n    axes[0].set_ylabel(np.sum(mask))\r\n    for image, label, asdf, ax in zip(\r\n            X_people[mask],y_people[mask],labels_agg[mask],axes):\r\n        ax.imshow(image.reshape(image_shape),vmin=0,vmax=1)\r\n        ax.set_title(people.target_names[label].split()[-1],\r\n                     fontdict={\"fontsize\":9})\r\n\r\n# try increasing the number of clusters from 10 to 40\r\nagglomerative = AgglomerativeClustering(n_clusters=40)\r\nlabels_agg = agglomerative.fit_predict(X_pca)\r\nprint(\"cluster size : {}\".format(np.bincount(labels_agg)))\r\n\r\nn_clusters = 40\r\nfor cluster in [10,13,19,22,36]:# arbitrary cluster numbers\r\n    mask = labels_agg == 
cluster\r\n    fig, axes = plt.subplots(1, 10, figsize=(15, 8),\r\n                             subplot_kw = {\"xticks\":(),\"yticks\":()})\r\n    cluster_size = np.sum(mask)\r\n    axes[0].set_ylabel(\"#{} : {}\".format(cluster, cluster_size))\r\n    for image, label, asdf, ax in zip(\r\n            X_people[mask],y_people[mask],labels_agg[mask],axes):\r\n        ax.imshow(image.reshape(image_shape),vmin=0,vmax=1)\r\n        ax.set_title(people.target_names[label].split()[-1],\r\n                     fontdict={\"fontsize\":9})\r\n    for i in range(cluster_size, len(axes)):# hide the unused axes; the figure only has len(axes) slots\r\n        axes[i].set_visible(False)\r\n    \r\n","repo_name":"Hanbyeongrim/python","sub_path":"cluster estimation.py","file_name":"cluster estimation.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"39439840120","text":"#Import library\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport random\r\n \r\nclass Player(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        super(Player, self).__init__()\r\n        self.surf = pygame.Surface((75, 75))\r\n        self.surf.fill((255, 255, 255))\r\n        self.rect = self.surf.get_rect()\r\n    \r\n    def update(self, pressed_keys):\r\n        if pressed_keys[K_UP]:\r\n            self.rect.move_ip(0, -1)\r\n        if pressed_keys[K_DOWN]:\r\n            self.rect.move_ip(0, 1)\r\n        if pressed_keys[K_LEFT]:\r\n            self.rect.move_ip(-1, 0)\r\n        if pressed_keys[K_RIGHT]:\r\n            self.rect.move_ip(1, 0)\r\n    \r\n        #Keep player on the screen\r\n        if self.rect.left < 0:\r\n            self.rect.left = 0\r\n        elif self.rect.right > 800:\r\n            self.rect.right = 800\r\n        if self.rect.top <= 0:\r\n            self.rect.top = 0\r\n        elif self.rect.bottom >= 600:\r\n            self.rect.bottom = 600 \r\n    \r\nclass Opponent(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        super(Opponent, self).__init__()\r\n        self.surf = pygame.Surface((20, 10))\r\n        self.surf.fill((255, 255, 255))\r\n        self.rect = self.surf.get_rect(center=(820, random.randint(0, 600)))\r\n        self.speed = random.randint(0, 2)\r\n    \r\n    def update(self):\r\n        self.rect.move_ip(-self.speed, 0)\r\n        if self.rect.right < 0:\r\n            self.kill()\r\n    \r\n#Initialize pygame modules\r\npygame.init()\r\n    \r\n#Create your screen\r\nscreen = pygame.display.set_mode((800, 600))\r\n    \r\n#Instantiate our player; right now he's just a rectangle\r\nplayer = Player()\r\n    \r\n#Set background color\r\nbackground = pygame.Surface(screen.get_size())\r\nbackground.fill((0, 0, 0))\r\n    \r\n#Create Groups and add game objects\r\nplayers = pygame.sprite.Group()\r\nopponents = pygame.sprite.Group()\r\nall_sprites = pygame.sprite.Group()\r\nall_sprites.add(player)\r\n    \r\n#Create opponent event\r\nADDOPPONENT = pygame.USEREVENT + 1\r\n    \r\n#Set timer for opponent event to occur every 250ms\r\npygame.time.set_timer(ADDOPPONENT, 250)\r\n    \r\n#Create the surface and pass in a tuple with its length and width\r\n#surf = pygame.Surface((75, 75))\r\n    \r\n#Give the surface a color to differentiate it from the background\r\n#surf.fill((255, 255, 255))\r\n#rect = surf.get_rect()\r\n    \r\nrunning = True\r\nwhile running:\r\n    \r\n    #For loop through the event queue\r\n    for event in pygame.event.get():\r\n        #Check for KEYDOWN event\r\n        #KEYDOWN is a constant defined in pygame.locals, imported earlier\r\n        if event.type == KEYDOWN and event.key == K_ESCAPE:\r\n            running = False\r\n            print(\"Escape\")\r\n        #Check for QUIT event; if QUIT, set running to false\r\n        elif event.type == QUIT:\r\n            running = False\r\n            print(\"QUIT\")    \r\n        #Check for Opponent event; if ADDOPPONENT, create and add opponent\r\n        elif event.type == ADDOPPONENT:\r\n            new_opponent = Opponent()\r\n            
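# Added descriptive note: the new sprite joins two groups - 'opponents' so its\r\n            # update() runs each frame, and 'all_sprites' so the draw loop blits it.\r\n            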
opponents.add(new_opponent)\r\n            all_sprites.add(new_opponent)\r\n    \r\n    #Draw background\r\n    screen.blit(background, (0, 0))\r\n    \r\n    #Get pressed keys\r\n    pressed_keys = pygame.key.get_pressed()\r\n    \r\n    #Update player position\r\n    player.update(pressed_keys)\r\n    \r\n    #Update opponents position\r\n    opponents.update()\r\n    \r\n    #Draw surf onto screen at coordinates x:400, y:300\r\n    #screen.blit(surf, (400, 300))\r\n    #screen.blit(player.surf, (400, 300))\r\n    #screen.blit(player.surf, player.rect)\r\n    for entity in all_sprites:\r\n        screen.blit(entity.surf, entity.rect)\r\n    pygame.display.flip()\r\n    \r\n#Exit the Game\r\npygame.quit()\r\n","repo_name":"CharliePepin2017/Python","sub_path":"pygame/pygame_charlie.py","file_name":"pygame_charlie.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"33837169989","text":"from data import conf\nclass MyTest(object):\n    # Open the admin back-end page, enter the account and password, then click login.\n    path =conf.WEB_SERVER_URL\n    def admin_loggin(self,dr):\n        #dr.open(self.path+\"https://10.11.64.242/admin/majorSec.php?pageID=runtimeRisk\")\n        dr.open(self.path+\"admin/majorSec.php?pageID=runtimeRisk\")\n        dr.maximize_window()\n        dr.click(\"#btnAdmin\")\n\n        dr.send_keys(\"#userNmFake\", \"admin\")\n        dr.send_keys(\"#pwdFake\", \"Mjs#2015\")\n        dr.click(\"#btnLogin\")\n\n    # Open the master page, enter the account and password, then click login.\n    def master_logoin(self,dr):\n        dr.open(self.path+\"master/login/login.php\")\n        dr.maximize_window()\n        # self.click(\"#btnAdmin\")\n        dr.send_keys(\"input[autofocus='autofocus']\", \"Mjs#2015\")\n        # self.send_keys(\"#pwdFake\",\"Mjs#2015\")\n        # dr.click(\".btnExecClass.ivu-btn.ivu-btn-primary.ivu-btn-long\")\n        dr.click(\"button[type='button']\")\n\n\n","repo_name":"ItTestKing/Base_test","sub_path":"common/login_page.py","file_name":"login_page.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"3906597155","text":"from dataclasses import dataclass\nfrom operator import attrgetter\nimport pprint\n\nfrom aoc import solution\nimport re\n\ndebug = False\nfilename = 'test.txt' if debug else 'input.txt'\nlength = 1000 if debug else 2503\n\n@dataclass\nclass Reindeer:\n    name: str\n    speed: int\n    go_time: int\n    rest_time: int\n    timer: int = 0\n    distance: int = 0\n    state: str = \"travelling\"\n    score: int = 0\n\n    def advance(self, seconds=1):\n        self.timer += seconds\n        if self.state == \"travelling\":\n            self.distance += self.speed*seconds\n\n            self.state_check(self.go_time, \"resting\")\n        elif self.state == \"resting\":\n            self.state_check(self.rest_time, \"travelling\")\n\n    def state_check(self, time, otherstate):\n        if self.timer >= time:\n            self.state = otherstate\n            self.timer = 0\n\n    def reset(self):\n        self.state = \"travelling\"\n        self.distance = 0\n        self.timer = 0\n\n\nspec = re.compile('([^ ]+)[^\\d]*' + '(\\d+)[^\\d]*'*3)\nreindeer = []\nwith open(filename) as f:\n    for line in f:\n        m = spec.match(line)\n        name, *numbers = m.groups()\n\n        speed, go_time, rest_time = map(int, numbers)\n\n        reindeer.append(Reindeer(name, speed, go_time, rest_time))\n\n# Part 1\nfor second in range(length):\n    for r in reindeer:\n        r.advance()\n\nwinner = max(reindeer, key=attrgetter('distance'))\nsolution(winner.distance)\n\n# Part 2\nfor r in reindeer:\n    r.reset()\n\nfor second in range(length):\n    for r in reindeer:\n        r.advance()\n    winner = max(reindeer, key=attrgetter('distance'))\n    for r2 in reindeer:\n        if r2.distance == winner.distance:\n            r2.score += 
1\n\nwinner = max(reindeer, key=attrgetter('score'))\nsolution(winner.score)","repo_name":"alchemyst/adventofcode","sub_path":"2015/14/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27881472406","text":"# vim: set fileencoding=utf-8 :\n#\n# (C) 2016,2017 Johannes Hubertz \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, please see\n# \n#\n# Additionally this program is licensed under the terms of the\n# Apache License, Version 2, see the top level directory right here,\n# or you can view there: \n#\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__author__\", \"__author_email__\",\n \"__license__\", \"__url__\", \"__version__\",\n]\n\n__title__ = \"iptables-converter\"\n\n__author__ = \"Johannes Hubertz\"\n__author_email__ = \"johannes@hubertz.de\"\n\n__license__ = \"GNU General Public License version 3 or later, Apache License Version 2\"\n\n__url__ = \"https://github.com/sl0/conv.git\"\n\n__version__ = \"0.9.11\"\n","repo_name":"sl0/conv","sub_path":"iptables_conv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"33653262760","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport networkx as nx\nimport csv\nimport igraph\nimport random\nimport numpy as np\nfrom igraph import *\nfrom collections import deque\n\n\n# In[2]:\n\n\n\"\"\"g = igraph.Graph.Read_Ncol('hep.txt', directed=True)\ng=g.as_undirected()\"\"\"\n\n\n# In[3]:\n\n\ng = Graph.Read_GML(\"netscience.gml\")\ng=g.as_undirected()\n\n\n# In[4]:\n\n\nneighbors_list = g.get_adjlist(mode=OUT)\n\n\n# In[5]:\n\n\nverclus=g.community_multilevel() # Using multilevel community detection \n\ntotal_clusters=len(verclus)\n\nprint(verclus)\nprint(g.modularity(verclus))\n\n\n# In[6]:\n\n\nw=0.5\nsz = np.zeros((total_clusters))\nsz1=np.zeros((total_clusters))\n\"\"\"for i in range(total_clusters):\n edge=0\n for j in verclus[i]:\n templist=neighbors_list[j]\n for k in templist:\n if k not in verclus[i]:\n edge+=1\n sz1[i]=edge\nfor i in range(total_clusters):\n sz[i]=w*len(verclus[i])+(1-w)*sz1[i] \"\"\"\nfor i in range(total_clusters):\n sz[i]=len(verclus[i])\nsort_order = np.argsort(sz)\n#print(sort_order)\nnum_sig = 20\ncnt=0\nsig_com = []\nfor i in range(total_clusters):\n sig_com.append(verclus[sort_order[total_clusters-1-i]])\n cnt+=1;\n if cnt==num_sig:\n break\nprint(len(sig_com))\nprint(sig_com)\n\n\n# In[7]:\n\n\nnum_candidates = []\n\nmax_c = -1\nmin_c = 10000000000\n\nfor i in range(num_sig):\n if(len(sig_com[i]) < min_c):\n min_c = len(sig_com[i])\n if(len(sig_com[i]) > max_c):\n max_c = len(sig_com[i])\nprint(\"Min Max \",min_c,max_c) \nalpha = 4\nbeta = 10\n\nfor i in range(num_sig):\n # print(len(sig_com[i]))\n x = (len(sig_com[i])-min_c)/(max_c-min_c)*beta + 
alpha\n if x>len(sig_com[i]):\n x=len(sig_com[i])\n num_candidates.append(int(x))\n \nprint(num_candidates) \ntotal_candidates=0\nfor y in num_candidates:\n total_candidates+=y\nprint(total_candidates) \n\n\n# In[8]:\n\n\ndeg = g.indegree()\nlen(deg)\n\n\n# In[9]:\n\n\ndef closeness_centrality(particle):\n visit=np.zeros((g.vcount()))\n cc=np.zeros((g.vcount()))\n q=deque()\n q.append(particle)\n cc[particle]=0\n while q:\n cur=q.popleft()\n visit[cur]=1\n for j in neighbors_list[cur]:\n if visit[j]==0:\n cc[j]=cc[cur]+1\n q.append(j)\n return np.sum(cc)\n\n\n# In[10]:\n\n\ndef LAC(particle):\n list=neighbors_list[particle]\n l=len(list)\n sum=0\n for i in range(l):\n for j in range(i+1,l):\n if j in neighbors_list[i]:\n sum+=1\n if l==0:\n return 0\n return sum/l\n\n\n# In[11]:\n\n\ncandidates=[]\nfor i in range(num_sig):\n temp = sig_com[i]\n degs = np.zeros((len(temp)))\n for j in range(len(temp)):\n degs[j]=deg[temp[j]]\n #degs[j]=LAC(temp[j])\n #degs[j]=closeness_centrality(temp[j])\n sort_order=np.argsort(degs)\n #desc_order = np.flip(sort_order,0)\n sz=len(sort_order)\n for z in range(num_candidates[i]):\n candidates.append(sig_com[i][sort_order[sz-1-z]])\n #candidates.append(sig_com[i][sort_order[z]])\nprint(len(sig_com))\nprint(candidates) \nprint(len(candidates))\n\n\n# In[12]:\n\n\ndef similarity(u,v):\n neighbors_u = set()\n neighbors_u.add(u)\n for i in neighbors_list[u]:\n neighbors_u.add(i)\n neighbors_v = set()\n neighbors_v.add(v)\n for i in neighbors_list[v]:\n neighbors_v.add(i)\n sim = len(neighbors_u.intersection(neighbors_v))/(len(neighbors_u) + len(neighbors_v))\n return sim\n\n\n# In[13]:\n\n\nrandom.seed(200)\ndef SHD(candidates,sim):\n x = []\n temp_candidate = set()\n for xx in candidates:\n temp_candidate.add(xx)\n for i in range(num_seeds):\n max_degree = 0\n for node in temp_candidate:\n if deg[node] > max_degree:\n max_degree = deg[node]\n v = node\n if len(x) sim:\n sim_neighbors.add(neighbor)\n temp_candidate.discard(v)\n for sim_neighbor in sim_neighbors:\n temp_candidate.discard(sim_neighbor)\n if len(temp_candidate) == 0: \n remaining = num_seeds-1-i\n rem=0\n while len(x)=0.5:\n random.seed(i*pop_size//2+j)\n \n \n r = random.choice(list(available.keys()))\n tempo = population[i][j] \n population[i][j] = r\n del available[r]\n available[tempo]=1\n\n\n\nfor i in range(pop_size//2,pop_size):\n for cand in candidates:\n available[cand] = 1\n for j in range(num_seeds):\n random.seed(i*pop_size//2+j)\n r = random.choice(list(available.keys()))\n \n population[i][j] = r\n del available[r]\n \n \n\"\"\"for frog in population:\n available = {}\n for cand in candidates:\n available[cand]=1\n \n for k in range(num_seeds):\n r = random.choice(list(available.keys()))\n \n frog[k] = r\n del available[r]\"\"\"\n \nprint(population) \n\n\n# In[18]:\n\n\nfit=find_fitness(population)\nprint(fit) \n\norder = np.argsort(fit)\norder = np.flipud(order)\nnew_pop = np.zeros((pop_size,num_seeds),dtype=int) \n\nfor it in range(len(order)):\n new_pop[it] = population[order[it]].copy()\nfor it in range(len(order)):\n population[it] = new_pop[it].copy() \n\npx=population[0].copy()\n\n#print(swarm_pos)\nfit=find_fitness(population)\nprint(fit) \nprint(len(candidates))\n\n\n# In[19]:\n\n\ndef max_influence(array):\n max=-1\n index=-1\n for i in range(len(array)):\n neighbors=neighbors_list[array[i]]\n sum=0\n for j in neighbors:\n sum=sum+deg[j]\n if sum>max:\n max=sum\n index=i\n return array[index]\n\n\n# In[ ]:\n\n\nfor itr in range(max_iterations): ###max iterations\n \n #step 
3\n cnt = 0;\n for i in range(n):\n for j in range(m):\n memeplex[j][i] = population[cnt]\n cnt+=1;\n \n #step 4.0,4,1 and 4.2\n max_ls=10\n pb=np.zeros((num_seeds),dtype=int)\n pw=np.zeros((num_seeds),dtype=int)\n \n temparray=np.zeros((num_seeds),dtype=int)\n \n \n for im in range(m):\n for iN in range(max_ls):\n \n #step 4.3\n q = n//2\n pb = memeplex[im][0].copy()\n pw = memeplex[im][q-1].copy()\n \n \n temparray=pw\n min_fit=find_fitness_single(temparray)\n \n #step 4.4\n available = {}\n for cand in candidates:\n available[cand]=1\n for k in range(num_seeds):\n if pw[k] in pb:\n #print(pw[k])\n if pw[k] in available:\n del available[pw[k]]\n continue\n #r = random.choice(list(available.keys()))\n r=max_influence(list(available.keys()))\n tempo = pw[k] \n pw[k] = r\n del available[r]\n available[tempo]=1\n \"\"\" = random.randrange(0,len(candidates),1);\n while candidates[r] in pw:\n r = random.randrange(0,len(candidates),1);\n pw[k] = candidates[r]\"\"\"\n \n #step 4.5\n if find_fitness_single(pw) < find_fitness_single(temparray):\n available = {}\n for cand in candidates:\n available[cand] = 1\n pw = temparray.copy() \n \"\"\"for k in range(num_seeds):\n if pw[k] in px:\n continue\n r = random.randrange(0,len(candidates),1);\n while candidates[r] in pw:\n r = random.randrange(0,len(candidates),1);\n pw[k] = candidates[r]\"\"\"\n for k in range(num_seeds):\n if pw[k] in px:\n if pw[k] in available:\n del available[pw[k]]\n continue\n #r = random.choice(list(available.keys()))\n r=max_influence(list(available.keys()))\n tempo = pw[k]\n pw[k] = r\n del available[r]\n available[tempo]=1\n \n #step 4.6\n if find_fitness_single(pw) < min_fit:\n available = {}\n for cand in candidates:\n available[cand] = 1 \n pw1=pw.copy()\n \"\"\"for k in range(num_seeds):\n r = random.randrange(0,len(candidates),1);\n while candidates[r] in pw:\n r = random.randrange(0,len(candidates),1);\n pw[k] = candidates[r]\"\"\"\n percent=0.2\n numchange=int(num_seeds*percent)\n pos=np.random.randint(0,num_seeds-1,numchange)\n for k in pos:\n #r=random.choice(list(available.keys()))\n r=max_influence(list(available.keys()))\n tempo=pw[k]\n pw[k]=r\n del available[r]\n available[tempo]=1\n if find_fitness_single(pw)> 0.9*find_fitness_single(pw1):\n if find_fitness_single(pw)<=find_fitness_single(pw1):\n max_worse=15\n percent=0.2\n numchange=int(num_seeds*percent)\n for j in range(max_worse):\n pos=np.random.randint(0,num_seeds-1,numchange)\n for k in pos:\n #r=random.choice(list(available.keys()))\n r=max_influence(list(available.keys()))\n tempo=pw[k]\n pw[k]=r\n del available[r]\n available[tempo]=1\n if find_fitness_single(pw)>min_fit:\n break\n if j==max_worse:\n pos=np.random.randint(0,num_seeds-1,numchange)\n for k in pos:\n r=random.choice(list(available.keys()))\n #r=max_influence(list(available.keys()))\n tempo=pw[k]\n pw[k]=r\n del available[r]\n available[tempo]=1\n elif find_fitness_single(pw)', methods=['GET'])\ndef show(key: str):\n print(key)\n try:\n print(request.headers.get('Authorization'))\n r = requests.get('http://localhost:6000/api/users/auth', \n headers={\n \"Authorization\":\n request.headers.get('Authorization')}).json()\n print(r)\n if 'valid' not in r or not r['valid']:\n print(r)\n abort(422, r['message']) \n ticket = get_ticket_with_id(key)\n except TicketDoesNotExistsException:\n abort(422, TicketDoesNotExistsException.get_message())\n return {\"title\": ticket['title'], \"price\": ticket['price'], \"userId\":ticket['userId']}\n\n\n@app.errorhandler(422)\ndef unprocessable(error):\n 
return jsonify({\n        \"Success\" : False,\n        \"error\": 422,\n        \"message\": f\"unprocessable: {error.description}\"\n    }), 422\n\n@app.errorhandler(404)\ndef not_found(error):\n    return jsonify({\n        \"Success\": False,\n        \"error\" : 404,\n        \"message\" : \"resource not found\"\n    }), 404","repo_name":"tabareslorenzo/Ticketing-App","sub_path":"tickets/routes/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"37756491990","text":"# Create a dictionary with books you like and their page counts;\r\n# Print these values to the terminal;\r\n\r\nlivros= {\r\n    \"Pai rico, pai pobre\": 278,\r\n    \"O segredo da mente milionária\": 170,\r\n    \"O manual de persuasão do fbi\": 580\r\n}\r\n\r\nfor livro in livros:\r\n    print(\"The book %s has: %d pages.\" % (livro, livros[livro]))
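\r\n\r\n# Added sketch (not in the original exercise): the same loop written with the\r\n# more idiomatic items() view, iterating over key/value pairs directly.\r\nfor livro, paginas in livros.items():\r\n    print(\"The book %s has: %d pages.\" % (livro, paginas))","repo_name":"Diusval/Studying-Algorithms-and-programming-logic-with-Python-3","sub_path":"PORTUGUESE/Seção05-Lista/30_exercicio_48/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"14246408475","text":"\"\"\"Utility functions for canvas integration\"\"\"\nimport logging\nfrom collections import defaultdict\n\nfrom common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed\nfrom django.contrib.auth.models import User\nfrom lms.djangoapps.courseware.access import has_access\nfrom lms.djangoapps.courseware.courses import get_course_by_id\nfrom lms.djangoapps.grades.context import grading_context_for_course\nfrom lms.djangoapps.grades.course_grade_factory import CourseGradeFactory\nfrom ol_openedx_canvas_integration.client import (\n    CanvasClient,\n    create_assignment_payload,\n    update_grade_payload_kv,\n)\nfrom opaque_keys.edx.locator import CourseLocator\n\nlog = logging.getLogger(__name__)\n\n\ndef first_or_none(iterable):\n    \"\"\"Returns the first item in the given iterable, or None if the iterable is empty\"\"\" # noqa: D401\n    return next((x for x in iterable), None)\n\n\ndef course_graded_items(course):\n    grading_context = grading_context_for_course(course)\n    for graded_item_type, graded_items in grading_context[\n        \"all_graded_subsections_by_type\"\n    ].items():\n        for graded_item_index, graded_item in enumerate(graded_items, start=1):\n            yield graded_item_type, graded_item, graded_item_index\n\n\ndef get_enrolled_non_staff_users(course):\n    \"\"\"\n    Returns an iterable of non-staff enrolled users for a given course\n    \"\"\" # noqa: D401\n    return [\n        user\n        for user in CourseEnrollment.objects.users_enrolled_in(course.id)\n        if not has_access(user, \"staff\", course)\n    ]\n\n\ndef enroll_emails_in_course(emails, course_key):\n    \"\"\"\n    Attempts to enroll all provided emails in a course. 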
Emails without a corresponding\n user have a CourseEnrollmentAllowed object created for the course.\n \"\"\" # noqa: D401\n results = {}\n for email in emails:\n user = User.objects.filter(email=email).first()\n result = \"\"\n if not user:\n _, created = CourseEnrollmentAllowed.objects.get_or_create(\n email=email, course_id=course_key\n )\n if created:\n result = \"User does not exist - created course enrollment permission\"\n else:\n result = \"User does not exist - enrollment is already allowed\"\n elif not CourseEnrollment.is_enrolled(user, course_key):\n try:\n CourseEnrollment.enroll(user, course_key)\n result = \"Enrolled user in the course\"\n except Exception as ex: # pylint: disable=broad-except # noqa: BLE001\n result = f\"Failed to enroll - {ex}\"\n else:\n result = \"User already enrolled\"\n results[email] = result\n return results\n\n\ndef get_subsection_user_grades(course):\n \"\"\"\n Builds a dict of user grades grouped by block locator. Only returns grades if the assignment has been attempted\n by the given user.\n\n Args:\n course: The course object (of the type returned by courseware.courses.get_course_by_id)\n\n Returns:\n dict: Block locators for graded items (assignments, exams, etc.) mapped to a dict of users\n and their grades for those assignments.\n Example: {\n : {\n : ,\n : ,\n }\n }\n \"\"\" # noqa: D401, E501\n enrolled_students = CourseEnrollment.objects.users_enrolled_in(course.id)\n subsection_grade_dict = defaultdict(dict)\n for student, course_grade, _error in CourseGradeFactory().iter(\n users=enrolled_students, course=course\n ):\n for subsection_dict in course_grade.graded_subsections_by_format().values():\n for subsection_block_locator, subsection_grade in subsection_dict.items():\n subsection_grade_dict[subsection_block_locator].update(\n # Only include grades if the assignment/exam/etc. has been attempted\n {student: subsection_grade}\n if subsection_grade.graded_total.first_attempted\n else {}\n )\n return subsection_grade_dict\n\n\ndef get_subsection_block_user_grades(course):\n \"\"\"\n Builds a dict of user grades grouped by the subsection XBlock representing each graded item.\n Only returns grades if the assignment has been attempted by the given user.\n\n Args:\n course: The course object (of the type returned by courseware.courses.get_course_by_id)\n\n Returns:\n dict: Block objects representing graded items (assignments, exams, etc.) 
mapped to a dict of users\n and their grades for those assignments.\n Example: {\n : {\n : ,\n : ,\n }\n }\n \"\"\" # noqa: D401, E501\n subsection_user_grades = get_subsection_user_grades(course)\n graded_subsection_blocks = [\n graded_item.get(\"subsection_block\")\n for graded_item_type, graded_item, graded_item_index in course_graded_items(\n course\n )\n ]\n locator_block_dict = {\n block_locator: first_or_none(\n block\n for block in graded_subsection_blocks\n if block.location == block_locator\n )\n for block_locator in subsection_user_grades\n }\n return {\n block: subsection_user_grades[block_locator]\n for block_locator, block in locator_block_dict.items()\n if block is not None\n }\n\n\ndef sync_canvas_enrollments(course_key, canvas_course_id, unenroll_current):\n \"\"\"\n Fetch enrollments from canvas and update\n\n Args:\n course_key (str): The edX course key\n canvas_course_id (int): The canvas course id\n unenroll_current (bool): If true, unenroll existing students if not staff\n \"\"\"\n client = CanvasClient(canvas_course_id)\n emails_to_enroll = client.list_canvas_enrollments()\n users_to_unenroll = []\n\n course_key = CourseLocator.from_string(course_key)\n course = get_course_by_id(course_key)\n\n if unenroll_current:\n enrolled_user_dict = {\n user.email: user for user in get_enrolled_non_staff_users(course)\n }\n emails_to_enroll_set = set(emails_to_enroll)\n already_enrolled_email_set = set(enrolled_user_dict.keys())\n emails_to_enroll = emails_to_enroll_set - already_enrolled_email_set\n users_to_unenroll = [\n enrolled_user_dict[email]\n for email in (already_enrolled_email_set - emails_to_enroll)\n ]\n\n enrolled = enroll_emails_in_course(emails=emails_to_enroll, course_key=course_key)\n log.info(\"Enrolled users in course %s: %s\", course_key, enrolled)\n\n if users_to_unenroll:\n for user_to_unenroll in users_to_unenroll:\n CourseEnrollment.unenroll(user_to_unenroll, course.id)\n log.info(\n \"Unenrolled non-staff users in course %s: %s\", course_key, users_to_unenroll\n )\n\n\ndef push_edx_grades_to_canvas(course):\n \"\"\"\n Gathers all student grades for each assignment in the given course, creates equivalent assignment in Canvas\n if they don't exist already, and adds/updates the student grades for those assignments in Canvas.\n\n Args:\n course: The course object (of the type returned by courseware.courses.get_course_by_id)\n\n Returns:\n dict: A dictionary with some information about the success/failure of the updates\n \"\"\" # noqa: E501\n canvas_course_id = course.canvas_course_id\n client = CanvasClient(canvas_course_id=canvas_course_id)\n existing_assignment_dict = client.get_assignments_by_int_id()\n subsection_block_user_grades = get_subsection_block_user_grades(course)\n\n # Populate missing assignments\n new_assignment_blocks = (\n subsection_block\n for subsection_block in subsection_block_user_grades\n if str(subsection_block.location) not in existing_assignment_dict\n )\n created_assignments = {\n subsection_block: client.create_canvas_assignment(\n create_assignment_payload(subsection_block)\n )\n for subsection_block in new_assignment_blocks\n }\n\n # Build request payloads for updating grades in each assignment\n enrolled_user_dict = client.list_canvas_enrollments()\n grade_update_payloads = {}\n for subsection_block, user_grade_dict in subsection_block_user_grades.items():\n grade_update_payloads[subsection_block] = dict(\n update_grade_payload_kv(\n enrolled_user_dict[student_user.email.lower()], grade.percent_graded\n )\n for 
student_user, grade in user_grade_dict.items()\n # Only add the grade if the user exists in Canvas\n if student_user.email.lower() in enrolled_user_dict\n )\n\n # Send requests to update grades in each relevant course\n assignment_grades_updated = {\n subsection_block: client.update_assignment_grades(\n canvas_assignment_id=existing_assignment_dict[\n str(subsection_block.location)\n ],\n payload=grade_request_payload,\n )\n for subsection_block, grade_request_payload in grade_update_payloads.items()\n if grade_request_payload\n and str(subsection_block.location) in existing_assignment_dict\n }\n\n return assignment_grades_updated, created_assignments\n","repo_name":"mitodl/open-edx-plugins","sub_path":"src/ol_openedx_canvas_integration/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"3325885338","text":"import numpy as np\r\nimport numpy.ma as MaskedArray\r\nfrom Utilities import *\r\nfrom finta import TA\r\nfrom datetime import datetime\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom math import sqrt\r\nimport pandas as pd\r\nfrom copy import deepcopy\r\nfrom collections import deque\r\nimport random as rand\r\nimport math\r\nfrom finta import TA\r\nimport seaborn as sns\r\n\r\n\"\"\" DQN Imports. \"\"\"\r\nfrom keras import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.optimizers import Adam\r\n\r\n# *********************************************************************************************************************************************************** #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# ------------------------------------------------------ Algorithmic Trading Using Technical Indicators ----------------------------------------------------- #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# *********************************************************************************************************************************************************** #\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ---------------------------------------------------- Bollinger Bands Learner -------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nclass BollingerBandsLearner(object):\r\n\r\n # Constructor. 
\r\n    def __init__(self):\r\n        pass\r\n\r\n\r\n    # Adjust dataframe for bollinger bands trading.\r\n    def BollingerBandsDf(self, ticker, startDate, endDate, window=8, stdVal=1.3):\r\n\r\n        tickerDf = IndividualHistoricalData(ticker, startDate, endDate, 'Yes')\r\n        bbUpper = tickerDf['Close'].rolling(window).mean() + tickerDf['Close'].rolling(window).std() * stdVal\r\n        bbLower = tickerDf['Close'].rolling(window).mean() - tickerDf['Close'].rolling(window).std() * stdVal\r\n\r\n        tickerDf['SMA'] = TA.SMA(tickerDf, window)\r\n        tickerDf['BBU'] = bbUpper\r\n        tickerDf['BBL'] = bbLower \r\n\r\n        return tickerDf\r\n\r\n\r\n    # Acquire sell prices and dates based on bollinger bands.\r\n    def BbSellPricesAndDates(self, checkDf):\r\n\r\n        # Drop na to avoid confusing algorithm.\r\n        df = checkDf.dropna()\r\n        sellPrice, sellDate = [], []\r\n        overBB = None\r\n        for index in range(len(df)):\r\n\r\n            # If the open or the close moves above the upper band, set the flag;\r\n            # it persists until a sell below clears it.\r\n            if df.iloc[index, 0] > df.iloc[index, 6] or df.iloc[index, 3] > df.iloc[index, 6]:\r\n                overBB = True\r\n\r\n            # If the value comes back in from being above BB, sell it.\r\n            if overBB is True and (df.iloc[index, 0] < df.iloc[index, 6]):\r\n                overBB = False\r\n                sellPrice.append(df.iloc[index, 3])\r\n                sellDate.append(df.index[index])\r\n\r\n            elif overBB is True and (df.iloc[index, 3] < df.iloc[index, 6]):\r\n                overBB = False\r\n                sellPrice.append(df.iloc[index + 1, 0])\r\n                sellDate.append(df.index[index + 1]) \r\n\r\n        return sellPrice, sellDate\r\n\r\n\r\n    # Acquire buy prices and dates based on bollinger bands.\r\n    def BbBuyPricesAndDates(self, checkDf):\r\n        \r\n        # Drop na to avoid confusing algorithm.\r\n        df = checkDf.dropna()\r\n        buyPrice, buyDate = [], []\r\n        underBB = None\r\n        for index in range(len(df)):\r\n\r\n            # If the open or the close moves below the lower band, set the flag;\r\n            # it persists until a buy below clears it.\r\n            if df.iloc[index, 0] < df.iloc[index, 7] or df.iloc[index, 3] < df.iloc[index, 7]:\r\n                underBB = True\r\n\r\n            # If value re enters the BB, buy it.\r\n            if underBB is True and (df.iloc[index, 0] > df.iloc[index, 6]):\r\n                underBB = False\r\n                buyPrice.append(df.iloc[index, 3])\r\n                buyDate.append(df.index[index])\r\n\r\n            elif underBB is True and (df.iloc[index, 3] > df.iloc[index, 6]):\r\n                underBB = False\r\n\r\n                try:\r\n                    buyPrice.append(df.iloc[index + 1, 0])\r\n                    buyDate.append(df.index[index + 1])\r\n                except:\r\n                    break \r\n\r\n        return buyPrice, buyDate\r\n
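\r\n    # Added clarifying note (not in the original): both helpers above implement a\r\n    # mean-reversion rule - a trade fires only when the price re-enters the band,\r\n    # never on the initial breakout bar itself.\r\n\r\n\r\n    # Format time for aesthetic purposes.\r\n    def FormatTime(self, time):\r\n        return str(time).split(' ')[0]\r\n\r\n\r\n    # Calculate the trading results.\r\n    def BbTradeResults(self, tickerDf, ticker): \r\n\r\n        # Make first row the first value.\r\n        startVal = tickerDf.iloc[0, 0]\r\n\r\n        # Get sell and buy prices and dates.\r\n        sp, sd = self.BbSellPricesAndDates(tickerDf)\r\n        totSp = sum(sp)\r\n        \r\n        bp, bd = self.BbBuyPricesAndDates(tickerDf)\r\n        totBp = sum(bp) \r\n\r\n        # Compute transaction fees.\r\n        totTranFee = (totSp + totBp) * 0.12\r\n\r\n        # Calculate total shares needed along with start value.\r\n        totalShares = len(sp)\r\n        totalValue = totalShares * startVal\r\n\r\n        # Calculate profit after all transactions and performance.\r\n        finalValue = totalValue - totBp + totSp - totTranFee\r\n        profit = ((finalValue/totalValue - 1)*100)\r\n        profit = round(profit, 2)\r\n\r\n        print(\"\\n\\nThe total amount of sells: {}\\n\"\r\n              \"The total amount of buys: {}\\n\\n\"\r\n              \"After transaction fees of about 12%, considering your portfolio had {} total shares of {} to\\n\" \r\n              \"invest from {} to {}, my algorithm could have made you 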
profitable by {}%\\n\\n\"\r\n              .format(len(sp), \r\n                      len(bp), \r\n                      totalShares,\r\n                      ticker, \r\n                      self.FormatTime(tickerDf.index[0]), \r\n                      self.FormatTime(tickerDf.index[len(tickerDf) - 1]),\r\n                      profit))\r\n\r\n\r\n    # Plot the trading results.\r\n    def BbVisualizeTrades(self, tickerDf, ticker):\r\n\r\n        bp, bd = self.BbBuyPricesAndDates(tickerDf)\r\n        sp, sd = self.BbSellPricesAndDates(tickerDf)\r\n\r\n        up = tickerDf[tickerDf.Close >= tickerDf.Open]\r\n        down = tickerDf[tickerDf.Close < tickerDf.Open]\r\n\r\n        # Plot a candlestick graph.\r\n        plt.figure()\r\n        plt.bar(up.index, up.Close - up.Open, 1, bottom=up.Open, color='black')\r\n        plt.bar(up.index, up.High - up.Close, 0.25, bottom=up.Close, color=\"black\")\r\n        plt.bar(up.index, up.Low - up.Open, 0.25, bottom=up.Open, color=\"black\")\r\n\r\n        # Plot the regular stock line in there as well.\r\n        plt.plot(tickerDf['Close'], label=ticker, color='purple', linestyle='dashed')\r\n        plt.bar(down.index, down.Close - down.Open, 1, bottom=down.Open, color='steelblue')\r\n        plt.bar(down.index, down.High - down.Open, 0.25, bottom=down.Open, color='steelblue')\r\n        plt.bar(down.index, down.Low - down.Close, 0.25, bottom=down.Close, color='steelblue')\r\n\r\n        # Plot buy and sell datapoints in the graph as well.\r\n        plt.xticks(rotation=45, ha='right')\r\n        plt.scatter(bd, bp, label='BUY', marker='^', color='Green', s=70)\r\n        plt.scatter(sd, sp, label='SELL', marker='v', color='Red', s=70)\r\n        plt.legend(loc='best')\r\n        plt.show()\r\n\r\n\r\n    # Perform all operations given a time range and stock.\r\n    def StockTradeBb(self,\r\n                     ticker='GOOGL', \r\n                     startDate='2022-01-01', \r\n                     endDate=datetime.today().strftime('%Y-%m-%d'),\r\n                     window=8, \r\n                     stdVal=1.3):\r\n\r\n        tickerDf = self.BollingerBandsDf(ticker, startDate, endDate, window, stdVal)\r\n        \r\n        self.BbTradeResults(tickerDf, ticker)\r\n\r\n        self.BbVisualizeTrades(tickerDf, ticker)\r\n
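\r\n    # Added usage sketch (not in the original; the ticker and dates are purely\r\n    # illustrative, and data access goes through IndividualHistoricalData from\r\n    # the Utilities import at the top of this module):\r\n    #   BollingerBandsLearner().StockTradeBb(ticker='AAPL', startDate='2022-01-01',\r\n    #                                        endDate='2022-06-30', window=8, stdVal=1.3)\r\n\r\n\r\n\r\n\r\n# *********************************************************************************************************************************************************** #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# ------------------------------------------------------------ Machine Learning Algorithm Learners ---------------------------------------------------------- #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# *********************************************************************************************************************************************************** #\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ---------------------------------------------------- Linear Regression Learner ------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nclass LinRegLearner(object):\r\n\r\n    def __init__(self, verbose=False):\r\n        \"\"\"\r\n        Description: This is the constructor for the LinRegLearner class\r\n        that simply initializes a Linear Regression Learner.\r\n\r\n        Params:\r\n            verbose (bool): Print process or not.\r\n\r\n        Returns: Initializes variables.\r\n        \"\"\"\r\n        \r\n        self.modelCoefficients = None\r\n        self.residuals = None\r\n        self.rank = 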
None\r\n self.s = None\r\n self.verbose = verbose\r\n\r\n if verbose:\r\n print(\"Initialization Complete.\")\r\n self.GetLearnerInfo()\r\n\r\n\r\n def AddEvidence(self, X, Y):\r\n \"\"\"\r\n Description: This function trains a linear regression learner\r\n when given training dataframes X and Y.\r\n\r\n Params:\r\n X (pd.DataFrame): Dataframe X.\r\n Y (pd.DataFrame): Dataframe Y.\r\n\r\n Returns: A trained model and its variables.\r\n \"\"\"\r\n\r\n # Add a column of 1s so that linear regression finds a constant term.\r\n newX = np.ones([X.shape[0], X.shape[1] + 1])\r\n newX[:, 0:X.shape[1]] = X\r\n\r\n # Build and save model.\r\n self.modelCoefficients, self.residuals, self.rank, self.s = np.linalg.lstsq(newX, Y)\r\n\r\n if self.verbose:\r\n print(\"Post Linear Regression Training\")\r\n self.GetLearnerInfo()\r\n\r\n \r\n def Query(self, points):\r\n \"\"\"\r\n Description: This function tests the learner that was trained by estimating a set\r\n of test points given the model we built before.\r\n\r\n Params:\r\n points(np.Array): Represents row queries.\r\n\r\n Returns: Estimated values according to trained model.\r\n \"\"\"\r\n\r\n # Predict the models performance.\r\n return (self.modelCoefficients[:-1] * points).sum(axis=1) + self.modelCoefficients[-1]\r\n\r\n\r\n def GetLearnerInfo(self):\r\n \"\"\"\r\n Description: This function serves to simply print out data from the learner.\r\n \"\"\"\r\n print(\"Model Coefficient Matrix: \", self.modelCoefficients,\r\n \"\\nSums of Residuals: \", self.residuals, \"\\n\")\r\n\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" -------------------------------------------------- K-Nearest Neighbor Learner ------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\n\r\nclass KNearestNeighborLearner(object):\r\n\r\n def __init__(self, K=4, verbose=False):\r\n \"\"\"\r\n Description: This is the constructor for the KNearestNeighborLearner class\r\n that simply initializes a k nearest neighbor Learner.\r\n\r\n Params:\r\n K (int): K nearest neighbors value.\r\n verbose (bool): Print process or not.\r\n\r\n Returns: Initializes variables.\r\n \"\"\"\r\n \r\n self.K = K\r\n self.verbose = verbose\r\n\r\n\r\n def AddEvidence(self, X, Y):\r\n \"\"\"\r\n Description: This function trains a knn learner\r\n when given training dataframes X and Y.\r\n\r\n Params:\r\n X (pd.DataFrame): Dataframe X.\r\n Y (pd.DataFrame): Dataframe Y.\r\n\r\n Returns: Variables designated to their respective class variables.\r\n \"\"\"\r\n\r\n # Split into training and testing.\r\n xTrain, xTest, yTrain, yTest = train_test_split(X, Y, test_size=0.2, random_state=12345)\r\n \r\n # Create instance of a KNR but just to compare with Prof.\r\n model = KNeighborsRegressor(n_neighbors=self.K)\r\n model.fit(xTrain, yTrain)\r\n\r\n # Predictions and RMSEs.\r\n xTrainPred = model.predict(xTrain)\r\n trainRmse = sqrt(mean_squared_error(yTrain, xTrainPred))\r\n xTestPred = model.predict(xTest)\r\n testRmse = sqrt(mean_squared_error(yTest, xTestPred))\r\n\r\n # Hyper parameterize.\r\n hp = dict(n_neighbors=list(range(1,10)),\r\n weights=['uniform', 'distance'])\r\n\r\n self.model = GridSearchCV(KNeighborsRegressor(), hp)\r\n self.model.fit(xTrain, yTrain)\r\n\r\n return xTrain, xTrainPred, xTest, xTestPred, trainRmse, testRmse\r\n\r\n\r\n def Query(self, 
xTest):\r\n        \"\"\"\r\n        Description: This function tests the learner that was trained by estimating a set\r\n        of test points given the model we built before.\r\n\r\n        Params:\r\n            xTest (np.Array): Represents the rows to query.\r\n\r\n        Returns: Estimated values according to trained model.\r\n        \"\"\"\r\n        yTest = self.model.predict(xTest)\r\n        return yTest\r\n
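\r\n# Added usage sketch (not in the original; X and Y stand for hypothetical\r\n# feature / target numpy arrays prepared by the caller):\r\n#   knn = KNearestNeighborLearner(K=4)\r\n#   xTrain, xTrainPred, xTest, xTestPred, trainRmse, testRmse = knn.AddEvidence(X, Y)\r\n#   yPred = knn.Query(xTest)\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ----------------------------------------------------- Decision Tree Learner --------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\n\r\nclass DecisionTreeLearner(object):\r\n\r\n    def __init__(self, leafSize=20, verbose=False):\r\n        \"\"\"\r\n        Description: This function serves to initialize a Decision Tree Learner\r\n        and all its respective variables.\r\n\r\n        Params:\r\n            leafSize (int): Maximum number of samples to be aggregated to a leaf.\r\n            verbose (bool): Print process or not.\r\n\r\n        Returns: Initialized variables.\r\n        \"\"\"\r\n\r\n        self.leafSize = leafSize\r\n        self.verbose = verbose\r\n\r\n    \r\n    def BuildTree(self,data):\r\n        \"\"\"\r\n        Description: This function builds a decision tree using recursion by choosing the\r\n        best column feature to split along with best value to split. Usually the best \r\n        feature has the highest correlation with data Y. If they are all the same however, \r\n        then select the first feature. Typically, the best value to split is based on the median \r\n        of the data according to its best determined feature.\r\n\r\n        Params:\r\n            data (numpy.Array): The data being used to build the decision tree.\r\n\r\n        Returns: A numpy NDArray that represents a tree. \r\n        \"\"\"\r\n        \r\n        dataY = data[:, -1]\r\n\r\n        # If there is only one row, return just a leaf with the average of y.\r\n        if data.shape[0] <= self.leafSize or len(data.shape) == 1:\r\n            return np.array([['leaf',\r\n                              np.mean(dataY),\r\n                              -1,\r\n                              -1]])\r\n\r\n        # Or, if all the data is the same, return null leaf.\r\n        elif np.all(dataY == data[0, -1]):\r\n            return np.array([['leaf',\r\n                              data[0, -1],\r\n                              -1,\r\n                              -1]])\r\n\r\n        # Otherwise, find the best feature to split on. 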
Based on JR Quinlan Decision\r\n # Tree algorithm, the best feature X should have the highest correlation to Y.\r\n else:\r\n bestDtlFeat = 0\r\n highestCorrelation = -1\r\n\r\n # For loop across all features (X values).\r\n for idx in range (data.shape[1] - 1):\r\n # Get best absolute correlation.\r\n correlation = MaskedArray.corrcoef(\r\n MaskedArray.masked_invalid(data[:, idx]), # Mask where invalid values occur.\r\n MaskedArray.masked_invalid(dataY))[0, 1] # Mask where invalid values occur.\r\n\r\n # Absolute value.\r\n correlation = abs(correlation)\r\n\r\n # Replace correlation if condition passes.\r\n if correlation > highestCorrelation:\r\n highestCorrelation = correlation\r\n bestDtlFeat = idx\r\n\r\n # Split down the middle and check its not just 2 rows.\r\n splitValue = np.median(data[:, bestDtlFeat], axis=0)\r\n if splitValue == max(data[:, bestDtlFeat]):\r\n return np.array([['leaf',\r\n np.mean(dataY),\r\n -1,\r\n -1]])\r\n\r\n # Create left tree.\r\n leftTree = self.BuildTree(\r\n data[data[:, bestDtlFeat] <= splitValue]\r\n )\r\n\r\n # Create right tree.\r\n rightTree = self.BuildTree(\r\n data[data[:, bestDtlFeat] > splitValue]\r\n )\r\n\r\n # Establish root of tree and create a decision tree from it.\r\n root = np.array([[bestDtlFeat, splitValue, 1, leftTree.shape[0] + 1]])\r\n decisionTree = np.vstack((np.vstack((root, leftTree)), rightTree))\r\n\r\n return decisionTree\r\n\r\n\r\n def AddEvidence(self, X, Y):\r\n \"\"\"\r\n Description: This function serves to add training data to the \r\n decision tree learner.\r\n\r\n Params:\r\n X (np.NDArray): X values of data to add.\r\n Y (np.1DArray): Y training values.\r\n\r\n Returns: Updated tree matrix for Decision Tree Learner.\r\n \"\"\"\r\n\r\n # Build a tree based on the data.\r\n data = np.hstack((X, Y.reshape(-1, 1)))\r\n self.tree = self.BuildTree(data)\r\n\r\n\r\n def Query(self, points):\r\n \"\"\"\r\n Description: This function serves to estimate a set of test points given\r\n a model we created. 
Basically, this is a test function for our model.\r\n\r\n        Params:\r\n            points (np.NDArray): Test queries.\r\n\r\n        Returns: Predictions in a numpy 1D array of estimated values.\r\n        \"\"\"\r\n        yPred = []\r\n        root = self.tree\r\n\r\n        for row in range(points.shape[0]):\r\n            \r\n            node = 0\r\n\r\n            while root[node, 0] != 'leaf':\r\n                idx = root[node, 0]\r\n                splitValue = root[node, 1]\r\n\r\n                if points[row, int(float(idx))] <= float(splitValue):\r\n                    left = int(float(root[node, 2]))\r\n                    node = node + left\r\n\r\n                else:\r\n                    right = int(float(root[node, 3]))\r\n                    node = node + right\r\n\r\n            result = root[node, 1]\r\n            yPred.append(float(result))\r\n\r\n        return np.array(yPred)\r\n\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------ Random Tree Learner ---------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\n\r\nclass RandomTreeLearner(object):\r\n\r\n    def __init__(self, leafSize=20, verbose=False):\r\n        \"\"\"\r\n        Description: This function serves to initialize a Random Tree Learner\r\n            and all its respective variables.\r\n\r\n        Params:\r\n            leafSize (int): Maximum number of samples to be aggregated to a leaf.\r\n            verbose (bool): Print process or not.\r\n\r\n        Returns: Initialized variables.\r\n        \"\"\"\r\n\r\n        self.leafSize = leafSize\r\n        self.verbose = verbose\r\n\r\n    \r\n    def BuildTree(self, data):\r\n        \"\"\"\r\n        Description: This function builds a decision tree using recursion by choosing a\r\n            random column feature to split on, along with the best value to split at. Usually the random \r\n            feature does not perform as well as the DTL. Typically, the best value to split at \r\n            is the median of the data according to the chosen feature.\r\n\r\n        Params:\r\n            data (numpy.Array): The data being used to build the decision tree.\r\n\r\n        Returns: A numpy NDArray that represents a tree. \r\n        \"\"\"\r\n        \r\n        dataY = data[:, -1]\r\n\r\n        # If there are leafSize or fewer rows, return just a leaf with the average of y.\r\n        if data.shape[0] <= self.leafSize or len(data.shape) == 1:\r\n            return np.array([['leaf',\r\n                              np.mean(dataY),\r\n                              -1,\r\n                              -1]])\r\n\r\n        # Or, if all the data is the same, return null leaf.\r\n        elif np.all(dataY == data[0, -1]):\r\n            return np.array([['leaf',\r\n                              data[0, -1],\r\n                              -1,\r\n                              -1]])\r\n\r\n        # Otherwise, pick a feature to split on. 
Unlike the JR Quinlan Decision\r\n        # Tree algorithm, which uses the feature most correlated with Y, a Random Tree picks its split feature at random.\r\n        else:\r\n            # Pick a random feature column; the last column holds Y, so valid feature indices run 0 to shape[1] - 2.\r\n            randRtlFeat = np.random.randint(0, data.shape[1] - 1)\r\n\r\n            # Split down the middle and check it's not just 2 rows.\r\n            splitValue = np.median(data[:, randRtlFeat], axis=0)\r\n            if splitValue == max(data[:, randRtlFeat]):\r\n                return np.array([['leaf',\r\n                                  np.mean(dataY),\r\n                                  -1,\r\n                                  -1]])\r\n\r\n            # Create left tree.\r\n            leftTree = self.BuildTree(\r\n                data[data[:, randRtlFeat] <= splitValue]\r\n            )\r\n\r\n            # Create right tree.\r\n            rightTree = self.BuildTree(\r\n                data[data[:, randRtlFeat] > splitValue]\r\n            )\r\n\r\n            # Establish root of tree and create a decision tree from it.\r\n            root = np.array([[randRtlFeat, splitValue, 1, leftTree.shape[0] + 1]])\r\n            randomTree = np.vstack((np.vstack((root, leftTree)), rightTree))\r\n\r\n            return randomTree\r\n\r\n\r\n    def AddEvidence(self, X, Y):\r\n        \"\"\"\r\n        Description: This function serves to add training data to the \r\n            random tree learner.\r\n\r\n        Params:\r\n            X (np.NDArray): X values of data to add.\r\n            Y (np.1DArray): Y training values.\r\n\r\n        Returns: Updated tree matrix for Random Tree Learner.\r\n        \"\"\"\r\n\r\n        # Build a tree based on the data.\r\n        data = np.hstack((X, Y.reshape(-1, 1)))\r\n        self.tree = self.BuildTree(data)\r\n\r\n\r\n    def Query(self, points):\r\n        \"\"\"\r\n        Description: This function serves to estimate a set of test points given\r\n            a model we created. Basically, this is a test function for our model.\r\n\r\n        Params:\r\n            points (np.NDArray): Test queries.\r\n\r\n        Returns: Predictions in a numpy 1D array of estimated values.\r\n        \"\"\"\r\n        yPred = []\r\n        root = self.tree\r\n\r\n        for row in range(points.shape[0]):\r\n            \r\n            node = 0\r\n\r\n            while root[node, 0] != 'leaf':\r\n                idx = root[node, 0]\r\n                splitValue = root[node, 1]\r\n\r\n                if points[row, int(float(idx))] <= float(splitValue):\r\n                    left = int(float(root[node, 2]))\r\n                    node = node + left\r\n\r\n                else:\r\n                    right = int(float(root[node, 3]))\r\n                    node = node + right\r\n\r\n            result = root[node, 1]\r\n            yPred.append(float(result))\r\n\r\n        return np.array(yPred)\r\n\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------- Bootstrap Aggregating Learner ----------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\n\r\nclass BootstrapAggregatingLearner(object):\r\n\r\n    def __init__(self, learner, bags=20, boost=False,\r\n                 verbose=False, **kwargs):\r\n\r\n        \"\"\"\r\n        Description: This function serves to initialize a Bootstrap Aggregating Learner\r\n            and all its respective variables.\r\n\r\n        Params:\r\n            learner (object): LRL, DTL, or RTL.\r\n            bags (int): Quantity of learners to be trained.\r\n            boost (bool): Applies boosting.\r\n            verbose (bool): Print process or not.\r\n            **kwargs: Additional arguments.\r\n\r\n        Returns: Initialized variables.\r\n        \"\"\"\r\n        \r\n        Learners = []\r\n\r\n        # Add learners to the learners array depending on bag size.\r\n        for i in range(bags):\r\n            Learners.append(learner(**kwargs))\r\n\r\n        self.Learners = Learners\r\n        self.bags = bags\r\n        self.boost = boost\r\n        self.verbose = verbose\r\n        self.kwargs = kwargs\r\n        \r\n        if self.verbose:\r\n            print(\"Initialization complete.\")\r\n            self.GetLearnerInfo()\r\n\r\n    \r\n    def 
AddEvidence(self, X, Y):\r\n        \"\"\"\r\n        Description: This function serves to add training data to the \r\n            bootstrap aggregating learner.\r\n\r\n        Params:\r\n            X (np.NDArray): X values of data to add.\r\n            Y (np.1DArray): Y training values.\r\n\r\n        Returns: Updated training data for BagLearner.\r\n        \"\"\"\r\n\r\n        # Get the number of samples based on the shape of X data.\r\n        numOfSamples = X.shape[0]\r\n\r\n        # For each bag, draw a bootstrap sample (random rows, with replacement) and train on it.\r\n        for learner in self.Learners:\r\n            index = np.random.choice(numOfSamples, numOfSamples)\r\n\r\n            bagX = X[index]\r\n            bagY = Y[index]\r\n            learner.AddEvidence(bagX, bagY)\r\n\r\n        if self.verbose:\r\n            print(\"Post Bag Learner Training.\")\r\n            self.GetLearnerInfo()\r\n\r\n\r\n    def Query(self, points):\r\n        \"\"\"\r\n        Description: This function serves to estimate a set of test points given\r\n            a model we created. Basically, this is a test function for our model.\r\n\r\n        Params:\r\n            points (np.NDArray): Test queries.\r\n\r\n        Returns: Predictions in a numpy 1D array of estimated values.\r\n        \"\"\"\r\n\r\n        # Average the predictions of all the learners for the given points.\r\n        predictions = np.array([learner.Query(points) for learner in self.Learners])\r\n        return np.mean(predictions, axis=0)\r\n\r\n\r\n    def GetLearnerInfo(self):\r\n        \"\"\"\r\n        Description: This function serves to print out the \r\n            data for the BagLearner.\r\n        \"\"\"\r\n        learnerName = str(type(self.Learners[0]))[8:-2]\r\n        print(\"This Bootstrap Aggregating Learner is made up of \"\r\n              \"{} {}.\".format(self.bags, learnerName))\r\n\r\n        print(\"Kwargs = {}\\nBoost = {}\".format(self.kwargs, self.boost))\r\n\r\n        for i in range(1, self.bags + 1):\r\n            print(\"{} #{}.\\n\".format(learnerName, i))\r\n            self.Learners[i-1].GetLearnerInfo()\r\n\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" --------------------------------------------------------- Insane Learner ------------------------------------------------ \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\n\r\nclass InsaneLearner(object):\r\n\r\n    def __init__(self, verbose=False, **kwargs):\r\n        \"\"\"\r\n        Description: This function serves to initialize an InsaneLearner\r\n            and all its respective variables.\r\n\r\n        Params:\r\n            verbose (bool): Print process or not.\r\n            **kwargs: Additional arguments.\r\n\r\n        Returns: Initialized variables.\r\n        \"\"\"\r\n\r\n        self.verbose = verbose\r\n        # Use a comprehension so each bag learner is a distinct object; multiplying a\r\n        # one-element list by 20 would alias the same learner twenty times.\r\n        self.learners = [BootstrapAggregatingLearner(learner=DecisionTreeLearner, bags=20)\r\n                         for _ in range(20)]\r\n\r\n\r\n    def AddEvidence(self, X, Y):\r\n        \"\"\"\r\n        Description: This function serves to add training data to the \r\n            insane learner.\r\n\r\n        Params:\r\n            X (np.NDArray): X values of data to add.\r\n            Y (np.1DArray): Y training values.\r\n\r\n        Returns: Updated training data for insane learner.\r\n        \"\"\"\r\n\r\n        for learner in self.learners:\r\n            learner.AddEvidence(X, Y)\r\n\r\n\r\n    def Query(self, points):\r\n        \"\"\"\r\n        Description: This function serves to estimate a set of test points given\r\n            a model we created. 
Basically, this is a test function for our model.\r\n\r\n Params:\r\n points (np.NDArray): Test queries.\r\n\r\n Returns: Predictions in a numpy 1D array of estimated values.\r\n \"\"\"\r\n\r\n results = []\r\n \r\n for learner in self.learners:\r\n results.append(learner.Query(points))\r\n \r\n results = np.mean(np.array(results), axis=0)\r\n\r\n return results\r\n\r\n\r\n\r\n\r\n# *********************************************************************************************************************************************************** #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# -------------------------------------------------------- Reinforcement Learning Algorithm Learners -------------------------------------------------------- #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# *********************************************************************************************************************************************************** #\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------ Q Learner -------------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nclass QLearner(object):\r\n\r\n def __init__(self, numOfStates=100, numOfActions=4, \r\n alpha=0.2, gamma=0.9, rar=0.5, radr=0.99,\r\n dyna=0, verbose=False):\r\n \"\"\"\r\n Description: This function serves as the constructor for a Dyna QLearner instance.\r\n Params:\r\n numOfStates (int): Number of states within a Q Table.\r\n numOfActions (int): Number of actions within a Q Table.\r\n alpha (float): Value for learning rate.\r\n gamma (float): Value of future reward.\r\n rar (float): Random action rate (Probability of selection a random action at each step).\r\n radr (float): Random action decay rate (After each update, rar = rar * radr).\r\n dyna (int): Number of dyna updates.\r\n verbose (bool): Display info or not.\r\n Returns: Initialized variables.\r\n \"\"\"\r\n self.numOfStates = numOfStates\r\n self.numOfActions = numOfActions\r\n\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.rar = rar\r\n self.radr = radr\r\n \r\n self.dyna = dyna\r\n self.verbose = verbose\r\n\r\n # Double ended queue data structure that allows insert and delete at both ends.\r\n self.memory = deque(maxlen=2000)\r\n\r\n # Keep track of the latest state and action.\r\n self.state = 0\r\n self.action = 0\r\n\r\n # Initialize a Q-table that records and updates q values for each action/state.\r\n self.Q = np.zeros(shape=(numOfStates, numOfActions))\r\n\r\n # Keep track of transitions from s to sprime when performing an aciton in Dyna-Q.\r\n self.T = {}\r\n\r\n # Keep track of reward for each action in each state when doing Dyna-Q.\r\n self.R = np.zeros(shape=(numOfStates, numOfActions))\r\n\r\n \r\n def RememberQValues(self, state, action, reward, nextState, done):\r\n \"\"\"\r\n Description: Allows for remember the Q values and appends to deque data structure.\r\n Params:\r\n state (int): State of Q table.\r\n action (int): Action to perform for respective state.\r\n reward (float): Reward for specific 
aciton.\r\n nextState (int): Subsequent state of Q table.\r\n done (bool): If q value acquisition is complete.\r\n \"\"\"\r\n self.memory.append((state, action, reward, nextState, done))\r\n \r\n\r\n def Act(self, state, reward, done=False, update=True):\r\n \"\"\"\r\n Description: Peforms a query operation depending on current status of Q table.\r\n Params:\r\n state (int): Current state to perform query on.\r\n reward (float): Immediate reward from previous action.\r\n done (bool): If acting has been performed.\r\n update (bool): Update Q table based on values.\r\n Returns: Query.\r\n \"\"\"\r\n if update:\r\n return self.Query(state, reward, done=done)\r\n \r\n else:\r\n return self.QueryState(state)\r\n\r\n\r\n def QueryState(self, state):\r\n \"\"\"\r\n Description: Find the next action to take in state s. Update the latest state and action \r\n without updating the Q table.\r\n Parameters:\r\n state (int): The new state\r\n Returns: The selected action to take in state.\r\n \"\"\"\r\n if rand.uniform(0.0, 1.0) < self.rar:\r\n action = rand.randint(0, self.numOfActions - 1)\r\n \r\n else:\r\n action = self.Q[state, :].argmax()\r\n\r\n self.state = state\r\n self.action = action\r\n\r\n if self.verbose:\r\n print(\"\\nState = {}, Action = {}\".format(state, action))\r\n\r\n return action\r\n\r\n\r\n def Query(self, statePrime, reward, done=False):\r\n \"\"\"\r\n Find the next action to take in state s_prime. Update the latest state \r\n and action and the Q table. Update rule:\r\n Q'[s, a] = (1 - α) · Q[s, a] + α · (r + γ · Q[s', argmax a'(Q[s', a'])]).\r\n Parameters:\r\n statePrime (int): New state.\r\n reward (float): Immediate reward for taking the previous action.\r\n Returns: The selected action to take in statePrime.\r\n \"\"\"\r\n self.RememberQValues(self.state, self.action, reward, statePrime, done)\r\n\r\n # Update Q table.\r\n self.Q[self.state, self.action] = (\r\n (1 - self.alpha) * self.Q[self.state, self.action] + \r\n self.alpha * (reward + self.gamma * self.Q[statePrime, self.Q[statePrime, :].argmax()])\r\n )\r\n\r\n # Implement Dyna-Q.\r\n if self.dyna > 0:\r\n # Update reward table.\r\n self.R[self.state, self.action] = (\r\n (1 - self.alpha) * self.R[self.state, self.action] + self.alpha * reward\r\n )\r\n\r\n if (self.state, self.action) in self.T:\r\n if statePrime in self.T[(self.state, self.action)]:\r\n self.T[(self.state, self.action)][statePrime] += 1\r\n\r\n else:\r\n self.T[(self.state, self.action)][statePrime] = 1\r\n\r\n else:\r\n self.T[(self.state, self.action)] = { statePrime: 1 }\r\n\r\n Q = deepcopy(self.Q)\r\n\r\n # Hallucinations.\r\n for i in range (self.dyna):\r\n dummyState = rand.randint(0, self.numOfStates - 1)\r\n dummyAction = rand.randint(0, self.numOfActions - 1)\r\n\r\n if (dummyState, dummyAction) in self.T:\r\n # Find the most common statePrime as a result of taking action.\r\n dummyStatePrime = max(self.T[(dummyState, dummyAction)], key=lambda x: self.T[(dummyState, dummyAction)][x])\r\n\r\n # Update temp table.\r\n Q[dummyState, dummyAction] = (\r\n (1 - self.alpha) * Q[dummyState, dummyAction] + \r\n self.alpha * (self.R[dummyState, dummyAction] + \r\n self.gamma * Q[dummyStatePrime, Q[dummyStatePrime, :].argmax()])\r\n )\r\n\r\n # Update once dyna is complete.\r\n self.Q = deepcopy(Q)\r\n\r\n # Find the next action to take and update.\r\n nextAction = self.QueryState(statePrime)\r\n self.rar *= self.radr\r\n\r\n if self.verbose:\r\n print(\"\\nState = {}, Action = {}, Reward = {}\".format(statePrime, nextAction, reward))\r\n\r\n 
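# The Q table (and, when dyna > 0, its hallucinated copy) is now up to date;\r\n        # hand back the action QueryState chose for statePrime (random with\r\n        # probability rar, otherwise the argmax of that state's Q row).\r\n        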
return nextAction\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" ---------------------------------------------------------- Deep Q Network ----------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nclass DeepQNetwork(object):\r\n \r\n ACTIONS = { 0:-1, 1:0, 2:1 }\r\n\r\n def __init__(self, stateSize=20, actionSize=20, \r\n alpha=0.001, gamma=0.95, epsilon=1.0,\r\n minEpsilon=0.9, epsilonDecay=0.9, verbose=False):\r\n \"\"\"\r\n Description: Constructor for deep nueral network q learner.\r\n Params:\r\n stateSize(int): Number of states.\r\n actionSize (int): Number of actions.\r\n alpha (float): Learning rate.\r\n gamma (float): Value of future reward.\r\n epsilon (float): Exploration rate.\r\n minEpsilon (float): Minimum exploration rate.\r\n epsilonDecay (float): Decay rate for exploration.\r\n verbose (bool): Print info out.\r\n Returns: Initialized variables.\r\n \"\"\"\r\n\r\n self.stateSize = stateSize\r\n self.actionSize = actionSize\r\n\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.epsilon = epsilon\r\n self.minEpsilon = minEpsilon\r\n self.epsilonDecay = epsilonDecay\r\n\r\n self.memory = deque(maxlen=2000)\r\n \r\n # Build neural network.\r\n self.model = self.BuildModel()\r\n \r\n if verbose:\r\n self.verbose = 1\r\n \r\n else:\r\n self.verbose = 0\r\n\r\n \r\n def BuildModel(self):\r\n \"\"\"\r\n Description: Builds the neural networks for our deep-q learning model.\r\n \"\"\"\r\n # Prepares/Initializes NN layers.\r\n model = Sequential()\r\n model.add(Dense(60, input_dim=self.stateSize, activation='relu'))\r\n model.add(Dense(60, activation='relu'))\r\n model.add(Dense(3, activation='linear'))\r\n \r\n # Configures model for training using MSE loss function and Adam optimizer.\r\n model.compile(loss='mse', optimizer=Adam(lr=self.alpha))\r\n\r\n return model\r\n\r\n\r\n def Remember(self, state, action, reward, nextState, done):\r\n \"\"\"\r\n Description: Appends information to memory to remember performance and values.\r\n Params:\r\n state (int): Current state.\r\n action (int): Current action.\r\n reward (int): Current reward.\r\n nextState (int): Next state.\r\n done (bool): Completed or not.\r\n \"\"\"\r\n self.memory.append((state, action, reward, nextState, done))\r\n\r\n\r\n def Act(self, state):\r\n \"\"\"\r\n Description: Makes decision on best action to perform based on state parameter.\r\n Params:\r\n state (int): Current state.\r\n Returns: Action to perform.\r\n \"\"\"\r\n if np.random.rand() <= self.epsilon:\r\n return rand.randrange(self.actionSize)\r\n\r\n actVals = self.model.predict(np.asarray([state]), verbose=self.verbose)\r\n\r\n return np.argmax(actVals[0])\r\n\r\n\r\n def RewardTarget(self, memory):\r\n \"\"\"\r\n Description: Makes model understand what the best outcome is and reward algorithm accordingly.\r\n Params:\r\n memory (deque): Model memory.\r\n Returns: Target reward/action.\r\n \"\"\"\r\n\r\n states = []\r\n targRwds = []\r\n\r\n for state, action, reward, nextState, done in memory:\r\n target = reward\r\n\r\n if not done:\r\n target = (\r\n reward + self.gamma *\r\n np.amax(self.model.predict(np.asarray([nextState]), \r\n verbose=self.verbose)[0])\r\n )\r\n \r\n tempTarget = self.model.predict(np.asarray([state]), verbose=self.verbose)\r\n tempTarget[0][action] = target\r\n\r\n 
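# tempTarget is now the network's predicted Q row for this state, with the\r\n            # taken action's entry overwritten by the Bellman target r + gamma * max_a' Q(s', a').\r\n            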
states.append(state)\r\n targRwds.append(tempTarget)\r\n\r\n return (states, targRwds)\r\n\r\n \r\n def Query(self, df):\r\n \"\"\"\r\n Description: Fits model.\r\n Params:\r\n df (pd.DataFrame): Data to be tested.\r\n Returns: Trained model.\r\n \"\"\"\r\n validMem = self.AddEvidence(df, df.index[-1], memory=[])\r\n\r\n validStates, validTargetRewards = self.RewardTarget(validMem)\r\n\r\n dfStates, dfTargetRewards = self.RewardTarget(self.memory)\r\n\r\n self.model.fit(np.asarray(dfStates),\r\n np.asarray(dfTargetRewards)[:, 0, :],\r\n validation_data=(np.asarray(validStates), np.asarray(validTargetRewards)[:, 0, :]),\r\n epochs=10, \r\n verbose=self.verbose)\r\n\r\n if self.epsilon > self.minEpsilon:\r\n self.epsilon *= self.epsilonDecay\r\n\r\n\r\n def AddEvidence(self, df, endDate, memory=[]):\r\n \"\"\"\r\n Description: Creates memory for training purposes.\r\n Param:\r\n df (pd.DataFrame): Dataframe to create memory from.\r\n endDate (string): End data of dataframe.\r\n memory (deque): Memory.\r\n Returns: Created memory.\r\n \"\"\"\r\n \r\n\r\n for idx, (domain, range) in enumerate(df.iloc[self.stateSize:-2].iterrows()):\r\n\r\n state = np.asarray(df.iloc[idx: idx + self.stateSize]['Return'])\r\n\r\n action = self.ACTIONS[self.Act(state)]\r\n\r\n nextState = np.asarray(df.iloc[idx + 1: idx + self.stateSize + 1]['Return'])\r\n\r\n done = domain == endDate\r\n\r\n reward = action * df.iloc[idx + self.stateSize]['Return']\r\n\r\n memory.append((state, action, reward, nextState, done))\r\n \r\n return memory\r\n\r\n\r\n def TransformDf(self, df, windowSize=2):\r\n \"\"\"\r\n Description: Transforms dataframe into a tradeable one using daily returns for the \r\n DQN to use as reward system.\r\n Params:\r\n df (pd.DataFrame): Dataframe to transform.\r\n windowSize (int): Window size for return computing.\r\n Returns: Transformed dataframe.\r\n \"\"\"\r\n \r\n df['Return'] = df[df.columns[0]].rolling(\r\n window=windowSize).apply(lambda x: x[1] / x[0] - 1)\r\n\r\n return df\r\n\r\n\r\n def CreateTradesDf(self, df, learner):\r\n\r\n dfTrades = { \"Trade\": []}\r\n cumRet = 1\r\n\r\n df = df.append(df.iloc[-1])\r\n\r\n for idx, (domain, range) in enumerate(\r\n df[learner.stateSize:-2].iterrows()):\r\n \r\n state = np.asarray(df.iloc[\r\n idx: idx + learner.stateSize\r\n ]['Return'])\r\n \r\n position = learner.Act(state)\r\n \r\n reward = position * df.iloc[ \r\n idx + learner.stateSize + 1\r\n ]['Return']\r\n\r\n dfTrades[\"Trade\"].append(position)\r\n cumRet *= 1 + reward\r\n\r\n dfTrades = pd.DataFrame(dfTrades, \r\n index=df.index[\r\n learner.stateSize + 1:-1]).join(df)\r\n dfTrades[\"Portfolio Return\"] = (dfTrades[\"Trade\"] * dfTrades[\"Return\"])\r\n dfTrades[\"DQNLearner\"] = (1 + dfTrades[\"Portfolio Return\"]).cumprod()\r\n dfTrades[df.columns[0]] = dfTrades[df.columns[0]] / dfTrades.iloc[0][df.columns[0]]\r\n \r\n return dfTrades\r\n\r\n\r\n def PlotDqnPerformance(self, testTrades, symbol):\r\n plt.plot(testTrades[[symbol]], label=symbol, color=\"maroon\")\r\n plt.plot(testTrades[[\"DQNLearner\"]], label=\"DQN Learner\", color=\"green\")\r\n plt.title(\"DQN Test Plot\")\r\n plt.xlabel(\"Date\")\r\n plt.ylabel(\"Value\")\r\n plt.legend(loc=\"best\")\r\n\r\n fig = plt.gcf()\r\n fig.set_size_inches(9, 4)\r\n sym = str(symbol).lower().capitalize()\r\n plt.savefig(f\"Images/{sym}DqnLearnerVisual.png\")\r\n plt.close()\r\n\r\n\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\"\"\" 
-------------------------------------------------------- QStrategyLearner ---------------------------------------------- \"\"\"\r\n\"\"\" ------------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nclass StrategyLearner(object):\r\n \r\n BUY = 1\r\n HOLD = 0\r\n SELL = -1\r\n\r\n def __init__(self, accuracyBool=False, numOfShares=1, epochs=100, numOfSteps=10,\r\n impact=0.0, commission=0.0, verbose=False,\r\n learner=QLearner(numOfStates=3000, numOfActions=3)):\r\n \"\"\"\r\n Description: This function serves to create a StrategyLearner that can learn a trading policy.\r\n Params:\r\n numOfShares (int): Number of shares that can be traded in one order.\r\n epochs (int): The number of times to train the learner.\r\n numOfSteps (int): Steps in getting discretization thresholds.\r\n impact (float): Difference between learner and actual data.\r\n commision (float): Amount charged per transaction.\r\n verbose (bool): Print info or not.\r\n learner (object): Learner to implement strategy on.\r\n Returns: Initialized variables.\r\n \"\"\"\r\n\r\n self.numOfShares = numOfShares\r\n self.epochs = epochs\r\n self.numOfSteps = numOfSteps\r\n self.impact = impact\r\n self.commision = commission\r\n self.verbose = verbose\r\n self.accuracyBool = accuracyBool\r\n self.QLearner = learner\r\n\r\n\r\n def GetTechnicalIndicators(self, df):\r\n \"\"\"\r\n Description: This function implements the technical indicators and features of a position and feeds it into the Q Learner.\r\n Params:\r\n df (pd.DataFrame): Dataframe to compute tech indicators.\r\n Returns: A pandas dataframe of the technical indicators.\r\n \"\"\"\r\n df['MOMENTUM'] = TA.MOM(df, period=5)\r\n df['SMA'] = TA.SMA(df, period=5)\r\n df['BBWIDTH'] = TA.BBWIDTH(df, period=5) \r\n df.drop(columns={'Open', 'High', 'Low', 'Close', 'Volume'}, inplace=True)\r\n df.dropna(inplace=True)\r\n return df\r\n\r\n\r\n def GetThresholds(self, dfTechnicalIndicators, numOfSteps):\r\n \"\"\"\r\n Description: Computes thresholds to be used for discretization and \r\n returns a 2-d numpy array where the first dimension indicates the index\r\n of features in dfTechnicalIndicators and second dimension refers to the value\r\n of a feature at the particular threshold.\r\n \"\"\"\r\n stepSize = round(dfTechnicalIndicators.shape[0] / numOfSteps)\r\n\r\n tempDf = dfTechnicalIndicators.copy()\r\n\r\n thresholds = np.zeros(shape=(dfTechnicalIndicators.shape[1], numOfSteps))\r\n\r\n for idx, features in enumerate(dfTechnicalIndicators.columns):\r\n tempDf.sort_values(by=[features], inplace=True)\r\n\r\n for step in range(numOfSteps):\r\n if step < numOfSteps - 1:\r\n thresholds[idx, step] = tempDf[features].iloc[(step + 1) * stepSize]\r\n\r\n else:\r\n thresholds[idx, step] = tempDf[features].iloc[-1]\r\n\r\n return thresholds\r\n\r\n\r\n def Discretize(self, dfTechnicalIndicators, nonNegativePosition, thresholds):\r\n \"\"\"\r\n Description: This function serves to discretize the upcoming values of the deep q network. \r\n In applied mathematics, discretization is the process of transferring continuous functions, \r\n models, variables, and equations into discrete counterparts. 
This process is usually \r\n carried out as a first step toward making them suitable for numerical evaluation and \r\n implementation on digital computers\r\n Params:\r\n dfTechnicalIndicators (pd.DataFrame): Dataframe with technical indicators.\r\n nonNegativePosition (int): Positions of DQN.\r\n thresholds (float): Threshold computed from previous function.\r\n Returns: State in Q Table from which we query the action.\r\n \"\"\"\r\n state = nonNegativePosition * pow(self.numOfSteps, len(dfTechnicalIndicators))\r\n\r\n for idx in range(len(dfTechnicalIndicators)):\r\n threshold = thresholds[idx][thresholds[idx] >= dfTechnicalIndicators[idx]][0]\r\n\r\n thresholdIdx = np.where(thresholds == threshold)[1][0]\r\n\r\n state += thresholdIdx * pow(self.numOfSteps, idx)\r\n\r\n return state\r\n\r\n\r\n def GetPosition(self, prevPosition, signal):\r\n \"\"\"\r\n Description: This function serves to find a new position based on the previous \r\n position and the given signal. Signal is the action that results from querying\r\n a state which comes from discretize in the q table. Action is either 0, 1, 2 and is \r\n the second index of the q table.\r\n \"\"\"\r\n newPosition = self.HOLD\r\n\r\n if prevPosition < self.BUY and signal == self.BUY:\r\n newPosition = self.BUY\r\n\r\n elif prevPosition > self.SELL and signal == self.SELL:\r\n newPosition = self.SELL\r\n\r\n return newPosition\r\n\r\n\r\n def GetDailyReward(self, prevPrice, currPrice, position):\r\n \"\"\"\r\n Description: This function serves to calculate the daily reward of the dataframe\r\n as a percentage change in prices.\r\n \"\"\"\r\n\r\n return position * ((currPrice / prevPrice) - 1)\r\n\r\n \r\n def CheckConverged(self, cumReturns, patience=10):\r\n \"\"\"\r\n Description: This function serves to check if the cumulative returns has converged. \r\n Patience is the number of epochs with no improvements in cumulative returns. 
This\r\n will return either true or false.\r\n \"\"\"\r\n\r\n if patience > len(cumReturns):\r\n return False\r\n\r\n lastFewReturns = cumReturns[-patience:]\r\n\r\n if len(set(lastFewReturns)) == 1:\r\n return True\r\n \r\n maxReturn = max(cumReturns)\r\n\r\n if maxReturn in lastFewReturns:\r\n if maxReturn not in cumReturns[:len(cumReturns) - patience]:\r\n return False\r\n\r\n else:\r\n return True\r\n\r\n return True\r\n\r\n\r\n def CreateDfTrades(self, orders, numOfShares,\r\n hold=0, buy=1, sell=-1):\r\n \"\"\"\r\n Description: This function serves to simply create a dataframe for \r\n orders executed to simulate trading.\r\n \"\"\"\r\n\r\n trades = []\r\n\r\n if self.accuracyBool == False:\r\n buyOrSell = orders[orders != hold]\r\n\r\n for date in buyOrSell.index:\r\n if buyOrSell.loc[date] == buy:\r\n trades.append((date, numOfShares))\r\n\r\n elif buyOrSell.loc[date] == sell:\r\n trades.append((date, -numOfShares))\r\n\r\n elif self.accuracyBool == True:\r\n buyOrSell = orders\r\n \r\n for date in buyOrSell.index:\r\n if buyOrSell.loc[date] == buy:\r\n trades.append((date, numOfShares))\r\n\r\n elif buyOrSell.loc[date] == sell:\r\n trades.append((date, -numOfShares))\r\n\r\n elif buyOrSell.loc[date] == hold:\r\n trades.append((date, 0))\r\n\r\n dfTrades = pd.DataFrame(trades, columns=[\"Date\", \"Shares\"])\r\n dfTrades.set_index(\"Date\", inplace=True)\r\n\r\n return dfTrades\r\n\r\n\r\n def CreateBmDfTrades(self, symbol, startDate, endDate, numOfShares):\r\n \"\"\"\r\n Description: This function serves to simply createa an empty df that can be used to test\r\n against the dfTrades dataframe and establishes a benchmark.\r\n \"\"\"\r\n\r\n bmPrices = NormalizeDfs(IndividualHistoricalData(symbol, startDate, endDate, keepAllColumns=\"NO\"))[0]\r\n\r\n dfBmTrades = pd.DataFrame(\r\n data=[\r\n (bmPrices.index.min(), numOfShares),\r\n (bmPrices.index.max(), -numOfShares)\r\n ], \r\n columns=[\"Date\", \"Shares\"]\r\n )\r\n\r\n dfBmTrades.set_index(\"Date\", inplace=True)\r\n\r\n return dfBmTrades\r\n\r\n\r\n def SymbolValueFromTrading(self, dfOrders, symbol, startDate, endDate,\r\n startVal=1, commision=9.95, impact=0.05):\r\n \"\"\"\r\n Description: This function serves to simulate trading a stock \r\n based on the orders performed and symbol given. 
This returns\r\n a column of the portfolio value given that one stock after every \r\n action performed.\r\n \"\"\"\r\n dfOrders.sort_index(ascending=True, inplace=True)\r\n\r\n dfPrices = NormalizeDfs(IndividualHistoricalData(symbol, startDate, endDate, keepAllColumns=\"NO\"))[0]\r\n\r\n dfPrices[\"Cash\"] = 1.0\r\n\r\n dfPrices.fillna(method=\"ffill\", inplace=True)\r\n dfPrices.fillna(method=\"bfill\", inplace=True)\r\n dfPrices.fillna(1.0, inplace=True)\r\n\r\n dfTrades = pd.DataFrame(np.zeros((dfPrices.shape)), dfPrices.index,\r\n dfPrices.columns)\r\n\r\n for idx, row in dfOrders.iterrows():\r\n tradedShareVal = dfPrices.loc[idx, symbol] * row[\"Shares\"]\r\n transactionCost = commision + impact * dfPrices.loc[idx, symbol] * abs(row[\"Shares\"])\r\n\r\n if row[\"Shares\"] > 0:\r\n dfTrades.loc[idx, symbol] = dfTrades.loc[idx, symbol] + row[\"Shares\"]\r\n dfTrades.loc[idx, \"Cash\"] = dfTrades.loc[idx, \"Cash\"] - tradedShareVal - transactionCost\r\n\r\n elif row[\"Shares\"] < 0:\r\n dfTrades.loc[idx, symbol] = dfTrades.loc[idx, symbol] + row[\"Shares\"]\r\n dfTrades.loc[idx, \"Cash\"] = dfTrades.loc[idx, \"Cash\"] - tradedShareVal - transactionCost\r\n\r\n dfHoldings = pd.DataFrame(np.zeros((dfPrices.shape)), dfPrices.index,\r\n dfPrices.columns)\r\n\r\n for rowCount in range(len(dfHoldings)):\r\n if rowCount == 0:\r\n dfHoldings.iloc[0, :-1] = dfTrades.iloc[0, :-1].copy()\r\n dfHoldings.iloc[0, -1] = dfTrades.iloc[0, -1] + startVal\r\n\r\n else:\r\n dfHoldings.iloc[rowCount] = dfHoldings.iloc[rowCount - 1] + dfTrades.iloc[rowCount]\r\n\r\n rowCount += 1\r\n\r\n dfVal = dfPrices * dfHoldings\r\n \r\n portVals = pd.DataFrame(dfVal.sum(axis=1), dfVal.index, [\"Port Val\"])\r\n \r\n return portVals\r\n\r\n\r\n def MarketSimulator(self, dfOrders, dfOrdersBm, symbol,\r\n startDate, endDate, title, startVal=1,\r\n commission=9.95, impact=0.005,\r\n saveFigure=False, figName=\"Plot.png\",\r\n showPlot=False):\r\n \"\"\"\r\n Description: This function serves to mimic the market simulator project from ML4T\r\n university course by Tucker Balch. 
In summary, this function intakes dfOrders\r\n dataframe that executes the trades and displays the portfolio value respectively.\r\n \"\"\"\r\n portVals = self.SymbolValueFromTrading(dfOrders=dfOrders, symbol=symbol,\r\n startVal=startVal, startDate=startDate,\r\n endDate=endDate, commision=commission,\r\n impact=impact)\r\n\r\n portValsBm = self.SymbolValueFromTrading(dfOrders=dfOrdersBm, symbol=symbol,\r\n startVal=startVal, startDate=startDate,\r\n endDate=endDate, commision=commission,\r\n impact=impact)\r\n\r\n portValsBm.rename(columns={\"Port Val\": symbol}, inplace=True)\r\n\r\n temp = []\r\n temp.append(portVals)\r\n temp.append(portValsBm)\r\n df = temp[0].join(temp[1])\r\n\r\n plt.plot(df.loc[:, df.columns[1]], label=symbol, color=\"maroon\")\r\n plt.plot(df.loc[:, df.columns[0]], label=\"QLearner\", color=\"darkgreen\")\r\n plt.title(title)\r\n plt.xlabel(\"Date\")\r\n plt.ylabel(\"Normalized Prices\")\r\n plt.legend(loc=\"best\")\r\n\r\n fig = plt.gcf()\r\n fig.set_size_inches(9, 4)\r\n \r\n if saveFigure:\r\n plt.savefig(figName)\r\n\r\n if showPlot:\r\n plt.show()\r\n\r\n plt.close()\r\n \r\n \r\n def AddEvidence(self, symbol='GLD', startDate=\"2021-01-01\", \r\n endDate=\"2022-01-01\", startVal=1):\r\n \"\"\"\r\n Description: This function serves to add training data to the \r\n Strategy learner.\r\n Params:\r\n Self explanatory.\r\n Returns: Updated training data for Strategy learner.\r\n \"\"\"\r\n tempDf = NormalizeDfs(IndividualHistoricalData(symbol=symbol, startDate=startDate,\r\n endDate=endDate, keepAllColumns=\"YES\"))[0]\r\n\r\n dfPrices = NormalizeDfs(IndividualHistoricalData(symbol=symbol, startDate=startDate,\r\n endDate=endDate, keepAllColumns=\"NO\"))[0]\r\n\r\n dfFeatures = self.GetTechnicalIndicators(tempDf)\r\n\r\n dfThres = self.GetThresholds(dfFeatures, self.numOfSteps)\r\n\r\n cumReturns = []\r\n\r\n for epoch in range(1, self.epochs + 1):\r\n\r\n # Initial position is hold.\r\n position = self.HOLD\r\n\r\n # Create pandas series that captures order signals.\r\n orders = pd.Series(index=dfFeatures.index)\r\n\r\n for day, date in enumerate(dfFeatures.index):\r\n # Get a state.\r\n state = self.Discretize(dfFeatures.loc[date],\r\n position + 1,\r\n dfThres)\r\n\r\n # Get action, do not update table if first time.\r\n if date == dfFeatures.index[0]:\r\n action = self.QLearner.Act(state, 0.0, update=False)\r\n\r\n # Otherwise, calculate reward and update table.\r\n else:\r\n prevPrice = dfPrices[symbol].iloc[day - 1]\r\n currPrice = dfPrices[symbol].loc[date]\r\n\r\n reward = self.GetDailyReward(prevPrice, currPrice, position)\r\n action = self.QLearner.Act(state, reward, \r\n done=date==dfFeatures.index[-1],\r\n update=True)\r\n\r\n # If last day, sell.\r\n if date == dfFeatures.index[-1]:\r\n newPosition = -position\r\n\r\n else:\r\n newPosition = self.GetPosition(position, action - 1)\r\n\r\n orders.loc[date] = newPosition\r\n\r\n position += newPosition\r\n\r\n dfTrades = self.CreateDfTrades(orders, self.numOfShares)\r\n portVals = self.SymbolValueFromTrading(dfOrders=dfTrades,\r\n symbol=symbol,\r\n startDate=startDate,\r\n endDate=endDate,\r\n startVal=startVal,\r\n commision=self.commision,\r\n impact=self.impact)\r\n\r\n cr = portVals.iloc[-1, 0] / portVals.iloc[0, 0] - 1\r\n cumReturns.append(cr)\r\n\r\n if self.verbose:\r\n print(\"Epoch: {}, Cumulative Return: {}\\n\".format(epoch, cr))\r\n\r\n if epoch > 20:\r\n if self.CheckConverged(cumReturns):\r\n break\r\n\r\n if self.verbose:\r\n sns.heatmap(self.QLearner.Q, cmap='Blues')\r\n 
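# The heatmap shows the learned Q table; the plot below tracks cumulative\r\n            # return per epoch so convergence is visible.\r\n            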
plt.plot(cumReturns)\r\n            plt.xlabel(\"Epochs\")\r\n            plt.ylabel(\"Cumulative Return (%)\")\r\n            plt.show()\r\n\r\n    \r\n    def Query(self, symbol='GLD', startDate=\"2022-01-02\", \r\n              endDate=None):\r\n        \"\"\"\r\n        Description: This function serves to test the existing policy on a new data set.\r\n        \"\"\"\r\n\r\n        # Resolve the default here; a datetime.today() default argument would be\r\n        # evaluated only once, at import time.\r\n        if endDate is None:\r\n            endDate = datetime.today().strftime('%Y-%m-%d')\r\n\r\n        tempDf = NormalizeDfs(IndividualHistoricalData(symbol=symbol,\r\n                                                       startDate=startDate,\r\n                                                       endDate=endDate,\r\n                                                       keepAllColumns=\"YES\"))[0] \r\n\r\n        dfFeatures = self.GetTechnicalIndicators(tempDf)\r\n\r\n        thresholds = self.GetThresholds(dfTechnicalIndicators=dfFeatures,\r\n                                        numOfSteps=self.numOfSteps)\r\n\r\n        position = self.HOLD\r\n\r\n        orders = pd.Series(index=dfFeatures.index)\r\n\r\n        for date in dfFeatures.index:\r\n            state = self.Discretize(dfFeatures.loc[date],\r\n                                    position + 1,\r\n                                    thresholds)\r\n\r\n            action = self.QLearner.Act(state, 0.0, update=False)\r\n\r\n            if date == dfFeatures.index[-1]:\r\n                newPosition = -position\r\n\r\n            else:\r\n                newPosition = self.GetPosition(position, action - 1)\r\n\r\n            orders.loc[date] = newPosition\r\n\r\n            position += newPosition\r\n\r\n        dfTrades = self.CreateDfTrades(orders=orders,\r\n                                       numOfShares=self.numOfShares)\r\n\r\n        return dfTrades\r\n    \r\n\r\n\r\n\r\n# *********************************************************************************************************************************************************** #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# ------------------------------------------------------ DefeatLearners for LinRegLearner and DTLearner ----------------------------------------------------- #\r\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------- #\r\n# *********************************************************************************************************************************************************** #\r\n\r\n\r\nclass DefeatLearners(object):\r\n\r\n    def BestForLinRegLearner(self, seed=1489683273):\r\n        \"\"\"\r\n        Description: This finds the best data for a linear regression learner.\r\n        Params:\r\n            seed (int): Seed value so the random data is repeatable.\r\n        Returns: Optimized data for a linear regression learner.\r\n        \"\"\"\r\n        np.random.seed(seed) \r\n\r\n        X = np.random.rand(100, 4)\r\n        # Purely linear target, which a linear regression learner can fit exactly.\r\n        Y = X[:, 0] + X[:, 1]*3 + X[:, 2]*2 - X[:, 3]*4\r\n\r\n        return X, Y \r\n\r\n\r\n    def BestForDecisionTreeLearner(self, seed=1489683273):\r\n        \"\"\"\r\n        Description: This finds the best data for a decision tree learner.\r\n        Params:\r\n            seed (int): Seed value so the random data is repeatable.\r\n        Returns: Optimized data for a decision tree learner.\r\n        \"\"\"\r\n\r\n        np.random.seed(seed)\r\n        # Data must contain between 10 and 1000 (inclusive) entries.\r\n        rows = np.random.randint(10, 1001)\r\n\r\n        # Number of features must be between 2 and 10 (inclusive).\r\n        cols = np.random.randint(2, 11)\r\n\r\n        # Generate random x data.\r\n        x = np.random.rand(rows, cols)\r\n\r\n        # Defeat the linear regression learner with a nonlinear (cubic product) function of x0, x1, and x2.\r\n        y = (-1/8) * ( \r\n            ((x[:, 0] - 2)**3) * ((x[:, 1] + 1)**2) * ((x[:, 2] - 4) )\r\n        )\r\n\r\n        return x, y\r\n\r\n\r\n    def CompareRmses(self, learnerOne, learnerTwo, X, Y):\r\n        \"\"\"\r\n        Description: This function serves to simply compare the two learners.\r\n\r\n        Params:\r\n            X (array): X axis values.\r\n            Y (array): Y axis 
values.\r\n            learnerOne/learnerTwo (object): LRL, KNN, DTL, RTL, etc.\r\n\r\n        Returns: Rmses for both learners.\r\n        \"\"\"\r\n\r\n        trainRows = int(math.floor(0.6 * X.shape[0]))\r\n        testRows = X.shape[0] - trainRows\r\n\r\n        train = np.random.choice(X.shape[0], size=trainRows, replace=False)\r\n        test = np.setdiff1d(np.array(range(X.shape[0])), train)\r\n\r\n        trainX = X[train, :]\r\n        trainY = Y[train]\r\n\r\n        testX = X[test, :]\r\n        testY = Y[test]\r\n\r\n        learnerOne.AddEvidence(trainX, trainY)\r\n        learnerTwo.AddEvidence(trainX, trainY)\r\n\r\n        predY = learnerOne.Query(testX)\r\n        rmseOne = math.sqrt(((testY - predY) ** 2).sum() / testY.shape[0])\r\n\r\n        predY2 = learnerTwo.Query(testX)\r\n        rmseTwo = math.sqrt(((testY - predY2) ** 2).sum() / testY.shape[0])\r\n\r\n        return rmseOne, rmseTwo\r\n    \r\n\r\n    def TestDefeatLearners(self):\r\n        lrl = LinRegLearner()\r\n        dtl = DecisionTreeLearner(leafSize=1)\r\n\r\n        X, Y = self.BestForLinRegLearner()\r\n        rmseLrl, rmseDtl = self.CompareRmses(lrl, dtl, X, Y)\r\n        print(\"\\nBest for LRL Results.\\nRMSE LRL: {}\\nRMSE DTL: {}\"\r\n              .format(rmseLrl, rmseDtl))\r\n\r\n        if rmseLrl < 0.9 * rmseDtl:\r\n            print(\"LRL < 0.9 DTL: Pass.\")\r\n        else:\r\n            print(\"LRL >= 0.9 DTL: Fail.\")\r\n\r\n        X, Y = self.BestForDecisionTreeLearner()\r\n        rmseLrl, rmseDtl = self.CompareRmses(lrl, dtl, X, Y)\r\n        print(\"\\nBest for DTL Results.\\nRMSE LRL: {}\\nRMSE DTL: {}\"\r\n              .format(rmseLrl, rmseDtl))\r\n\r\n        if rmseDtl < 0.9 * rmseLrl:\r\n            print(\"DTL < 0.9 LRL: Pass.\\n\")\r\n        else:\r\n            print(\"DTL >= 0.9 LRL: Fail.\\n\")\r\n    ","repo_name":"dannyleall/StockMarketTraderBot","sub_path":"Learners.py","file_name":"Learners.py","file_ext":"py","file_size_in_byte":67249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"43132225654","text":"import ccxt\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom core.parser import EXCHANGE_NAME\nfrom core.config import APIKEYS, db\n\n\n# LOGGING\nlevel = logging.INFO # TODO: set from .env\nlogging.basicConfig(format=\"%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s\",\n                    datefmt='%m-%d %H:%M', level=level)\n\n\n# for xcg in ccxt.exchanges: ...\nACCESS_ID = APIKEYS[EXCHANGE_NAME]['ACCESS_ID']\nSECRET_KEY = APIKEYS[EXCHANGE_NAME]['SECRET_KEY']\n\n# exchange = eval(f'ccxt.{exchangeName}()') # alternative using eval\nexchange = getattr(ccxt, EXCHANGE_NAME)(\n    {'apiKey': ACCESS_ID, 'secret': SECRET_KEY})\n\n\ndef cleanupHistorical(exchange, symbol, timeframes):\n    \"\"\"\n    Delete rows older than the retention term for each timeframe\n    \"\"\"\n    cleanupAfter = {\n        '1m': '3 days',\n        '5m': '3 weeks',\n        '1d': '3 months',\n        '1w': '3 years'\n    }\n\n    # Initialize so the except block can log these even if the exception\n    # fires before the first loop assignment.\n    tbl = sqlCleanup = ''\n    res = None\n\n    try:\n        for t in timeframes:\n            logging.info(f'cleaning timeframe, {t}...')\n            tbl = f'{exchange}_{symbol}_{t}'\n            sqlCleanup = f\"\"\"delete from \"{tbl}\" where timestamp_utc < CURRENT_DATE - INTERVAL '{cleanupAfter[t]}'\"\"\"\n            res = db.execute(sqlCleanup, con=db)\n            if res.rowcount > 0:\n                logging.info(\n                    f'cleaned up {res.rowcount} rows in timeframe, {t}...')\n\n    except Exception as e: # consider narrowing exception handling from generic, \"Exception\"\n        logging.error(\n            f'{e}\\n{res or \"\"}\\ntable, {tbl} may not exist, sql may be incorrect ({sqlCleanup or \"\"}), or connection to SQL may be invalid.')\n        pass\n\n\ndef getLatestTimestamp(tbl, since='1970-01-01T00:00:00Z', removeLatest=True):\n    \"\"\"\n    Find last imported row and remove\n    \"\"\"\n    try:\n        sqlLatest = f'select max(timestamp_utc) as timestamp_utc from \"{tbl}\"'\n        dfLatest = pd.read_sql(sql=sqlLatest, 
con=db)\n\n        # from ccxt docs, indicates last close value may be inaccurate\n        since = dfLatest.iloc[0]['timestamp_utc']\n\n        # remove latest to avoid dups and provide more accurate closing value\n        if removeLatest:\n            sqlRemoveLatest = f\"\"\"delete from \"{tbl}\" where timestamp_utc = '{since}'\"\"\"\n            res = db.execute(sqlRemoveLatest, con=db)\n            if res.rowcount == 0:\n                logging.warning(\n                    'No rows deleted; maybe table is blank, or issue with latest timestamp_utc')\n\n        return exchange.parse8601(since.isoformat())\n\n    except Exception as e: # consider narrowing exception handling from generic, \"Exception\"\n        logging.error(\n            f'table, {tbl} may not exist, or connection to SQL invalid.')\n        pass\n\n    return 0\n\n\ndef getLatestOHLCV(exchange, symbol, since=None):\n    \"\"\"\n    Find the latest rows of ohlcv data from the exchange (saving to SQL is done by putLatestOHLCV)\n    \"\"\"\n    data = []\n    limit = 1000\n    timeframe = '5m'\n    tf_milliseconds = 5*60*1000\n    try:\n        if exchange.has['fetchOHLCV']:\n            # paginate latest ohlcv from exchange\n            if since is None:\n                since = exchange.milliseconds() - 86400000 # 1 day\n\n            while since < exchange.milliseconds()-tf_milliseconds:\n                print(f'since: {since}...')\n                data += exchange.fetch_ohlcv(symbol,\n                    timeframe=timeframe, since=since, limit=limit)\n                since = int(data[-1][0])\n\n    except Exception as e:\n        logging.debug(e)\n\n    return data\n\n\ndef putLatestOHLCV(ohlcv, tbl, utcLatest):\n    \"\"\"\n    Keep OHLCV rows with timestamp_utc at or after utcLatest and append them to SQL table tbl\n    \"\"\"\n    try:\n        # save ohlcv to sql\n        columns = ['timestamp_utc', 'open', 'high', 'low', 'close', 'volume']\n        df = pd.DataFrame(np.row_stack(ohlcv), columns=columns)\n        dfLatest = df[df['timestamp_utc'] >= utcLatest].astype(\n            {'timestamp_utc': 'datetime64[ms]'}).set_index('timestamp_utc')\n        dfLatest.to_sql(tbl, con=db, if_exists='append',\n                        index_label='timestamp_utc')\n\n    except Exception as e:\n        logging.debug(e)\n","repo_name":"thedlop/ergopad","sub_path":"aggregator/app/exchanges/coinex.py","file_name":"coinex.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
+{"seq_id":"1633708635","text":"import os\nfrom pathlib import Path\n\nimport fatpack\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom utils import GUI, get_data_np, graphs, level_crossing, matrices\nimport rainflow\nfrom datetime import datetime\n\n# logger setup\nfrom utils.loggers import MyLog\n\nlogger = MyLog().logger\n\n\nclass RainFlowCounter:\n    def __init__(self, file_path,\n                 mean_bin_size, range_bin_size,\n                 material, k_t, g_exc_bin_size,\n                 racetrack_filter=True, h=0.1,\n                 verbose=False):\n        self.verbose = verbose\n        # no trailing comma here; it would store a one-element tuple instead of the bool\n        self.racetrack_filter = racetrack_filter\n        self.file_path = file_path\n        self.mean_bin_size = mean_bin_size\n        self.range_bin_size = range_bin_size\n        self.material = material\n        self.k_t = k_t\n        self.gExc_bin_size = g_exc_bin_size\n\n        # load data\n        t0 = datetime.now()\n        self.tt, self.nz = get_data_np.get_data(self.file_path, [7])\n        self.nz = np.array(self.nz).reshape(-1)\n\n        if verbose:\n            logger.info(\n                f'\\t▪ Data successfully loaded in '\n                f'{round((datetime.now() - t0).total_seconds(), 3)} s.')\n\n        # check whether any Nz exceeds 3.5g\n        self.invalid_data = False\n        if np.max(self.nz) > 3.5:\n            self.invalid_data = True\n\n        # filter data\n        if racetrack_filter:\n            self.nz, ix = fatpack.find_reversals_racetrack_filtered(\n                self.nz, h=h, k=200)\n            self.tt = self.tt[ix]\n\n        # extract cycles and their properties - (n, 5)\n        # - columns = [mean, range, count, start, end]\n        cyc_prop = {\n            'cyc_mean': [], 
'cyc_range': [], 'cyc_count': [],\n 'start_i': [], 'end_i': [],\n 'peak': [], 'valley': []\n }\n\n t0 = datetime.now()\n for rg, mn, c, i_s, i_e in rainflow.extract_cycles(\n self.nz): # this is a generator\n cyc_prop['cyc_mean'].append(mn)\n cyc_prop['cyc_range'].append(rg)\n cyc_prop['cyc_count'].append(c)\n cyc_prop['start_i'].append(self.nz[i_s])\n cyc_prop['end_i'].append(self.nz[i_e])\n cyc_prop['peak'].append(mn + rg / 2)\n cyc_prop['valley'].append(mn - rg / 2)\n if verbose:\n logger.info(\n f'\\t▪ Load cycles successfully extracted in '\n f'{round((datetime.now() - t0).total_seconds(), 3)} s.')\n\n self.cycles = np.column_stack(\n (cyc_prop['cyc_mean'], cyc_prop['cyc_range'],\n cyc_prop['cyc_count'], cyc_prop['peak'], cyc_prop['valley']))\n\n # ==================== level cross counting ====================\n t0 = datetime.now()\n self.top_bins, self.bottom_bins, self.top_counts, self.bottom_counts \\\n = level_crossing.level_cross_count(self.cycles[:, 2:],\n self.gExc_bin_size)\n if verbose:\n logger.info(f'\\t▪ Level-crossings successfully calculated in '\n f'{round((datetime.now() - t0).total_seconds(), 3)} '\n f's.\\n')\n\n # print(gc.get_count())\n # gc.collect()\n # print(gc.get_count())\n\n # ================ method to generate mean-range matrix ================\n def mean_range_matrix(self, mean_bin_size, range_bin_size):\n t0 = datetime.now()\n if self.verbose:\n m_r_matrix = matrices.mean_range_matrix(self.cycles, mean_bin_size,\n range_bin_size)\n logger.info(f'\\t▪ Mean-Range matrix successfully generated in '\n f'{round((datetime.now() - t0).total_seconds(), 3)} '\n f's.')\n else:\n m_r_matrix = matrices.mean_range_matrix(self.cycles, mean_bin_size,\n range_bin_size)\n return m_r_matrix\n\n # ================== method to generate from-to matrix ==================\n def from_to_matrix(self, from_bin_size=0.25, to_bin_size=0.25):\n t0 = datetime.now()\n if self.verbose:\n from_to_matrix = matrices.from_to(self.cycles, from_bin_size,\n to_bin_size)\n logger.info(f'\\t▪ From-To matrix successfully generated in '\n f'{round((datetime.now() - t0).total_seconds(), 3)} '\n f's.')\n else:\n from_to_matrix = matrices.from_to(self.cycles, from_bin_size,\n to_bin_size)\n return from_to_matrix\n\n # ========== method to calculate total damage from Minor's rule ==========\n def total_damage(self, material, k_t):\n t0 = datetime.now()\n if self.verbose:\n total_damage = matrices.damage(material, k_t)\n logger.info(f'\\t▪ Total damage successfully calculated in '\n f'{round((datetime.now() - t0).total_seconds(), 3)} '\n f's.')\n else:\n total_damage = matrices.damage(material, k_t)\n return total_damage\n\n\nclass MultipleFlights:\n\n def __init__(self, verbose=False):\n \"\"\"\n This is the main object that calls the GUI to either run a single\n IMU file or aggregate the IMU data from multiple flights and\n generate a total g-exceedance curve.\n\n :param verbose: bool Determines whether punch report of the process\n time in the run console.\n\n \"\"\"\n t0 = datetime.now()\n self.verbose = verbose\n\n # call GUI\n self.mode, mean_bin_size, range_bin_size, gExc_bin_size, \\\n self.address, material, k_t, rt_flt, h, self.show_labels = \\\n GUI.call_gui()\n\n # adjust file names and addresses depending on analysis type requested\n if self.mode: # if multi file analysis is requested\n # create list of all the files in `address`\n imu_files = os.listdir(self.address)\n else: # if single file analysis is requested\n imu_files = [\n os.path.basename(self.address)] # put the file name in a 
list\n # get the directory name\n self.address = os.path.dirname(self.address)\n\n # create and populate a list, each element of which is a\n # `RainFlowCounter` object generated by processing each file (flight)\n self.flights = []\n i = 0\n for file_name in imu_files:\n i += 1\n # use only *.dat files\n ext = os.path.splitext(file_name)[1]\n if ext != '.dat':\n pass\n else:\n with open(os.path.join(self.address, file_name), 'r') as f:\n if len(f.readline()) != 152:\n pass\n else:\n logger.info(\n f'• {i}. Analyzing \"{file_name.split(\".\")[0]}\"')\n flight = RainFlowCounter(\n os.path.join(self.address, file_name),\n mean_bin_size, range_bin_size,\n material, k_t, gExc_bin_size,\n racetrack_filter=rt_flt, h=h,\n verbose=self.verbose)\n\n # in case flight data contains Nz > 3, discard file\n if flight.invalid_data:\n logger.warning(\n f'File \"{file_name.split(\".\")[0]}\" contains '\n f'Nz values higher than 3.5g, therefore was '\n f'discarded.')\n continue\n else:\n self.flights.append(flight)\n\n # each flight object has its own dimensions based on its data range\n # and requested g-exceedance resolution. Below all flights are\n # padded with zeros to match the flight with the widest data range.\n\n # farthest bin above baseline (flight with the widest data range)\n highest_bin = max([len(f.top_counts) for f in self.flights])\n self.x_above = np.argmax([len(f.top_counts) for f in self.flights])\n\n # farthest bin below baseline (flight with the widest data range)\n lowest_bin = max([len(f.bottom_counts) for f in self.flights])\n self.x_below = np.argmax([len(f.bottom_counts) for f in self.flights])\n\n for f in self.flights:\n if len(f.top_counts) < highest_bin:\n f.top_counts = np.pad(f.top_counts,\n (0, highest_bin - len(f.top_bins)),\n 'constant')\n\n if len(f.bottom_counts) < lowest_bin:\n f.bottom_counts = np.pad(f.bottom_counts,\n (0, lowest_bin - len(f.bottom_bins)),\n 'constant')\n\n # aggregation of all flights (files)\n self.above_baseline_counts = \\\n sum([f.top_counts for f in self.flights])\n self.below_baseline_counts = \\\n sum([f.bottom_counts for f in self.flights])\n\n minutes = divmod((datetime.now() - t0).total_seconds(), 60)\n logger.info(\n f'Total collapsed time is '\n f'{int(minutes[0])} minutes and {round(minutes[1], 3)} seconds.')\n\n def g_exc_curve(self,\n title: str = 'g-Exceedance Spectra',\n x_label: str = r'$N_Z$, Vertical Acceleration [g]',\n y_label: str = 'Cumulative Exceedance Count',\n save_figure: bool = False):\n \"\"\"\n Method to generate the aggregate g-exceedance curve from all input data\n\n :param title: Graph title\n :param x_label: x-axis label\n :param y_label: y-axis label\n :param save_figure: should the graph be saved?\n :return: Shows the graph.\n \"\"\"\n\n fig, ax = plt.subplots(figsize=(11, 7), dpi=150)\n plt.subplots_adjust(bottom=0.1, top=0.94, right=0.96, left=0.1)\n\n graphs.g_exceedance_plot(top_bins=self.flights[self.x_above].top_bins,\n top_counts=self.above_baseline_counts,\n bottom_bins=self.flights[\n self.x_below].bottom_bins,\n bottom_counts=self.below_baseline_counts,\n title=title, x_label=x_label, y_label=y_label,\n print_label=self.show_labels,\n print_bins=False,\n ax=ax)\n\n # save graph if requested\n if save_figure:\n # form the image name and its storage directory\n now = datetime.now()\n d, m, y = \\\n now.strftime('%d'), now.strftime('%b'), now.strftime('%Y')\n hr, mn = \\\n now.strftime('%H'), now.strftime('%M')\n img_name = \\\n title + ' ' + d + m + y + ', ' + hr + '-' + mn + '.png'\n img_dir = 
os.path.join(self.address, '~images')\n\n # check if the directory exist and if not, create it\n Path(img_dir).mkdir(parents=True, exist_ok=True)\n\n # save image and report the address\n plt.savefig(os.path.join(img_dir, img_name), dpi=300)\n logger.info(f'\\nThe plot is saved as an image at the address '\n f'below:\\n\\n{os.path.join(img_dir, img_name)}')\n\n # show the plot\n plt.show()\n","repo_name":"pooya-row/Load_Monitoring","sub_path":"utils/rf_counter.py","file_name":"rf_counter.py","file_ext":"py","file_size_in_byte":11648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42524487328","text":"\nf = open(\"hightemp.txt\")\nd = {}\n\nfor line in f:\n first = line.split('\\t')[0]\n for c in first:\n if c in d.keys():\n d[c] = d[c] + 1\n else:\n d[c] = 1\n\nfor k, v in sorted(d.items(), key =lambda x:x[1], reverse= True):\n print( k, v)\n\n","repo_name":"MizukiSonoko/nlk100","sub_path":"chapter2/a19.py","file_name":"a19.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41522419358","text":"from collections import deque\nfrom typing import Deque\n\nfrom kivy.app import App\nfrom kivy.uix.screenmanager import ScreenManager\n\nfrom ..controls.KynikuiScreen import KynikuiScreen\n\n\nclass WindowManager(ScreenManager):\n \"\"\"Controller that manages transitions between screens.\n \"\"\"\n\n _animationDuration: float\n\n _currentScreen: KynikuiScreen\n\n _windowStack: Deque[KynikuiScreen]\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self._animationDuration = .3\n self._currentScreen = None\n self._windowStack = deque()\n\n def proceedToScreen(self,\n nextScreen: KynikuiScreen) -> None:\n \"\"\"Proceeds to the given screen.\n\n ### Args:\n - nextScreen (KynikuiScreen): The next screen to proceed to.\n \"\"\"\n\n if not self._currentScreen:\n self._currentScreen = App.get_running_app().root.ids[self.current]\n\n self._windowStack.append(self._currentScreen)\n\n self._changeScreen(nextScreen)\n\n def _changeScreen(self,\n nextScreen: KynikuiScreen,\n transitionDirection: str = \"left\") -> None:\n \"\"\"Changes screen based on the given transitionDirection.\n\n ### Args:\n - nextScreen (KynikuiScreen): The next screen to proceed to.\n - transitionDirection (str, optional): Determines which direction the transition animates in. 
Default value is \"left\".\n \"\"\"\n app = App.get_running_app()\n\n self._animateNavbar()\n\n self.transition.duration = self._animationDuration\n self.transition.direction = transitionDirection\n\n if self._currentScreen:\n self._currentScreen._onScreenExited()\n nextScreen._onScreenEntered()\n\n self.current = nextScreen.name\n self._currentScreen = nextScreen\n app._current = self._currentScreen","repo_name":"ryll-spectre/KyNikui","sub_path":"src/components/views/window_manager/window_manager.py","file_name":"window_manager.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25824367455","text":"import pyodbc\nimport logging\nimport os\nimport sys\nimport time\n\nmodulePath = os.path.dirname(__file__)\nrootPath = os.path.dirname(modulePath)\nscriptsPath = f'{modulePath}/sql_scripts'\n\n# Such ugly hack to allow import from sibling folder\nsys.path.append(rootPath) ## append parent folder to path\nimport common\n\nlogger = common.getLogger(modulePath)\nlogger.info('Running ' + __file__)\n\ndef main():\n try:\n os.system(\"clear\")\n start_time = time.monotonic()\n # Create Tables & Materialized Views\n create_tables()\n\n # Create Views\n create_views()\n\n # Create Expansion Views\n create_expansion_views()\n\n # Create Dedupe Views\n # create_dedupe_views()\n\n print('seconds: ', time.monotonic() - start_time)\n except Exception as e:\n logger.critical(f'The model encountered an exception: {e}')\n\ndef create_tables():\n tear_down = common.read_file(f'{scriptsPath}/tear_down.sql')\n create_tables = common.read_file(f'{scriptsPath}/create_tables.sql')\n create_materialized_views = common.read_file(f'{scriptsPath}/create_materialized_views.sql')\n\n with pyodbc.connect(common.connectionString) as conn:\n with conn.cursor() as cursor:\n logger.info('tear down')\n cursor.execute(tear_down)\n logger.info('create tables')\n cursor.execute(create_tables)\n logger.info('create materialized views')\n cursor.execute(create_materialized_views)\n\ndef create_views():\n view_files = [f.name for f in os.scandir(f'{scriptsPath}/views') if f.is_file()]\n view_files.sort()\n for view_file in view_files:\n view_sql = common.read_file(f'{scriptsPath}/views/{view_file}')\n\n with pyodbc.connect(common.connectionString) as conn:\n with conn.cursor() as cursor:\n logger.info(f'create view {view_file}')\n cursor.execute(view_sql)\n\ndef create_expansion_views():\n expanded_lanes_view_v1 = common.read_file(f'{scriptsPath}/expanded_lanes_v1.sql')\n expanded_lanes_view_v2 = common.read_file(f'{scriptsPath}/expanded_lanes_v2.sql')\n\n with pyodbc.connect(common.connectionString) as conn:\n with conn.cursor() as cursor:\n logger.info('create expansion views')\n cursor.execute(expanded_lanes_view_v1)\n cursor.execute(expanded_lanes_view_v2)\n\ndef create_dedupe_views():\n deduped_lanes_view_v1 = common.read_file(f'{scriptsPath}/deduped_lanes_v1.sql')\n deduped_lanes_view_v2 = common.read_file(f'{scriptsPath}/deduped_lanes_v2.sql')\n\n with pyodbc.connect(common.connectionString) as conn:\n with conn.cursor() as cursor:\n logger.info('create deduped views')\n cursor.execute(deduped_lanes_view_v1)\n cursor.execute(deduped_lanes_view_v2)\n\nif __name__ == '__main__':\n main()\n\n logger.info('done')\n 
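# explicitly flush and close all logging handlers so any buffered\n    # records are written out before the interpreter exits\n    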
logging.shutdown()","repo_name":"TheRealChssAddct/anura-lane-expansion-spike","sub_path":"1.generate_anura_standard_scaffolding/generate_model_db.py","file_name":"generate_model_db.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71323809815","text":"# Write a Python program that matches a string that has an 'a' followed by anything, ending in 'b'.\n\nimport re\n\nkxkx = input()\n\nx = re.match(r'^a.*b$', kxkx)\nif x:\n    print(\"match\")\nelse:\n    print(\"no\")","repo_name":"Aluakx/pp2-22B031106","sub_path":"tsis5/regex/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"18644042057","text":"from p2pool.bitcoin import networks\n\nPARENT = networks.nets['rpcoin']\nSHARE_PERIOD = 10 # seconds\nCHAIN_LENGTH = 12*60*60//10 # shares\nREAL_CHAIN_LENGTH = 12*60*60//10 # shares\nTARGET_LOOKBEHIND = 20 # shares\nSPREAD = 50 # blocks\nIDENTIFIER = 'faf1e34fb0c7ef21'.decode('hex')\nPREFIX = 'e0ff56a1340e56f1'.decode('hex')\nP2P_PORT = 8127\nMIN_TARGET = 0\nMAX_TARGET = 2**256//2**20 - 1\nPERSIST = False\nWORKER_PORT = 9127\nBOOTSTRAP_ADDRS = 'p2pool.e-pool.net p2pool-eu.gotgeeks.com p2pool-us.gotgeeks.com rav3n.dtdns.net p2pool.gotgeeks.com p2pool.dtdns.net solidpool.org'.split(' ')\nANNOUNCE_CHANNEL = '#p2pool-alt'\nVERSION_CHECK = lambda v: True\n","repo_name":"amarian12/p2pool-adaptive","sub_path":"p2pool/networks/rpcoin.py","file_name":"rpcoin.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"33123989263","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhttps://www.runoob.com/python3/python3-lcm.html Python least common multiple (LCM) algorithm\n\"\"\"\n\nimport pysnooper\n\n\n# define the function\n@pysnooper.snoop('log.txt')\ndef lcm(x, y):\n\n    # take the larger of the two numbers\n    if x > y:\n        greater = x\n    else:\n        greater = y\n\n    while True:\n        if greater % x == 0 and greater % y == 0:  # divisible by both numbers\n            result = greater\n            break  # stop the loop once the condition is met\n        greater += 1\n\n    return result\n\n\n# get user input\nnum1 = int(input(\"Enter the first number: \"))\nnum2 = int(input(\"Enter the second number: \"))\n\nprint(\"The least common multiple of\", num1, \"and\", num2, \"is\", lcm(num1, num2))\n","repo_name":"yusheng88/RookieInstance","sub_path":"Rookie024.py","file_name":"Rookie024.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33863368039","text":"import numpy as np\nimport cv2\nimport time\nimport pyautogui\nimport os\n\nmethod = cv2.TM_CCOEFF_NORMED\n\nclass Match:\n\n\t#method = cv2.TM_CCOEFF_NORMED\n\t\n\tdef __init__(self, path, name, method):\n\t\tself.path = path\n\t\tself.name = name\n\t\tself.method = method\n\t\t#self.template = path+name\n\t\t\n\tdef check_open(self):\n\t\ttemplate = cv2.imread('{}{}'.format(self.path, self.name), 0)\n\t\tisvisible = False  # start with the visibility flag as False\n\t\tpyautogui.screenshot(\"/home/user01/Pictures/screen.png\")  # take a screenshot\n\t\timg = cv2.imread('/home/user01/Pictures/screen.png', 0)\n\t\timg2 = img.copy()  # copy to another variable so it can be edited\n\t\tos.system(\"rm /home/user01/Pictures/screen*.png\")\n\t\tresult = cv2.matchTemplate(img2, template, self.method)\n\t\tmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)  # put values into variables\n\t\tprint(\"\\nSearching for {}\".format(self.name))\n\t\t# Check to see if there is a match from the matchTemplate 
method\n\t\t# Any max value over 90% is considered a match. We are matching\n\t\t# first against the open menu template\n\t\tif max_val >= 0.9:  # if max value is over 90%\n\t\t\tisvisible = True  # then true\n\t\telse:  # if max value is under 90%\n\t\t\tisvisible = False  # then false\n\n\t\treturn isvisible\n\npath_arena = '/home/user01/Documents/Town/resources/Arena/'\n\nOpenArena = Match(path_arena, 'arena1.png', cv2.TM_CCOEFF_NORMED)\n\nSkipBattle = Match(path_arena, 'skip_battle.png', cv2.TM_CCOEFF_NORMED)\n\n#print(type(SkipBattle.template))\nSkipBattle.method\n\nprint(OpenArena.check_open())\n\ndef arena_open():\n    name = 'arena1.png'\n    template = cv2.imread('{}{}'.format(path_arena, name), 0)\n    method = cv2.TM_CCOEFF_NORMED\n    note1 = \"{} found\".format(name)\n    note2 = \"{} not found\".format(name)\n    print(\"\\nSearching for {}\".format(name))\n    Found_Click(template, method, note1, note2)\n\n","repo_name":"gibsai/town","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21126941648","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n        stk = []\n        T = root\n        while True:\n            while T:\n                stk.append(T)\n                T = T.left\n            topmost = stk.pop()\n            k -= 1\n            if not k:\n                return topmost.val\n            T = topmost.right","repo_name":"fxrcode/FG","sub_path":"230-kth-smallest-element-in-a-bst/230-kth-smallest-element-in-a-bst.py","file_name":"230-kth-smallest-element-in-a-bst.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"7788643762","text":"import requests\ncounts = {}\nhtml = requests.get('https://raw.githubusercontent.com/mobigen/mobigen-python-study-2021/master/week03/wiki_python.txt')\nfor word in list(filter(None, html.text.split(' '))):\n    if word not in counts:\n        counts[word] = 0\n    counts[word] += 1\n\nresult = sorted(counts.items(), key=lambda item: item[1], reverse=True)\nfor i in range(10):\n    print(result[i])\n","repo_name":"mobigen/mobigen-python-study-2021","sub_path":"week03/homework/word_count_안교준.py","file_name":"word_count_안교준.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"74777929814","text":"#!/usr/bin/env python3\nimport binascii\nimport random\nimport io\nimport copy\nfrom typing import List, Set, Tuple, Union\nfrom ...platforms.platform import Platform\nfrom ...core.smtlib import (\n    SelectedSolver,\n    BitVec,\n    Array,\n    ArrayProxy,\n    Operators,\n    Constant,\n    ArrayVariable,\n    ArrayStore,\n    translate_to_smtlib,\n    simplify,\n    issymbolic,\n)\nfrom ...core.state import Concretize, TerminateState\nfrom ...core.smtlib.visitors import simplify\nfrom ...exceptions import EthereumError\nimport sha3\nimport rlp\nfrom .common import *\nfrom .transaction import Transaction\nfrom .exceptions import *\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EVMWorld(Platform):\n    _published_events = {\n        \"evm_read_storage\",\n        \"evm_write_storage\",\n        \"evm_read_code\",\n        \"evm_write_code\",\n        \"decode_instruction\",\n        \"execute_instruction\",\n        \"open_transaction\",\n        \"close_transaction\",\n        \"symbolic_function\",\n        \"solve\",\n    }\n\n    def __init__(self, 
constraints, fork=DEFAULT_FORK, **kwargs):\n super().__init__(path=\"NOPATH\", **kwargs)\n self._world_state = {}\n self._constraints = constraints\n self._callstack: List[\n Tuple[Transaction, List[EVMLog], Set[int], Union[bytearray, ArrayProxy], EVM]\n ] = []\n self._deleted_accounts: Set[int] = set()\n self._logs: List[EVMLog] = list()\n self._pending_transaction = None\n self._transactions: List[Transaction] = list()\n self._fork = fork\n self._block_header = None\n self.start_block()\n\n def __getstate__(self):\n state = super().__getstate__()\n state[\"_pending_transaction\"] = self._pending_transaction\n state[\"_logs\"] = self._logs\n state[\"_world_state\"] = self._world_state\n state[\"_constraints\"] = self._constraints\n state[\"_callstack\"] = self._callstack\n state[\"_deleted_accounts\"] = self._deleted_accounts\n state[\"_transactions\"] = self._transactions\n state[\"_fork\"] = self._fork\n state[\"_block_header\"] = self._block_header\n\n return state\n\n def __setstate__(self, state):\n super().__setstate__(state)\n self._constraints = state[\"_constraints\"]\n self._pending_transaction = state[\"_pending_transaction\"]\n self._world_state = state[\"_world_state\"]\n self._deleted_accounts = state[\"_deleted_accounts\"]\n self._logs = state[\"_logs\"]\n self._callstack = state[\"_callstack\"]\n self._transactions = state[\"_transactions\"]\n self._fork = state[\"_fork\"]\n self._block_header = state[\"_block_header\"]\n\n for _, _, _, _, vm in self._callstack:\n self.forward_events_from(vm)\n\n def try_simplify_to_constant(self, data):\n concrete_data = bytearray()\n # for c in data:\n for index in range(len(data)):\n c = data[index]\n simplified = simplify(c)\n\n if isinstance(simplified, Constant):\n concrete_data.append(simplified.value)\n else:\n # simplify by solving. probably means that we need to improve simplification\n self._publish(\"will_solve\", self.constraints, simplified, \"get_all_values\")\n solutions = SelectedSolver.instance().get_all_values(\n self.constraints, simplified, 2, silent=True\n )\n self._publish(\n \"did_solve\", self.constraints, simplified, \"get_all_values\", solutions\n )\n if len(solutions) != 1:\n break\n concrete_data.append(solutions[0])\n else:\n data = bytes(concrete_data)\n return data\n\n def symbolic_function(self, func, data):\n \"\"\"\n Get an unsound symbolication for function `func`\n\n \"\"\"\n data = self.try_simplify_to_constant(data)\n try:\n result = []\n self._publish(\n \"on_symbolic_function\", func, data, result\n ) # This updates the local copy of result\n\n return result[0]\n except Exception as e:\n logger.info(\"Error! 
%r\", e)\n self._publish(\"will_solve\", self.constraints, data, \"get_value\")\n data_c = SelectedSolver.instance().get_value(self.constraints, data)\n self._publish(\"did_solve\", self.constraints, data, \"get_value\", data_c)\n return int(sha3.keccak_256(data_c).hexdigest(), 16)\n\n @property\n def PC(self):\n return (self.current_vm.address, self.current_vm.pc)\n\n def __getitem__(self, index):\n assert isinstance(index, int)\n return self._world_state[index]\n\n def __contains__(self, key):\n assert not issymbolic(key), \"Symbolic address not supported\"\n return key in self.accounts\n\n def __str__(self):\n return (\n \"WORLD:\"\n + str(self._world_state)\n + \"\\n\"\n + str(list((map(str, self.transactions))))\n + str(self.logs)\n )\n\n @property\n def logs(self):\n return self._logs\n\n @property\n def constraints(self):\n return self._constraints\n\n @constraints.setter\n def constraints(self, constraints):\n self._constraints = constraints\n if self.current_vm:\n self.current_vm.constraints = constraints\n\n @property\n def evmfork(self):\n return self._fork\n\n def _transaction_fee(self, sort, address, price, bytecode_or_data, caller, value):\n GTXCREATE = (\n 32000 # Paid by all contract creating transactions after the Homestead transition.\n )\n GTXDATAZERO = 4 # Paid for every zero byte of data or code for a transaction.\n GTXDATANONZERO = 16 # Paid for every non - zero byte of data or code for a transaction.\n GTRANSACTION = 21000 # Paid for every transaction\n if sort == \"CREATE\":\n tx_fee = GTXCREATE\n else:\n tx_fee = GTRANSACTION # Simple transaction fee\n\n zerocount = 0\n nonzerocount = 0\n if isinstance(bytecode_or_data, (Array, ArrayProxy)):\n # if nothing was written we can assume all elements are default to zero\n if len(bytecode_or_data.written) == 0:\n zerocount = len(bytecode_or_data)\n else:\n for index in range(len(bytecode_or_data)):\n try:\n c = bytecode_or_data.get(index, 0)\n except AttributeError:\n c = bytecode_or_data[index]\n\n zerocount += Operators.ITEBV(256, c == 0, 1, 0)\n nonzerocount += Operators.ITEBV(256, c == 0, 0, 1)\n\n tx_fee += zerocount * GTXDATAZERO\n tx_fee += nonzerocount * GTXDATANONZERO\n return simplify(tx_fee)\n\n def _make_vm_for_tx(self, tx):\n if tx.sort == \"CREATE\":\n bytecode = tx.data\n data = bytes()\n else:\n bytecode = self.get_code(tx.address)\n data = tx.data\n\n if tx.sort == \"DELEGATECALL\":\n # So at a DELEGATECALL the environment should look exactly the same as the original tx\n # This means caller, value and address are the same as prev tx\n assert tx.value == 0\n address = self.current_transaction.address\n caller = self.current_transaction.caller\n value = self.current_transaction.value\n else:\n address = tx.address\n caller = tx.caller\n value = tx.value\n\n gas = tx.gas\n\n vm = EVM(self._constraints, address, data, caller, value, bytecode, world=self, gas=gas)\n if self.depth == 0:\n # Only at human level we need to debit the tx_fee from the gas\n # In case of an internal tx the CALL-like instruction will\n # take the fee by itself\n tx_fee = self._transaction_fee(\n tx.sort, tx.address, tx.price, tx.data, tx.caller, tx.value\n )\n vm._consume(tx_fee)\n return vm\n\n def _open_transaction(self, sort, address, price, bytecode_or_data, caller, value, gas=None):\n \"\"\"\n This try to opens a transaction.\n\n :param sort: CREATE, CALL, CALLCODE, STATICCALL, DELEGATECALL\n :param address: the destination address\n :param price: the gas price. 
Used for human transactions\n        :param bytecode_or_data: the calldata, or the bytecode for creates\n        :param caller: the caller account\n        :param value: wei to transfer\n        :param gas: gas budget\n        :return: True if the transaction got accepted (enough balance to cover value and gas)\n        \"\"\"\n        # sort\n        if sort not in {\"CALL\", \"CREATE\", \"DELEGATECALL\", \"CALLCODE\", \"STATICCALL\"}:\n            raise EVMException(f\"Transaction type '{sort}' not supported\")\n\n        if caller not in self.accounts:\n            logger.info(\"Caller not in accounts\")\n            raise EVMException(\n                f\"Caller account {hex(caller)} does not exist; valid accounts: {list(map(hex, self.accounts))}\"\n            )\n\n        if sort == \"CREATE\":\n            expected_address = self.new_address(sender=caller)\n            if address is None:\n                address = expected_address\n            if address != expected_address:\n                raise EthereumError(\n                    f\"Error: contract created from address {hex(caller)} with nonce {self.get_nonce(caller)} was expected to be at address {hex(expected_address)}, but create_contract was called with address={hex(address)}\"\n                )\n\n        if address not in self.accounts:\n            logger.info(\"Address does not exist; creating it.\")\n            # Creating an account that is not yet accessible\n            self.create_account(address=address, nonce=int(sort != \"CREATE\"))\n\n        tx = Transaction(\n            sort, address, price, bytecode_or_data, caller, value, depth=self.depth, gas=gas\n        )\n        self._publish(\"will_open_transaction\", tx)\n        # Send the tx funds (We know there are enough at this point)\n        if self.depth == 0:\n            # Debit the full gas budget in advance\n            aux_price = Operators.ZEXTEND(tx.price, 512)\n            aux_gas = Operators.ZEXTEND(tx.gas, 512)\n            self.sub_from_balance(caller, aux_price * aux_gas)\n        self.send_funds(tx.caller, tx.address, tx.value)\n\n        if tx.address not in self.accounts:\n            self.create_account(tx.address)\n\n        # If not a human tx, reset returndata\n        # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-211.md\n        if self.current_vm:\n            self.current_vm._return_data = b\"\"\n\n        vm = self._make_vm_for_tx(tx)\n\n        self._callstack.append(\n            (tx, self.logs, self.deleted_accounts, copy.copy(self.get_storage(address)), vm)\n        )\n        self.forward_events_from(vm)\n        self._publish(\"did_open_transaction\", tx)\n        return True\n\n    def _close_transaction(self, result, data=None, rollback=False):\n        self._publish(\"will_close_transaction\", self._callstack[-1][0])\n        tx, logs, deleted_accounts, account_storage, vm = self._callstack.pop()\n        assert self.constraints == vm.constraints\n        # Keep constraints gathered in the last vm\n        self.constraints = vm.constraints\n\n        # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-211.md\n        if data is not None and self.current_vm is not None:\n            self.current_vm._return_data = data\n        if rollback:\n            self._set_storage(vm.address, account_storage)\n            self._logs = logs\n            # Return the transaction value\n            self.send_funds(tx.address, tx.caller, tx.value)\n        else:\n            self._deleted_accounts = deleted_accounts\n        self.increase_nonce(tx.caller)\n\n        if result in {\"THROW\"}:\n            unused_gas = 0\n            refund = 0\n        else:\n            unused_gas = vm._gas\n            refund = vm._refund\n\n        used_gas = Operators.ZEXTEND(tx.gas, 512) - unused_gas\n        refund = Operators.ITEBV(512, Operators.UGE(refund, used_gas // 2), used_gas // 2, refund)\n\n        if tx.is_human:\n            for deleted_account in self._deleted_accounts:\n                if deleted_account in self._world_state:\n                    del self._world_state[deleted_account]\n            unused_fee = unused_gas * tx.price\n            used_fee = used_gas * tx.price\n            self.add_to_balance(tx.caller, unused_fee)\n            self.add_to_balance(tx.caller, refund * tx.price)\n            if 
self.block_coinbase() in self:\n                self.add_to_balance(self.block_coinbase(), used_fee - refund * tx.price)\n            else:\n                logger.info(\n                    \"Coinbase not set. Throwing %r wei for the gas\", used_fee - refund * tx.price\n                )\n        else:\n            # Refund the unused gas and the refund counter to the calling frame\n            self.current_vm._gas += unused_gas\n            self.current_vm._refund += refund\n        if tx.sort == \"CREATE\":\n            if result in (\"RETURN\", \"STOP\"):\n                # vm.consume(len(tx.return_data) * GCREATEDATAGAS)\n                self.set_code(tx.address, data)\n            else:\n                self.delete_account(tx.address)\n\n        tx.set_result(result, data, used_gas - refund)\n        self._transactions.append(tx)\n        self._publish(\"did_close_transaction\", tx)\n\n        if self.depth == 0:\n            raise TerminateState(tx.result)\n\n    @property\n    def all_transactions(self):\n        txs = tuple(self._transactions)\n        return txs + tuple((x[0] for x in reversed(self._callstack)))\n\n    @property\n    def transactions(self):\n        \"\"\"Completed transactions\"\"\"\n        return tuple(self._transactions)\n\n    @property\n    def human_transactions(self):\n        \"\"\"Completed human transactions\"\"\"\n        txs = []\n        for tx in self.transactions:\n            if tx.depth == 0:\n                txs.append(tx)\n        return tuple(txs)\n\n    @property\n    def last_transaction(self):\n        \"\"\"Last completed transaction\"\"\"\n        if len(self.transactions):\n            return self.transactions[-1]\n        return None\n\n    @property\n    def last_human_transaction(self):\n        \"\"\"Last completed human transaction\"\"\"\n        for tx in reversed(self.transactions):\n            if tx.depth == 0:\n                return tx\n        return None\n\n    @property\n    def current_vm(self):\n        \"\"\"current vm\"\"\"\n        try:\n            _, _, _, _, vm = self._callstack[-1]\n            return vm\n        except IndexError:\n            return None\n\n    @property\n    def current_transaction(self):\n        \"\"\"current tx\"\"\"\n        try:\n            tx, _, _, _, _ = self._callstack[-1]\n            if tx.result is not None:\n                # That tx finished. 
No current tx.\n return None\n assert tx.depth == 0\n return tx\n except IndexError:\n return None\n\n @property\n def accounts(self):\n return list(self._world_state.keys())\n\n @property\n def normal_accounts(self):\n accs = []\n for address in self.accounts:\n if len(self.get_code(address)) == 0:\n accs.append(address)\n return accs\n\n @property\n def contract_accounts(self):\n accs = []\n for address in self.accounts:\n if len(self.get_code(address)) > 0:\n accs.append(address)\n return accs\n\n @property\n def deleted_accounts(self):\n return self._deleted_accounts\n\n def delete_account(self, address):\n if address in self._world_state:\n self._deleted_accounts.add(address)\n\n def get_storage_data(self, storage_address, offset):\n \"\"\"\n Read a value from a storage slot on the specified account\n\n :param storage_address: an account address\n :param offset: the storage slot to use.\n :type offset: int or BitVec\n :return: the value\n :rtype: int or BitVec\n \"\"\"\n value = self._world_state[storage_address][\"storage\"].get(offset, 0)\n return simplify(value)\n\n def set_storage_data(self, storage_address, offset, value):\n \"\"\"\n Writes a value to a storage slot in specified account\n\n :param storage_address: an account address\n :param offset: the storage slot to use.\n :type offset: int or BitVec\n :param value: the value to write\n :type value: int or BitVec\n \"\"\"\n self._world_state[storage_address][\"storage\"][offset] = value\n\n def get_storage_items(self, address):\n \"\"\"\n Gets all items in an account storage\n\n :param address: account address\n :return: all items in account storage. items are tuple of (index, value). value can be symbolic\n :rtype: list[(storage_index, storage_value)]\n \"\"\"\n storage = self._world_state[address][\"storage\"]\n items = []\n array = storage.array\n while not isinstance(array, ArrayVariable):\n items.append((array.index, array.value))\n array = array.array\n return items\n\n def has_storage(self, address):\n \"\"\"\n True if something has been written to the storage.\n Note that if a slot has been erased from the storage this function may\n lose any meaning.\n \"\"\"\n storage = self._world_state[address][\"storage\"]\n array = storage.array\n while not isinstance(array, ArrayVariable):\n if isinstance(array, ArrayStore):\n return True\n array = array.array\n return False\n\n def get_storage(self, address):\n \"\"\"\n Gets the storage of an account\n\n :param address: account address\n :return: account storage\n :rtype: bytearray or ArrayProxy\n \"\"\"\n return self._world_state[address][\"storage\"]\n\n def _set_storage(self, address, storage):\n \"\"\"Private auxiliary function to replace the storage\"\"\"\n self._world_state[address][\"storage\"] = storage\n\n def get_nonce(self, address):\n if issymbolic(address):\n raise ValueError(f\"Cannot retrieve the nonce of symbolic address {address}\")\n else:\n ret = self._world_state[address][\"nonce\"]\n return ret\n\n def increase_nonce(self, address):\n new_nonce = self.get_nonce(address) + 1\n self._world_state[address][\"nonce\"] = new_nonce\n return new_nonce\n\n def set_balance(self, address, value):\n if isinstance(value, BitVec):\n value = Operators.ZEXTEND(value, 512)\n self._world_state[int(address)][\"balance\"] = value\n\n def get_balance(self, address):\n if address not in self._world_state:\n return 0\n return Operators.EXTRACT(self._world_state[address][\"balance\"], 0, 256)\n\n def account_exists(self, address):\n if address not in self._world_state:\n return 
False  # accounts default to nonexistent\n        return (\n            self.has_code(address)\n            or Operators.UGT(self.get_nonce(address), 0)\n            or Operators.UGT(self.get_balance(address), 0)\n        )\n\n    def add_to_balance(self, address, value):\n        if isinstance(value, BitVec):\n            value = Operators.ZEXTEND(value, 512)\n        self._world_state[address][\"balance\"] += value\n\n    def sub_from_balance(self, address, value):\n        if isinstance(value, BitVec):\n            value = Operators.ZEXTEND(value, 512)\n        self._world_state[address][\"balance\"] -= value\n\n    def send_funds(self, sender, recipient, value):\n        if isinstance(value, BitVec):\n            value = Operators.ZEXTEND(value, 512)\n        self._world_state[sender][\"balance\"] -= value\n        self._world_state[recipient][\"balance\"] += value\n\n    def get_code(self, address):\n        if address not in self._world_state:\n            return bytes()\n        return self._world_state[address][\"code\"]\n\n    def set_code(self, address, data):\n        assert data is not None and isinstance(data, (bytes, Array))\n        if self._world_state[address][\"code\"]:\n            raise EVMException(\"Code already set\")\n        self._world_state[address][\"code\"] = data\n\n    def has_code(self, address):\n        return len(self._world_state[address][\"code\"]) > 0\n\n    def log(self, address, topics, data):\n        self._logs.append(EVMLog(address, data, topics))\n        logger.info(\"LOG %r %r\", data, topics)\n\n    def log_storage(self, addr):\n        pass\n\n    def add_refund(self, value):\n        self._refund += value\n\n    def sub_refund(self, value):\n        self._refund -= value\n\n    def block_prevhash(self):\n        return 0\n\n    # Block header related\n    def start_block(\n        self,\n        blocknumber=4370000,\n        timestamp=1524785992,\n        difficulty=0x200,\n        gaslimit=0x7FFFFFFF,\n        coinbase=0,\n    ):\n        if coinbase not in self.accounts and coinbase != 0:\n            logger.info(\"Coinbase account does not exist\")\n            self.create_account(coinbase)\n\n        self._block_header = BlockHeader(blocknumber, timestamp, difficulty, gaslimit, coinbase)\n\n    def end_block(self, block_reward=None):\n        coinbase = self.block_coinbase()\n        if coinbase not in self:\n            raise EVMException(\"Coinbase not set\")\n\n        if block_reward is None:\n            block_reward = 2000000000000000000  # 2 eth\n        self.add_to_balance(self.block_coinbase(), block_reward)\n        # self._block_header = None\n\n    def block_coinbase(self):\n        return self._block_header.coinbase\n\n    def block_timestamp(self):\n        return self._block_header.timestamp\n\n    def block_number(self):\n        return self._block_header.blocknumber\n\n    def block_difficulty(self):\n        return self._block_header.difficulty\n\n    def block_gaslimit(self):\n        return self._block_header.gaslimit\n\n    def block_hash(self, block_number=None, force_recent=True):\n        \"\"\"\n        Calculates a block's hash\n\n        :param block_number: the block number for which to calculate the hash, defaulting to the most recent block\n        :param force_recent: if True (the default) return zero for any block that is in the future or older than 256 blocks\n        :return: the block hash\n        \"\"\"\n        if block_number is None:\n            block_number = self.block_number() - 1\n\n        # We are not maintaining an actual -block-chain- so we just generate\n        # some hashes for each virtual block\n        value = sha3.keccak_256((repr(block_number) + \"NONCE\").encode()).hexdigest()\n        value = int(value, 16)\n\n        if force_recent:\n            # 0 is left on the stack if the looked-up block number is greater than or\n            # equal to the current block number or more than 256 blocks behind the current\n            # block. 
(Current block hash is unknown from inside the tx)\n            bnmax = Operators.ITEBV(256, self.block_number() > 256, 256, self.block_number())\n            value = Operators.ITEBV(\n                256,\n                Operators.OR(block_number >= self.block_number(), block_number < bnmax),\n                0,\n                value,\n            )\n\n        return value\n\n    def tx_origin(self):\n        if self.current_human_transaction:\n            return self.current_human_transaction.caller\n\n    def tx_gasprice(self):\n        if self.current_human_transaction:\n            return self.current_human_transaction.price\n\n    @property\n    def depth(self):\n        return len(self._callstack)\n\n    def new_address(self, sender=None, nonce=None):\n        \"\"\"Create a fresh 160-bit address\"\"\"\n        if sender is not None and nonce is None:\n            nonce = self.get_nonce(sender)\n\n        new_address = self.calculate_new_address(sender, nonce)\n        if sender is None and new_address in self:\n            return self.new_address(sender, nonce)\n        return new_address\n\n    @staticmethod\n    def calculate_new_address(sender=None, nonce=None):\n        if sender is None:\n            # Just choose a random address for regular accounts:\n            new_address = random.randint(100, pow(2, 160))\n        elif issymbolic(sender):\n            # TODO(Evan Sultanik): In the interim before we come up with a better solution,\n            # consider breaking Yellow Paper compatibility and just returning\n            # a random contract address here\n            raise EthereumError(\n                \"Manticore does not yet support contracts with symbolic addresses creating new contracts\"\n            )\n        else:\n            if nonce is None:\n                # assume that the sender is a contract account, which is initialized with a nonce of 1\n                nonce = 1\n            new_address = int(sha3.keccak_256(rlp.encode([sender, nonce])).hexdigest()[24:], 16)\n        return new_address\n\n    def execute(self):\n        self._process_pending_transaction()\n        if self.current_vm is None:\n            raise TerminateState(\"Trying to execute an empty transaction\", testcase=False)\n        try:\n            self.current_vm.execute()\n        except StartTx:\n            pass\n        except EndTx as ex:\n            self._close_transaction(ex.result, ex.data, rollback=ex.is_rollback())\n\n    def create_account(self, address=None, balance=0, code=None, storage=None, nonce=None):\n        \"\"\"\n        Low level account creation. No transaction is done.\n\n        :param address: the address of the account, if known. 
If omitted, a new address will be generated as closely as possible to the Yellow Paper.\n        :param balance: the initial balance of the account in Wei\n        :param code: the runtime code of the account, if a contract\n        :param storage: storage array\n        :param nonce: the nonce for the account; contracts should have a nonce greater than or equal to 1\n        \"\"\"\n        if code is None:\n            code = bytes()\n        else:\n            if not isinstance(code, (bytes, Array)):\n                raise EthereumError(\"Wrong code type\")\n\n        # nonce defaults to the initial nonce\n        if nonce is None:\n            # As per EIP 161, contract accounts are initialized with a nonce of 1\n            nonce = 1 if len(code) > 0 else 0\n\n        if isinstance(balance, BitVec):\n            balance = Operators.ZEXTEND(balance, 512)\n\n        if address is None:\n            address = self.new_address()\n\n        if not isinstance(address, int):\n            raise EthereumError(\"You must provide an address\")\n\n        if address in self.accounts:\n            # FIXME account may have been created via selfdestruct destination\n            # or CALL and may contain some ether already, though if it was a\n            # selfdestructed address, it can not be reused\n            raise EthereumError(\"The account already exists\")\n\n        if storage is None:\n            # Uninitialized values in a storage are 0 by spec\n            storage = self.constraints.new_array(\n                index_bits=256,\n                value_bits=256,\n                name=f\"STORAGE_{address:x}\",\n                avoid_collisions=True,\n                default=0,\n            )\n        else:\n            if isinstance(storage, ArrayProxy):\n                if storage.index_bits != 256 or storage.value_bits != 256:\n                    raise TypeError(\"An ArrayProxy 256bits -> 256bits is needed\")\n            else:\n                if any((k < 0 or k >= 1 << 256 for k, v in storage.items())):\n                    raise TypeError(\n                        \"Need a dict like object that maps 256 bits keys to 256 bits values\"\n                    )\n                # Hopefully here we have a mapping from 256b to 256b\n\n        self._world_state[address] = {}\n        self._world_state[address][\"nonce\"] = nonce\n        self._world_state[address][\"balance\"] = balance\n        self._world_state[address][\"storage\"] = storage\n        self._world_state[address][\"code\"] = code\n\n        # adds the hash of the new address\n        data = binascii.unhexlify(\"{:064x}{:064x}\".format(address, 0))\n        value = sha3.keccak_256(data).hexdigest()\n        value = int(value, 16)\n        self._publish(\"on_concrete_sha3\", data, value)\n\n        return address\n\n    def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None):\n        \"\"\"\n        Initiates a CREATE transaction to create a contract account.\n        Sends a transaction to initialize the contract.\n        Do a world.run() after this to explore all _possible_ outputs\n\n        :param address: the address of the new account, if known. If omitted, a new address will be generated as closely as possible to the Yellow Paper.\n        :param balance: the initial balance of the account in Wei\n        :param init: the initialization code of the contract\n\n        The way that the Solidity compiler expects the constructor arguments to\n        be passed is by appending the arguments to the byte code produced by the\n        Solidity compiler. The arguments are formatted as defined in the Ethereum\n        ABI2. 
The arguments are then copied from the init byte array to the EVM\n memory through the CODECOPY opcode with appropriate values on the stack.\n This is done when the byte code in the init byte array is actually run\n on the network.\n \"\"\"\n self.start_transaction(\n \"CREATE\", address, price=price, data=init, caller=caller, value=balance, gas=gas\n )\n return address\n\n def transaction(self, address, price=0, data=\"\", caller=None, value=0, gas=2300):\n \"\"\"Initiates a CALL transaction on current state.\n Do a world.run() after this to explore all _possible_ outputs\n \"\"\"\n self.start_transaction(\n \"CALL\", address, price=price, data=data, caller=caller, value=value, gas=gas\n )\n\n def start_transaction(\n self, sort, address, *, price=None, data=None, caller=None, value=0, gas=2300\n ):\n \"\"\"\n Initiate a transaction.\n\n :param sort: the type of transaction. CREATE or CALL or DELEGATECALL\n :param address: the address of the account which owns the code that is executing.\n :param price: the price of gas in the transaction that originated this execution.\n :param data: the byte array that is the input data to this execution\n :param caller: the address of the account which caused the code to be executing. A 160-bit code used for identifying Accounts\n :param value: the value, in Wei, passed to this account as part of the same procedure as execution. One Ether is defined as being 10**18 Wei.\n :param bytecode: the byte array that is the machine code to be executed.\n :param gas: gas budget for this transaction.\n :param failed: True if the transaction must fail\n \"\"\"\n assert self._pending_transaction is None, \"Already started tx\"\n assert caller is not None\n self._pending_transaction = PendingTransaction(\n sort, address, price, data, caller, value, gas, None\n )\n\n def _constraint_to_accounts(self, address, include_zero=False, ty=\"both\"):\n if ty not in (\"both\", \"normal\", \"contract\"):\n raise ValueError(\"Bad account type. 
It must be `normal`, `contract` or `both`\")\n        if ty == \"both\":\n            accounts = self.accounts\n        elif ty == \"normal\":\n            accounts = self.normal_accounts\n        else:\n            assert ty == \"contract\"\n            accounts = self.contract_accounts\n\n        # Constrain it so it can range over all accounts + address0\n        cond = True\n        if accounts:\n            cond = None\n            if include_zero:\n                cond = address == 0\n\n            for known_account in accounts:\n                if cond is None:\n                    cond = address == known_account\n                else:\n                    cond = Operators.OR(address == known_account, cond)\n        return cond\n\n    def _pending_transaction_concretize_address(self):\n        sort, address, price, data, caller, value, gas, failed = self._pending_transaction\n        if issymbolic(address):\n\n            def set_address(state, solution):\n                world = state.platform\n                world._pending_transaction = (\n                    sort,\n                    solution,\n                    price,\n                    data,\n                    caller,\n                    value,\n                    gas,\n                    failed,\n                )\n\n            # Assuming this condition has at least one solution\n            cond = self._constraint_to_accounts(address, ty=\"contract\", include_zero=False)\n            self.constraints.add(cond)\n\n            raise Concretize(\n                \"Concretizing address on transaction\",\n                expression=address,\n                setstate=set_address,\n                policy=\"ALL\",\n            )\n\n    def _pending_transaction_concretize_caller(self):\n        sort, address, price, data, caller, value, gas, failed = self._pending_transaction\n        if issymbolic(caller):\n\n            def set_caller(state, solution):\n                world = state.platform\n                world._pending_transaction = (\n                    sort,\n                    address,\n                    price,\n                    data,\n                    solution,\n                    value,\n                    gas,\n                    failed,\n                )\n\n            # Constrain it so it can range over all normal accounts\n            # TODO: document and log that this is losing completeness\n            cond = self._constraint_to_accounts(caller, ty=\"normal\")\n\n            self.constraints.add(cond)\n            raise Concretize(\n                \"Concretizing caller on transaction\",\n                expression=caller,\n                setstate=set_caller,\n                policy=\"ALL\",\n            )\n\n    def _pending_transaction_failed(self):\n        sort, address, price, data, caller, value, gas, failed = self._pending_transaction\n\n        # Initially the failed flag is not set. For now we need the caller to be\n        # concrete so the caller balance is easy to get. 
Initialize failed here\n        if failed is None:\n            # Check depth\n            failed = self.depth >= 1024\n            # Fork on enough funds for value and gas\n            if not failed:\n                aux_src_balance = Operators.ZEXTEND(self.get_balance(caller), 512)\n                aux_value = Operators.ZEXTEND(value, 512)\n                enough_balance = Operators.UGE(aux_src_balance, aux_value)\n                if self.depth == 0:\n                    # take the gas from the balance\n                    aux_price = Operators.ZEXTEND(price, 512)\n                    aux_gas = Operators.ZEXTEND(gas, 512)\n                    aux_fee = aux_price * aux_gas\n                    # Only for a human tx, debit the fee\n                    enough_balance = Operators.AND(\n                        enough_balance, Operators.UGE(aux_src_balance - aux_value, aux_fee)\n                    )\n                failed = Operators.NOT(enough_balance)\n            self._pending_transaction = sort, address, price, data, caller, value, gas, failed\n\n        if issymbolic(failed):\n            # optimistic/pessimistic is inverted since the expression represents failure\n            policy = {\"optimistic\": \"PESSIMISTIC\", \"pessimistic\": \"OPTIMISTIC\"}.get(\n                consts.txfail, \"ALL\"\n            )\n\n            def set_failed(state, solution):\n                world = state.platform\n                world._pending_transaction = (\n                    sort,\n                    address,\n                    price,\n                    data,\n                    caller,\n                    value,\n                    gas,\n                    solution,\n                )\n\n            raise Concretize(\n                \"Concretizing tx-fail on transaction\",\n                expression=failed,\n                setstate=set_failed,\n                policy=policy,\n            )\n\n        if self.depth != 0:\n            price = 0\n        aux_price = Operators.ZEXTEND(price, 512)\n        aux_gas = Operators.ZEXTEND(gas, 512)\n        tx_fee = Operators.ITEBV(512, self.depth == 0, aux_price * aux_gas, 0)\n        aux_src_balance = Operators.ZEXTEND(self.get_balance(caller), 512)\n        aux_value = Operators.ZEXTEND(value, 512)\n        enough_balance = Operators.UGE(aux_src_balance, aux_value + tx_fee)\n        return failed\n\n    def _process_pending_transaction(self):\n        # Nothing to do here if no pending transactions\n        if self._pending_transaction is None:\n            return\n        sort, address, price, data, caller, value, gas, failed = self._pending_transaction\n        # caller\n        self._pending_transaction_concretize_caller()\n        # to/address\n        self._pending_transaction_concretize_address()\n        # check there is enough balance for the value\n        failed = self._pending_transaction_failed()\n\n        # done concretizing stuff\n        self._pending_transaction = None\n\n        if not failed:\n            self._open_transaction(sort, address, price, data, caller, value, gas=gas)\n        else:\n            tx = Transaction(\n                sort, address, price, data, caller, value, depth=self.depth + 1, gas=gas\n            )\n            tx.set_result(\"TXERROR\")\n            self._transactions.append(tx)\n\n    def dump(self, stream, state, mevm, message):\n        from ..ethereum.manticore import calculate_coverage, flagged\n\n        blockchain = state.platform\n        last_tx = blockchain.last_transaction\n\n        stream.write(\"Message: %s\\n\" % message)\n        stream.write(\"Last exception: %s\\n\" % state.context.get(\"last_exception\", \"None\"))\n\n        if last_tx:\n            at_runtime = last_tx.sort != \"CREATE\"\n            address, offset, at_init = state.context.get(\"evm.trace\", ((None, None, None),))[-1]\n            assert last_tx.result is not None or at_runtime != at_init\n\n            # Last instruction if last tx was valid\n            if str(state.context[\"last_exception\"]) != \"TXERROR\":\n                metadata = mevm.get_metadata(blockchain.last_transaction.address)\n                if metadata is not None and address is not None:\n                    stream.write(\"Last instruction at contract %x offset %x\\n\" % (address, offset))\n                    source_code_snippet = metadata.get_source_for(offset, at_runtime)\n                    if source_code_snippet:\n                        stream.write(\"    \".join(source_code_snippet.splitlines(True)))\n                    stream.write(\"\\n\")\n\n        # Accounts summary\n        assert state.can_be_true(True)\n        is_something_symbolic = False\n        
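# each account is summarized below; symbolic addresses, balances and\n        # storage slots are concretized with solve_one() to one representative\n        # solution, and flagged so the reader can tell they were symbolic\n        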
stream.write(\"%d accounts.\\n\" % len(blockchain.accounts))\n for account_address in blockchain.accounts:\n is_account_address_symbolic = issymbolic(account_address)\n account_address = state.solve_one(account_address, constrain=True)\n\n stream.write(\"* %s::\\n\" % mevm.account_name(account_address))\n stream.write(\n \"Address: 0x%x %s\\n\" % (account_address, flagged(is_account_address_symbolic))\n )\n balance = blockchain.get_balance(account_address)\n\n if not consts.ignore_balance:\n is_balance_symbolic = issymbolic(balance)\n is_something_symbolic = is_something_symbolic or is_balance_symbolic\n balance = state.solve_one(balance, constrain=True)\n stream.write(\"Balance: %d %s\\n\" % (balance, flagged(is_balance_symbolic)))\n\n storage = blockchain.get_storage(account_address)\n concrete_indexes = []\n if len(storage.written) > 0:\n concrete_indexes = state.solve_one_n_batched(storage.written, constrain=True)\n\n concrete_values = []\n if len(concrete_indexes) > 0:\n concrete_values = state.solve_one_n_batched(concrete_indexes, constrain=True)\n\n assert len(concrete_indexes) == len(concrete_values)\n for index, value in zip(concrete_indexes, concrete_values):\n stream.write(f\"storage[{index:x}] = {value:x}\\n\")\n\n storage = blockchain.get_storage(account_address)\n stream.write(\"Storage: %s\\n\" % translate_to_smtlib(storage, use_bindings=False))\n\n if consts.sha3 is consts.sha3.concretize:\n all_used_indexes = []\n with state.constraints as temp_cs:\n # make a free symbolic idex that could address any storage slot\n index = temp_cs.new_bitvec(256)\n # get the storage for account_address\n storage = blockchain.get_storage(account_address)\n # we are interested only in used slots\n # temp_cs.add(storage.get(index) != 0)\n temp_cs.add(storage.is_known(index))\n # Query the solver to get all storage indexes with used slots\n self._publish(\"will_solve\", temp_cs, index, \"get_all_values\")\n all_used_indexes = SelectedSolver.instance().get_all_values(temp_cs, index)\n self._publish(\"did_solve\", temp_cs, index, \"get_all_values\", all_used_indexes)\n\n if all_used_indexes:\n stream.write(\"Storage:\\n\")\n for i in all_used_indexes:\n value = storage.get(i)\n is_storage_symbolic = issymbolic(value)\n stream.write(\n \"storage[%x] = %x %s\\n\"\n % (\n state.solve_one(i, constrain=True),\n state.solve_one(value, constrain=True),\n flagged(is_storage_symbolic),\n )\n )\n\n runtime_code = state.solve_one(blockchain.get_code(account_address))\n if runtime_code:\n stream.write(\"Code:\\n\")\n fcode = io.BytesIO(runtime_code)\n for chunk in iter(lambda: fcode.read(32), b\"\"):\n stream.write(\"\\t%s\\n\" % binascii.hexlify(chunk))\n runtime_trace = set(\n (\n pc\n for contract, pc, at_init in state.context[\"evm.trace\"]\n if address == contract and not at_init\n )\n )\n stream.write(\n \"Coverage %d%% (on this state)\\n\"\n % calculate_coverage(runtime_code, runtime_trace)\n ) # coverage % for address in this account/state\n stream.write(\"\\n\")\n return is_something_symbolic\n","repo_name":"hhamud/ape-manticore","sub_path":"ape_manticore/manticore/platforms/evm/evmworld.py","file_name":"evmworld.py","file_ext":"py","file_size_in_byte":43160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33781602659","text":"# importação de biblioteca:\nfrom Bio.Seq import Seq\n\n#input\nentrada= str(input('insira a sequência de DNA:'))\nDNA = Seq(entrada)\n\n#Transcrição\nmRNA = DNA.transcribe()\n\n# Tradução\nptn = 
mRNA.translate()\n\n# print the results\nprint('mRNA:', mRNA)\nprint('ptn:', ptn)","repo_name":"anabcoelho/ProgBio","sub_path":"TAC3/sample/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71338708053","text":"import os\nimport torch\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\n\nclass NumpyTupleDataset(Dataset):\n    \"\"\"Dataset of a tuple of datasets.\n\n    It combines multiple datasets into one dataset. Each example is represented\n    by a tuple whose ``i``-th item corresponds to the i-th dataset.\n    And each ``i``-th dataset is expected to be an instance of numpy.ndarray.\n\n    Args:\n        datasets: Underlying datasets. The ``i``-th one is used for the\n            ``i``-th item of each example. All datasets must have the same\n            length.\n\n    \"\"\"\n\n    def __init__(self, datasets, transform=None):\n        # Load dataset\n        # if not os.path.exists(filepath):\n        #     raise ValueError('Invalid filepath for dataset')\n        # load_data = np.load(filepath)\n        # datasets = []\n        # i = 0\n        # while True:\n        #     key = 'arr_{}'.format(i)\n        #     if key in load_data.keys():\n        #         datasets.append(load_data[key])  # [(133885, 9), (133885,4,9,9), (133885, 15)]\n        #         i += 1\n        #     else:\n        #         break\n        if not datasets:\n            raise ValueError('no datasets are given')\n        length = len(datasets[0])  # 133885\n        for i, dataset in enumerate(datasets):\n            if len(dataset) != length:\n                raise ValueError(\n                    'dataset of the index {} has a wrong length'.format(i))\n        # Initialization\n        self._datasets = datasets\n        self._length = length\n        # self._features_indexer = NumpyTupleDatasetFeatureIndexer(self)\n        # self.filepath = filepath\n        self.transform = transform\n\n    def __len__(self):\n        return self._length\n\n    def __getitem__(self, index):\n        batches = [dataset[index] for dataset in self._datasets]\n        if isinstance(index, (slice, list, np.ndarray)):\n            length = len(batches[0])\n            batches = [tuple([batch[i] for batch in batches])\n                       for i in range(length)]  # six.moves.range(length)]\n        else:\n            batches = tuple(batches)\n\n        if self.transform:\n            batches = self.transform(batches)\n        return batches\n\n    def get_datasets(self):\n        return self._datasets\n\n\n    @classmethod\n    def save(cls, filepath, numpy_tuple_dataset):\n        \"\"\"save the dataset to filepath in npz format\n\n        Args:\n            filepath (str): filepath to save dataset. 
It is recommended to end\n                with '.npz' extension.\n            numpy_tuple_dataset (NumpyTupleDataset): dataset instance\n\n        \"\"\"\n        if not isinstance(numpy_tuple_dataset, NumpyTupleDataset):\n            raise TypeError('numpy_tuple_dataset is not instance of '\n                            'NumpyTupleDataset, got {}'\n                            .format(type(numpy_tuple_dataset)))\n        np.savez(filepath, *numpy_tuple_dataset._datasets)\n        print('Save {} done.'.format(filepath))\n\n    @classmethod\n    def load(cls, filepath, transform=None):\n        print('Loading file {}'.format(filepath))\n        if not os.path.exists(filepath):\n            raise ValueError('Invalid filepath {} for dataset'.format(filepath))\n        load_data = np.load(filepath)\n        result = []\n        i = 0\n        while True:\n            key = 'arr_{}'.format(i)\n            if key in load_data.keys():\n                result.append(load_data[key])\n                i += 1\n            else:\n                break\n        return cls(result, transform)\n\n","repo_name":"calvin-zcx/moflow","sub_path":"data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"67"} +{"seq_id":"17782701225","text":"import asyncio\nimport json\nimport logging\nimport os\nimport re\nimport sqlite3\n\nimport __main__\nimport discord\nfrom discord.ext import commands\n\nfrom newblue.EventRunner import EventRunner\nfrom newblue.lib.mudae.mudae_manager import MudaeManager\n\n\nclass BlueBot(commands.Bot):\n\n    def __init__(self, command_prefix=None):\n        logging.basicConfig(filename='GoldLog.log', level=logging.INFO)\n        self.mudae_manager = MudaeManager(self, sqlite3.connect('mudae.db'))\n        self.event_runner = EventRunner.EventRunner(self)\n        self.dir = os.path.dirname(__file__)\n        self.load_config()\n        super().__init__(command_prefix=command_prefix)\n\n        self.on_message_commands = []\n\n    async def on_ready(self):\n        print(\"ready\")\n        self.load_base_commands()\n        # asyncio.ensure_future(self.process_old_messages())\n        for com in self.commands:\n            print(com)\n\n    async def on_message(self, message):\n        self.mudae_manager.determine_message_type(message)\n        if self.check_gigguk_content(message.content) and not message.author.bot:\n            print('true')\n            await message.channel.send(\"Fuck off with your gigguk\")\n        await self.process_commands(message)\n\n    def load_config(self):\n        print('/'.join(os.path.abspath(__main__.__file__).split(r'/')[:-1]))\n        with open(f\"{self.dir}\\\\config.json\", \"r\") as file:\n            self.cfg = json.loads(file.read())\n\n    def load_base_commands(self):\n        # Load each cog from ./Commands/BaseCommands\n        for cog_file in [file[:-3] for file in\n                         os.listdir(f'{self.dir}\\\\newblue\\\\Commands\\\\BaseCommands'.replace(\"\\\\\", \"/\")) if\n                         file not in ['__init__.py', '__pycache__']]:\n            self.load_extension(f\"newblue.Commands.BaseCommands.{cog_file}\")\n            logging.info(\"successfully loaded extension {}\".format(cog_file))\n","repo_name":"Froogss/gold","sub_path":"BlueBot.py","file_name":"BlueBot.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43617057797","text":"from uuid import uuid4\n\nfrom flask import request, render_template, redirect\nfrom models.message import Message\nfrom aengine_flask.screen import Screen\nfrom helpers import get_user\nfrom settings import db\n\n\nclass ChatScreen(Screen):\n    options = {\n        \"methods\": [\"POST\", \"GET\"]\n    }\n\n    def main(self):\n        user = get_user(request)\n        if request.method == \"POST\":\n            form = request.form\n            if form:\n                message = form.get('message')\n                
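# defensive sketch (not in the original): form.get may return None for\n                # a missing field, so normalize before the emptiness check below\n                message = (message or '').strip()\n                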
if message != \"\":\n m = Message(id=uuid4().hex, from_id=user.id, text=message)\n db.session.add(m)\n db.session.commit()\n return redirect(\"/chat\")\n if user:\n return render_template(\"chat.html\", data=Message.query.all(), user=user)\n return redirect(\"/login\")\n","repo_name":"aaalllexxx/Arturio","sub_path":"screens/ChatScreen.py","file_name":"ChatScreen.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33590276405","text":"from .models import Thread\nfrom django.contrib.messages.api import get_messages\nfrom django.contrib.messages.constants import DEFAULT_LEVELS\n\ndef messages(request):\n \"\"\"\n Return a lazy 'messages' context variable as well as\n 'DEFAULT_MESSAGE_LEVELS'.\n \"\"\"\n return {\n \"messages\": get_messages(request),\n \"DEFAULT_MESSAGE_LEVELS\": DEFAULT_LEVELS,\n }\n\ndef user_messages(request):\n c = {}\n if request.user.is_authenticated:\n c[\"inbox_threads\"] = Thread.inbox(request.user)\n c[\"unread_threads\"] = Thread.unread(request.user)\n return c\n","repo_name":"ithoanghai/CmsEcommerce","sub_path":"SPS/creme/message/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33378635349","text":"\"\"\"Offer common helper functions for the CLI.\"\"\"\nimport asyncio\nimport logging\nimport time\n\nimport click\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef common_gateway_options(func):\n \"\"\"Supply common gateway options.\"\"\"\n func = click.option(\n \"-v\",\n \"--protocol_version\",\n help=\"Protocol version of the gateway.\",\n default=\"2.2\",\n show_default=True,\n )(func)\n func = click.option(\n \"-s\", \"--persistence\", help=\"Turn on persistence.\", is_flag=True\n )(func)\n return func\n\n\ndef handle_msg(msg):\n \"\"\"Handle mysensors updates.\"\"\"\n _LOGGER.info(\"Received message: %s\", msg.encode().strip())\n\n\ndef run_gateway(gateway):\n \"\"\"Run a sync gateway.\"\"\"\n gateway.start_persistence()\n gateway.start()\n try:\n while True:\n time.sleep(0.5)\n except KeyboardInterrupt:\n gateway.stop()\n\n\ndef run_async_gateway(gateway_factory):\n \"\"\"Run an async gateway.\"\"\"\n try:\n asyncio.run(handle_async_gateway(gateway_factory))\n except KeyboardInterrupt:\n pass\n\n\nasync def handle_async_gateway(gateway_factory):\n \"\"\"Handle gateway.\"\"\"\n gateway, stop_task = await gateway_factory()\n await gateway.start_persistence()\n await gateway.start()\n\n try:\n while True:\n await asyncio.sleep(0.5)\n except asyncio.CancelledError:\n await gateway.stop()\n if stop_task:\n await stop_task()\n raise\n","repo_name":"theolind/pymysensors","sub_path":"mysensors/cli/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"67"} +{"seq_id":"74022009174","text":"#!/bin/python3\n\nimport sys, os, pprint\nimport bumps, stats, draw\n\nstate = {\n 'sets' : [],\n 'highlight' : None,\n 'readstdin' : False,\n 'web' : None,\n 'output' : None,\n 'stepon' : None,\n 'svg_config' : {'scale' : 16,\n 'sep' : 32, #scale*2\n 'dash' : 6,\n 'colours' : False},\n 'stats' : None,\n}\n\ndef join_stats(event1, event2):\n diffs = {}\n\n for num2 in range(len(event2['crews'])):\n found = False\n for num1 in range(len(event1['crews'])):\n if event2['crews'][num2]['start'] == 
event1['crews'][num1]['end']:\n found = True\n d = num1 - num2\n if d < 0:\n d = -d\n\n d = int(d / (int(num2/10)+1))\n\n if d not in diffs:\n diffs[d] = 0\n diffs[d] += 1\n break\n\n total = 0\n count = 0\n for d in diffs:\n count += diffs[d]\n total += d * diffs[d]\n\n if count > 10 and total / count > 0.5:\n print(\"Warning: %s->%s: %.3f %s\" % (event1['year'], event2['year'], total / count, diffs))\n\ndef write_web(state):\n series = {}\n for s in state['sets']:\n if s['short'] not in series:\n series[s['short']] = {'all' : [], 'split' : []}\n if s['gender'] not in series[s['short']]:\n series[s['short']][s['gender']] = []\n\n year = s['year']\n p = year.split(\" \")\n if len(p) > 1:\n year = p[0]\n if year not in series[s['short']]['split']:\n series[s['short']]['split'].append(year)\n\n if year not in series[s['short']][s['gender']]:\n series[s['short']][s['gender']].append(year)\n if year not in series[s['short']]['all']:\n series[s['short']]['all'].append(year)\n\n fp = open(state['web'], 'w')\n fp.write(\"# results currently available\\n\\n\")\n fp.write(\"results = {\\n\")\n for s in sorted(series.keys()):\n fp.write(\" '%s' : {\\n\" % s)\n for g in sorted(series[s].keys()):\n fp.write(\" '%s' : %s,\\n\" % (g, series[s][g]))\n fp.write(\" },\\n\")\n fp.write(\"}\\n\")\n fp.close()\n\ncmd = sys.argv.pop(0)\nif len(sys.argv) == 0:\n print(\"%s Usage notes\" % cmd)\n print(\" -c : Enables per-club colours in results lines\")\n print(\" -r : Enables reading results from stdin\")\n print(\" -h : Enables highlights for crews with names starting with \")\n print(\" -w : Writes svg output to \")\n print(\" -s : Writes template for next year into \")\n print(\" -stats : Output statistics\")\n print(\" -web : Write python summary of all results files into \")\n print(\" Any additional arguments are treated as files containing results to be read in\")\n sys.exit()\n\nwhile len(sys.argv) > 0:\n arg = sys.argv.pop(0)\n \n if arg == '-c':\n state['svg_config']['colours'] = True\n elif arg == '-r':\n state['readstdin'] = True\n elif arg == '-h':\n state['highlight'] = sys.argv.pop(0)\n elif arg == '-w':\n state['output'] = sys.argv.pop(0)\n elif arg == '-s':\n state['stepon'] = sys.argv.pop(0)\n elif arg == '-stats':\n state['stats'] = {}\n elif arg == '-web':\n state['web'] = sys.argv.pop(0)\n else:\n sys.argv.insert(0, arg)\n break\n\nwhile len(sys.argv) > 0:\n s = bumps.read_file(sys.argv.pop(0), state['highlight'])\n if s is not None:\n state['sets'].append(s)\n\nif state['readstdin']:\n s = bumps.read_file(None, state['highlight'])\n if s is not None:\n state['sets'].append(s)\n\nfor s in state['sets']:\n bumps.process_results(s)\n if state['stats'] is not None:\n stats.get_stats(s, state['stats'])\n\n\nif state['web'] is not None:\n write_web(state)\nelif len(state['sets']) == 1:\n if state['stepon'] is None:\n draw.write_svg(state['output'], state['sets'][0], state['svg_config'])\n else:\n if os.path.exists(state['stepon']):\n print(\"File '%s' already exists, not overwriting\" % state['stepon'])\n else:\n bumps.step_on(state['sets'][0])\n bumps.write_file(state['sets'][0], state['stepon'])\nelif len(state['sets']) == 2 and state['sets'][0]['set'] == state['sets'][1]['set'] and state['sets'][0]['year'] == state['sets'][1]['year'] and state['sets'][0]['gender'] != state['sets'][1]['gender']:\n draw.write_pair(state['output'], state['sets'], state['svg_config'])\nelif len(state['sets']) > 1:\n for i in range(len(state['sets'])-1):\n join_stats(state['sets'][i], state['sets'][i+1])\n 
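# join_stats only logs warnings about implausible year-to-year crew\n    # movement; all result sets are still rendered into one combined chart\n    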
draw.write_multi_svg(state['output'], state['sets'], state['svg_config'])\n\nif state['stats'] is not None:\n stats.html_stats(state['stats'])\n","repo_name":"mcshane-fire/bumps","sub_path":"src/harness.py","file_name":"harness.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12568919023","text":"# prev, cur = 0, 1\n# total = 0\n# while True:\n# prev, cur = cur, prev + cur\n# if cur >= 4000000:\n# break\n# if cur % 2 == 0:\n# total += cur\n# print(total)\n\nlimit = 4000000\ntotal = 0\na = 1\nb = 1\nwhile b < limit:\n\n if b % 2 == 0:\n total += b\n h = a + b\n a, b = b, h\n\nprint(total)\n","repo_name":"Kani712/binder-framework","sub_path":"Day1/ProjectEluer/Fibonnaci_even_sum.py","file_name":"Fibonnaci_even_sum.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"20164951919","text":"import random\nfrom typing import List, Optional, Union\n\nimport hypothesis.strategies as st\nfrom hypothesis import given\nfrom hypothesis.extra.numpy import arrays\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, issparse\n\nfrom anndata import AnnData\n\n\n# TODO: Add possibility to generate adata object with floats as counts\n# TODO: Add possibility to generate different columns with different data types in\n# adata.obs and adata.var\n@st.composite\ndef get_adata(\n draw,\n n_obs: Optional[int] = None,\n n_vars: Optional[int] = None,\n min_obs: Optional[int] = 1,\n max_obs: Optional[int] = 100,\n min_vars: Optional[int] = 1,\n max_vars: Optional[int] = 100,\n layer_keys: Optional[Union[List, str]] = None,\n min_layers: Optional[int] = 2,\n max_layers: int = 2,\n obsm_keys: Optional[Union[List, str]] = None,\n min_obsm: Optional[int] = 2,\n max_obsm: Optional[int] = 2,\n varm_keys: Optional[Union[List, str]] = None,\n min_varm: Optional[int] = 2,\n max_varm: Optional[int] = 2,\n obs_col_names=None,\n min_obs_cols=2,\n max_obs_cols=2,\n var_col_names=None,\n min_var_cols=2,\n max_var_cols=2,\n sparse_entries: bool = False,\n) -> AnnData:\n \"\"\"Generate an AnnData object.\n\n The largest possible value of a numerical entry is `1e2`.\n\n Arguments:\n ---------\n n_obs\n Number of observations. If set to `None`, a random integer between `1` and\n `max_obs` will be drawn. Defaults to `None`.\n n_vars\n Number of variables. If set to `None`, a random integer between `1` and\n `max_vars` will be drawn. Defaults to `None`.\n min_obs\n Minimum number of observations. If set to `None`, there is no lower limit.\n Defaults to `1`.\n max_obs\n Maximum number of observations. If set to `None`, there is no upper limit.\n Defaults to `100`.\n min_vars\n Minimum number of variables. If set to `None`, there is no lower limit.\n Defaults to `1`.\n max_vars\n Maximum number of variables. If set to `None`, there is no upper limit.\n Defaults to `100`.\n layer_keys\n Names of layers. If set to `None`, layers will be named at random. Defaults\n to `None`.\n min_layers\n Minimum number of layers. Is set to the number of provided layer names if\n `layer_keys` is not `None`. Defaults to `2`.\n max_layers\n Maximum number of layers. Is set to the number of provided layer\n names if `layer_keys` is not `None`. Defaults to `2`.\n obsm_keys\n Names of multi-dimensional observations annotation. If set to `None`, names\n will be generated at random. 
Defaults to `None`.\n min_obsm\n Minimum number of multi-dimensional observations annotation. Is set to the\n number of keys if `obsm_keys` is not `None`. Defaults to `2`.\n max_obsm\n Maximum number of multi-dimensional observations annotation. Is set to the\n number of keys if `obsm_keys` is not `None`. Defaults to `2`.\n varm_keys\n Names of multi-dimensional variables annotation. If set to `None`, names\n will be generated at random. Defaults to `None`.\n min_varm\n Minimum number of multi-dimensional variables annotation. Is set to the\n number of keys if `varm_keys` is not `None`. Defaults to `2`.\n max_varm\n Maximum number of multi-dimensional variables annotation. Is set to the\n number of keys if `varm_keys` is not `None`. Defaults to `2`.\n obs_col_names\n Names of columns in `adata.obs`. If set to `None`, columns will be named at\n random. Defaults to `None`.\n min_obs_cols\n Minimum number of columns in `adata.obs`. Is set to the number of provided\n column names if `obs_col_names` is not `None`. Defaults to `2`.\n max_obs_cols\n Maximum number of columns in `adata.obs`. Is set to the number of provided\n column names if `obs_col_names` is not `None`. Defaults to `2`.\n var_col_names\n Names of columns in `adata.var`. If set to `None`, columns will be named at\n random. Defaults to `None`.\n min_var_cols\n Minimum number of columns in `adata.var`. Is set to the number of provided\n column names if `var_col_names` is not `None`. Defaults to `2`.\n max_var_cols\n Maximum number of columns in `adata.var`. Is set to the number of provided\n column names if `var_col_names` is not `None`. Defaults to `2`.\n sparse_entries\n Whether or not to make AnnData entries sparse.\n\n Returns:\n -------\n AnnData\n Generated :class:`~anndata.AnnData` object.\n \"\"\"\n\n if n_obs is None:\n n_obs = draw(st.integers(min_value=min_obs, max_value=max_obs))\n if n_vars is None:\n n_vars = draw(st.integers(min_value=min_vars, max_value=max_vars))\n\n if isinstance(layer_keys, str):\n layer_keys = [layer_keys]\n if isinstance(obsm_keys, str):\n obsm_keys = [obsm_keys]\n if isinstance(varm_keys, str):\n varm_keys = [varm_keys]\n if isinstance(obs_col_names, str):\n obs_col_names = [obs_col_names]\n if isinstance(var_col_names, str):\n var_col_names = [var_col_names]\n\n if layer_keys is not None:\n min_layers = len(layer_keys)\n max_layers = len(layer_keys)\n if obsm_keys is not None:\n min_obsm = len(obsm_keys)\n max_obsm = len(obsm_keys)\n if varm_keys is not None:\n min_varm = len(varm_keys)\n max_varm = len(varm_keys)\n if obs_col_names is not None:\n min_obs_cols = len(obs_col_names)\n max_obs_cols = len(obs_col_names)\n if var_col_names is not None:\n min_var_cols = len(var_col_names)\n max_var_cols = len(var_col_names)\n\n X = draw(\n arrays(\n dtype=int,\n elements=st.integers(min_value=0, max_value=1e2),\n shape=(n_obs, n_vars),\n )\n )\n\n layers = draw(\n st.dictionaries(\n st.text(\n st.characters(\n blacklist_categories=(\"Cs\",),\n blacklist_characters=(\"X\"),\n ),\n min_size=1,\n )\n if layer_keys is None\n else st.sampled_from(layer_keys),\n arrays(\n dtype=int,\n elements=st.integers(min_value=0, max_value=1e2),\n shape=(n_obs, n_vars),\n ),\n min_size=min_layers,\n max_size=max_layers,\n )\n )\n\n obsm = draw(\n st.dictionaries(\n st.text(\n st.characters(\n blacklist_categories=(\"Cs\",),\n blacklist_characters=(\"X\"),\n ),\n min_size=1,\n )\n if obsm_keys is None\n else st.sampled_from(obsm_keys),\n arrays(\n dtype=int,\n elements=st.integers(min_value=0, max_value=1e2),\n shape=st.tuples(\n st.integers(min_value=n_obs, 
max_value=n_obs),\n st.integers(min_value=min_vars, max_value=max_vars),\n ),\n ),\n min_size=min_obsm,\n max_size=max_obsm,\n )\n )\n\n varm = draw(\n st.dictionaries(\n st.text(\n st.characters(\n blacklist_categories=(\"Cs\",),\n blacklist_characters=(\"X\"),\n ),\n min_size=1,\n )\n if varm_keys is None\n else st.sampled_from(varm_keys),\n arrays(\n dtype=int,\n elements=st.integers(min_value=0, max_value=1e2),\n shape=st.tuples(\n st.integers(min_value=n_vars, max_value=n_vars),\n st.integers(min_value=min_obs, max_value=max_obs),\n ),\n ),\n min_size=min_varm,\n max_size=max_varm,\n )\n )\n\n obs = draw(\n st.dictionaries(\n st.text(min_size=1)\n if obs_col_names is None\n else st.sampled_from(obs_col_names),\n st.lists(\n elements=st.integers(min_value=0, max_value=1e2),\n min_size=n_obs,\n max_size=n_obs,\n ),\n min_size=min_obs_cols,\n max_size=max_obs_cols,\n )\n )\n\n var = draw(\n st.dictionaries(\n st.text(min_size=1)\n if var_col_names is None\n else st.sampled_from(var_col_names),\n st.lists(\n elements=st.integers(min_value=0, max_value=1e2),\n min_size=n_vars,\n max_size=n_vars,\n ),\n min_size=min_var_cols,\n max_size=max_var_cols,\n )\n )\n\n # Make keys for layers and obsm unique\n for key in set(layers.keys()).intersection(obsm.keys()):\n layers[f\"{key}_\"] = layers.pop(key)\n\n if sparse_entries:\n layers = {key: csr_matrix(val) for key, val in layers.items()}\n obsm = {key: csr_matrix(val) for key, val in obsm.items()}\n varm = {key: csr_matrix(val) for key, val in varm.items()}\n return AnnData(\n X=csr_matrix(X), layers=layers, obsm=obsm, varm=varm, obs=obs, var=var\n )\n else:\n return AnnData(X=X, layers=layers, obsm=obsm, varm=varm, obs=obs, var=var)\n\n\nclass TestAdataGeneration:\n @given(adata=get_adata(max_obs=5, max_vars=5))\n def test_default_adata_generation(self, adata: AnnData):\n assert type(adata) is AnnData\n assert \"X\" not in adata.layers\n assert \"X\" not in adata.obsm\n assert \"X\" not in adata.varm\n\n @given(adata=get_adata(max_obs=5, max_vars=5, sparse_entries=True))\n def test_sparse_adata_generation(self, adata: AnnData):\n assert type(adata) is AnnData\n assert issparse(adata.X)\n assert np.all([issparse(adata.layers[layer]) for layer in adata.layers])\n assert np.all([issparse(adata.obsm[name]) for name in adata.obsm])\n assert np.all([issparse(adata.varm[name]) for name in adata.varm])\n\n @given(\n adata=get_adata(\n n_obs=2,\n n_vars=2,\n layer_keys=[\"unspliced\", \"spliced\"],\n obsm_keys=\"X_umap\",\n varm_keys=[\"varm_entry_1\", \"varm_entry_2\"],\n obs_col_names=[\"louvain\", \"donor\", \"day\"],\n var_col_names=[\"alpha\", \"beta\", \"gamma\"],\n )\n )\n def test_custom_adata_generation(self, adata: AnnData):\n assert adata.X.shape == (2, 2)\n assert len(adata.layers) == 2\n assert len(adata.obsm) == 1\n assert len(adata.varm) == 2\n assert set(adata.layers.keys()) == {\"unspliced\", \"spliced\"}\n assert set(adata.obsm.keys()) == {\"X_umap\"}\n assert set(adata.varm.keys()) == {\"varm_entry_1\", \"varm_entry_2\"}\n assert set(adata.obs.columns) == {\"louvain\", \"donor\", \"day\"}\n assert set(adata.var.columns) == {\"alpha\", \"beta\", \"gamma\"}\n\n @given(adata=get_adata(max_obs=5, max_vars=5, min_obs_cols=0, max_obs_cols=10))\n def test_setting_number_obs_columns(self, adata):\n assert len(adata.obs.columns) >= 0\n assert len(adata.obs.columns) <= 10\n\n @given(adata=get_adata(max_obs=5, max_vars=5, min_var_cols=0, max_var_cols=10))\n def test_setting_number_var_columns(self, adata):\n assert len(adata.var.columns) >= 0\n 
assert len(adata.var.columns) <= 10\n\n\nclass TestBase:\n def _subset_modalities(\n self,\n adata: AnnData,\n n_modalities: int,\n from_layers: bool = True,\n from_obsm: bool = True,\n ):\n \"\"\"Subset modalities of an AnnData object.\"\"\"\n\n modalities = [\"X\"]\n if from_layers:\n modalities += list(adata.layers.keys())\n if from_obsm:\n modalities += list(adata.obsm.keys())\n return random.sample(modalities, min(len(modalities), n_modalities))\n\n def _subset_columns(\n self,\n adata: AnnData,\n n_cols: int,\n from_obs: bool = True,\n from_var: bool = True,\n ):\n \"\"\"Subset columns of an AnnData object in `obs` and `var` slots.\"\"\"\n\n columns = []\n if from_obs:\n columns += list(adata.obs.columns)\n if from_var:\n columns += list(adata.var.columns)\n return random.sample(columns, min(len(columns), n_cols))\n\n def _convert_to_float(self, adata: AnnData):\n \"\"\"Convert AnnData entries in `layer` and `obsm` into floats.\"\"\"\n\n for layer in adata.layers:\n adata.layers[layer] = adata.layers[layer].astype(float)\n for obs in adata.obsm:\n adata.obsm[obs] = adata.obsm[obs].astype(float)\n","repo_name":"theislab/scvelo","sub_path":"tests/core/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":12859,"program_lang":"python","lang":"en","doc_type":"code","stars":363,"dataset":"github-code","pt":"67"} +{"seq_id":"1113045680","text":"#--------------------------------------------------------------------\r\n# Paper: NEORL: A Framework for NeuroEvolution Optimization with RL\r\n# Section: Script for supplementary materials section 6\r\n# Contact: Majdi I. Radaideh (radaideh@mit.edu)\r\n# Last update: 9/10/2021\r\n#---------------------------------------------------------------------\r\n\r\n#---------------------------------\r\n# Import packages\r\n#---------------------------------\r\nimport numpy as np\r\nnp.random.seed(50)\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\nfrom neorl.tune import BAYESTUNE\r\nfrom neorl import ES, JAYA, WOA, GWO, HHO\r\n\r\n#**********************************************************\r\n# Part I: Original Problem\r\n#**********************************************************\r\n#Define the fitness function (for the welded beam)\r\ndef BEAM(x):\r\n\r\n y = 1.10471*x[0]**2*x[1]+0.04811*x[2]*x[3]*(14.0+x[1])\r\n\r\n # parameters\r\n P = 6000; L = 14; E = 30e+6; G = 12e+6;\r\n t_max = 13600; s_max = 30000; d_max = 0.25;\r\n\r\n M = P*(L+x[1]/2)\r\n R = sqrt(0.25*(x[1]**2+(x[0]+x[2])**2))\r\n J = 2*(sqrt(2)*x[0]*x[1]*(x[1]**2/12+0.25*(x[0]+x[2])**2));\r\n P_c = (4.013*E/(6*L**2))*x[2]*x[3]**3*(1-0.25*x[2]*sqrt(E/G)/L);\r\n t1 = P/(sqrt(2)*x[0]*x[1]); t2 = M*R/J;\r\n t = sqrt(t1**2+t1*t2*x[1]/R+t2**2);\r\n s = 6*P*L/(x[3]*x[2]**2)\r\n d = 4*P*L**3/(E*x[3]*x[2]**3);\r\n # Constraints\r\n g1 = t-t_max; #done\r\n g2 = s-s_max; #done\r\n g3 = x[0]-x[3];\r\n g4 = 0.10471*x[0]**2+0.04811*x[2]*x[3]*(14.0+x[1])-5.0;\r\n g5 = 0.125-x[0];\r\n g6 = d-d_max;\r\n g7 = P-P_c; #done\r\n\r\n g=[g1,g2,g3,g4,g5,g6,g7]\r\n g_round=np.round(np.array(g),6)\r\n w1=100\r\n w2=100\r\n\r\n phi=sum(max(item,0) for item in g_round)\r\n viol=sum(float(num) > 0 for num in g_round)\r\n\r\n reward = (y + (w1*phi + w2*viol))\r\n\r\n return reward\r\n\r\n#**********************************************************\r\n# Part II: Setup parameter space\r\n#**********************************************************\r\n#--setup the parameter space for the welded beam\r\nlb=[0.1, 0.1, 0.1, 0.1]\r\nub=[2.0, 10, 10, 2.0]\r\nd2type=['float', 'float', 'float', 
'float']\r\nBOUNDS={}\r\nnx=4\r\nfor i in range(nx):\r\n BOUNDS['x'+str(i+1)]=[d2type[i], lb[i], ub[i]]\r\n\r\n#*************************************************************\r\n# Part III: Define fitness function for hyperparameter tuning\r\n#*************************************************************\r\ndef tune_fit(cxpb, mu, alpha, cxmode, mutpb):\r\n\r\n #--setup the ES algorithm\r\n es=ES(mode='min', bounds=BOUNDS, fit=BEAM, lambda_=50, mu=mu, mutpb=mutpb, alpha=alpha,\r\n cxmode=cxmode, cxpb=cxpb, ncores=1, seed=1)\r\n\r\n #--Evolute the ES object and obtain y_best\r\n #--turn off verbose for less algorithm print-out when tuning\r\n x_best, y_best, es_hist=es.evolute(ngen=100, verbose=0)\r\n\r\n return y_best #returns the best score\r\n\r\n#*************************************************************\r\n# Part IV: Tuning\r\n#*************************************************************\r\n#Setup the parameter space for Bayesian optimisation\r\n#VERY IMPORTANT: The order of these parameters MUST be similar to their order in tune_fit\r\n#see tune_fit\r\nparam_grid={\r\n#def tune_fit(cxpb, mu, alpha, cxmode, mutpb):\r\n'cxpb': ['float', 0.1, 0.7], #cxpb is first (low=0.1, high=0.7, type=float/continuous)\r\n'mu': ['int', 20, 30], #mu is second (low=20, high=30, type=int/discrete)\r\n'alpha':['grid', [0.1, 0.2, 0.3, 0.4]], #alpha is third (grid with fixed values, type=grid/categorical)\r\n'cxmode':['grid', ['blend', 'cx2point']], #cxmode is fourth (grid with fixed values, type=grid/categorical)\r\n'mutpb': ['float', 0.05, 0.3]} #mutpb is fifth (low=0.05, high=0.3, type=float/continuous)\r\n\r\n#setup a bayesian tune object\r\nbtune=BAYESTUNE(param_grid=param_grid, fit=tune_fit, ncases=30)\r\n#tune the parameters with method .tune\r\nbayesres=btune.tune(ncores=1, csvname='bayestune.csv', verbose=True)\r\n\r\nprint('----Top 10 hyperparameter sets----')\r\nbayesres = bayesres[bayesres['score'] >= 1] #drop the cases with scores < 1 (violates the constraints)\r\nbayesres = bayesres.sort_values(['score'], axis='index', ascending=True) #rank the scores from best (lowest) to worst (highest)\r\nprint(bayesres.iloc[0:10,:]) #the results are saved in dataframe and ranked from best to worst\r\n\r\n#*************************************************************\r\n# Part V: Rerun ES with the best hyperparameter set\r\n#*************************************************************\r\nes=ES(mode='min', bounds=BOUNDS, fit=BEAM, lambda_=50, mu=bayesres['mu'].iloc[0],\r\n mutpb=bayesres['mutpb'].iloc[0], alpha=bayesres['alpha'].iloc[0],\r\n cxmode=bayesres['cxmode'].iloc[0], cxpb=bayesres['cxpb'].iloc[0],\r\n ncores=1, seed=1)\r\n\r\nes_x, es_y, es_hist=es.evolute(ngen=200, verbose=0)\r\n\r\n#*************************************************************\r\n# Part VI: Run Other Algorithms without Tuning\r\n#*************************************************************\r\n\r\n#---------------------------------\r\n# GWO\r\n#---------------------------------\r\ngwo=GWO(mode='min', bounds=BOUNDS, fit=BEAM, nwolves=50, ncores=1, seed=1)\r\ngwo_x, gwo_y, gwo_hist=gwo.evolute(ngen=200, verbose=0)\r\n\r\n#---------------------------------\r\n# HHO\r\n#---------------------------------\r\nhho=HHO(mode='min', bounds=BOUNDS, fit=BEAM, nhawks=50, ncores=1, seed=1)\r\nhho_x, hho_y, hho_hist=hho.evolute(ngen=200, verbose=0)\r\n\r\n#---------------------------------\r\n# JAYA\r\n#---------------------------------\r\njaya=JAYA(mode='min', bounds=BOUNDS, fit=BEAM, npop=50, ncores=1, seed=1)\r\njaya_x, jaya_y, jaya_hist=jaya.evolute(ngen=200, 
verbose=0)\r\n\r\n#---------------------------------\r\n# WOA\r\n#---------------------------------\r\nwoa=WOA(mode='min', bounds=BOUNDS, fit=BEAM, nwhales=50, a0=1.5, b=1, ncores=1, seed=1)\r\nwoa_x, woa_y, woa_hist=woa.evolute(ngen=200, verbose=0)\r\n\r\nprint('---- ES ----')\r\nprint('Best fitness (y) found:', es_y)\r\nprint('Best individual (x) found:', es_x)\r\n\r\nprint('---- GWO ----')\r\nprint('Best fitness (y) found:', gwo_y)\r\nprint('Best individual (x) found:', gwo_x)\r\n\r\nprint('---- HHO ----')\r\nprint('Best fitness (y) found:', hho_y)\r\nprint('Best individual (x) found:', hho_x)\r\n\r\nprint('---- JAYA ----')\r\nprint('Best fitness (y) found:', jaya_y)\r\nprint('Best individual (x) found:', jaya_x)\r\n\r\nprint('---- WOA ----')\r\nprint('Best fitness (y) found:', woa_y)\r\nprint('Best individual (x) found:', woa_x)\r\n\r\n\r\n#---------------------------------\r\n# Plot\r\n#---------------------------------\r\n#Plot fitness convergence\r\nplt.figure()\r\nplt.plot(np.array(es_hist), label='ES')\r\nplt.plot(gwo_hist['fitness'], '--', label='GWO')\r\nplt.plot(woa_hist['global_fitness'], '-.', label='WOA')\r\nplt.plot(jaya_hist, ':', label='JAYA')\r\nplt.plot(hho_hist['global_fitness'], '-o', markersize=2, label='HHO')\r\nplt.xlabel('Generation')\r\nplt.ylabel('Fitness')\r\nplt.legend()\r\nplt.savefig('beam_conv.png',format='png', dpi=300, bbox_inches=\"tight\")\r\nplt.show()","repo_name":"mradaideh/neorl","sub_path":"examples/journal_tests/supplementary/sec6_welded_beam/welded_beam.py","file_name":"welded_beam.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"67"} +{"seq_id":"12759884880","text":"import pandas, random, pickle, os, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nfrom pandas import DataFrame \nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #suppress tf logging\nrandom.seed(0)\ndef getIndexOfTopic(topic):\n tr, i = [], 0\n l = len(text)\n while i < l:\n if tipo[i] == topic:\n tr.append(i)\n i += 1\n return tr\n\n>=maxC:\n maxC = corr\n thresholds.append([embedTopic[j], embedTopic[k]])\n riga.append(round(corr, 3))\n return m, thresholds\n\ndef matrixMax(thresholds):\n l = len(thresholds)\n A = []\n for i in range(l):\n riga = []\n A.append(riga)\n for j in range(l):\n if i == j:\n riga.append(round(np.inner(thresholds[i][0], thresholds[j][0]),3))\n elif i = threshold:\n riga.append(matrix[i][j])\n j+=1\n j = 0\n i+=1\n return A\n\npath = \"src/dump\"\ncolnames = ['text', 'tipo']\nfilename = \"src/dataset_nosense.csv\"\ndata = pandas.read_csv(filename, encoding='utf8', skiprows=1, names=colnames)\ndump_embed, dump_y = path, path\n\nif filename == \"src/dataset.csv\":\n dump_embed+=\"/X_embed\"\n dump_y+=\"/y\"\nelse:\n dump_y+=\"/y_nosense\"\n dump_embed+=\"/X_embed_nosense\"\n\ntext = data.text.tolist()\ntipo = data.tipo.tolist()\nX_embed = pickle.load(open(dump_embed, \"rb\"))\ny = pickle.load(open(dump_y, \"rb\"))\n\nindex_pol = getIndexOfTopic(0)\nindex_health = getIndexOfTopic(1)\nindex_work = getIndexOfTopic(2)\nindex_fly = getIndexOfTopic(3)\nindex_movie = getIndexOfTopic(4)\n\npath = \"src/matrix/report\"\nfiles = [path+\"/pol.txt\", path+\"/health.txt\", path+\"/work.txt\", path+\"/fly.txt\", path+\"/movie.txt\", path+\"/mixed.txt\"]\n\nlistTopicsIndex = [index_pol, index_health, index_work, index_fly, index_movie]\nn_sentences = 20\nmatrixList = createMatrix(n_sentences)\n\nfp = open(files[len(files)-1],\"w\")\nn_mix = 5 #number of sentences per topic\nmixedList = doubleTopicMatrix(fp, n_mix)\nmixedMtr = 
topicMatrix(mixedList)\nwriteMatrix(fp, mixedMtr)\n#plotSingleMatrix(matrixList)\nplotMixedMatrix()\nembed_pol = getEmbedByTopic(index_pol)\nembed_health = getEmbedByTopic(index_health)\nembed_work = getEmbedByTopic(index_work)\nembed_fly = getEmbedByTopic(index_fly)\nembed_movie = getEmbedByTopic(index_movie)\n\nm_pol, max_embed_pol = allProduct(embed_pol, 0.6, 5)\nm_health, max_embed_health = allProduct(embed_health, 0.3, 5)\nm_work, max_embed_work = allProduct(embed_work, 0.4, 5)\nm_fly, max_embed_fly = allProduct(embed_fly, 0.5, 5)\nm_movie, max_embed_movie = allProduct(embed_movie, 0.6, 5)\nmax_pol = matrixMax(max_embed_pol)\nmax_list = [\nmatrixMax(max_embed_pol),\nmatrixMax(max_embed_health),\nmatrixMax(max_embed_work),\nmatrixMax(max_embed_fly),\nmatrixMax(max_embed_movie)\n]\nplotSingleMatrix(max_list, 5)","repo_name":"CiccioTecchio/IA-Knoxly","sub_path":"src/matrix/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"7912515566","text":"#! /usr/bin/env python\n# coding: UTF8\n\nfrom logging import getLogger\nimport math\n\n\nclass LocusTagGenerator(object):\n def __init__(self, genome, config):\n self.logger = getLogger(__name__)\n self.genome = genome\n self.locus_tag_prefix = config.LOCUS_TAG_SETTINGS.get(\"locus_tag_prefix\", \"LOCUS\")\n self.step = config.LOCUS_TAG_SETTINGS.get(\"step\", 1)\n self.separate = config.LOCUS_TAG_SETTINGS.get(\"use_separate_tags\", True)\n self.symbols = config.LOCUS_TAG_SETTINGS.get(\"symbols\", {\"CDS\": \"\", \"rRNA\": \"r\", \"tRNA\": \"t\"})\n if self.locus_tag_prefix:\n self.enabled = True\n self.logger.info(\"Locus_tag settings: locus_tag_prefix={self.locus_tag_prefix} and step={self.step}.\".format(self=self))\n if self.separate:\n examples = \", \".join([key + \": \" + self.locus_tag_prefix + \"_\" + value + \"000xx\" for key, value in self.symbols.items()])\n self.logger.info(\"Locus_tags are assigned separately to each feature type. e.g. \" + examples + \".\")\n else:\n self.enabled = False\n self.logger.info(\"locus_tag_prefix is empty. 
Locus_tags will not be assigned.\")\n\n def execute(self):\n if not self.enabled:\n return\n features = \", \".join(list(self.symbols.keys()))\n self.logger.info(\"Assigning locus_tags to \" + features)\n\n counts = {key: 0 for key in self.symbols.keys()}\n count = 0\n digit = 1 if len(self.genome.features) == 0 else int(math.log10(len(self.genome.features) * self.step)) + 1\n for feature in self.genome.features.values():\n type_ = feature.type\n if type_ == \"misc_feature\":\n type_ = \"CDS\" # temporarily treated as CDS\n if type_ in self.symbols:\n counts[type_] += 1\n count += 1\n if self.separate:\n locus_tag = self.locus_tag_prefix + \"_\" + self.symbols[type_] + str(self.step * counts[type_]).zfill(digit)\n else:\n locus_tag = self.locus_tag_prefix + \"_\" + str(self.step * count).zfill(digit)\n feature.qualifiers[\"locus_tag\"] = [locus_tag]\n else:\n if \"locus_tag\" in feature.qualifiers:\n del feature.qualifiers[\"locus_tag\"]\n\nif __name__ == '__main__':\n pass\n","repo_name":"nigyta/dfast_core","sub_path":"dfc/utils/locus_tag_generator.py","file_name":"locus_tag_generator.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"67"} +{"seq_id":"10301001677","text":"import itertools\n\nimport pytest\nimport tensorflow as tf\n\nfrom common.tflite_layer_test_class import TFLiteLayerTest\nfrom common.utils.tflite_utils import data_generators, additional_test_params\n\ntest_ops = [\n {'op_name': 'MEAN', 'op_func': tf.math.reduce_mean},\n {'op_name': 'REDUCE_ALL', 'op_func': tf.math.reduce_all, 'kwargs_to_prepare_input': 'boolean', 'dtype': tf.bool},\n {'op_name': 'REDUCE_ANY', 'op_func': tf.math.reduce_any, 'kwargs_to_prepare_input': 'boolean', 'dtype': tf.bool},\n {'op_name': 'REDUCE_MAX', 'op_func': tf.math.reduce_max},\n {'op_name': 'REDUCE_MIN', 'op_func': tf.math.reduce_min},\n {'op_name': 'REDUCE_PROD', 'op_func': tf.math.reduce_prod, 'kwargs_to_prepare_input': 'short_range'},\n {'op_name': 'SUM', 'op_func': tf.math.reduce_sum},\n]\n\ntest_params = [\n {'shape': [2, 10, 10, 3]},\n {'shape': [2, 10]}\n]\n\n\ntest_data = list(itertools.product(test_ops, test_params))\nfor i, (parameters, shapes) in enumerate(test_data):\n parameters.update(shapes)\n test_data[i] = parameters.copy()\n\n\ntest_data = list(itertools.product(test_data, additional_test_params[0]))\nfor i, (parameters, additional_test_params[0]) in enumerate(test_data):\n parameters.update(additional_test_params[0])\n test_data[i] = parameters.copy()\n\n\nclass TestTFLiteReduceLayerTest(TFLiteLayerTest):\n inputs = [\"Input\"]\n outputs = [\"ReduceOperation\"]\n\n def _prepare_input(self, inputs_dict, generator=None):\n if generator is None:\n return super()._prepare_input(inputs_dict)\n return data_generators[generator](inputs_dict)\n\n def make_model(self, params):\n assert len(set(params.keys()).intersection({'op_name', 'op_func', 'shape', 'axis'})) == 4, \\\n 'Unexpected parameters for test: ' + ','.join(params.keys())\n self.allowed_ops = [params['op_name']]\n tf.compat.v1.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n place_holder = tf.compat.v1.placeholder(params.get('dtype', tf.float32), params['shape'],\n name=TestTFLiteReduceLayerTest.inputs[0])\n params['op_func'](place_holder, axis=params['axis'], name=TestTFLiteReduceLayerTest.outputs[0])\n net = sess.graph_def\n return net\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_reduce(self, params, ie_device, precision, 
temp_dir):\n self._test(ie_device, precision, temp_dir, params)\n","repo_name":"xpippi/openvino","sub_path":"tests/layer_tests/tensorflow_lite_tests/test_tfl_Reduce.py","file_name":"test_tfl_Reduce.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19003008973","text":"import http.client\nimport json\n\nserver = \"localhost:5000\"\njsonHeader = {'Content-type': 'application/json'}\n\ndef returnRequestAsJson(method, path):\n\tconnection = http.client.HTTPConnection(server)\n\tconnection.request(method, path)\n\tresponse = connection.getresponse()\n\tresp = json.loads(response.read().decode())\n\tconnection.close()\n\treturn resp['data']\n\ndef addPlayerToGame(player, game, team):\n\tbody = {'historical_elo': player['elo'], 'name':player['name']}\n\tjsonBody = json.dumps(body)\n\n\tconnection = http.client.HTTPConnection(server)\n\tconnection.request(\"POST\", \"/teams/\" + team + \"/games/\" + game, jsonBody, jsonHeader)\n\tresponse = connection.getresponse()\n\tprint(response.read().decode())\n\tconnection.close()\n\ndef updatePlayer(player):\n\tbody = {'elo': player['elo'], 'wins': player['wins'], 'losses':player['losses']}\n\tjsonBody = json.dumps(body)\n\n\tconnection = http.client.HTTPConnection(server)\n\tconnection.request(\"PUT\", \"/players/\" + player['name'], jsonBody, jsonHeader)\n\tresponse = connection.getresponse()\n\tprint(response.read().decode())\n\tconnection.close()\n\ndef updateGame(game):\n\tbody = {'win_team':game['win_team'], 'elo_score':game['elo_score'], \n\t'map':game['map'], 'round':game['round'], 'emp_team_elo':game['emp_team_elo'], 'nr_team_elo':game['nr_team_elo']}\n\tjsonBody = json.dumps(body)\n\t\n\tconnection = http.client.HTTPConnection(server)\n\tconnection.request(\"PUT\", \"/events/games/\" + str(game['id']), jsonBody, jsonHeader)\n\tresponse = connection.getresponse()\n\tprint(response.read().decode())\n\tconnection.close()\n\ndef getPlayersForTeam(team):\n\tplayers = returnRequestAsJson(\"GET\", \"/teams/\" + team + \"/players\")\n\tprint(\"\\nPlayers for \", players[0]['team_name'], \":\", sep=\"\")\n\tfor i in range(len(players)):\n\t\tprint(\"IDX:\", i, \"|| Name:\", players[i]['name'], \"|| Elo:\", players[i]['elo'])\n\treturn players \n\ndef getEvents():\n\tevents = returnRequestAsJson(\"GET\", \"/events\")\n\tprint(\"\\nEvents:\")\n\tfor event in events:\n\t\tprint(\"Event ID:\", event['id'], \"|| Name:\", event['name'], \"|| Date:\", event['date'])\n\treturn events \n\ndef getTeamsForEvent(event):\n\tteams = returnRequestAsJson(\"GET\", \"/events/\" + event + \"/teams/\")\n\tprint(\"\\nTeams:\")\n\tfor team in teams:\n\t\tprint(\"Team ID:\", team['id'], \"Name:\", team['team_name'], \"|| Captain:\", team['captain'])\n\treturn teams\n\ndef getGamesForEvent(event):\n\tgames = returnRequestAsJson(\"GET\", \"/events/\" + event + \"/games\")\n\tprint(\"\\nOutstanding Results:\")\n\tfor i in range(len(games)):\n\t\tif games[i]['elo_score'] == 0:\n\t\t\tprint(\"IDX:\", i, \"|| Game ID:\", games[i]['id'], \"|| NR team:\", games[i]['nr_team'], \"|| Emp team:\", games[i]['emp_team'])\n\treturn games\n\ndef getTeamGameRoster(team):\n\troster = getPlayersForTeam(team)\n\trosterIn = \"\"\n\tprint(\"Remove idx of players from roster to match players that played. 
Enter 99 when finished\")\n\twhile rosterIn != 99:\n\t\trosterIn = int(input())\n\t\tif rosterIn != 99:\n\t\t\tprint(\"Player\", roster.pop(rosterIn)['name'], \"removed\")\n\n\treturn roster\n\ndef calculateTeamElo(roster):\n\tsum = 0\n\tfor player in roster:\n\t\tsum += player['elo']\n\tavg = round(sum / len(roster))\n\tprint(roster[0]['team_name'], \"average elo:\", avg)\n\treturn avg\n\ndef calculateEloChange(winnerElo, loserElo):\n\twinChance = 1.0 /(1 + 10**((loserElo - winnerElo)/400.0)) \n\tdelta = round(32 * (1 - winChance))\n\tprint(\"Elo change of team rated\", winnerElo, \"beating team rated\", loserElo, \"is:\", delta)\n\treturn delta\n\ndef addGameResults():\n\tevents = getEvents()\n\tevent = input(\"\\nEnter ID to get games teams and games from event: \")\n\tgetTeamsForEvent(event)\n\tgames = getGamesForEvent(event)\n\tgameIdx = int(input(\"\\nEnter idx of game to post results for: \"))\n\tgame = games[gameIdx]\n\tprint(\"Modifying Game ID:\",game['id'])\n\twinner = input(\"Enter winning team ID: \")\n\tgameRound = int(input(\"Enter round #: \"))\n\tgameMap = input(\"Enter map: \")\n\n\tloser = \"\"\n\tif int(winner) == game['nr_team']:\n\t\tloser = game['emp_team']\n\t\t\n\telse:\n\t\tloser = game['nr_team']\n\n\tprint(\"Losing team ID:\", loser, \"Winning team ID:\", winner)\n\twinningTeamRoster = getTeamGameRoster(winner)\n\tlosingTeamRoster = getTeamGameRoster(str(loser))\n\n\twinningTeamAvgElo = calculateTeamElo(winningTeamRoster)\n\tlosingTeamAvgElo = calculateTeamElo(losingTeamRoster)\n\teloChange = calculateEloChange(winningTeamAvgElo, losingTeamAvgElo)\n\n\twarning = input(\"WARNING: Please check above output. If something is off, type QUIT to cancel this operation before sending changes to the database\")\n\tif warning == \"QUIT\":\n\t\treturn\n\t\t\n\tfor player in winningTeamRoster:\n\t\taddPlayerToGame(player, str(game['id']), str(winner))\n\t\tplayer['wins'] += 1\n\t\tplayer['elo'] += eloChange\n\t\tupdatePlayer(player)\n\n\tfor player in losingTeamRoster:\n\t\taddPlayerToGame(player, str(game['id']), str(loser))\n\t\tplayer['losses'] += 1\n\t\tplayer['elo'] -= eloChange\n\t\tupdatePlayer(player)\n\n\tgame['win_team'] = int(winner)\n\tgame['elo_score'] = eloChange\n\tgame['round'] = gameRound\n\tgame['map'] = gameMap\n\tif int(winner) == game['nr_team']:\n\t\tgame['nr_team_elo'] = winningTeamAvgElo\n\t\tgame['emp_team_elo'] = losingTeamAvgElo\n\telse:\n\t\tgame['nr_team_elo'] = losingTeamAvgElo\n\t\tgame['emp_team_elo'] = winningTeamAvgElo\n\n\tupdateGame(game)\n\n\ndef getCommand():\n\tusrIn = input().lower()\n\tif usrIn == \"post results\":\n\t\taddGameResults()\n\tprint(\"\\n\")\n\treturn usrIn\n\ndef main():\n\tusrIn = \"\"\n\twhile(usrIn!=\"q\"):\n\t\tprint(\"ADMIN MENU\")\n\t\tprint(\"'post results' - Add Game Results\")\n\t\tprint(\"'q' - quit\")\n\t\tusrIn = getCommand()\n\nmain()","repo_name":"marshalltj/spbl-elo-admin","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26603539553","text":"\nfrom flask import Flask, request, jsonify, Blueprint\nfrom flask_sqlalchemy import SQLAlchemy\nfrom Config.db import db\nimport Services.index as Services\nimport Utils.index as Utils\n\npayment = Blueprint(\"payment\", __name__)\n\n\n@payment.route('/create', methods=['POST'])\ndef create_pay():\n # Getting the token\n token = request.headers['Authorization']\n token = token.replace(\"Bearer\", \"\")\n token = 
token.replace(\" \", \"\")\n vf = Utils.Token.verify_token(token)\n # Verifying the token\n if vf[\"error\"] == False:\n data = request.get_json()\n user_id_number = data['user_id_number']\n id_pck = data['id_pck']\n\n # Get user\n user = Services.Users.get_by_id_number(user_id_number)\n user_id = user[\"id\"]\n if user == None:\n return jsonify({\"error\": True, \"message\": \"User does not exist\"}), 400\n else:\n # Getting all users completed packages\n package_data = Services.Package.get_by_id(id_pck)\n if package_data == None:\n return jsonify({\"error\": True, \"message\": \"Unable to create payment due to no package outstanding\"}), 400\n else:\n # Getting the amount\n amount = package_data[\"cumulative_total\"]\n\n # Create payment\n new_pay = Services.Payment.create(user_id, amount)\n Services.Package.update(package_data[\"id\"], 12, \"Closed\", package_data[\"cumulative_total\"])\n Services.Payment.add_payment_detail(new_pay[\"id\"], package_data[\"id\"])\n return jsonify(new_pay=new_pay), 200\n else:\n return jsonify(vf), 401\n\n@payment.route('/find_all', methods=['GET'])\ndef find_all():\n try:\n # Getting the token\n token = request.headers['Authorization']\n token = token.replace(\"Bearer\", \"\")\n token = token.replace(\" \", \"\")\n vf = Utils.Token.verify_token(token)\n # Verifying the token\n if vf[\"error\"] == False:\n # Getting all payments\n payments = Services.Payment.get_all()\n return jsonify(payments=payments), 200\n else:\n return jsonify(vf), 401\n except Exception as e:\n print(\"An error occurred while getting the package\", e)\n return jsonify({\"error\": True, \"message\": \"Unable to get all the packages\"}), 500\n \n\n@payment.route('/find_one/', methods=['GET'])\ndef find_by_user(id_number):\n try:\n # Getting the token\n token = request.headers['Authorization']\n token = token.replace(\"Bearer\", \"\")\n token = token.replace(\" \", \"\")\n vf = Utils.Token.verify_token(token)\n # Verifying the token\n if vf[\"error\"] == False:\n # Getting payment by user\n user = Services.Users.get_by_id_number(id_number)\n if user == None:\n return jsonify({\"error\": True, \"message\": \"User does not exist\"}), 400\n else:\n package = Services.Payment.get_by_user(user[\"id\"])\n if package:\n return jsonify(package=package), 200\n else:\n return jsonify({\"error\": True, \"message\": \"No package found\"}), 404\n else:\n return jsonify(vf), 401\n except Exception as e:\n print(\"An error occurred while getting the package\", e)\n return jsonify({\"error\": True, \"message\": \"Unable to get the package\"}), 500\n","repo_name":"Keiddy15/backend_web_avanzada","sub_path":"Routes/PaymentRoutes.py","file_name":"PaymentRoutes.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31414354237","text":"# 方法一:蛮力法。\n\"\"\"\n先计算前面连续字符的长度,随后计算后面连续字符的长度\n\"\"\"\n\n\nclass Solution:\n def countBinarySubstrings(self, s):\n result = 0\n for i in range(len(s)):\n first_length = 0\n second_length = 0\n j = i\n while j < len(s) and s[j] == s[i]:\n first_length += 1\n j += 1\n while j < len(s) and s[j] != s[i]:\n second_length += 1\n j += 1\n if first_length == second_length:\n result += 1\n break\n return result\n\n\n# s = Solution()\n# print(s.countBinarySubstrings('00001111'))\n\n\n# 方法二:方法一其实还没用到问题背后的规律,本方法参考LeetCode他人评论\n\"\"\"\n先统计连续的0和1分别有多少个,如:111100011000,得到4323;在4323中的任意相邻两个数字,\n取小的一个加起来,就是3+2+2 = 7.\n\"\"\"\n\n\nclass Solution2:\n def countBinarySubstrings(self, s):\n 
candidate = []\n max_len = 1\n for i in range(1, len(s)):\n if s[i] == s[i - 1]:\n max_len += 1\n else:\n candidate.append(max_len)\n max_len = 1\n candidate.append(max_len)\n\n result = 0\n for j in range(len(candidate) - 1):\n result += min(candidate[j], candidate[j + 1])\n return result\n\n\ns = Solution2()\nprint(s.countBinarySubstrings('111100011000'))\n","repo_name":"yangwei-nlp/LeetCode-Python","sub_path":"LeetCode/tag字符串/696. 计数二进制子串.py","file_name":"696. 计数二进制子串.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"45022940568","text":"import flask\nimport json\nserver=flask.Flask(__name__)\n\n\n@server.route('/ewms-api-server/api/ewms-goods/labelPrint',methods=['post'])\ndef index():\n # res={'msg':'cccccc','msg_code':0}\n res={ \"result\":\n {\n \"foreignTitle\": \"foreignTitle1\",\n \"id\": 0,\n \"longDescription\": \"string\",\n \"managerId\": 0,\n \"managerName\": \"string\",\n \"parentSellerSku\": \"string\",\n \"productCode\": \"string\",\n \"productId\": 0,\n \"sellerId\": 0,\n \"sellerUser\": \"string\",\n \"shopId\": 0,\n \"shortDescription\": \"string\",\n \"url\": \"stringurl\",\n\n }\n }\n\n return json.dumps(res,ensure_ascii=False)\n\n\nif __name__==\"__main__\":\n server.run(port=8812, debug=True, host='0.0.0.0')","repo_name":"714866/banggood_request_test","sub_path":"api_server/fbl_twms_print.py","file_name":"fbl_twms_print.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20842552090","text":"#Trying to implement a clustering-based semi-supervised CNN with MNIST and pytorch. 2020/01/27\n#fix init#\nfrom numpy.random import seed\nimport argparse \nimport os\nimport time\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport sys\nstderr = sys.stderr\nsys.stderr = open(os.devnull, 'w')\n\nimport pickle\nimport numpy as np\nimport tensorflow as tf\ntf.get_logger().setLevel('ERROR')\nimport keras\nsys.stderr = stderr\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import datasets \nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import metrics\nfrom tensorflow.keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\n\n#import clustering\nimport my_clustering\nimport utils\nimport models\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Keras Implementation of Semi-DeepCluster')\n #parser.add_argument('--seed', type=int, default=31, help='random seed (default: 31)')\n parser.add_argument('--n_cluster', '--k', type=int, default=16,\n help='number of cluster for k-means (default: 16)')\n parser.add_argument('--alpha', '--a', type=float, default=0.05,\n help='parameter that controls the role of supervised CNN (default: 0.05)')\n parser.add_argument('--beta', '--b', type=float, default=1,\n help='parameter that controls the role of Deep Clustering (default: 1)') \n parser.add_argument('--epochs', '--e',type=int, default=20,\n help='number of total epochs to run (default: 20)')\n parser.add_argument('--batch_size', default=16, type=int,\n help='mini-batch size (default: 16)')\n parser.add_argument('--n_classes', '--c', type=int, default=10,\n help='number of classes (default: 10)')\n parser.add_argument('--n_sup', type=int, default=100,\n help='number of supervised data (default: 100)')\n parser.add_argument('--seed', type=int, default=3,\n help='random seed (default: 3)') \n parser.add_argument('--path2data', 
type=str, default='C:/Users/acicula/Desktop/sGAN/dataset/experiment1/', help='path to dataset folder')\n parser.add_argument('--scale', default=(0,1), type=tuple,\n help='scale range of the sample (default: 0 to 1)') \n parser.add_argument('--verbose', '-v', action='store_true', help='make noise')\n\n return parser.parse_args()\n\ndef main(args):\n seed(1)\n end = time.time()\n # load the data\n (X_train, y_train), (X_tst, y_tst) = datasets.mnist.load_data()\n X_train = X_train.astype(\"float32\") / 255\n X_tst = X_tst.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n X_train = np.expand_dims(X_train, -1)\n X_tst = np.expand_dims(X_tst, -1)\n if args.verbose:\n print('Loading MNIST Samples.\\n Training set size: {}, \\n Testing size: {}'.format(X_train.shape, X_tst.shape))\n\n # setup optimizer\n DC_opt = Adam(lr=1e-5, beta_1=0.5)\n C_opt = Adam(lr=1e-6, beta_1=0.5)\n\n #create CNN \n DC_model, C_model = models.semi_cnn(X_train.shape[1:], args.n_classes, args.n_cluster, args.alpha, args.beta, DC_opt, C_opt)\n if args.verbose:\n DC_model.summary()\n C_model.summary()\n #load supervised samples, pre-train the CNN\n X_sup, y_sup = utils.select_supervised_samples((X_train, y_train), args.n_sup, args.n_classes)\n\n # from label to categorical \n y_sup = keras.utils.to_categorical(y_sup, args.n_classes)\n y_train = keras.utils.to_categorical(y_train, args.n_classes)\n y_tst = keras.utils.to_categorical(y_tst, args.n_classes)\n #if args.verbose:\n # print('=>Train the CNN with supervised sampeles with shape:: {}'.format(X_sup.shape, y_sup.shape))\n # print(y_sup)\n #cnn.fit(X_sup, y_sup, epochs = 100, verbose =0) \n\n # clustering algorithm to use\n deepcluster = my_clustering.Kmeans(args.n_cluster)\n\n #Pre-train model in supervised with some label\n #cnn.fit(X_sup, y_sup, _batch_size, _epochs, verbose)\n #score= cnn.evaluate(tstset[0], tstset[1])\n #print('Test accuracy:', score[1])\n #cnn.fit(dataset[0], dataset[1], args.batch, args.epochs, verbose)\n\n loss1_history = []\n loss2_history = []\n tst_history = [] \n\n # calculate the number of training iterations\n bat_per_epo = int(X_train.shape[0] / args.batch_size)\n n_steps = bat_per_epo * args.epochs\n if args.verbose:\n print('n_epochs=%d, n_batch=%d, b/e=%d, steps=%d' % (args.epochs, args.batch_size, bat_per_epo, n_steps))\n\n ############################################################ \n ###########training model with semi-DeepCluster#############\n ############################################################\n for i in range(n_steps):\n end = time.time()\n if args.verbose:\n print('=>Step.{}/ {}'.format(i+1, n_steps))\n\n #fit the model with supervised samples\n _loss1, _acc1 = C_model.train_on_batch(X_sup, y_sup)\n if args.verbose:\n print('C_loss:{}'.format(_loss1)) \n #remove DC_models' fc layer \n features_model = Model(DC_model.input, DC_model.layers[-3].output)\n # get features for the whole dataset\n features = features_model.predict(X_train)\n # cluster the features\n if args.verbose:\n print('=>Cluster the features')\n print(features.shape)\n images_lists = deepcluster.cluster(features, verbose=args.verbose)\n\n # assign pseudo-labels\n if args.verbose:\n print('=>Assign pseudo labels') \n train_dataset = my_clustering.cluster_assign(images_lists, X_train)\n X, Y = train_dataset.X, train_dataset.pseudolabels\n \n # train network with clusters as pseudo-labels\n _X_dc, _y_dc = utils.select_supervised_samples([X, Y], args.batch_size, args.n_cluster) \n _y_dc = keras.utils.to_categorical(_y_dc, 
args.n_cluster)\n _loss2, _acc2 = DC_model.train_on_batch(_X_dc, _y_dc)\n if args.verbose:\n print('DC_loss:{}'.format(_loss2))\n # summarize \n if (i+1)%bat_per_epo ==0:\n acc = utils.evaluate_c_model(C_model, X_tst, y_tst)\n print('>> {}'.format(acc))\n tst_history.append(acc)\n loss1_history.append(_loss1)\n loss2_history.append(_loss2)\n\n print('DC loss history:', loss2_history)\n print('Test history:', tst_history) \n print('Time: %s\\n' % (time.time() - end))\n\nif __name__ == '__main__':\n \n args = parse_args()\n main(args)\n","repo_name":"aciculachen/CSI-DeepClustering","sub_path":"test_on_mnist.py","file_name":"test_on_mnist.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31349057176","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom data_processor import get_xp, get_zxpy\nfrom sklearn.metrics import classification_report\ndef two_stage_least_square():\n x, p = get_xp()\n clf = LinearRegression().fit(x, p)\n pred_p = clf.predict(x)\n for i in range(len(pred_p)):\n if pred_p[i] >= 0.5:\n pred_p[i] = 1.0\n else:\n pred_p[i] = 0.0\n print(classification_report(p, pred_p))\n x.insert(loc=len(x.columns), column=\"education\", value=pred_p)\n del x[\"race\"]\n _, _, _, _, y = get_zxpy()\n clf = LinearRegression().fit(x, y)\n print(x.keys())\n pred_y = clf.predict(x)\n for i in range(len(pred_y)):\n if pred_y[i] >= 0.5:\n pred_y[i] = 1.0\n else:\n pred_y[i] = 0.0\n print(classification_report(y, pred_y))\n del x[\"education\"]\n x.insert(loc=len(x.columns), column=\"education\", value=\"1,0\")\n x = x.drop('education', axis=1).join(\n x['education'].str.split(',', expand=True).stack().reset_index(level=1, drop=True).rename('education')) \\\n .reset_index(level=0)\n xp = x[['workclass', 'age', 'marital_status', 'occupation', 'relationship',\n 'gender', 'native_country', 'hours-per-week', 'education']]\n xp[\"education\"] = pd.to_numeric(xp[\"education\"])\n pred_y = clf.predict(xp)\n for i in range(len(pred_y)):\n if pred_y[i] >= 0.5:\n pred_y[i] = 1.0\n else:\n pred_y[i] = 0.0\n def calculate_ate(y_pred, y_fact):\n ate = 0\n size = len(y_fact)\n for i in range(size):\n if y_fact['education'][i] == 1:\n ite = y_fact['income_bigger_than_50K'][i] - y_pred[2 * i + 1]\n else:\n ite = y_pred[2 * i] - y_fact['income_bigger_than_50K'][i]\n ate += ite\n ate = ate / size\n return ate\n\n y_fact = pd.read_csv('../data/income_data/modified_train.csv')[['education', 'income_bigger_than_50K']]\n print('ATE = '+ str(calculate_ate(pred_y, y_fact)))\nif __name__ == '__main__':\n two_stage_least_square()\n","repo_name":"williamkunhan/Casual-Inference","sub_path":"code/2SLS.py","file_name":"2SLS.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32957763401","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport ctypes\nimport os\nimport time\nimport platform\n\nfrom logger import logger\nfrom BitSrunLogin.LoginManager import LoginManager\nfrom config import login_options\n\n# Get the computer name\nhost_name = platform.node()\ntry:\n # disable the QuickEdit and Insert mode for the current console\n kernel32 = ctypes.windll.kernel32\n kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)\nexcept:\n pass\n\ndef is_connect_internet(test_ip):\n if platform.system().lower().startswith('windows'):\n cmd = u\"ping {} -n 1\".format(test_ip)\n else:\n cmd 
= u\"ping {} -c 1\".format(test_ip)\n status = os.system(cmd)\n return status == 0\n\ndef always_login(user=None, test_ip=None, delay=2, max_failed=3, **kwargs):\n time_now = lambda: time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n org_delay = delay\n failed = 0\n logger.info(f'[{time_now()}] [{host_name}] NetWork Monitor StartUp.')\n while True:\n if not is_connect_internet(test_ip):\n failed += 1\n delay = max(0., delay / 2)\n if failed >= max_failed:\n logger.info(f'[{time_now()}] [{host_name}] offline.')\n LoginManager(**kwargs).login(username=user.user_id, password=user.passwd)\n else:\n if failed >= max_failed:\n logger.info(f'[{time_now()}] [{host_name}] online now.')\n failed = 0\n delay = org_delay\n time.sleep(delay)\n\n\nif __name__ == \"__main__\":\n\n while True:\n try:\n always_login(**login_options)\n except:\n import traceback\n\n error = traceback.format_exc()\n logger.error(error)\n time.sleep(15)\n","repo_name":"b71db892/AutoLoginUESTC","sub_path":"always_online.py","file_name":"always_online.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"72469087574","text":"from argparse import ArgumentParser as Ag\n\nfrom classes import Vehicle, VehicleOptions\n\n\ndef main():\n static = []\n var = []\n d1 = VehicleOptions.flat_static\n d2 = VehicleOptions.nested_static\n d3 = VehicleOptions.var_opt\n d4 = {'motorcycle': {\n 'all': 'mca',\n 'dealer': 'mcd',\n 'owner': 'mco'},\n 'cage': {\n 'all': 'cta',\n 'dealer': 'ctd',\n 'owner': 'cto'}}\n parser = Ag()\n parser.add_argument('city')\n parser.add_argument('vehicle_type')\n parser.add_argument('seller_type')\n parser.add_argument('--search')\n\n for option in d1:\n parser.add_argument(f'--{option}', action='store_true')\n for option in d2:\n parser.add_argument(f'--{option}')\n for option in d3:\n parser.add_argument(f'--{option}')\n\n args = parser.parse_args()\n city = args.city\n vehicle_type = args.vehicle_type\n seller_type = args.seller_type\n try:\n category = d4[vehicle_type][seller_type]\n except KeyError:\n category = None\n print('Invalid vehicle or seller type')\n quit()\n for value in args.__dict__:\n if args.__dict__[value]:\n if value in d1:\n static.append(value)\n if value in d2 and args.__dict__[value] in d2[value]:\n static.append((value, args.__dict__[value]))\n if value in d2 and args.__dict__[value] not in d2[value]:\n print(f'Invalid Argument \"{args.__dict__[value]}\" for option \"{value}\"')\n if value in d3:\n var.append((value, args.__dict__[value]))\n\n options = VehicleOptions(static, var).options_list\n search = args.search\n url = Vehicle(city, category, options, search).get_url\n print(url)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jakkso/carSearch","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27126548113","text":"import CommonFunctions\nimport SecurityServer\nimport Storage\nimport Auditing\nimport time\n\nserverDomain = \"AS-SERVER.DERBY.AC.UK\"\ncommandsUnimplemented = {\"SOML\", \"SEND\", \"SAML\", \"TURN\"} # Set containing unimplemented commands.\ncommandsAnytime = {\"NOOP\", \"EXPN\", \"VRFY\", \"HELP\", \"QUIT\"} # Set of commands that can be executed at any time.\ncommandsImplemented = {\"HELO\", \"QUIT\", \"MAIL FROM:\", \"RCPT TO:\", \"DATA\", \"RSET\", \"NOOP\", \"EXPN\", \"VRFY\", \"HELP\",\n \"EHLO\", 
\"ADDMAIL\", \"REGMAIL\", \"RMVMAIL\", \"MYMAILS\", \"LISTMAIL\", \"VIEWMAIL\", \"DELMAIL\"}\n# primary use of commandsImplemented is for the command help\n\n\nclass responseProcessor:\n def __init__(self):\n self.state = \"keyExchange\"\n self.transferKey = 0\n self.securityServer = SecurityServer.securityServer()\n\n self.accountEmailRegistry = Storage.accountsLoad(\"Email\")\n self.accountUserRegistry = Storage.accountsLoad(\"User\")\n self.emailListRegistry = Storage.accountsLoad(\"MailList\")\n\n print(\"FILES LOADED\")\n self.subStateMail = \"init\"\n self.mailFromBuffer = \"\"\n self.rcptBuffer = []\n self.dataBuffer = \"\"\n self.clientDomain = \"-\"\n self.currentUser = []\n\n def commandRouter(self, dataEnc, module): # Sends the commands to the function associated with the current state.\n\n if self.state == \"keyExchange\":\n dataDec = dataEnc.decode()\n self.stateKeyExchange(dataDec, module)\n\n elif self.state == \"login\":\n dataDec = self.securityServer.decryptData(dataEnc).decode()\n self.stateLogin(dataDec, module)\n\n elif self.state == \"greetings\":\n dataDec = self.securityServer.decryptData(dataEnc).decode()\n self.stateGreetings(dataDec, module)\n\n elif self.state == \"default\":\n dataDec = self.securityServer.decryptData(dataEnc).decode()\n self.stateDefault(dataDec, module)\n\n elif self.state == \"mail\":\n dataDec = self.securityServer.decryptData(dataEnc).decode()\n self.stateMail(dataDec, module)\n else:\n print(\"Command couldn't be routed state unknown\")\n self.code421(module)\n\n def stateKeyExchange(self, dataDec, module): # Handles the Key Exchange State.\n self.transferKey, completed = (self.securityServer.initiateKeyExchangeServer(dataDec, module))\n if completed:\n self.state = \"login\"\n print(str(self.transferKey))\n\n def stateLogin(self, dataDec, module): # Handles the login state.\n command = CommonFunctions.commandOnly(dataDec).upper()\n argument = CommonFunctions.argumentOnly(dataDec)\n if (command == \"REGISTER\" or command == \"LOGIN\") and CommonFunctions.numberOfWords(argument) == 2:\n userName = CommonFunctions.firstWord(argument)\n userPass = CommonFunctions.secondWord(argument)\n if CommonFunctions.userpassValidate(userName) and CommonFunctions.userpassValidate(userPass):\n if command == \"REGISTER\":\n self.commandREGISTER(argument, module)\n else:\n self.commandLOGIN(argument, module)\n else:\n self.code503(\" Username and Password must be atleast 6 characters long and CAN contain numbers, letters including the following symbols !@#$%^&*()-=_+,.?\", module)\n else:\n self.code501(\" Available commands: \\n\"\n \"login \\n\"\n \"register \", module)\n\n def stateGreetings(self, dataDec, module): # Handles commands and data while in the Greetings state.\n\n command = CommonFunctions.commandOnly(dataDec)\n argument = CommonFunctions.argumentOnly(dataDec)\n print(\"State:\" + self.state + \" Data:\" + dataDec + \" Command:\" + command + \" argument:\" + argument)\n\n if command in commandsAnytime:\n self.commandsAnytimeRouter(dataDec, module)\n elif command == \"HELO\":\n self.commandHELO(argument, module)\n elif command == \"EHLO\":\n self.commandEHLO(argument, module)\n elif command in commandsImplemented:\n self.code503(\"\", module)\n else:\n self.code500(module)\n if self.clientDomain != \"-\":\n self.state = \"default\"\n\n def stateDefault(self, dataDec, module): # Handles commands while in the default state.\n\n command = CommonFunctions.commandOnly(dataDec)\n argument = CommonFunctions.argumentOnly(dataDec)\n print(\"State:\" + 
self.state + \" Data:\" + dataDec + \" Command:\" + command + \" argument:\" + argument)\n\n if command in commandsUnimplemented:\n self.code502(module)\n elif command in commandsAnytime:\n self.commandsAnytimeRouter(dataDec, module)\n elif command == \"LOGOUT\":\n self.commandLOGOUT(module)\n elif command == \"REGMAIL\":\n self.commandREGMAIL(argument, module)\n elif command == \"ADDMAIL\":\n self.commandADDMAIL(argument, module)\n elif command == \"RMVMAIL\":\n self.commandRMVMAIL(argument, module)\n elif command == \"MYMAILS\":\n self.commandMYMAILS(module)\n elif command == \"LISTMAIL\":\n self.commandLISTMAIL(module)\n elif command == \"VIEWMAIL\":\n self.commandVIEWMAIL(argument, module)\n elif command == \"DELMAIL\":\n self.commandDELMAIL(argument, module)\n elif command == \"MAIL\": # added so that if its just mail and from is missing it throws a 501 error\n self.code501(\" valid parameter is FROM:\", module)\n elif command == \"MAIL FROM:\":\n self.commandMAIL(dataDec, module)\n else:\n self.code500(module)\n\n def stateMail(self, dataDec, module): # Handles incoming data and commands while in the mail state.\n command = CommonFunctions.commandOnly(dataDec)\n argument = CommonFunctions.argumentOnly(dataDec)\n print(\"State:\" + self.state + \" Data:\" + dataDec + \" Command:\" + command + \" argument:\" + argument)\n\n if self.subStateMail == \"data\":\n if dataDec == \".\":\n for rcpt in self.rcptBuffer:\n temp = Storage.email(self.mailFromBuffer, rcpt, self.dataBuffer)\n temp.saveEmail()\n Auditing.logMail(self.mailFromBuffer, rcpt)\n self.commandRSET(module)\n else:\n if dataDec[0] == \".\":\n dataDec = dataDec[1:]\n self.dataBuffer = self.dataBuffer + dataDec + \"\\n\"\n self.code250(\" OK\", module)\n\n elif command in commandsAnytime:\n self.commandsAnytimeRouter(dataDec, module)\n\n elif command == \"RSET\":\n self.commandRSET(module)\n\n elif self.subStateMail == \"init\":\n self.mailFromBuffer = argument[1:-1]\n self.subStateMail = \"rcpt\"\n self.code250(\" OK\", module)\n\n elif self.subStateMail == \"rcpt\":\n if command == \"RCPT TO:\":\n validity = CommonFunctions.mailValidationSMTP(argument)\n if validity == \"OK\":\n self.rcptBuffer.append(argument[1:-1])\n self.code250(\" OK\", module)\n else:\n self.code553(validity, module)\n elif command == \"DATA\":\n if len(self.rcptBuffer) == 0:\n self.code503(\"\", module)\n else:\n self.subStateMail = \"data\"\n self.code354(module)\n else:\n self.code500(module)\n\n\n def commandsAnytimeRouter(self, data, module): # Routes commands that can be used at any time.\n command = CommonFunctions.commandOnly(data)\n argument = CommonFunctions.argumentOnly(data)\n if command == \"VRFY\":\n self.commandVRFY(argument, module)\n elif command == \"EXPN\":\n self.commandEXPN(argument, module)\n elif command == \"HELP\":\n self.commandHELP(argument, module)\n elif command == \"NOOP\":\n self.commandNOOP(module)\n elif command == \"QUIT\":\n self.commandQUIT(module)\n else:\n print(\"Wrong input\") # This would probably never occur due to the way the function is used.\n\n def commandREGISTER(self, argument, module):\n userName = CommonFunctions.firstWord(argument)\n if Storage.accountExists(self.accountUserRegistry, userName):\n self.code554(\", account already exists.\", module)\n else:\n userPass = CommonFunctions.secondWord(argument)\n hashedPassword, salt = SecurityServer.hashPW(userPass)\n tuser = Storage.accountUser(userName, hashedPassword, salt, [\"\",\"\"])\n Storage.accountAdd(self.accountUserRegistry, tuser)\n 
Storage.accountsSave(self.accountUserRegistry, \"User\")\n self.code250(\" Account Registered Successfully, log in.\", module)\n\n\n def commandLOGIN(self, argument, module):\n userName = CommonFunctions.firstWord(argument)\n userPass = CommonFunctions.secondWord(argument)\n if Storage.accountValidateLogin(self.accountUserRegistry, userName, userPass):\n self.currentUser = Storage.accountGet(self.accountUserRegistry, userName)\n self.state = \"greetings\"\n Auditing.logLoginAttempt(userName, True)\n self.code250(\" Logged in successfully\", module)\n else:\n Auditing.logLoginAttempt(userName, False)\n self.code554(\", username password pair doesn't exist, try again.\", module)\n\n\n\n def commandLOGOUT(self, module):\n self.state = \"login\"\n self.currentUser = []\n self.clientDomain = \"-\"\n self.code250(\" Logged out successfully\", module)\n\n def commandMAIL(self, dataDec, module):\n argument = CommonFunctions.argumentOnly(dataDec)\n if argument == \"-\":\n self.code501(\" You need to specify the sender address.\", module)\n else:\n validity = CommonFunctions.mailValidationSMTP(argument)\n if validity == \"OK\":\n self.state = \"mail\"\n self.subStateMail = \"init\"\n self.stateMail(dataDec, module)\n else:\n self.code501(validity, module)\n\n def commandHELO(self, argument, module):\n self.clientDomain = argument\n message = \" \" + serverDomain\n self.code250(message, module)\n\n def commandEHLO(self, argument, module):\n self.clientDomain = argument\n message = \"-\" + serverDomain + \" Hello \" + self.clientDomain\n self.code250(message, module)\n # advertise every supported extension as one line of the multiline 250 reply\n for extension in (\"LOGOUT\", \"EXPN\", \"VRFY\", \"REGMAIL\", \"ADDMAIL\", \"RMVMAIL\", \"MYMAILS\", \"LISTMAIL\", \"VIEWMAIL\", \"DELMAIL\"):\n self.code250(\"-\" + extension, module)\n self.commandRSET(module)\n\n def commandRSET(self, module):\n if self.state != \"greetings\":\n self.state = \"default\"\n self.subStateMail = \"init\"\n self.mailFromBuffer = \"\"\n self.rcptBuffer = []\n self.dataBuffer = \"\"\n self.code250(\" OK\", module)\n\n\n\n def commandVRFY(self, argument, module):\n if argument != \"-\":\n data = Storage.commandVRFY(self.accountEmailRegistry, argument)\n CommonFunctions.sendData(data, module, self.securityServer)\n else:\n self.code501(\"\", module)\n\n\n def commandEXPN(self, argument, module):\n if self.clientDomain == \"-\":\n self.code550(\" No access\", module)\n else:\n mailListAcc = Storage.accountGet(self.emailListRegistry, argument)\n if Storage.accountExists(self.emailListRegistry, argument) and len(mailListAcc.mailset) >= 1:\n for i in range(len(mailListAcc.mailset) - 1):\n self.code250(\"-\" + mailListAcc.mailset[i], module)\n self.code250(\" \" + mailListAcc.mailset[-1], module)\n else:\n self.code550(\", not found\", module)\n\n\n def commandHELP(self, argument, module):\n command = argument.upper()\n if command == \"-\":\n self.code211(module)\n else:\n if command in commandsImplemented:\n self.code214(command, module)\n else:\n self.code504(module)\n\n\n def commandNOOP(self, module):\n self.code250(\" OK\", module)\n\n\n\n def commandQUIT(self, module):\n self.code221(module)\n module.close()\n\n def commandREGMAIL(self, 
argument, module):\n address = CommonFunctions.firstWord(argument)\n addressPass = CommonFunctions.secondWord(argument)\n mailValid = CommonFunctions.mailValidation(address)\n if mailValid == \"OK\":\n if CommonFunctions.userpassValidate(addressPass):\n hashedPassword, salt = SecurityServer.hashPW(addressPass)\n tmail = Storage.accountEmail(address, hashedPassword, salt)\n Storage.accountAdd(self.accountEmailRegistry, tmail)\n Storage.accountsSave(self.accountEmailRegistry, \"Email\")\n self.code250(\" Email Account Registered Successfully. Don't forget to add it to your User Account\", module)\n else:\n self.code503(\" Password must be at least 6 characters long and can contain numbers, letters and the following symbols !@#$%^&*()-=_+,.?\", module)\n else:\n self.code501(mailValid, module)\n\n def commandADDMAIL(self, argument, module):\n address = CommonFunctions.firstWord(argument)\n addressPass = CommonFunctions.secondWord(argument)\n mailValid = CommonFunctions.mailValidation(address)\n if mailValid == \"OK\":\n if CommonFunctions.userpassValidate(addressPass):\n if Storage.accountValidateLogin(self.accountEmailRegistry, address, addressPass):\n Storage.accountUserEmailAdd(self.accountUserRegistry, self.currentUser.getIdentifier(), address)\n Storage.accountsSave(self.accountUserRegistry, \"User\")\n self.code250(\" Email added successfully\", module)\n else:\n self.code550(\" Email Password pair doesn't match anything.\", module)\n else:\n self.code503(\" Password must be at least 6 characters long and can contain numbers, letters and the following symbols !@#$%^&*()-=_+,.?\", module)\n else:\n self.code501(\", \" + mailValid, module)\n\n def commandRMVMAIL(self, argument, module):\n address = argument\n mailValid = CommonFunctions.mailValidation(address)\n if mailValid == \"OK\":\n if Storage.accountUserEmailExists(self.accountUserRegistry, self.currentUser.getIdentifier(), address):\n Storage.accountUserEmailRemove(self.accountUserRegistry, self.currentUser.getIdentifier(), address)\n Storage.accountsSave(self.accountUserRegistry, \"User\")\n self.code250(\" Email removed successfully\", module)\n else:\n self.code550(\" Email not in current user's emails.\", module)\n else:\n self.code501(mailValid, module)\n\n def commandMYMAILS(self, module):\n mailset = self.currentUser.mailset\n if len(mailset) >= 1:\n for i in range(len(mailset) - 1):\n self.code250(\"-\" + mailset[i], module)\n self.code250(\" \" + mailset[-1], module)\n else:\n self.code554(\", no mails in your mailboxes\", module)\n\n def commandLISTMAIL(self, module):\n maillist = Storage.accountUserListEmail(self.accountUserRegistry, self.currentUser.getIdentifier())\n if len(maillist) >= 1:\n for i in range(len(maillist) - 1):\n self.code250(\"-\" + maillist[i][1], module)\n self.code250(\" \" + maillist[-1][1], module)\n else:\n self.code554(\", no mails in your mailboxes\", module)\n\n def commandVIEWMAIL(self, argument, module):\n try:\n emailid = int(argument)\n except ValueError:\n self.code501(\" emailid can only be an integer\", module)\n else:\n contents = Storage.accountUserGetEmail(self.accountUserRegistry, self.currentUser.getIdentifier(), emailid)\n if contents == \"IDERROR\":\n self.code554(\", mail id doesn't exist\", module)\n else:\n splt = contents.splitlines()\n if len(splt) >= 1:\n for i in range(len(splt) - 1):\n self.code250(\"-\" + splt[i], module)\n self.code250(\" \" + splt[-1], module)\n else:\n self.code554(\", empty mail\", module)\n\n def commandDELMAIL(self, argument, module):\n try:\n emailid 
= int(argument)\n except ValueError:\n self.code501(\" emailid can only be an integer\", module)\n else:\n returnCode = Storage.accountUserDeleteEmail(self.accountUserRegistry, self.currentUser.getIdentifier(), emailid)\n if returnCode == \"IDERROR\":\n self.code554(\", mail id doesn't exist\", module)\n elif returnCode == \"NFERROR\":\n self.code554(\", mail file doesn't exist\", module)\n else:\n self.code250(\" OK\", module)\n\n\n def code211(self, module):\n data = \"For more information on a specific command, type HELP command-name \\n\" \\\n \"HELO Identifies the sender-SMTP to the receiver-SMTP. \\n\" \\\n \"QUIT Specifies that the receiver must send an OK reply, and then close the transmission channel. \\n\" \\\n \"MAIL FROM: Initiates outbound mail sequence.\\n\" \\\n \"RCPT TO: Identifies a recipient in mail sequence.\\n\" \\\n \"DATA Indicates mail data in mail sequence.\\n\" \\\n \"HELP Provides help information for SMTP commands.\\n\" \\\n \"RSET Aborts a mail transaction.\\n\" \\\n \"VRFY Verifies that a username exists.\\n\" \\\n \"NOOP No action other than to send an OK reply to the receiver. \\n\" \\\n \"EXPN Expands a mailing list.\\n\" \\\n \"EHLO Same as HELO but tells the server that the client may want to use the Extended SMTP (ESMTP) protocol instead.\\n\" \\\n \"REGMAIL Registers a new email address to the server. \\n\" \\\n \"ADDMAIL Adds a registered email address to the current user's mail accounts list. \\n\" \\\n \"RMVMAIL Removes an email address from the current user's mail list. \\n\" \\\n \"MYMAILS Displays the current user's mail accounts list. \\n\" \\\n \"LISTMAIL Lists mails accessible to the current user with their corresponding ID. \\n\" \\\n \"VIEWMAIL Sends the client a copy of a mail associated with the ID provided. \\n\" \\\n \"DELMAIL Deletes the mail that is associated with the ID provided.\"\n\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code214(self, argument, module):\n if argument == \"HELO\":\n data = \"The HELO command is the command used by the host sending the command to identify itself; the command may be interpreted as saying \\\"Hello, I am <domain>\\\" \\n\" \\\n \"USAGE: HELO clientdomain\"\n elif argument == \"QUIT\":\n data = \"The QUIT command specifies that the receiver must send an OK reply, and then close the transmission channel. \\n\" \\\n \"USAGE: QUIT\"\n elif argument == \"MAIL FROM:\":\n data = \"The MAIL FROM: command is used to initiate a mail transaction in which the mail data is delivered to one or more mailboxes. The argument field contains a reverse-path. \\n\" \\\n \"USAGE: MAIL FROM: <reverse-path>\"\n elif argument == \"RCPT TO:\":\n data = \"The RCPT TO: command is used to identify an individual recipient of the mail data; multiple recipients are specified by multiple use of this command. \\n\" \\\n \"USAGE: RCPT TO: <forward-path>\"\n elif argument == \"DATA\":\n data = \"The DATA command causes the mail data from this command to be appended to the mail data buffer. The mail data may contain any of the 128 ASCII character codes. The mail data is terminated by a line containing only a period, that is the character sequence \\\"<CRLF>.<CRLF>\\\" \\n\" \\\n \"USAGE: DATA\"\n elif argument == \"HELP\":\n data = \"The HELP command provides help information for SMTP commands, if used with a command returns information on that command. \\n\" \\\n \"USAGE: HELP COMMAND\"\n elif argument == \"RSET\":\n data = \"The RSET command specifies that the current mail transaction is to be aborted. 
Any stored sender, recipients, and mail data must be discarded, and all buffers and state tables cleared. The server will send an OK reply. \\n\" \\\n \"USAGE: RSET\"\n elif argument == \"VRFY\":\n data = \"The VRFY command asks the receiver to confirm that the argument identifies a user. If it is a user name, the full name of the user (if known) and the fully specified mailbox are returned. \\n\" \\\n \"USAGE: VRFY name\"\n elif argument == \"NOOP\":\n data = \"The NOOP command does not affect any parameters or previously entered commands. It specifies no action other than that the receiver send an OK reply. \\n\" \\\n \"USAGE: NOOP\"\n elif argument == \"EXPN\":\n data = \"The EXPN command asks the receiver to confirm that the argument identifies a mailing list, and if so, to return the membership of that list. The full name of the users (if known) and the fully specified mailboxes are returned in a multiline reply. \\n\" \\\n \"USAGE: EXPN nameoflist\"\n elif argument == \"EHLO\":\n data = \"Same as HELO but tells the server that the client may want to use the Extended SMTP (ESMTP) protocol instead.\\n\" \\\n \"USAGE: EHLO clientdomain\"\n elif argument == \"REGMAIL\":\n data = \"Registers a new email address to the server. \\n \" \\\n \"USAGE: REGMAIL email@domain.com password\"\n elif argument == \"ADDMAIL\":\n data = \"Adds a registered email address to the current user's mail accounts list. \\n \" \\\n \"USAGE: ADDMAIL email@domain.com password\"\n elif argument == \"RMVMAIL\":\n data = \"Removes an email address from the current user's mail list. \\n \" \\\n \"USAGE: RMVMAIL email@domain.com\"\n elif argument == \"MYMAILS\":\n data = \"Displays the current user's mail accounts list. \\n \" \\\n \"USAGE: MYMAILS\"\n elif argument == \"LISTMAIL\":\n data = \"Lists mails accessible to the current user with their corresponding ID. \\n \" \\\n \"USAGE: LISTMAIL\"\n elif argument == \"VIEWMAIL\":\n data = \"Sends the client a copy of a mail associated with the ID provided. \\n \" \\\n \"USAGE: VIEWMAIL 2\"\n elif argument == \"DELMAIL\":\n data = \"Deletes the mail that is associated with the ID provided. 
\\n \" \\\n \"USAGE: DELMAIL 2\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n # The following functions are used to send the error codes to the client.\n\n\n def code220(self, module):\n data = \"220 \" + serverDomain + \" Simple Mail Transfer Service Ready\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code221(self, module):\n data = \"221 \" + serverDomain + \" Service closing transmission channel\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code250(self, message, module):\n data = \"250\" + message\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code251(self, module, path):\n # path = \n data = \"251 User not local; will forward to \" + path\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code354(self, module):\n data = \"354 Start mail input; end with .\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code421(self, module):\n data = \"421\" + serverDomain + \" Service not available, closing transmission channel\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code450(self, module):\n data = \"450 Requested mail action not taken: mailbox unavailable\"\n # [E.g., mailbox busy]\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code451(self, module):\n data = \"451 Requested action aborted: local error in processing\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code452(self, module):\n data = \"452 Requested action not taken: insufficient system storage\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code500(self, module):\n data = \"500 Syntax error, command unrecognized\"\n # [This may include errors such as command line too long]\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code501(self, message, module):\n data = \"501 Syntax error in parameters or arguments\" + message\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code502(self, module):\n data = \"502 Command not implemented\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code503(self, message, module):\n data = \"503 Bad sequence of commands\" + message\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code504(self, module):\n data = \"504 Command parameter not implemented\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code550(self, message, module):\n data = \"550 Requested action not taken: mailbox unavailable\" + message\n # [E.g., mailbox not found, no access]\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code551(self, module, path):\n # path = \n data = \"551 User not local; please try \" + path\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code552(self, module):\n data = \"552 Requested mail action aborted: exceeded storage allocation\"\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code553(self, message, module):\n data = \"553 Requested action not taken: mailbox name not allowed\" + message\n # [E.g., mailbox syntax incorrect]\n CommonFunctions.sendData(data, module, self.securityServer)\n\n\n def code554(self, message, module):\n data = \"554 Transaction failed\" + message\n CommonFunctions.sendData(data, module, 
self.securityServer)\n","repo_name":"MrThanasiz/NWS_AS","sub_path":"ServerFolder/ResponseProcessor.py","file_name":"ResponseProcessor.py","file_ext":"py","file_size_in_byte":27615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2620922422","text":"import numpy\n\nX = 0\nY = 1\n\ndef formulate(cnfs, N):\n qubo = numpy.zeros(shape=(N,N))\n const = 0\n for cnf in cnfs:\n i, j = abs(cnf[X])-1, abs(cnf[Y])-1\n if cnf[X] > 0 and cnf[Y] > 0:\n const = const + 1\n qubo[i,i] = qubo[i,i] -1\n qubo[j,j] = qubo[j,j] -1\n qubo[i,j] = qubo[i,j] + 1/2\n qubo[j,i] = qubo[j,i] + 1/2\n elif cnf[X] > 0 and cnf[Y] < 0:\n qubo[j,j] = qubo[j,j] + 1\n qubo[i,j] = qubo[i,j] + 1/2\n qubo[j,i] = qubo[j,i] + 1/2\n elif cnf[X] < 0 and cnf[Y] > 0:\n qubo[i,i] = qubo[i,i] + 1\n qubo[i,j] = qubo[i,j] - 1/2\n qubo[j,i] = qubo[j,i] - 1/2\n elif cnf[X] < 0 and cnf[Y] < 0:\n qubo[i,j] = qubo[i,j] + 1/2\n qubo[j,i] = qubo[j,i] + 1/2\n return qubo\n","repo_name":"pangenproject/formula_convert","sub_path":"formulate.py","file_name":"formulate.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14076384262","text":"import random\nimport string\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom threading import Thread\nfrom threading import Lock\n\n# Function to generate a random alphanumeric string\ndef get_random_string(length):\n letters_and_digits = string.ascii_letters + string.digits\n result_str = ''.join((random.choice(letters_and_digits) for i in range(length)))\n return result_str\n\n# Function to save URL to a text file\ndef save_url(url):\n with open('hit.txt', 'a') as f:\n f.write(url + '\\n')\n\n# Global counter for log entries\nlog_counter = 0\n\n# Global set for unique URLs and a lock for thread-safe operations\nunique_urls = set()\nlock = Lock()\n\n# Function to log bad URLs\ndef log_bad_url(url):\n global log_counter\n log_counter += 1\n print(f'\\033[91m {log_counter}: {url}') # Red color for bad URLs\n\n# Function to log good URLs\ndef log_good_url(url):\n global log_counter\n log_counter += 1\n print(f'\\033[92m {log_counter}: {url}') # Green color for good URLs\n\n# Function to open URL in browser and check if it's good\ndef open_url():\n while True:\n random_string = get_random_string(17)\n url = f\"https://altsworld.atshop.io/order/{random_string}/completed\"\n with lock:\n if url in unique_urls:\n continue\n unique_urls.add(url)\n driver = webdriver.Firefox(options=options)\n driver.get(url)\n time.sleep(5) # Wait for 5 seconds\n current_url = driver.current_url\n if current_url == 'https://altsworld.atshop.io/':\n log_bad_url(current_url)\n else:\n save_url(url)\n log_good_url(url)\n time.sleep(2) # Wait for 2 second\n driver.get(url)\n time.sleep(2.5) # Wait for 2 second\n current_url = driver.current_url\n if current_url == 'https://altsworld.atshop.io/':\n log_bad_url(current_url)\n else:\n save_url(url)\n log_good_url(url)\n driver.quit()\n\n# Set Firefox options for headless mode\noptions = Options()\noptions.headless = True\n\n# Create and start 20 threads\nfor _ in range(20):\n thread = Thread(target=open_url)\n thread.start()\n","repo_name":"redhat-ware/altsworld-linkgen","sub_path":"main_python3.py","file_name":"main_python3.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"72795693012","text":"#!/usr/bin/python2\n\nimport random\nimport re\nimport pickle\nimport string\n\nclass Markov:\n\tdef __init__(self,num=2):\n\t\tself.depth = num\n\t\tself.Map = {}\n\t\tself.ending = \"@@END@@\"\n\t\tself.start = \"\"\n\t\tself.weights = [1,.8]\n\t\n\tdef addFile(self,file):\n\t\twith open(file,'r') as f:\n\t\t\ttext = self.cleanString(f.read())\n\t\tfor line in re.split(\"\\.|\\r\\n|\\n|\\?|!\",text):\n\t\t\tif( len(line) < 1):\n\t\t\t\tcontinue\n\t\t\tlast = self.start\n\t\t\tfor word in line.split(\" \"):\n\t\t\t\tself[last] = word\n\t\t\t\tlast = word\n\t\t\tself[last] = self.ending\n\n\tdef __setitem__(self, marker, value):\n\t\t# This is the same as self[marker] = value\n\t\ttry:\n\t\t\tself.Map[marker]\n\t\texcept KeyError:\n\t\t\tself.Map[marker] = {}\n\t\ttry:\n\t\t\tself.Map[marker][value] +=1\n\t\texcept KeyError:\n\t\t\tself.Map[marker][value] = 1\n\n\tdef __str__(self):\n\t\ts = \"{\\n\"\n\t\tfor key in self.Map:\n\t\t\ts += \"'{}':{}\\n\".format(key, str(self.Map[key]))\n\t\ts += \"}\\n\"\n\t\treturn s\n\n\tdef GetNextBest(self, num, key,part = \"\"):\n\t\tkey = self.cleanString(key)\n\t\tpart = self.cleanString(part)\n\t\tbest = self._bestHelper(key)\n\t\ttmp = sorted(best, key = lambda x:-x[1])\n\t\ttmp = filter(lambda x: x[0].startswith(part),tmp)\n\t\tlistOfOnes = list(map(lambda x:x[0][0],zip(tmp,range(num))))\n\t\treturn listOfOnes\n\tdef cleanString(self,s):\n\t\ttable = string.maketrans(\"\",\"\")\n\t\ts = str(s)\n\t\ts = s.lower()\n\t\ts = s.translate(table, \"#$%&\\'()*+,-/:;<=>@[\\\\]^_`{|}~\\\"\")\n\t\ts = s.strip()\n\t\treturn s\n\tdef _bestHelper(self,key):\n\t\tl0 = lambda x: sum([v for _,v in self.Map.get(x,{0:0}).items()])\n\t\tl1 = lambda x: self.Map.get(key,{}).get(x,0)\n\t\tmyElems = (set(self.Map) | set(self.Map.get(key,{})))\n\t\tmyElems.remove(self.start)\n\t\tmyElems.discard(self.ending)\n\t\tscaler = 4.0/len(myElems)\n\t\tbest = []\n\t\tfor k in myElems:\n\t\t\tt0 = l0(k)\n\t\t\tt1 = l1(k)\n\t\t\tbest.append((k,t0*scaler+t1))\n\t\t#best = [(k, l1(k) + *scaler) for k in myElems]\n\t\treturn best\n\nif __name__==\"__main__\":\n\tmark = Markov()\n\tmark.addFile(\"George.txt\")\n\tprint(mark.GetNextBest(4,\"\",\"\"))\n","repo_name":"Hovestar/EyeInput","sub_path":"src/Markov.py","file_name":"Markov.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"29378925389","text":"from threading import *\nimport time\ndef display():\n print(current_thread().name,'started')\n time.sleep(3)\n print(current_thread().name,'end')\nt1=Thread(target=display,name='child thread 1')\nt2=Thread(target=display,name='child thread 2')\nt3=Thread(target=display,name='child thread 3')\nt1.start()\nt1.join()\nt2.start()\nt2.join()\nt3.start()\nl=enumerate()\nfor i in l:\n print('name',i.name())\ntime.sleep(10)\nprint(\"after 10 second of sleep\")\nprint(t1.name,'is alive',t1.is_alive())\nprint(t2.name,'is alive',t2.is_alive())\nprint(t3.name,'is alive',t3.is_alive())\ntime.sleep(10)\nprint(t1.name,'is alive',t1.is_alive())\nprint(t2.name,'is alive',t2.is_alive())\nprint(t3.name,'is alive',t3.is_alive())","repo_name":"skyside5911/automation_scripts","sub_path":"practice of python august2022/thread/thread_prac6.py","file_name":"thread_prac6.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2634571476","text":"from __future__ import 
absolute_import\nfrom __future__ import print_function\nimport sys, re, gzip, os, urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse\nimport stat\nimport pymol, json, math, multiprocessing\nimport subprocess\nfrom pymol.cgo import *\nfrom pymol import cmd\nimport time\nfrom glob import glob\nfrom ftplib import FTP\nfrom itertools import groupby\nfrom pathlib import Path\nimport logging\n\nfrom pymol.Qt import QtWidgets, QtGui, QtCore\nfrom pymol.Qt.utils import loadUi\n\nlogging.basicConfig(filename=\"probis_py.log\",\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n\n# ADDITIONAL MODULES----------------------------------------------------------------------=\ntry:\n import numpy as np\n print(\"Numpy module is installed\")\nexcept:\n from pip._internal import main as pip\n print(\"Numpy module has not been installed yet. \\nProgram will now install Numpy module\")\n time.sleep(1)\n pip(['install', 'numpy'])\n import numpy as np\n\ntry:\n from sklearn.cluster import DBSCAN\n print(\"Sklearn module is installed\")\nexcept:\n from pip._internal import main as pip\n print(\"Sklearn module has not been installed yet. \\nProgram will now install Scikit-learn module\")\n time.sleep(1)\n pip(['install', 'scikit-learn'])\n from sklearn.cluster import DBSCAN\n\n# ---------------------------------------INITIALIZE-----------------------------\n\ninstall_dir=os.path.join(Path.home(), \".probish2o_installdir.txt\")\n\ninstall_datoteka = open(install_dir, \"r\")\nH2O_DIRECTORY=install_datoteka.readline()\ninstall_datoteka.close()\nMODULE_DIRECTORY=os.path.join(H2O_DIRECTORY, \"module\")\nUI_DIRECTORY=os.path.join(H2O_DIRECTORY,\"UI\")\nversionFile = os.path.join(H2O_DIRECTORY, \"version.txt\")\nSETTINGS_DIRECTORY=os.path.join(H2O_DIRECTORY, \"settings\")\n# ------------------------------------------------------------------------------\ndef get_current_version():\n global current_version\n with open(versionFile) as version:\n current_version = version.readline().strip()\n \ndef main():\n global default_dl_path, default_dl_site, dl_online_sett, dl_machine_sett, url, check_path, probis_dir, probis_exe_dir\n print(\"Settings file for instalation directory location: \", install_dir)\n default_dl_path = os.path.join(H2O_DIRECTORY, \"Probis_H2O\")\n default_dl_site = \"https://cdn.rcsb.org/resources/sequence/clusters\"\n dl_online_sett=os.path.join(SETTINGS_DIRECTORY, \"DB_Download_From.txt\")\n dl_machine_sett=os.path.join(SETTINGS_DIRECTORY, \"DB_Download_To.txt\")\n dl=open(dl_online_sett, \"r\") \n url= dl.read()\n dl.close()\n if url==\"\":\n dl=open(dl_online_sett, \"w\") \n dl.write(default_dl_site)\n dl.close()\n dl=open(dl_online_sett, \"r\") \n url= dl.read()\n dl.close()\n machine = open (dl_machine_sett, \"r\")\n check_path = machine.read()\n machine.close()\n if check_path==\"\":\n machine = open (dl_machine_sett, \"w\")\n machine.write(default_dl_path)\n machine.close()\n machine = open (dl_machine_sett, \"r\")\n check_path = machine.read()\n machine.close()\n probis_dir = os.path.join(check_path, \"probis\")\n probis_exe_dir = os.path.join(check_path, \"probis.exe\")\n get_current_version()\n settings.get_system()\n GUI().run()\n\n# global reference to avoid garbage collection of our dialog\ndialog = None\n\nred01 = QtGui.QColor('#ffe6e6')\nred02 = QtGui.QColor('#ffcccc')\nred03 = QtGui.QColor('#ffb3b3')\nred04 = QtGui.QColor('#ff9999')\nred05 = QtGui.QColor('#ff8080')\nred06 = 
QtGui.QColor('#ff6666')\nred07 = QtGui.QColor('#ff4d4d')\nred08 = QtGui.QColor('#ff3333')\nred09 = QtGui.QColor('#ff1a1a')\nred10 = QtGui.QColor('#ff0000')\n\nclass GUI:\n def run(self):\n '''\n Open our custom dialog\n '''\n global dialog\n \n #if main is None:\n dialog = self.make_dialog()\n \n dialog.show()\n \n def make_dialog(self): \n # create a new Window\n global dialog\n dialog = QtWidgets.QMainWindow()\n \n # populate the Window from our *.ui file which was created with the Qt Designer\n uifile = os.path.join(UI_DIRECTORY, 'ProbisH2OMain.ui')\n self.form = loadUi(uifile, dialog)\n \n def dl_changed():\n line_input=dialog.LineDlFrom.text()\n if line_input == url:\n dialog.PushSetDL.setEnabled(False)\n else:\n dialog.PushSetDL.setEnabled(True)\n def machine_changed():\n line_input=dialog.LineDlTo.text()\n if line_input== check_path:\n dialog.PushSetMachine.setEnabled(False)\n else:\n dialog.PushSetMachine.setEnabled(True)\n def CheckCompare_changed():\n checked = dialog.CheckCompare.isChecked()\n if checked == True:\n dialog.CheckWater.setChecked(False)\n dialog.CheckAnalyze.setChecked(False)\n dialog.CheckWater.setEnabled(False)\n dialog.CheckAnalyze.setEnabled(False)\n else:\n dialog.CheckWater.setEnabled(True)\n dialog.CheckAnalyze.setEnabled(True)\n dialog.CheckAnalyze.setChecked(True)\n def protein_change():\n global custom\n if len(dialog.LineProtein.text()) >= 5:\n custom = True\n else:\n custom = False\n\n # hook up button callbacks\n self.form.PushCustom.clicked.connect(custom_disk_file_get.load_file)\n self.form.LineFind.setEnabled(False)\n self.form.PushFind.clicked.connect(ClusterComplexManipulation.get_cluster_complexes)\n self.form.PushDownlad.clicked.connect(ClusterComplexManipulation.download_complexes)\n self.form.PushDownlad.setEnabled(False)\n self.form.PushIdentify.clicked.connect(BindingSites.get_binding_sites)\n self.form.PushGo.clicked.connect(h20Analysis.analyze_waters)\n self.form.PushSetup.clicked.connect(RSCB_contact.contact_rscb_pdb)\n self.form.PushDisplay.clicked.connect(pyMOLinterface.pyMOL_display_cluster)\n self.form.PushBSite.clicked.connect(pyMOLinterface.pyMOL_bsite_cluster)\n self.form.PushContacts.clicked.connect(pyMOLinterface.pyMOL_water_contacts)\n self.form.PushChain.clicked.connect(pyMOLinterface.pyMOL_chain_box)\n self.form.PushFetch.clicked.connect(pyMOLinterface.pyMOL_fetch_system)\n self.form.LineDlTo.textChanged.connect(machine_changed)\n self.form.LineDlFrom.textChanged.connect(dl_changed)\n self.form.LineDlFrom.setText(url)\n self.form.LineDlTo.setText(check_path)\n self.form.PushSetDL.clicked.connect(settings.set_download_from)\n self.form.PushSetMachine.clicked.connect(settings.set_download_to)\n self.form.PushCurrentDL.clicked.connect(settings.current_download_from)\n self.form.PushCurrentMachine.clicked.connect(settings.current_download_to)\n self.form.PushDefault.clicked.connect(settings.default)\n self.form.CheckCompare.stateChanged.connect(CheckCompare_changed)\n self.form.LineProtein.textChanged.connect(protein_change)\n \n self.form.LabelCurrent.setText(current_version)\n \n if os.path.isdir(check_path) == False:\n dialog.CheckAligned.setEnabled(False)\n dialog.CheckAnalyze.setEnabled(False)\n dialog.CheckCompare.setEnabled(False)\n dialog.CheckDebye.setEnabled(False)\n dialog.CheckWater.setEnabled(False)\n dialog.ComboBlastclust.setEnabled(False)\n dialog.LineProtein.setEnabled(False)\n dialog.ListIdentify.setEnabled(False)\n dialog.PushCustom.setEnabled(False)\n dialog.PushFind.setEnabled(False)\n 
dialog.PushGo.setEnabled(False)\n dialog.PushIdentify.setEnabled(False)\n dialog.CheckKeep.setEnabled(False)\n dialog.ListCalculated.setEnabled(False)\n dialog.PushBSite.setEnabled(False)\n dialog.PushChain.setEnabled(False)\n dialog.PushContacts.setEnabled(False)\n dialog.PushDisplay.setEnabled(False)\n dialog.PushFetch.setEnabled(False)\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O IMPORTANT NOTE\", \"On the first start of the plugin, please set up the ProBiS DB and ProBiS_H2O folder. This can be done in the 'Settings' tab by clicking on the 'SETUP DATABASE' button! This needs to be done only once. We wish you an exciting adventure in the world of (conserved) waters!!! ;-)\")\n print(\"Please set up the probis DB and Probis_H2O folder first using 'Setup Database' in the Settings tab!\")\n else:\n os.chdir(check_path)\n RSCB_contact.file_checks()\n return dialog\n \n def open_process_window(self, maximum, text):\n global progress\n progress = QtWidgets.QDialog()\n progFile = os.path.join(UI_DIRECTORY, \"ProbisH2OProgress.ui\")\n self.prog = loadUi(progFile, progress)\n self.prog.label.setText(text)\n self.prog.progressBar.setMaximum(int(maximum))\n progress.show()\n\n# ----------------------------------------------------FUNCTIONS-----------------\nclass settings:\n def set_download_from():\n global url\n set_from = dialog.LineDlFrom.text()\n dl = open(dl_online_sett, \"w\")\n dl.write(set_from)\n dl.close()\n dl = open(dl_online_sett, \"r\")\n url = dl.read()\n dl.close()\n def set_download_to():\n global check_path, probis_dir, probis_exe_dir\n set_to = dialog.LineDlTo.text()\n machine = open(dl_machine_sett, \"w\")\n machine.write(set_to)\n machine.close()\n machine = open(dl_machine_sett, \"r\")\n check_path = machine.read()\n machine.close()\n if os.path.isdir(check_path) == False:\n dialog.CheckAligned.setEnabled(False)\n dialog.CheckAnalyze.setEnabled(False)\n dialog.CheckCompare.setEnabled(False)\n dialog.CheckDebye.setEnabled(False)\n dialog.CheckWater.setEnabled(False)\n dialog.ComboBlastclust.setEnabled(False)\n dialog.LineProtein.setEnabled(False)\n dialog.ListIdentify.setEnabled(False)\n dialog.PushCustom.setEnabled(False)\n dialog.PushFind.setEnabled(False)\n dialog.PushGo.setEnabled(False)\n dialog.PushIdentify.setEnabled(False)\n dialog.CheckKeep.setEnabled(False)\n dialog.ListCalculated.setEnabled(False)\n dialog.PushBSite.setEnabled(False)\n dialog.PushChain.setEnabled(False)\n dialog.PushContacts.setEnabled(False)\n dialog.PushDisplay.setEnabled(False)\n dialog.PushFetch.setEnabled(False)\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Database Warning\", \"You have changed the Database Machine directory!\\nPlease set up the probis DB and Probis_H2O folder first using 'Setup Database' in the Settings tab!\")\n print(\"You have changed the Database Machine directory!\\nPlease set up the probis DB and Probis_H2O folder first using 'Setup Database' in the Settings tab!\")\n else:\n os.chdir(check_path)\n RSCB_contact.file_checks()\n probis_dir = os.path.join(check_path, \"probis\")\n probis_exe_dir = os.path.join(check_path, \"probis.exe\")\n def current_download_from():\n dialog.LineDlFrom.setText(url)\n def current_download_to():\n dialog.LineDlTo.setText(check_path)\n def default():\n dialog.LineDlFrom.setText(default_dl_site)\n dialog.LineDlTo.setText(default_dl_path)\n def get_system():\n global platform\n platform = sys.platform\n \n\nclass custom_disk_file_get:\n @staticmethod\n def load_file():\n global custom, filename\n filename, _filter = QtWidgets.QFileDialog.getOpenFileName(dialog, 
\"Open...\" + \"PDB File\", \".\", \"PDB File (*.pdb)\")\n if filename:\n try:\n print(\"File read.\")\n print((str(filename)))\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"File error or \\nFile not Found! \\nPlease investigate!\")\n custom=True\n dialog.LineProtein.setText(filename)\n\n\nclass RSCB_contact:\n \"\"\"PDB database server setup\"\"\"\n\n lista_cluster_fajlov = [\"clusters-by-entity-30.txt\", \"clusters-by-entity-40.txt\", \"clusters-by-entity-50.txt\", \"clusters-by-entity-70.txt\",\n \"clusters-by-entity-90.txt\", \"clusters-by-entity-95.txt\", \"clusters-by-entity-100.txt\"]\n \n @staticmethod\n def contact_rscb_pdb():\n text = \"Downloading cluster and ProBiS files. This might take up to 5 minutes. Time to stretch.\"\n GUI().open_process_window(8, text)\n value = 0\n progress.progressBar.setValue(value)\n current_path = str(os.getcwd())\n if current_path == check_path:\n pass\n else:\n try:\n os.mkdir(check_path)\n except:\n pass\n os.chdir(check_path)\n\n\n \"\"\"download fajlov\"\"\"\n print(\"Setting up the cluster database!\")\n for cluster_fajl in RSCB_contact.lista_cluster_fajlov:\n url_fajl = url+\"/\"+cluster_fajl\n print(\"Downloading file \" + url_fajl)\n urllib.request.urlretrieve(url_fajl, cluster_fajl)\n value += 1\n progress.progressBar.setValue(value)\n\n print((\"FIXED Database setup finished\" + \"\\t thank you too RCSB protein data bank\"))\n # if platform == \"linux\":\n if True:\n print(\"Platform: LINUX!!!!!!! (mac os)\")\n if os.path.isfile(probis_dir) == False:\n RSCB_contact.fetch_probis()\n value += 1\n progress.progressBar.setValue(value)\n \n elif platform == \"win32\":\n print(\"Platform: WINDOWS\")\n if os.path.isfile(probis_exe_dir) == False:\n RSCB_contact.fetch_probis_exe()\n value += 1\n progress.progressBar.setValue(value)\n \n else:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"System error:\\nOperating on a non-compatible operating system!\")\n \n dialog.CheckAligned.setEnabled(True)\n dialog.CheckAnalyze.setEnabled(True)\n dialog.CheckCompare.setEnabled(True)\n dialog.CheckDebye.setEnabled(True)\n dialog.CheckWater.setEnabled(True)\n dialog.ComboBlastclust.setEnabled(True)\n dialog.LineProtein.setEnabled(True)\n dialog.ListIdentify.setEnabled(True)\n dialog.PushCustom.setEnabled(True)\n dialog.PushFind.setEnabled(True)\n dialog.PushGo.setEnabled(True)\n dialog.PushIdentify.setEnabled(True)\n dialog.CheckKeep.setEnabled(True)\n dialog.ListCalculated.setEnabled(True)\n dialog.PushBSite.setEnabled(True)\n dialog.PushChain.setEnabled(True)\n dialog.PushContacts.setEnabled(True)\n dialog.PushDisplay.setEnabled(True)\n dialog.PushFetch.setEnabled(True)\n RSCB_contact.file_checks()\n progress.close()\n return True\n\n @staticmethod\n def fetch_probis():\n urllib.request.urlretrieve(\"http://insilab.org/files/probis-algorithm/probis\", \"probis\")\n def fetch_probis_exe():\n urllib.request.urlretrieve(\"https://gitlab.com/JuricV/skupni-gui-lisica_probis/-/raw/testfiles/probis_exe/probis.exe\", \"probis.exe\")\n urllib.request.urlretrieve(\"https://gitlab.com/JuricV/skupni-gui-lisica_probis/-/raw/testfiles/probis_exe/libgsl.dll\", \"libgsl.dll\")\n urllib.request.urlretrieve(\"https://gitlab.com/JuricV/skupni-gui-lisica_probis/-/raw/testfiles/probis_exe/libgslcblas.dll\", \"libgslcblas.dll\")\n\n @staticmethod\n def file_checks():\n \"\"\"preveri ce imamo instalirano bazo\"\"\"\n print(\"\\nProBis_H2O: checking database!\")\n check = \"\"\n for fajl in RSCB_contact.lista_cluster_fajlov:\n if 
not os.path.isfile(fajl):\n print(\"ProBiS_H2O: please set up the DB of RCSB cluster files\")\n break\n else:\n check += \"a\"\n if len(check) == len(RSCB_contact.lista_cluster_fajlov): # all cluster files are present\n print(\"ProBis_H2O: Database OK\")\n # if platform == \"linux\":\n if True:\n if os.path.isfile(probis_dir) == False:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Please ensure probis is downloaded correctly using setup DB button in Settings tab on plugin GUI\")\n print(\"ProBiS_H2O: please ensure probis is downloaded correctly using setup DB button in Settings tab on plugin GUI\")\n else:\n try:\n if os.access(probis_dir, os.X_OK) == True:\n print(\"Probis executable ok\")\n else:\n st = os.stat(probis_dir)\n os.chmod(probis_dir, st.st_mode | stat.S_IEXEC)\n print(\"Probis executable permission set\")\n print(\"Probis directory: \", probis_dir)\n except:\n pass\n elif platform == \"win32\":\n if os.path.isfile(probis_exe_dir) == False:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Please ensure probis.exe is downloaded correctly using setup DB button in Settings tab on plugin GUI\")\n print(\"ProBiS_H2O: please ensure probis.exe is downloaded correctly using setup DB button in Settings tab on plugin GUI\")\n\n# ------------------------------------------------------------------------------\n\nclass ClusterComplexManipulation:\n \"\"\"download and identification of cluster complexes from RCSB\"\"\"\n\n @staticmethod\n def get_cluster_unique_list(target_complex, ime_selekcije, sekvenca_id):\n # target_complex: which PDB to look for\n # ime_selekcije: \"clusters\" or \"clusters-by-entity-\"\n # sekvenca_id: the selected sequence identity, e.g. 50, 70, 90\n vzorec = re.compile(target_complex, re.IGNORECASE)\n # lines that contain the examined protein\n line_list = []\n # lines that are part of the cluster\n line_list_2 = []\n # protein names only - not unique - there can be multiple chains ....\n cluster_list = []\n examined_cluster_nums = []\n\n # try:\n global cluster_list_unique\n print(\"ime_selekcije: \" + ime_selekcije + \" \" + \"sekvenca_id: \" + sekvenca_id + \" \" + \"target_complex: \" + target_complex)\n if ime_selekcije == \"clusters\":\n\n with open(ime_selekcije + sekvenca_id + \".txt\", \"rt\") as infile:\n for linenumber, line in enumerate(infile):\n if vzorec.search(line) != None:\n line_list.append(line.rstrip('\\n'))\n examined_cluster_nums.append(linenumber+1)\n print(\"using cd-hit preclustering\")\n print(\"\"\"\nCd-hit: a fast program for clustering and comparing large sets of\nprotein or nucleotide sequences, Weizhong Li & Adam Godzik (2006)\nBioinformatics, 22:1658-9.\n \"\"\")\n print((\"Found entries in sequence file: \", line_list))\n # examined_cluster = line_list[0].split()[0]\n if len(examined_cluster_nums) >= 2:\n print(\"We suggest you temporarily remove the examined protein from some clusters, so it is analyzed in a single cluster\")\n\n for line in line_list:\n line_list_2 = line.split()\n for element in line_list_2:\n print(\"__ element \" + element)\n cluster_list.append(element[0:4])\n # make unique list set\n cluster_list_unique = set(cluster_list)\n print(\"\\nComplexes: \")\n print(cluster_list_unique)\n\n dialog.LineFind.setText(str(len(cluster_list_unique)) + \" compl. 
in cluster no.:\"\n + str(examined_cluster_nums))\n\n print((\"Found num of entries in cluster: \", len(cluster_list_unique)))\n dialog.PushDownlad.setEnabled(True)\n dialog.ListIdentify.clear()\n return None\n\n elif ime_selekcije == \"clusters-by-entity-\":\n with open(ime_selekcije + sekvenca_id + \".txt\", \"rt\") as infile:\n for linenumber, line in enumerate(infile):\n if vzorec.search(line) != None:\n line_list.append(line.rstrip('\\n'))\n\n\n line_list_2 = line_list[0].split()\n for element in line_list_2:\n cluster_list.append(element[0:4])\n\n cluster_list_unique = set(cluster_list)\n print(\"\\nComplexes: \")\n print(cluster_list_unique)\n\n dialog.LineFind.setText(str(len(cluster_list_unique)) + \" compl. in blastclust cluster.\")\n print(\"using blastclust pre-clustering\")\n print(\"\"\"\nBasic local alignment search tool, S.F. Altschul, W. Gish, W. Miller,\nE.W. Myers, & D.J. Lipman (1990) J. Mol. Biol. 215:403-410.\n\"\"\")\n print((\"Found num of entries in cluster: \", len(cluster_list_unique)))\n dialog.PushDownlad.setEnabled(True)\n dialog.ListIdentify.clear()\n return None\n\n # except:\n # QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Invalid PDB ID or \\nDatabase File not Found! \\nPlease investigate!\")\n\n @staticmethod\n def get_cluster_complexes():\n target_complex = dialog.LineProtein.text()\n if custom == True:\n if target_complex.endswith(\".pdb\") == False:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Please choose a valid .pdb file!\")\n else:\n # replaced by water definition in first stage\n target_complex = os.path.split(target_complex)[1].split(\".\")[0]\n if dialog.ComboBlastclust.currentText() == \"Custom Cluster\":\n selected_sequence = \"_custom\"\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters\", selected_sequence)\n\n elif dialog.ComboBlastclust.currentText() == \"Blastclust: 100\":\n selected_sequence = \"100\"\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n\n elif \"Blastclust\" in dialog.ComboBlastclust.currentText():\n selected_sequence = dialog.ComboBlastclust.currentText()[12:14]\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n\n else:\n selected_sequence = dialog.ComboBlastclust.currentText()[12:14]\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n else:\n if len(target_complex) < 4:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Invalid PDB ID!\\n PDB IDs are 4 symbols long\")\n\n elif len(target_complex) > 4:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Invalid PDB ID! 
\\nFor custom protein analysis please input full path.\")\n else:\n\n # replaced by water definition in first stage\n if dialog.ComboBlastclust.currentText() == \"Custom Cluster\":\n selected_sequence = \"_custom\"\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters\", selected_sequence)\n\n elif dialog.ComboBlastclust.currentText() == \"Blastclust: 100\":\n selected_sequence = \"100\"\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n\n elif \"Blastclust\" in dialog.ComboBlastclust.currentText():\n selected_sequence = dialog.ComboBlastclust.currentText()[12:14]\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n\n else:\n selected_sequence = dialog.ComboBlastclust.currentText()[12:14]\n ClusterComplexManipulation.get_cluster_unique_list(target_complex, \"clusters-by-entity-\", selected_sequence)\n\n\n @staticmethod\n def download_complexes():\n\n # for PDBs that are not accessible but still have their own entry\n nedostopni_datoteka = open(\"unavailable_pdb_list.txt\", \"w\")\n\n try:\n text = \"Downloading related complexes. This might take a while depending on their number. Time to stretch.\"\n GUI().open_process_window(len(cluster_list_unique), text)\n def get_files_2(fajl):\n # make this nicer later !!! used to download the individual ent files\n lokalni_fajl = open(fajl, 'wb')\n ftp_wwpdb_server.retrbinary('RETR ' + fajl, lokalni_fajl.write)\n lokalni_fajl.close()\n # unzip\n\n with gzip.open(fajl, 'rb') as compressed_f:\n lokalni_fajl_uncompressed = open(fajl_uncompressed, 'wb')\n lokalni_fajl_uncompressed.write(compressed_f.read())\n lokalni_fajl_uncompressed.close()\n # -----\n\n i = 1\n print(\"Downloading files... 
\\nThis may take a while depending on the number of complexes\")\n for kompleks in cluster_list_unique:\n try:\n if os.path.isfile(str(kompleks).lower() + \".pdb\"):\n i += 1\n ftp_wwpdb_server = FTP(\"ftp.wwpdb.org\")\n ftp_wwpdb_server.login()\n else:\n fajl = \"pdb\" + str(kompleks).lower() + \".ent.gz\"\n fajl_uncompressed = str(kompleks).lower() + \".pdb\"\n print((\"Downloading complex \" + str(i) + \" out of \" + str(len(cluster_list_unique))))\n i += 1\n ftp_wwpdb_server = FTP(\"ftp.wwpdb.org\")\n ftp_wwpdb_server.login()\n ftp_wwpdb_server.cwd('/pub/pdb/data/structures/divided/pdb/' + str(kompleks[1:3]).lower() + \"/\")\n\n get_files_2(fajl)\n except:\n #i += 1\n print((\"removing complex: \" + str(kompleks)))\n nedostopni_datoteka.write(str(kompleks).lower() + \"\\n\")\n pass\n newValue = progress.progressBar.value() + 1\n progress.progressBar.setValue(newValue)\n\n ftp_wwpdb_server.quit()\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O\", \"Complexes downloaded successfully\")\n print(\"Download of complexes finished\")\n progress.close()\n\n return None\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Check all settings, \\nPlease investigate!\")\n nedostopni_datoteka.close()\n\n\nclass BindingSites:\n \"\"\"define the binding sites\"\"\"\n\n bsite_unique_centers = []\n\n @staticmethod\n def get_binding_sites():\n try:\n dialog.ListIdentify.clear()\n \n water_sel = dialog.CheckWater.isChecked()\n chain_sel = dialog.CheckCompare.isChecked()\n if custom == True:\n target_complex_2 = dialog.LineProtein.text()\n else:\n target_complex_2 = (dialog.LineProtein.text().lower() + \".pdb\")\n \n vzorec_2 = re.compile(\"^\" + \"ATOM\")\n # careful with the following * or + in the regex\n vzorec_3 = re.compile(\"^\" + \"HETATM\")\n vzorec_4 = re.compile(\"HOH\")\n # ALL THE LISTS------------------------------------------------------\n lista_za_heteroatome = []\n lista_za_atome = []\n lista_za_vode = []\n lista_binding_sites = []\n lista_water_binding_sites = []\n lista_verige = []\n lista_verige_konc = []\n warning_lista = []\n #lista_verige_unique = []\n bsite_unique = []\n # sort the results into the three main lists-------------------------\n try:\n with open(target_complex_2, \"rt\") as infile:\n for linenumber, line in enumerate(infile):\n if vzorec_3.search(line) != None:\n if vzorec_4.search(line) != None:\n lista_za_vode.append(line.rstrip('\\n'))\n else:\n lista_za_heteroatome.append(line.rstrip('\\n'))\n elif vzorec_2.search(line) != None:\n lista_za_atome.append(line.rstrip('\\n'))\n except OSError:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"File not found \\n Please Investigate!\")\n print(\"File not found! Please investigate.\")\n \n # INCLUSION OF CHAIN WATERS\n global lista_za_atome_xyzchain\n lista_za_atome_xyzchain = []\n \n for entry in lista_za_atome:\n test = []\n try:\n # ATOM 477 CG2 ILE A 78 6.540 0.762 34.941 1.00 13.42 C\n entry[30:38]\n test.append(entry[30:38])\n test.append(entry[38:46])\n test.append(entry[46:54])\n test.append(str(entry[21]))\n lista_za_atome_xyzchain.append(test)\n except:\n pass\n \n \n if water_sel == False:\n for linija in lista_za_heteroatome:\n unique_binding_site = linija[17:20].strip(\" \")+\".\"+linija[22:26].strip(\" \")+\".\"+linija[21].strip(\" \")\n lista_binding_sites.append(unique_binding_site)\n bsite_unique.append([unique_binding_site, linija[30:38].strip(\" \"), linija[38:46].strip(\" \"), linija[46:54].strip(\" \")])\n else:\n for linija in lista_za_vode:\n unique_binding_site = linija[17:20].strip(\" 
\")+\".\"+linija[22:26].strip(\" \")+\".\"+linija[21].strip(\" \")\n lista_binding_sites.append(unique_binding_site)\n bsite_unique.append([unique_binding_site, linija[30:38].strip(\" \"), linija[38:46].strip(\" \"), linija[46:54].strip(\" \")])\n \n \n \n # priprava bsite ---------------------------------------------------\n # ok grupiranje ker zelimo UNIQUE\n for key, group in groupby(bsite_unique, lambda x: x[0]):\n bsx = []\n bsy = []\n bsz = []\n for el in group:\n bsx.append(float(el[1]))\n bsy.append(float(el[2]))\n bsz.append(float(el[3]))\n \n # name of bsite, axerage x, average y, average z, min x, max x, min y, max y, min z, max z\n BindingSites.bsite_unique_centers.append([key, sum(bsx)/len(bsx), sum(bsy)/len(bsy), sum(bsz)/len(bsz), min(bsx), max(bsx), min(bsy), max(bsy), min(bsz), max(bsz)])\n \n \n \n # priprava vode-----------------------------------------------------\n for linija in lista_za_vode:\n water_binding_sites = linija[17:20].strip(\" \")+\".\"+linija[22:26].strip(\" \")+\".\"+linija[21].strip(\" \")\n lista_water_binding_sites.append(water_binding_sites)\n \n # priprava ostalo---------------------------------------------------\n for linija in lista_za_atome:\n atom_site = []\n try:\n atom_site.append(str(linija[21]))\n atom_site.append(int(linija[22:26]))\n lista_verige.append(atom_site)\n except:\n pass\n \n # priprava verige---------------------------------------------------\n for key, group in groupby(lista_verige, lambda x: x[0]):\n temp_group = []\n #residue_number = 0\n \n for el in group:\n # TO JE ZA STETJE UNIKATNIH AK OSTANKOV\n if el not in temp_group:\n temp_group.append(el)\n else:\n pass\n # uporbljeno za opcijo kjer se steje unikatne AK ostanke\n ins_str = str(temp_group[-1][0]) + \" chain with \" + str(len(temp_group)) + \" residues\"\n # ins_str = str(temp_group[-1][0]) + \" chain with \" + str(temp_group[-1][1]) + \" residues\"\n \n \n lista_verige_konc.append(ins_str)\n \n # resevanje prolematicnih pdbjev - naj preveri uporabnik\n # v bodoce mogoce avtomatsko\n #-------------------------------------------------------------------\n \n if chain_sel == False:\n for chainelement in lista_verige_konc:\n if float(chainelement.split()[3]) < 30.0:\n temp_str = \"ONLY \" + str(chainelement.split()[3]) + \" residues found in chain \" + str(chainelement.split()[0])\n # problemi v warning listi\n warning_lista.append(temp_str)\n else:\n pass\n \n if len(warning_lista) >= 1:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Try to compare whole chains instead of individual binding sites at the short chain location.\")\n else:\n pass\n \n if water_sel == False:\n for entry in sorted(list(set(lista_binding_sites))):\n dialog.ListIdentify.addItem(entry)\n else:\n for entry in sorted(list(set(lista_binding_sites))):\n dialog.ListIdentify.addItem(entry)\n for entry in sorted(list(set(lista_water_binding_sites))):\n dialog.ListIdentify.addItem(entry)\n \n else:\n for entry in lista_verige_konc:\n dialog.ListIdentify.addItem(entry)\n \n if dialog.ListIdentify.count() == 0:\n if dialog.CheckCompare.isChecked() == False:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Message\",\n \"No binding sites found for analysis.\\nPlease use Compare Whole Chain option\")\n print(\"No binding sites found for analysis.\\nPlease use Compare Whole Chain option\")\n else:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Message\",\n \"No chains found to compare.\")\n print(\"No chains found to compare.\")\n \n return None\n # flow out 
except-------------------------------------------------------\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Invalid PDB ID or \\nDatabase File not Found! \\n\\nPlease investigate!\")\n print((sys.exc_info()))\n\n\n# report list 1-----------------------------------------------------------------\nreport_list_1 = []\nreport_list_2 = []\nreport_list_1.append(\"ProBiS H2O REPORT file\")\nreport_list_1.append(\"-\" * 25 + \"\\n\\n\")\n# ------------------------------------------------------------------------------\n\nclass h20Analysis:\n \"\"\"collect, prepare, cluster, analyze and display crystallographic water data\"\"\"\n\n @staticmethod\n def analyze_waters():\n global SELECTED_SITE\n global SELECTED_SITE_CHAIN\n # NUM CPU:\n try:\n processors_available_local = str(multiprocessing.cpu_count())\n except:\n processors_available_local = \"1\"\n # NUM_CPUS //\n\n if custom == False:\n nova_datoteka = open(\"report_\" + dialog.LineProtein.text().lower() + \".txt\", \"w\")\n else:\n target = os.path.split(dialog.LineProtein.text())[1]\n nova_datoteka = open(\"report_\" + \"custom_\" + target.split(\".\")[0] + \".txt\", \"w\")\n nova_datoteka.close()\n\n # one or multiple chains per compared protein\n one_or_multiple = dialog.CheckAligned.isChecked()\n # get setting on superposition starting point analysis\n bsite_space_check = dialog.CheckAnalyze.isChecked()\n\n chain_sel = dialog.CheckCompare.isChecked()\n vzorec_3 = re.compile(\"^\" + \"HETATM\\s+\\d+\")\n vzorec_3_supp = re.compile(\"^\" + \"HETATM\\d\\d\\d\\d\\d\\s+\")\n examined_list_unique = []\n examined_list = list(cluster_list_unique)\n target_complex_2 = dialog.LineProtein.text()\n if custom == True:\n protein = target_complex_2.split(\".\")[0]\n else:\n protein = target_complex_2.lower()\n # this is for cases like 1j4h where unavailable PDBs appear that have their own\n # entries in the PDB database, e.g. 5GKY\n # for them we keep a list: the unavailable_pdb_list.txt file\n # located in the working directory\n\n with open('unavailable_pdb_list.txt', 'r') as f:\n removed_list = f.read().splitlines()\n removed_list.append(protein)\n\n\n\n for element in examined_list:\n if str(element).lower() not in removed_list:\n examined_list_unique.append(str(element).lower())\n else:\n pass\n\n try:\n bsite_selection_full = dialog.ListIdentify.currentItem()\n bsite_selection = bsite_selection_full.text()\n chain_selection = bsite_selection_full.text()[-1]\n whole_chain_compare_selection = bsite_selection_full.text()[0]\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Invalid selection \\n\\nPlease select b-site or chain!\")\n return None\n\n if bsite_space_check == True:\n for element in BindingSites.bsite_unique_centers:\n if element[0] == bsite_selection:\n SELECTED_SITE = element\n SELECTED_SITE_CHAIN = str(chain_selection).upper()\n\n if bsite_space_check == False:\n for element in BindingSites.bsite_unique_centers:\n if element[0] == bsite_selection:\n SELECTED_SITE = element\n SELECTED_SITE_CHAIN = str(chain_selection).upper()\n \n if chain_sel == True:\n SELECTED_SITE = []\n SELECTED_SITE.append(\"no binding site used in analysis\")\n SELECTED_SITE_CHAIN = str(whole_chain_compare_selection).upper()\n \n else:\n pass\n\n # report entries 2, 3, 4, 5, 6, 7, 8, 9, 10\n report_list_1.append(\"\\n\\n\\nExamined complex: \" + target_complex_2)\n report_list_1.append(\"Whole chain setting used: \" + str(chain_sel))\n if chain_sel == True:\n report_list_1.append(\"Whole chain selection: \" + whole_chain_compare_selection)\n report_list_1.append(\"Binding 
site selection: / (not used)\")\n report_list_1.append(\"Chain selection: \" + whole_chain_compare_selection)\n else:\n report_list_1.append(\"Whole chain selection: / (not used)\")\n report_list_1.append(\"Binding site selection: \" + bsite_selection)\n report_list_1.append(\"Chain selection: \" + chain_selection)\n\n\n report_list_1.append(\"Used PDB clusters with: \" + dialog.ComboBlastclust.currentText() + \" %\")\n report_list_1.append(\"Unique structures in identified cluster: \" + str(examined_list) + \"\\n\\n\\n\")\n\n #probis_starter=('start \"\" \"{}\"').format(probis_dir)\n probis_starter = probis_exe_dir\n\n print(\"processors_available_local: \" + processors_available_local)\n print(\"protein: \" + protein)\n logging.info(\"RUN PROBIS %s\", [probis_starter, \"-ncpu\", processors_available_local, \n \"-extract\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-srffile\", \n \"{}.srf\".format(protein)])\n \n if platform == \"win32\":\n if chain_sel == False:\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-extract\", \"-bsite\", bsite_selection, \"-dist\", \"3.0\", \"-f1\",\n \"{}.pdb\".format(protein), \"-c1\", chain_selection,\n \"-srffile\", \"{}.srf\".format(protein)])\n else:\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-extract\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-srffile\", \n \"{}.srf\".format(protein)])\n else:\n if chain_sel == False:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-extract\", \"-bsite\", bsite_selection, \"-dist\", \"3.0\", \"-f1\",\n \"{}.pdb\".format(protein), \"-c1\", chain_selection,\n \"-srffile\", \"{}.srf\".format(protein)])\n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-extract\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-srffile\", \n \"{}.srf\".format(protein)])\n\n master_chain_list = []\n\n open(\"./srfs.txt\", 'w').close()\n prot_list = []\n\n for element in examined_list_unique:\n unique_chain_list = []\n try:\n with open(element + \".pdb\", \"rt\") as infile:\n for linenumber, line in enumerate(infile):\n if vzorec_3.search(line) != None:\n\n unique_chain = str(line.rstrip('\\n').split()[4])\n if len(unique_chain) == 1:\n unique_chain_list.append(unique_chain)\n elif len(unique_chain) == 2:\n # 4WUC example\n #: HETATM 2976 NA A NA A 403 8.964 15.893 -17.028 0.50 27.84 NA\n unique_chain = str(line.rstrip('\\n').split()[5][0])\n unique_chain_list.append(unique_chain)\n else:\n # the next line [4][0] covers the case of PDB ID 4BRI\n # HETATM 5862 O2G UNP A1393 60.198 4.590 12.738 1.00 13.14 O\n # classic PDB:\n # HETATM 2769 N1 ARU A 1 35.314 25.889 32.623 1.00 60.17 N\n unique_chain = str(line.rstrip('\\n').split()[4][0])\n unique_chain_list.append(unique_chain)\n\n elif vzorec_3_supp.search(line) != None:\n # third problematic case: PDB ID 4BRP\n # HETATM11092 BR BR A1394 19.400 -62.788 -5.889 1.00 49.48 BR\n unique_chain = str(line.rstrip('\\n').split()[3][0])\n unique_chain_list.append(unique_chain)\n else:\n pass\n\n\n unique_chain_list = list(set(unique_chain_list))\n master_chain_list.append(unique_chain_list)\n\n for chain_id in unique_chain_list:\n # temporary list of unwanted chain ids\n unwanted_chain = [1, \"1\", 2, \"2\", 3, \"3\", 4, \"4\", 5, \"5\", 6, \"6\", 7, \"7\", 8, \"8\", 9, \"9\"]\n\n if chain_id in 
unwanted_chain:\n pass\n\n else:\n # for probis 2.4.7\n if platform == \"win32\":\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-extract\", \"-f1\", \"{}.pdb\".format(element), \"-c1\", \n chain_id, \"-srffile\", \"{}{}.srf\".format(element, chain_id)])\n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-extract\", \"-f1\", \"{}.pdb\".format(element), \"-c1\", \n chain_id, \"-srffile\", \"{}{}.srf\".format(element, chain_id)])\n\n srf_fajl = open(\"./srfs.txt\", 'a')\n srf_fajl.write(element + chain_id + \".srf \" + chain_id + \"\\n\")\n srf_fajl.close()\n prot_list.append(element + \" \" + chain_id)\n pass\n except OSError:\n print(\"Database File not Found!, please investigate.\")\n ##############################\n if platform == \"win32\":\n os.system(\"DEL *.rota.pdb /S\")\n os.system(\"DEL AAA_NOSQL.nosql /S\")\n else:\n os.system(\"rm ./*.rota.pdb\")\n os.system(\"rm ./AAA_NOSQL.nosql\")\n ##############################\n \n if chain_sel == False:\n if platform == \"win32\":\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-surfdb\", \"-local\", \"-sfile\", \"srfs.txt\", \"-f1\", \n \"{}.srf\".format(protein), \"-c1\",\n chain_selection, \"-nosql\", \"AAA_NOSQL.nosql\"])\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-results\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n chain_selection, \"-nosql\", \"AAA_NOSQL.nosql\", \"-json\", \"AAA_NOSQL.json\"])\n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-surfdb\", \"-local\", \"-sfile\", \"srfs.txt\", \"-f1\", \n \"{}.srf\".format(protein), \"-c1\",\n chain_selection, \"-nosql\", \"AAA_NOSQL.nosql\"])\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-results\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n chain_selection, \"-nosql\", \"AAA_NOSQL.nosql\", \"-json\", \"AAA_NOSQL.json\"])\n else:\n if platform == \"win32\":\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-surfdb\", \"-sfile\", \"srfs.txt\", \"-f1\", \"{}.srf\".format(protein),\n \"-c1\", whole_chain_compare_selection, \"-nosql\", \"AAA_NOSQL.nosql\"])\n \n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-results\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-nosql\", \"AAA_NOSQL.nosql\", \"-json\", \"AAA_NOSQL.json\"])\n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-surfdb\", \"-sfile\", \"srfs.txt\", \"-f1\", \"{}.srf\".format(protein),\n \"-c1\", whole_chain_compare_selection, \"-nosql\", \"AAA_NOSQL.nosql\"])\n\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-results\", \"-f1\", \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-nosql\", \"AAA_NOSQL.nosql\", \"-json\", \"AAA_NOSQL.json\"])\n\n for element in prot_list:\n ele0=element.split()[0]\n ele1=element.split()[1]\n \n if chain_sel == False:\n if platform == \"win32\":\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-align\", \"-bkeep\", \"-alno\", \"0\", \"-nosql\", \"AAA_NOSQL.nosql\", \"-f1\", \n \"{}.pdb\".format(protein), \"-c1\", chain_selection,\n \"-f2\", \"{}.pdb\".format(ele0), \"-c2\", \n ele1])\n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-align\", \"-bkeep\", \"-alno\", \"0\", \"-nosql\", \"AAA_NOSQL.nosql\", 
\"-f1\", \n \"{}.pdb\".format(protein), \"-c1\", chain_selection,\n \"-f2\", \"{}.pdb\".format(ele0), \"-c2\", \n ele1])\n else:\n if platform == \"win32\":\n subprocess.run(args=[probis_starter, \"-ncpu\", processors_available_local, \n \"-align\", \"-bkeep\", \"-alno\", \"0\", \"-nosql\", \"AAA_NOSQL.nosql\", \"-f1\", \n \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-f2\", \n \"{}.pdb\".format(ele0), \"-c2\", ele1]) \n else:\n subprocess.run(args=[probis_dir, \"-ncpu\", processors_available_local, \n \"-align\", \"-bkeep\", \"-alno\", \"0\", \"-nosql\", \"AAA_NOSQL.nosql\", \"-f1\", \n \"{}.pdb\".format(protein), \"-c1\",\n whole_chain_compare_selection, \"-f2\", \n \"{}.pdb\".format(ele0), \"-c2\", ele1]) \n \n print(\"Rotas done ... (alignment 0 !)\")\n\n\n # correction of multiple or one chain per protein allignment\n aligned_unique = []\n aligned_discard = []\n helper = []\n if one_or_multiple == True:\n with open(\"AAA_NOSQL.json\") as json_fajl:\n z_data = json.load(json_fajl)\n for aligned in z_data:\n if str(aligned[\"pdb_id\"]) in helper:\n aligned_discard.append(str(aligned[\"pdb_id\"]) + str(aligned[\"chain_id\"]))\n else:\n helper.append(str(aligned[\"pdb_id\"]))\n aligned_unique.append(str(aligned[\"pdb_id\"]) + str(aligned[\"chain_id\"]))\n \n if platform == \"win32\":\n if chain_sel == False:\n for discarded in aligned_discard:\n os.system(\"DEL \" + protein + chain_selection\n + \"_\" + str(discarded) + \".0.rota.pdb /S\")\n else:\n for discarded in aligned_discard:\n os.system(\"DEL \" + protein\n + whole_chain_compare_selection\n + \"_\" + str(discarded) + \".0.rota.pdb /S\")\n else:\n if chain_sel == False:\n for discarded in aligned_discard:\n os.system(\"rm ./\" + protein + chain_selection\n + \"_\" + str(discarded) + \".0.rota.pdb\")\n else:\n for discarded in aligned_discard:\n os.system(\"rm ./ \" + protein\n + whole_chain_compare_selection\n + \"_\" + str(discarded) + \".0.rota.pdb\")\n else:\n pass\n\n\n\n # ----------------START WATER COLLECTION------------------------------------\n\n # vzorci vod v PDB\n\n # linija1 = \"HETATM 3315 O HOH A 653 6.657 0.611 50.201 1.00 22.92 O\"\n # linija2 = \"HETATM 6180 O HOH A1063 46.720 3.111 27.787 1.00 51.91 O\"\n # linija3 = \"HETATM 3046 O AHOH A 562 19.114 37.882 -1.866 0.50 24.61 O\"\n # linija4 = \"HETATM 3047 O BHOH A 562 20.241 36.438 -1.021 0.50 30.54 O\"\n # linija5 = \"HETATM11839 O HOH A2001 45.529 16.939 64.867 1.00 20.76 O\"\n\n PDB_master_file_list = []\n imena_fajlov = glob(\"*.rota.pdb\")\n # ZADNJI JE KOMPLEKS\n imena_fajlov.append(protein + \".pdb\")\n\n vzorec_water_mining1 = re.compile(\"^\" + \"HETATM\\s+\\d+\\s+\\S+\\s+\" + \"HOH\" + \"\\s+\\D\\s+\\d+\\s+\") # ok za linijo 1\n vzorec_water_mining2 = re.compile(\"^\" + \"HETATM\\s+\\d+\\s+\\S+\\s+\" + \"HOH\" + \"\\s+\\D\\d{4}\\s+\") # ok za linijo 2\n vzorec_water_mining3 = re.compile(\"^\" + \"HETATM\\s+\\d+\\s+\\S+\\s+\" + \"\\wHOH\" + \"\\s+\\D\\s+\\d+\\s+\") # ok za linijo 3,4\n vzorec_water_mining4 = re.compile(\"^\" + \"HETATM\\d+\\s+\\S+\\s+\" + \"HOH\" + \"\\s+\\D\\d{4}\\s+\") # za linijo 5\n\n # VZOREC ZA END MODELA: VSAK *.rota.pdb ima MODEL2 zacetni kompleks\n vzorec_water_only_model_1 = re.compile(\"^\" + \"ENDMDL\")\n\n for ime_fajla in imena_fajlov:\n lista_h2o = []\n with open(ime_fajla, 'r') as brani_fajl:\n for linija in brani_fajl:\n if vzorec_water_mining1.search(linija) != None:\n voda = linija.split()\n voda.append(ime_fajla)\n lista_h2o.append(voda)\n\n elif vzorec_water_mining2.search(linija) != 
None:\n voda = linija.split()\n voda.insert(5, voda[4][1:])\n voda[4] = voda[4].strip(\"1234567890\")\n voda.append(ime_fajla)\n lista_h2o.append(voda)\n\n elif vzorec_water_mining3.search(linija) != None:\n voda = linija.split()\n voda[3] = voda[3][1:]\n voda.append(ime_fajla)\n lista_h2o.append(voda)\n\n elif vzorec_water_mining4.search(linija) != None:\n voda = linija.split()\n voda.insert(4, voda[3][1:])\n voda[3] = voda[3].strip(\"1234567890\")\n voda.insert(1, voda[0][6:])\n voda[0] = voda[0].strip(\"1234567890\")\n voda.append(ime_fajla)\n lista_h2o.append(voda)\n # naslednja linija je kljucna zaradi 2 modelov v *.rota.pdb\n elif vzorec_water_only_model_1.search(linija) != None:\n break\n\n else:\n pass\n PDB_master_file_list.append(lista_h2o)\n # print(\"PDB_master_file_list________________________\")\n # logging.info(\"PDB_master_file_list %s\", PDB_master_file_list)\n\n\n MASTER_h2o_list = []\n fajl_list = []\n for PDB_water_fajl in PDB_master_file_list:\n try:\n for het, stev, atom, molekula, veriga, zapor, x, y, z, occ, R, atom2, fajl in PDB_water_fajl:\n\n test = []\n test.append(x), test.append(y), test.append(z), test.append(fajl), test.append(R), test.append(zapor)\n MASTER_h2o_list.append(test)\n fajl_list.append(fajl)\n except (RuntimeError, TypeError, NameError, ValueError):\n pass\n\n entities = int(len(set(fajl_list)))\n dialog.PlainInfo.clear()\n \n # REPORT list 11,12\n report_list_1.append(dialog.LineFind.text() + \" (sequence identity pre-cluster)\")\n report_list_1.append(\"Master water list includes %r waters\" % (len(MASTER_h2o_list)))\n\n dialog.PlainInfo.insertPlainText(\"Master water list includes %r molecules /n\" % (len(MASTER_h2o_list)))\n dialog.PlainInfo.insertPlainText(\"Superimposed chains: %r\\n\" % (entities))\n\n print (\"Master lista vod narejena in vsebuje %r vod\" % (len(MASTER_h2o_list)))\n print(\"writing H2O master list\")\n nova_datoteka = open(\"master_water_list.txt\", \"w\")\n for tocka in MASTER_h2o_list:\n nova_datoteka.write(\"%s\\n\" % tocka)\n nova_datoteka.close()\n print(\"done...\")\n dialog.Tabs.setCurrentIndex(1)\n\n # DBSCAN formatting-----------------------------------------------------\n\n # binding site clustering\n # BindingSites.bsite_unique_centers\n master_bsite_lista_vod = []\n master_bsite_lista_vod_koordinata_x = []\n master_bsite_lista_vod_koordinata_y = []\n master_bsite_lista_vod_koordinata_z = []\n\n master_lista_vod = []\n master_lista_vod_koordinata_x = []\n master_lista_vod_koordinata_y = []\n master_lista_vod_koordinata_z = []\n\n # POPRAVA VOD DA NE SEGAJO IZVEN CHAINA\n\n correction_x = []\n correction_y = []\n correction_z = []\n for element in lista_za_atome_xyzchain:\n if SELECTED_SITE_CHAIN == element[3]:\n correction_x.append(float(element[0]))\n correction_y.append(float(element[1]))\n correction_z.append(float(element[2]))\n\n global atom_max_x\n global atom_min_x\n global atom_max_y\n global atom_min_y\n global atom_max_z\n global atom_min_z\n\n atom_max_x = max(correction_x) + 4\n atom_min_x = min(correction_x) - 4\n atom_max_y = max(correction_y) + 4\n atom_min_y = min(correction_y) - 4\n atom_max_z = max(correction_z) + 4\n atom_min_z = min(correction_z) - 4\n\n global boundingBox\n\n boundingBox = [LINEWIDTH, 2.0, BEGIN, LINES,\n COLOR, float(1), float(0), float(0),\n\n VERTEX, atom_min_x, atom_min_y, atom_min_z, #1\n VERTEX, atom_min_x, atom_min_y, atom_max_z, #2\n\n VERTEX, atom_min_x, atom_max_y, atom_min_z, #3\n VERTEX, atom_min_x, atom_max_y, atom_max_z, #4\n\n VERTEX, atom_max_x, atom_min_y, 
atom_min_z, #5\n VERTEX, atom_max_x, atom_min_y, atom_max_z, #6\n\n VERTEX, atom_max_x, atom_max_y, atom_min_z, #7\n VERTEX, atom_max_x, atom_max_y, atom_max_z, #8\n\n\n VERTEX, atom_min_x, atom_min_y, atom_min_z, #1\n VERTEX, atom_max_x, atom_min_y, atom_min_z, #5\n\n VERTEX, atom_min_x, atom_max_y, atom_min_z, #3\n VERTEX, atom_max_x, atom_max_y, atom_min_z, #7\n\n VERTEX, atom_min_x, atom_max_y, atom_max_z, #4\n VERTEX, atom_max_x, atom_max_y, atom_max_z, #8\n\n VERTEX, atom_min_x, atom_min_y, atom_max_z, #2\n VERTEX, atom_max_x, atom_min_y, atom_max_z, #6\n\n\n VERTEX, atom_min_x, atom_min_y, atom_min_z, #1\n VERTEX, atom_min_x, atom_max_y, atom_min_z, #3\n\n VERTEX, atom_max_x, atom_min_y, atom_min_z, #5\n VERTEX, atom_max_x, atom_max_y, atom_min_z, #7\n\n VERTEX, atom_min_x, atom_min_y, atom_max_z, #2\n VERTEX, atom_min_x, atom_max_y, atom_max_z, #4\n\n VERTEX, atom_max_x, atom_min_y, atom_max_z, #6\n VERTEX, atom_max_x, atom_max_y, atom_max_z, #8\n\n END\n ]\n\n\n # ----------------------------------------------------------------------\n\n # cluster TESTING\n mlv_datoteka = open(\"master_water_list.txt\", \"r\")\n for linija in mlv_datoteka:\n vmesna_lista = []\n linija2 = linija.replace(\"[\", \"\")\n linija3 = linija2.replace(\"]\", \"\")\n linija4 = linija3.replace(\" \", \"\")\n linija5 = linija4.replace(\"'\", \"\")\n linija_lista = linija5.split(\",\")\n x = float(linija_lista[0])\n y = float(linija_lista[1])\n z = float(linija_lista[2])\n\n vmesna_lista.append(x)\n vmesna_lista.append(y)\n vmesna_lista.append(z)\n \n if bsite_space_check == True and chain_sel == False:\n if SELECTED_SITE[4] - 4 <= vmesna_lista[0] <= SELECTED_SITE[5] + 4:\n if SELECTED_SITE[6] - 4 <= vmesna_lista[1] <= SELECTED_SITE[7] + 4:\n if SELECTED_SITE[8] - 4 <= vmesna_lista[2] <= SELECTED_SITE[9] + 4:\n master_bsite_lista_vod.append(vmesna_lista)\n x2 = float(vmesna_lista[0])\n master_bsite_lista_vod_koordinata_x.append(x2)\n y2 = float(vmesna_lista[1])\n master_bsite_lista_vod_koordinata_y.append(y2)\n z2 = float(vmesna_lista[2])\n master_bsite_lista_vod_koordinata_z.append(z2)\n else:\n pass\n else:\n pass\n else:\n pass\n\n if bsite_space_check == False and chain_sel == False:\n if atom_min_x <= vmesna_lista[0] <= atom_max_x:\n if atom_min_y <= vmesna_lista[1] <= atom_max_y:\n if atom_min_z <= vmesna_lista[2] <= atom_max_z:\n master_lista_vod_koordinata_x.append(x)\n master_lista_vod_koordinata_y.append(y)\n master_lista_vod_koordinata_z.append(z)\n master_lista_vod.append(vmesna_lista)\n if chain_sel == True:\n if atom_min_x <= vmesna_lista[0] <= atom_max_x:\n if atom_min_y <= vmesna_lista[1] <= atom_max_y:\n if atom_min_z <= vmesna_lista[2] <= atom_max_z:\n master_lista_vod_koordinata_x.append(x)\n master_lista_vod_koordinata_y.append(y)\n master_lista_vod_koordinata_z.append(z)\n master_lista_vod.append(vmesna_lista)\n else:\n pass\n\n mlv_datoteka.close()\n # /DBSCAN formatting----------------------------------------------------\n def display_cluster_info(mlist, mlist_x, mlist_y, mlist_z):\n\n x_dim = max(mlist_x) - min(mlist_x)\n y_dim = max(mlist_y) - min(mlist_y)\n z_dim = max(mlist_z) - min(mlist_z)\n\n system_volume = round(x_dim * y_dim * z_dim)\n\n dialog.PlainInfo.insertPlainText(\"System volume is: %d cubic A\\n\" % (system_volume))\n report_list_1.append(\"System volume is: %d cubic A\\n\" % (system_volume))\n report_list_1.append(\"IDENTIFIED CLUSTERS: \\n\")\n report_list_1.append(\"-\" * 25)\n\n\n def calculate_clusters(lista, sample_size):\n lista_np_array = np.array(lista)\n 
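# DBSCAN labels every point with a cluster index and marks points that\n            # fall in no cluster as -1 (\"noise\"); the count below therefore\n            # subtracts one whenever -1 appears among the labels.\n            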
selected_eps=dialog.SpinDB.value()\n labels3D = DBSCAN(eps=selected_eps, min_samples=sample_size).fit_predict(lista_np_array)\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_3D = len(set(labels3D)) - (1 if -1 in labels3D else 0)\n return int(n_clusters_3D)\n\n\n dialog.ListCalculated.clear()\n start_population = 2\n clus_num = calculate_clusters(mlist, start_population)\n cluster_collate_list = []\n max_population = 0\n while clus_num >= 1:\n\n consv = round((float(start_population)/float(entities)), 2)\n # the limit of one is overloaded because in theory several water molecules can sit at the same spot in the same crystal\n # given that the point of this tool is experimental data, such waters closer than 1 A would be\n # meaningless and would be corrected by the crystallographer\n # so in a highly unusual or experimentally uncorrected case the consv value can exceed 1\n # such cases are reduced here to the value 1, which means the water at this location occurs in all experimental entities\n if consv > 1:\n consv = 1.0\n else:\n pass\n\n # for the report list--------------------------------------------\n text_consv = int(round(consv*10)) * \"*\"\n st = 10 - len(text_consv)\n text_consv += st * \" \"\n if str(clus_num) == \"1\":\n text = (str(clus_num) + \" cluster with \" + str(start_population)\n + \" H2O molecules. \" + \"consv. \" + str(consv))\n else:\n text = (str(clus_num) + \" clusters with \" + str(start_population)\n + \" H2O molecules. \" + \"consv. \" + str(consv))\n num_spaces = 55 - len(text)\n report_list_1.append(text + num_spaces * \" \" + \"[\" + text_consv + \"]\")\n # REPORT LIST APPEND--------------------------------------------\n\n cluster_collate_list.append([clus_num, start_population, text])\n start_population += 1\n clus_num = calculate_clusters(mlist, start_population)\n max_population = start_population\n \n temp_collate = []\n d=0\n for cluster_num, start_population, list_text in reversed(cluster_collate_list):\n d=d+1\n\n if cluster_num not in temp_collate:\n dialog.ListCalculated.addItem(list_text)\n temp_collate.append(cluster_num)\n else:\n pass\n\n calculated_items = []\n for x in range(dialog.ListCalculated.count()):\n calculated_items.append(dialog.ListCalculated.item(x))\n for i, listbox_entry in enumerate(calculated_items):\n if 0.9 <= float(listbox_entry.text().split()[7]) :\n dialog.ListCalculated.item(i).setBackground(red10)\n elif 0.8 <=float(listbox_entry.text().split()[7]) < 0.9:\n dialog.ListCalculated.item(i).setBackground(red09)\n elif 0.7 <=float(listbox_entry.text().split()[7]) < 0.8:\n dialog.ListCalculated.item(i).setBackground(red08)\n elif 0.6 <=float(listbox_entry.text().split()[7]) < 0.7:\n dialog.ListCalculated.item(i).setBackground(red07)\n elif 0.5 <=float(listbox_entry.text().split()[7]) < 0.6:\n dialog.ListCalculated.item(i).setBackground(red06)\n elif 0.4 <=float(listbox_entry.text().split()[7]) < 0.5:\n dialog.ListCalculated.item(i).setBackground(red05)\n elif 0.3 <=float(listbox_entry.text().split()[7]) < 0.4:\n dialog.ListCalculated.item(i).setBackground(red04)\n elif 0.2 <=float(listbox_entry.text().split()[7]) < 0.3:\n dialog.ListCalculated.item(i).setBackground(red03)\n elif 0.1 <=float(listbox_entry.text().split()[7]) < 0.2:\n dialog.ListCalculated.item(i).setBackground(red02)\n else:\n dialog.ListCalculated.item(i).setBackground(red01)\n \n # report list 15\n report_list_1.append(\"-\" * 25)\n max_pop_text = \"Maximum occupied cluster contains %d H2O molecules \\n binding site: %s\" % 
(max_population - 1, SELECTED_SITE[0])\n dialog.PlainInfo.insertPlainText(max_pop_text)\n report_list_1.insert(10, max_pop_text)\n\n if chain_sel == True:\n try:\n display_cluster_info(master_lista_vod, master_lista_vod_koordinata_x,\n master_lista_vod_koordinata_y, master_lista_vod_koordinata_z)\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\",\n \"Please ensure analized .pdb file includes water molecules\")\n else:\n try:\n if bsite_space_check == True:\n display_cluster_info(master_bsite_lista_vod, master_bsite_lista_vod_koordinata_x,\n master_bsite_lista_vod_koordinata_y, master_bsite_lista_vod_koordinata_z)\n else:\n display_cluster_info(master_lista_vod, master_lista_vod_koordinata_x,\n master_lista_vod_koordinata_y, master_lista_vod_koordinata_z)\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\",\n \"Please ensure analized .pdb file includes water molecules\")\n\n # for linija in report_list_1:\n # print(\" llll: \" + linija)\n # cleanup\n if platform == \"win32\":\n os.system(\"DEL *.ent.gz /S\")\n os.system(\"DEL *.srf /S\")\n os.system(\"DEL *.rota.pdb /S\")\n os.system(\"DEL AAA_NOSQL* /S\")\n os.system(\"DEL query.json /S\")\n os.system(\"DEL info.json /S\")\n os.system(\"DEL srfs.txt /S\")\n else:\n os.system(\"rm ./*.ent.gz\")\n os.system(\"rm ./*.srf\")\n os.system(\"rm ./*.rota.pdb\")\n os.system(\"rm ./AAA_NOSQL*\")\n os.system(\"rm ./query.json\")\n os.system(\"rm ./info.json\")\n os.system(\"rm ./srfs.txt\")\n \n dialog.Tabs.setCurrentIndex(1)\n return None\n\nclass pyMOLinterface:\n \"\"\"use wonderful pyMol for visualisation of collected results\"\"\"\n \"\"\"thanks! Warren L. DeLano!\"\"\"\n\n @staticmethod\n def pyMOL_water_contacts():\n if custom == False:\n target_complex_3 = dialog.LineProtein.text().lower()\n else:\n target_complex_3 = os.path.split(dialog.LineProtein.text())[1].split(\".\")[0]\n # za H-bond mejno razdajo kar 4 A\n cmd.select(\"protein_\", \"polymer and {}\".format(target_complex_3))\n cmd.select(\"ligand_\", \"organic and {}\".format(target_complex_3))\n cmd.select(\"conserved_waters\", \"H2O*\")\n cmd.select(\"donors_\", \"(elem n,o and (neighbor hydro)) and {}\".format(target_complex_3))\n cmd.select(\"acceptors_\", \"(elem o or (elem n and not (neighbor hydro))) and {}\".format(target_complex_3))\n cmd.distance(\"prot_acceptors\", \"(protein_ and acceptors_)\", \"conserved_waters\", \"4.0\")\n cmd.distance(\"prot_donors\", \"(protein_ and donors_)\", \"conserved_waters\", \"4.0\")\n cmd.distance(\"ligand_acceptors\", \"(ligand_ and acceptors_)\", \"conserved_waters\", \"4.0\")\n cmd.distance(\"ligand_donors\", \"(ligand_ and donors_)\", \"conserved_waters\", \"4.0\")\n cmd.distance(\"inter_cons_H2O\", \"conserved_waters\", \"conserved_waters\", \"4.0\")\n cmd.delete(\"donors_\")\n cmd.delete(\"acceptors_\")\n cmd.set(\"dash_color\", \"magenta\")\n cmd.set(\"dash_gap\", \"0.2\")\n cmd.set(\"dash_length\", \"0.2\")\n cmd.set(\"dash_round_ends\",\"on\")\n cmd.set(\"dash_width\",\"3\")\n\n @staticmethod\n def pyMOL_fetch_system():\n\n cmd.delete(name = \"all\")\n\n if custom == True:\n target_complex_3 = os.path.split(dialog.LineProtein.text())[1].split(\".\")[0]\n logging.info(\"pyMOL_fetch_system 1 %s\", [custom, filename, target_complex_3])\n cmd.load(filename, target_complex_3, multiplex=1)\n else:\n target_complex_3 = dialog.LineProtein.text().lower()\n logging.info(\"pyMOL_fetch_system 2 %s\", [custom, target_complex_3])\n cmd.fetch(target_complex_3, target_complex_3)\n\n cmd.hide(\"everything\", 
target_complex_3)\n cmd.show (\"cartoon\", target_complex_3)\n cmd.set(\"cartoon_color\", \"white\", target_complex_3)\n cmd.hide(\"lines\", \"all\")\n cmd.util.cbag(selection = target_complex_3)\n cmd.show(\"surface\", target_complex_3)\n cmd.set(\"transparency\", \"0.9\")\n cmd.set(\"surface_color\", \"white\")\n cmd.show(\"sticks\", \"organic\")\n cmd.color(\"blue\", \"organic\")\n waters = \"{}_waters\".format(target_complex_3)\n cmd.select(waters, \"resn hoh\")\n cmd.show(\"nonbonded\", waters)\n cmd.deselect()\n\n\n @staticmethod\n def pyMOL_chain_box():\n cmd.load_cgo(boundingBox, \"box\")\n\n @staticmethod\n def pyMOL_bsite_cluster():\n # display binding sites of clusters\n cmd.select(\"bsites\", \"H2O* around 6\")\n cmd.select(\"byres bsites\")\n cmd.show(\"sticks\", \"byres bsites\")\n cmd.util.cbay(\"byres bsites\")\n cmd.set_bond(\"stick_radius\", \"0.1\", \"byres bsites\")\n cmd.select(\"sele\", \"name ca and byres bsites\")\n cmd.label(\"sele\", \"'\\{}-{}\\'.format(resn, resi)\")\n cmd.set(\"label_size\", \"18\")\n cmd.set(\"label_font_id\", \"7\")\n cmd.show(\"sticks\", \"organic\")\n cmd.color(\"blue\", \"organic\")\n cmd.util.cnc (\"organic\")\n cmd.set_bond(\"stick_radius\", \"0.25\", \"organic\")\n \n @staticmethod\n def pyMOL_display_cluster():\n display_clusters_setting = dialog.CheckKeep.isChecked()\n bsite_space_check = dialog.CheckAnalyze.isChecked()\n chain_sel = dialog.CheckCompare.isChecked()\n\n # za korekcijo in pogled R faktorja\n debye_waller_check = dialog.CheckDebye.isChecked()\n\n # DBSCAN formatting-----------------------------------------------------\n\n # binding site clustering\n # BindingSites.bsite_unique_centers\n master_bsite_lista_vod = []\n master_bsite_lista_vod_koordinata_x = []\n master_bsite_lista_vod_koordinata_y = []\n master_bsite_lista_vod_koordinata_z = []\n master_lista_vod = []\n master_lista_vod_koordinata_x = []\n master_lista_vod_koordinata_y = []\n master_lista_vod_koordinata_z = []\n # za Debye Waller\n master_bsite_lista_atom_iso_displacement = []\n master_lista_atom_iso_displacement = []\n # master_lista_names = []\n master_bsite_lista_info = []\n master_lista_info = []\n\n mlv_datoteka = open(\"master_water_list.txt\", \"r\")\n for linija in mlv_datoteka:\n vmesna_lista = []\n linija2 = linija.replace(\"[\", \"\")\n linija3 = linija2.replace(\"]\", \"\")\n linija4 = linija3.replace(\" \", \"\")\n linija5 = linija4.replace(\"'\", \"\")\n linija_lista = linija5.split(\",\")\n x = float(linija_lista[0])\n y = float(linija_lista[1])\n z = float(linija_lista[2])\n B = float(linija_lista[4])\n if B < 0:\n B = 0\n info = str(linija_lista[3]) + \" location: \" + str(linija_lista[5].strip(\"\\n\"))\n\n\n # anizotropni displcement bomo implementirali v V2 hopefully\n # + 1.4 je zaradi r H2O\n isotropni_displacement = math.sqrt(B/(8*((math.pi)**2))) + 1.4\n\n\n # master_lista_names.append(ime)\n\n\n vmesna_lista.append(x)\n vmesna_lista.append(y)\n vmesna_lista.append(z)\n\n\n\n if bsite_space_check == True and chain_sel == False:\n if SELECTED_SITE[4] - 4 <= vmesna_lista[0] <= SELECTED_SITE[5] + 4:\n if SELECTED_SITE[6] - 4 <= vmesna_lista[1] <= SELECTED_SITE[7] + 4:\n if SELECTED_SITE[8] - 4 <= vmesna_lista[2] <= SELECTED_SITE[9] + 4:\n master_bsite_lista_vod.append(vmesna_lista)\n x2 = float(vmesna_lista[0])\n master_bsite_lista_vod_koordinata_x.append(x2)\n y2 = float(vmesna_lista[1])\n master_bsite_lista_vod_koordinata_y.append(y2)\n z2 = float(vmesna_lista[2])\n master_bsite_lista_vod_koordinata_z.append(z2)\n 
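# The isotropni_displacement value appended below comes from the\n                        # Debye-Waller relation u = sqrt(B / (8 * pi**2)); the extra 1.4 A\n                        # approximates the van der Waals radius of the water oxygen.\n                        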
master_bsite_lista_atom_iso_displacement.append(isotropni_displacement)\n master_bsite_lista_info.append(info)\n\n if bsite_space_check == False and chain_sel == False:\n if atom_min_x <= vmesna_lista[0] <= atom_max_x:\n if atom_min_y <= vmesna_lista[1] <= atom_max_y:\n if atom_min_z <= vmesna_lista[2] <= atom_max_z:\n master_lista_vod_koordinata_x.append(x)\n master_lista_vod_koordinata_y.append(y)\n master_lista_vod_koordinata_z.append(z)\n master_lista_vod.append(vmesna_lista)\n master_lista_atom_iso_displacement.append(isotropni_displacement)\n master_lista_info.append(info)\n\n if chain_sel == True:\n if atom_min_x <= vmesna_lista[0] <= atom_max_x:\n if atom_min_y <= vmesna_lista[1] <= atom_max_y:\n if atom_min_z <= vmesna_lista[2] <= atom_max_z:\n master_lista_vod_koordinata_x.append(x)\n master_lista_vod_koordinata_y.append(y)\n master_lista_vod_koordinata_z.append(z)\n master_lista_vod.append(vmesna_lista)\n master_lista_atom_iso_displacement.append(isotropni_displacement)\n master_lista_info.append(info)\n\n\n else:\n pass\n\n # /DBSCAN formatting----------------------------------------------------\n\n # BSITE LOKALNO ALI GLOBALNO NA VERIGI\n if bsite_space_check == True and chain_sel == False:\n master_lista_vod = master_bsite_lista_vod\n master_lista_atom_iso_displacement = master_bsite_lista_atom_iso_displacement\n master_lista_info = master_bsite_lista_info\n else:\n pass\n\n mlv_datoteka.close()\n\n\n try:\n # example:\n # 2 clusters with 14 H2O molecules consv. 0.67\n cluster_selection = int(dialog.ListCalculated.currentItem().text().split()[3])\n consv_of_cluster = float(dialog.ListCalculated.currentItem().text().split()[7])\n\n # report list\n report_list_1.append(\"\\nBinding site info (name, avg x, y, z, min x, max x, min y, max y, min z, max z; box 4 A around extremes): \\n\" + str(SELECTED_SITE))\n report_list_1.append(\"\\nExamined cluster with \" + str(cluster_selection) + \" H2O molecules\\n\")\n report_list_1.append(\"-\" * 25)\n\n except:\n QtWidgets.QMessageBox.about(dialog, \"ProBiS H2O Warning\", \"Please select clusters to display\")\n return\n selected_eps=dialog.SpinDB.value()\n labels3D = DBSCAN(eps=selected_eps, min_samples=cluster_selection).fit_predict(np.array(master_lista_vod))\n\n i = 0\n tocke = []\n for element in labels3D:\n temp = []\n if element != -1:\n temp.append(master_lista_vod[i])\n temp.append(element)\n temp.append(master_lista_atom_iso_displacement[i])\n temp.append(master_lista_info[i])\n report_list_1.append(temp)\n tocke.append(temp)\n\n else:\n pass\n i += 1\n\n\n if debye_waller_check == False:\n\n if display_clusters_setting == False:\n cmd.delete(\"H2O*\")\n else:\n pass\n\n for element in list(set(labels3D)):\n cluster_temp = []\n for sub_element in tocke:\n if sub_element[1] == element:\n cluster_temp.append(sub_element[0])\n\n\n try:\n cmd.set_color(\"clus_color\", \"[%f, %f, %f]\" % (1.0, (1.0 - consv_of_cluster), (1.0 - consv_of_cluster)))\n pymol.cmd.do(\"pseudoatom H2O_clus-%d_%.2f, vdw=1, color=clus_color, pos=[%f, %f, %f]\" % (element, consv_of_cluster, cluster_temp[0][0], cluster_temp[0][1], cluster_temp[0][2]))\n cmd.show(\"spheres\", \"H2O_clus-%d*\" % (element))\n\n except IndexError:\n pass\n\n else:\n\n cmd.delete(\"H2O*\")\n cmd.delete(\"iso_disp\")\n \n for element in list(set(labels3D)):\n cluster_temp = []\n for sub_element in tocke:\n if sub_element[1] == element:\n sub_element[0].append(sub_element[2])\n cluster_temp.append(sub_element[0])\n\n try:\n pymol.cmd.do(\"pseudoatom H2O_clus, vdw=1, color=red, 
pos=[%f, %f, %f]\" % (cluster_temp[0][0], cluster_temp[0][1], cluster_temp[0][2]))\n cmd.show(\"spheres\", \"H2O_clus\")\n\n for tocka in cluster_temp:\n pymol.cmd.do(\"pseudoatom iso_disp, vdw=%f, color=red, pos=[%f, %f, %f]\" % (cluster_temp[0][3], cluster_temp[0][0], cluster_temp[0][1], cluster_temp[0][2]))\n\n cmd.show(\"dots\", \"iso_disp\")\n\n except IndexError:\n pass\n\n\n # report on cluster\n if custom == False:\n nova_datoteka = open(\"report_\" + dialog.LineProtein.text().lower() + \".txt\", \"w\")\n else:\n target = os.path.split(dialog.LineProtein.text())[1]\n nova_datoteka = open(\"report_\"+ \"custom_\" + target.split(\".\")[0]+ \".txt\", \"w\")\n for linija in report_list_1:\n logging.info(\"report_list_1 %s\", linija)\n nova_datoteka.write(\"%s\\n\" % linija)\n nova_datoteka.close()\n logging.info(\"______________________________!!!!!!!!!!!!!!!!!!______________________________\")\n print(\"report created...\")\n\n# thanks Janez for Support!\n","repo_name":"daryapotanina/public-diploma","sub_path":"probisH2O/ProBiS_H2O_plugin.py","file_name":"ProBiS_H2O_plugin.py","file_ext":"py","file_size_in_byte":81907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18810071484","text":"'''\n Module to extract the top-K packages based on GR model Baseline\n\n'''\nimport sys\nimport os.path as o\nsys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))\nfrom itertools import product\nfrom statistics import mean\nimport time\nimport pandas as pd\nfrom collections import defaultdict\nimport numpy as np\nfrom model import DataHelper as dh\nfrom parameters import common_parameters as p\nimport math\nfrom model import Arguments\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore')\n\nLOGS_PATH = p.LOGS_PATH\nkitems = p.top_k_items\ntemp_path = p.TEMP_PATH\ndata_path = p.DATA_PATH\npackages_path = p.PACKAGES_PATH\nnum_packages = p.num_packages\n\n\ndef mae_aggregation():\n print('Balanced preference for G=', balanced_preference)\n temp_pref_u_P = pref_u_P\n sum_pref = pref_u_P.sum()\n temp_pref_u_P['percentage'] = temp_pref_u_P.divide(sum_pref, axis=0)\n percentage_data = temp_pref_u_P['percentage']\n percentage_data = percentage_data.to_dict()\n temp_pref_u_P['difference'] = abs(temp_pref_u_P['percentage'] - balanced_preference)\n mae_score = temp_pref_u_P['difference'].mean()\n\n return mae_score\n\n\n\ndef find_package_fairness(row):\n global G\n item1 = int(row['c1_item'])\n item2 = int(row['c2_item'])\n fair = 0\n for user in G:\n c1 = item1 in c1_top_rated_items[user]\n c2 = item2 in c2_top_rated_items[user]\n result = c1 or c2\n fair = fair + result\n fair_score = fair / len(G)\n\n return fair_score\n\ndef get_index_series(index_list):\n idx_list = []\n for i in index_list:\n idx = i[0]\n idx_list.append(idx)\n\n return idx_list\n\n\ndef final_packages(row):\n item1 = int(row['c1_item'])\n item2 = int(row['c2_item'])\n package = (item1, item2)\n top_packages.append(package)\n\n\n###############################################################\nif __name__ == \"__main__\":\n print(\"GR model\")\n\n Arguments.create_parser(sys.argv, globals()) # globals(): pass the global vars from one module to another\n print(\"kitems:\", kitems)\n top_k = int(kitems)\n\n filename = temp_path+'Extract_User_Preferences.obj'\n data = dh.load_Data(filename)\n ratings_c1 = data[0]\n ratings_c2 = data[1]\n\n '''\n Extract users\n '''\n G = 
list(ratings_c1.index)\n print('[INFO] Users', G)\n g_size = len(G)\n\n '''\n Extract top rated items for each user\n '''\n c1_top_rated_items = defaultdict(list)\n c2_top_rated_items = defaultdict(list)\n\n c1_total_top_rated_items = p.top_k_items[ 0 ]\n c2_total_top_rated_items = p.top_k_items[ 0 ]\n\n for user in G:\n # Category 1\n x_top = ratings_c1.loc[user].sort_values(ascending=False)[:c1_total_top_rated_items]\n x = x_top.index.labels[0].tolist()\n x = [x+1 for x in x]\n c1_top_rated_items[user] = x\n\n # Category 2\n x_top = ratings_c2.loc[user].sort_values(ascending=False)[:c2_total_top_rated_items]\n x = x_top.index.labels[0].tolist()\n x = [x + 1 for x in x]\n c2_top_rated_items[user] = x\n\n print( '[INFO] Top %d (fixed) rated items for each user in each category' % (p.top_k_items[ 0 ]) )\n print('[INFO] Category 1')\n print(c1_top_rated_items)\n print('[INFO] Category 2')\n print(c2_top_rated_items)\n\n '''\n Load initial ratings for Category 1\n '''\n print('[INFO] Loding initial C1 ratings')\n matrix_filename = data_path+\"M_u_i_dataset.1.txt\"\n M_u_i = dh.read_file_to_df(matrix_filename)\n c1_ratings = pd.DataFrame(M_u_i)\n index = [x + 1 for x in c1_ratings.index]\n columns = [x + 1 for x in c1_ratings.columns]\n c1_ratings = c1_ratings.set_index([index])\n c1_ratings.columns = [columns]\n c1_initial_ratings = c1_ratings.loc[G]\n\n '''\n Load initial ratings for Category 2\n '''\n print('[INFO] Loding initial C2 ratings')\n matrix_filename = data_path+\"M_u_i_dataset.2.txt\"\n M_u_i = dh.read_file_to_df(matrix_filename)\n c2_ratings = pd.DataFrame(M_u_i)\n index = [x + 1 for x in c2_ratings.index]\n columns = [x + 1 for x in c2_ratings.columns]\n c2_ratings = c2_ratings.set_index([index])\n c2_ratings.columns = [columns]\n c2_initial_ratings = c2_ratings.loc[G]\n\n print('[INFO] Calculating Expertiness per user in C1')\n c1_initial_ratings.replace(0.0,np.NaN, inplace=True)\n c1_count = c1_initial_ratings.count(axis=1)\n c1_total_rated_items = c1_count.sum()\n c1_expertiness = c1_count / c1_total_rated_items\n\n del c1_initial_ratings\n del c1_ratings\n del c1_count\n\n print('[INFO] Calculating Expertiness per user in C2')\n c2_initial_ratings.replace(0.0, np.NaN, inplace=True)\n c2_count = c2_initial_ratings.count(axis=1)\n c2_total_rated_items = c2_count.sum()\n c2_expertiness = c2_count / c2_total_rated_items\n print(c2_expertiness)\n\n del c2_initial_ratings\n del c2_ratings\n del c2_count\n\n '''\n Multiply preference X pct_user\n '''\n func = lambda x: np.asarray(x) * np.asarray(c1_expertiness)\n ratings_c1 = ratings_c1.apply(lambda col: col * c1_expertiness)\n ratings_c2 = ratings_c2.apply(lambda col: col * c2_expertiness)\n\n '''\n Create the I2G for each category\n '''\n print('[INFO] Generating I2G for each category')\n ### Category 1\n I2G_c1 = ratings_c1.sum(axis=0)\n I2G_c1.sort_values(ascending=False, inplace=True)\n I2G_c1 = I2G_c1[:top_k]\n\n ### Category 2\n I2G_c2 = ratings_c2.sum(axis=0)\n I2G_c2.sort_values(ascending=False, inplace=True)\n I2G_c2 = I2G_c2[:top_k]\n\n '''\n Generate packages\n '''\n print('[INFO] Generating Packages using the top-%d I2G items from each category' % top_k)\n I2G_c1_index = list(I2G_c1.index.values)\n I2G_c2_index = list(I2G_c2.index.values)\n I2G_c1_index = get_index_series(I2G_c1_index)\n I2G_c2_index = get_index_series(I2G_c2_index)\n packages = list(product(I2G_c1_index, I2G_c2_index))\n print('[INFO] We formed %d packages' % len(packages))\n print('[INFO] Packages')\n print(packages)\n\n '''\n Calculate P2G\n '''\n 
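# P2G of a package (i1, i2) is simply I2G_c1[i1] * I2G_c2[i2]; further down\n    # this score is multiplied by the group-fairness term, i.e.\n    # score_f = P2G * |{u in G : i1 in top_c1(u) or i2 in top_c2(u)}| / |G|.\n    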
print('[INFO] Calculating P2G (Multiplying I2G value for each category)')\n cols = ['c1_item', 'c2_item', 'g_score'] #g_score is P2G\n df = pd.DataFrame(columns=cols)\n\n for package in packages:\n c1_item = package[0]\n c2_item = package[1]\n value1 = I2G_c1.loc[c1_item][0]\n value2 = I2G_c2.loc[c2_item][0]\n package_group_score = value1 * value2\n row = [c1_item, c2_item, package_group_score]\n df = df.append(pd.Series(row, index=cols), ignore_index=True)\n\n df = df.astype({\"c1_item\": int, \"c2_item\": int, \"g_score\": float})\n pd.options.display.precision = 10\n\n '''\n Calculate the fairness of each package\n '''\n df['fairness'] = df.apply(find_package_fairness, axis=1)\n\n '''\n Calculate the SCORE_fairness\n '''\n print('[INFO] Calculate the Score_final for each package')\n df['score_f'] = df['g_score'] * df['fairness']\n df.sort_values(by=['score_f'], ascending=False, inplace=True)\n\n '''\n Pick the top-k packages\n '''\n print('[INFO] Pick the final top-%d packages' % num_packages)\n df_packages = df[:num_packages]\n top_packages = []\n df_packages.apply(final_packages, axis=1)\n\n '''\n Export packages for next stage\n '''\n data = top_packages\n dh.save_data(data, packages_path+'GRmodel_top_Packages.obj')\n print( \"Top {} Packages:\".format( num_packages ) )\n print( top_packages )\n dh.save_top_packages_list( packages_path + \"GRmodel_top_packages.txt\", top_packages )\n\n '''\n Calculate Error \n '''\n print('[INFO] Calculate Balance Error')\n '''\n Load the dense matrix\n '''\n filename = temp_path+'Build_Dense_Matrix.' + str(1) + '.obj'\n data = dh.load_Data(filename)\n '''\n Load importance information\n '''\n importance_c1 = data[3]\n\n '''\n Load the dense matrix\n '''\n filename = temp_path+'Build_Dense_Matrix.' + str(2) + '.obj'\n data = dh.load_Data(filename)\n ratings_G = data[0]\n '''\n Load importance information\n '''\n importance_c2 = data[3]\n\n\n balanced_preference = 1 / ratings_G.shape[0]\n top_k_total_error = 0\n mae_percentage_data = []\n for p in top_packages:\n item_c1 = [p[0]]\n item_c2 = [p[1]]\n p1 = importance_c1.loc[:, item_c1]\n p2 = importance_c2.loc[:, item_c2]\n pref_u_P = pd.concat([p1, p2], axis=1)\n pref_u_P = pref_u_P.sum(axis=1)\n\n '''\n Calculate Percentage\n '''\n\n error_G_P = mae_aggregation()\n print('error_G_P=', error_G_P)\n top_k_total_error = top_k_total_error + error_G_P\n\n print('Total error for the top %d recommended packages %f' % (int(num_packages), top_k_total_error))\n print('Sum of errors=' + str(top_k_total_error))\n","repo_name":"huipingcao/ICWS2019","sub_path":"model/GRmodel_baseline.py","file_name":"GRmodel_baseline.py","file_ext":"py","file_size_in_byte":8911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17935326522","text":"# Databricks notebook source\nfrom databricks.feature_store import feature_table\nimport pyspark.pandas as ps\n\n# COMMAND ----------\n\nfrom pyspark.sql import functions as F\n\ngamma = spark.table('dufry_data_load.gamma_silver_merge')\nitems = spark.table('dufry_data_load.sap_item_global_silver')\n\ndata = gamma \\\n .join(items, how='left', on=gamma['GLOBAL_ITEM_CODE'] == items['GlobalItemCode']) \\\n .withColumn('DAY_OF_WEEK', F.dayofweek(gamma['REAL_DATE_OF_SALE'])) \\\n .withColumn('RECEIPT_ID', F.sha2(F.concat(\n gamma['REAL_DATE_OF_SALE'], F.lit('_'),\n gamma['TIME_OF_SALE'], F.lit('_'),\n gamma['GLOBAL_COMPANY_ID'], F.lit('_'),\n gamma['LOCAL_STORE_CODE'], F.lit('_'),\n gamma['SALES_RECEIPT_NUMBER']), 256))\n\n# 
COMMAND ----------\n\ndata.display()\n\n# COMMAND ----------\n\nimport re\n\ndef compute_features(data):\n \n # Convert to koalas\n data = data.pandas_api()\n \n # OHE\n data = ps.get_dummies(data, \n columns=['SAPDufryCategoryDesc'], dtype = 'int64')\n\n# 'NATIONALITY_CODE': 'first',\n# 'AIRLINE_CODE': 'first',\n\n data = data.groupby(\"RECEIPT_ID\").aggregate({\n 'RECEIPT_ID': 'first',\n 'GENDER_CODE': 'first',\n 'DAY_OF_WEEK': 'first',\n 'SALES_RECEIPT_LINE': 'max',\n 'SAPDufryCategoryDesc_Alcoholic Beverages': 'max'\n })\n\n # OHE 2 (column names must match the aggregated frame above)\n data = ps.get_dummies(data, \n columns=['GENDER_CODE', 'DAY_OF_WEEK'], dtype = 'int64')\n \n # Rename feature column \n # data = data.rename({'SAPDufryCategoryDesc_Alcoholic Beverages': 'bought_alcohol'})\n\n # Clean up column names\n data.columns = [re.sub(r'[\\(\\)]', ' ', name).lower() for name in data.columns]\n data.columns = [re.sub(r'[ -]', '_', name).lower() for name in data.columns]\n\n # Drop missing values\n data = data.dropna()\n \n return data\n\n# COMMAND ----------\n\nfeatures_df = compute_features(data)\n\n# COMMAND ----------\n\nfor f in features_df.columns:\n print(f)\n\n# COMMAND ----------\n\nfeatures_df.display()\n\n# COMMAND ----------\n\nfrom databricks.feature_store import FeatureStoreClient\n\nfs = FeatureStoreClient()\n\ntry:\n # drop table if it exists\n fs.drop_table('dufry_data_load.model_alcohol_bev')\nexcept:\n pass\n# Note: You might need to delete the FS table using the UI\nfeature_table = fs.create_table(\n name='dufry_data_load.model_alcohol_bev',\n primary_keys='receipt_id',\n schema=features_df.spark.schema(),\n description='Feature table'\n)\n\nfs.write_table(df=features_df.to_spark().limit(100000), name='dufry_data_load.model_alcohol_bev', mode='overwrite')\n","repo_name":"jakubaugustin/dufry_ws","sub_path":"ML/Feature Engineering.py","file_name":"Feature Engineering.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43125094457","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 17 21:48:11 2020\r\n\r\n@author: Admin_invitado\r\n\"\"\"\r\nimport csv\r\n\r\n# Node class for the clients\r\nclass node:\r\n    def __init__(self, Id = None, Tipo=None, Operacion=None, Tiempo=0, next = None):\r\n        self.Id = Id\r\n        self.Tipo=Tipo\r\n        self.Operacion=Operacion\r\n        self.Tiempo=Tiempo\r\n        self.next = next\r\n\r\n    def __repr__(self):\r\n        return str(self.__dict__)\r\n\r\n# Linked list class\r\nclass linked_list:\r\n    def __init__(self):\r\n        self.head = None\r\n\r\n    # Method that inserts a node at the head of the list\r\n    # (note: despite its name it prepends, so these nodes are printed first)\r\n    def add_at_cola(self, nodo):\r\n        nodo.next=self.head\r\n        self.head = nodo\r\n\r\n    # Method to check whether the data structure is empty\r\n    def is_empty(self):\r\n        return self.head == None\r\n\r\n    # Method that appends a node at the tail of the list\r\n    def add_at_primero(self, nodo):\r\n        if not self.head:\r\n            self.head = nodo\r\n            return\r\n        curr = self.head\r\n        while curr.next:\r\n            curr = curr.next\r\n        curr.next = nodo\r\n\r\n    # Method to delete a node by its Id value\r\n    def delete_node(self, key):\r\n        curr = self.head\r\n        prev = None\r\n        while curr and curr.Id != key:\r\n            prev = curr\r\n            curr = curr.next\r\n        if prev is None and curr:\r\n            self.head = curr.next\r\n        elif curr:\r\n            prev.next = curr.next\r\n            curr.next = None\r\n\r\n    # Method to get the last node\r\n    def get_last_node(self):\r\n        temp = self.head\r\n        while(temp.next is not None):\r\n            temp = temp.next\r\n        return temp\r\n\r\n    # Method to print the list of nodes\r\n    def print_list( self ):\r\n        node = self.head\r\n        while node != None:\r\n            print (\"Client ID {0} of Type {1} and service time {2}\".format(node.Id, node.Tipo, node.Tiempo))\r\n            node = node.next\r\n\r\n# This function reads a comma-separated .csv file\r\ndef leer_archivo_csv(vector, archivo):\r\n    '''\r\n    Each line of the file carries IdCliente, client type and operation\r\n    The client type is P (preferential), T (senior) and N (normal)\r\n    The operation to perform is C (deposit), R (withdraw) and T (transfer)\r\n    '''\r\n    with open(archivo, newline='') as File:  \r\n        reader = csv.reader(File)\r\n        for row in reader:\r\n            '''\r\n            Each row carries the 3 fields Id, Tipo and operation\r\n            row is a list whose three elements become the node's fields,\r\n            so they are moved into a node for whichever list we want to use\r\n            '''\r\n            v=node(Id = row[0], Tipo=row[1], Operacion=row[2], Tiempo=0) \r\n            #v=node(row[0], row[1], row[2], Tiempo=0)\r\n            vector.append(v)\r\n\r\n\r\n\r\n# Main program\r\nprint(\"** WELCOME **\")\r\n'''\r\nPlace the code for the menu options here\r\n'''\r\n\r\n'''\r\nNext the data is read from the file\r\nand stored in a plain list (vector)\r\nwithout taking the client type into account\r\n'''\r\nv=[] # declare an empty list\r\nleer_archivo_csv(v, 'clientes.csv')\r\n\r\n'''\r\nNow walk the list and apply the rules of the problem;\r\nthis example only builds the linked list,\r\nwithout the problem's rules\r\n'''\r\n\r\ncola = linked_list() # instance of the list class\r\nfor elemento in v:\r\n    if elemento.Tipo ==\"T\":\r\n        print(\"Senior client\")\r\n        # add_at_primero appends at the tail; adjust to the problem's priority rules\r\n        cola.add_at_primero(elemento)\r\n    elif elemento.Tipo==\"P\":\r\n        print(\"Preferential client\")\r\n        # add_at_cola prepends at the head; adjust to the problem's priority rules\r\n        cola.add_at_cola(elemento)\r\n    else:\r\n        print(\"Regular client\")\r\n        # added at the head as well; this is the regular client case\r\n        cola.add_at_cola(elemento)\r\n\r\n'''\r\ns.add_at_front(5) # add an element at the front\r\ns.add_at_end(8)   # add an element at the end\r\ns.add_at_front(9) # add another element at the front\r\n'''\r\ncola.print_list() # print the list of nodes\r\n\r\n# Example of deleting a node by its ID\r\ncola.delete_node(\"1015\")\r\nprint(\"** test **\")\r\ncola.print_list() # print the list of nodes","repo_name":"Hardken/CrusoATSpython","sub_path":"armar_lista_lab2.py","file_name":"armar_lista_lab2.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10293754754","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nsys.path.append(\"..\")\nimport crypten\nimport crypten.mpc as mpc\nimport crypten.communicator as comm\n\nfrom crypten.mpc import MPCTensor\nfrom crypten.mpc.ptype import ptype as Ptype\n\nimport torchvision\nimport torchvision.models as models\nimport torch.autograd.profiler as profiler\n\nimport logging\nimport time\nimport timeit\nimport argparse\n\nfrom network import *\nfrom torch.utils.data import DataLoader\n\n\ndef test_accuracy(model, input_size, batch_size, num_classes, device=\"cuda\"):\n    comm.get().set_verbosity(True)\n    
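# test_accuracy trains the model under CrypTen's MPC protocol on CIFAR-10\n    # (labels are one-hot encoded before secret sharing, which is what\n    # crypten.nn.CrossEntropyLoss expects here) and then decrypts the logits\n    # to measure plaintext accuracy on the test split.\n    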
rank = comm.get().get_rank()\n #读取数据\n #train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, transform=torchvision.transforms.ToTensor(), download=True)\n #test_data = torchvision.datasets.MNIST(root='./mnist/', train=False, transform=torchvision.transforms.ToTensor(), download=True)\n train_data = torchvision.datasets.CIFAR10(root='./cifar10/', train=True, transform=torchvision.transforms.ToTensor(), download=True)\n \n test_data = torchvision.datasets.CIFAR10(root='./cifar10/', train=False, transform=torchvision.transforms.ToTensor(), download=True)\n train_dataloader = DataLoader(train_data, batch_size=batch_size)\n test_dataloader = DataLoader(test_data, batch_size=batch_size)\n\n c, h, w = input_size\n bs = batch_size\n #print(model)\n criterion = crypten.nn.CrossEntropyLoss()\n model = crypten.nn.from_pytorch(model, dummy_input=torch.empty(bs, c, h, w))\n #print(model._modules)\n model = model.to(device)\n model.encrypt() \n model.train()\n\n for data in train_dataloader:\n #it_num += 1\n img, targets = data\n img = img.cuda()\n targets = targets.cuda()\n\n labels = F.one_hot(targets, num_classes=num_classes)\n\n labels = crypten.cryptensor(labels, src=0)\n #input = torch.randn([bs,c,w,h], requires_grad=False)\n input = crypten.cryptensor(img, src=0)\n #print(labels)\n #训练\n tic = time.perf_counter()\n output = model(input)\n toc = time.perf_counter()\n #print(output.get_plain_text())\n loss = criterion(output, labels)\n print(loss.get_plain_text(),toc-tic)\n\n model.zero_grad()\n loss.backward()\n model.update_parameters(learning_rate=0.1)\n \n \n #预测部分\n model.eval()\n #model.replicate_parameters()\n total_accuracy = 0\n total_num = 0 \n #tic = time.perf_counter()\n ii_num=0\n \n for data in test_dataloader:\n ii_num+=1\n img, targets = data\n img = img.cuda()\n targets = targets.cuda()\n \n labels = F.one_hot(targets, num_classes=num_classes)\n labels = crypten.cryptensor(labels, src=0)\n\n input = crypten.cryptensor(img, src=0).to(device)\n \n output = model(input)\n \n #loss = criterion(output, labels)\n\n output = output.get_plain_text()\n #print(output.argmax(1))\n\n total_accuracy += (output.argmax(1) == targets).sum()\n \n total_num += targets.shape[0]\n print(total_accuracy,total_num)\n \n #print(f\"infer Runtime: {(t_t) / ii_num}\")\n print(f\"accuracy: {total_accuracy / total_num}\")\ndef inference(model, input_size, batch_size=1, device=\"cuda\"):\n comm.get().set_verbosity(True)\n\n bs = batch_size\n c, w, h = input_size\n x = crypten.cryptensor(torch.rand((bs, c, w, h)), device=device, requires_grad=False)\n\n model = crypten.nn.from_pytorch(model, dummy_input=torch.empty(bs, c, w, h))\n model = model.encrypt()\n model = model.to(device) \n\n model.eval()\n model.replicate_parameters()\n\n total_time = 0\n comm_time = 0\n conv_time, pool_time, relu_time, matmul_time = 0, 0, 0, 0\n for i in range(6):\n comm.get().reset_communication_stats()\n \n tic = time.perf_counter()\n model(x)\n toc = time.perf_counter()\n\n if i != 0:\n total_time += toc - tic\n comm_time += comm.get().comm_time\n conv_time += comm.get().time_conv\n relu_time += comm.get().time_relu\n pool_time += comm.get().time_pool\n matmul_time += comm.get().time_matmul\n\n # if comm.get().get_rank() == 0:\n # print(f\"Iteration {i} runtime: {toc - tic}\")\n\n comm.get().print_total_communication()\n\n if comm.get().get_rank() == 0:\n print(\"----------- Statistics ----------------\")\n print(f\"Total Communication: {comm.get().total_comm_bytes}\")\n print(f\"Avg Runtime: {total_time / 5}\")\n 
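# Iteration 0 is excluded above as a GPU/communicator warm-up run, which is\n        # why six iterations are timed but the averages divide by 5.\n        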
print(f\"Avg Comm: {comm_time / 5}\")\n print(f\"Avg Linear: {(conv_time + matmul_time)/ 5}\")\n print(f\"Avg ReLU: {relu_time / 5}\")\n print(f\"Avg Pool: {pool_time / 5}\")\n\n\ndef training(model, input_size, batch_size, num_classes, device=\"cuda\"):\n comm.get().set_verbosity(True)\n rank = comm.get().get_rank()\n\n c, h, w = input_size\n bs = batch_size\n criterion = crypten.nn.CrossEntropyLoss()\n model = crypten.nn.from_pytorch(model, dummy_input=torch.empty(bs, c, h, w))\n model = model.to(device)\n model.encrypt() \n model.train()\n\n labels = torch.ones(bs, requires_grad=False).long().to(device)\n labels = F.one_hot(labels, num_classes=num_classes)\n labels = crypten.cryptensor(labels, src=0)\n\n input = torch.randn([bs,c,w,h], requires_grad=False)\n input = crypten.cryptensor(input, src=0).to(device)\n\n total_time = 0\n comm_time = 0\n conv_time, pool_time, relu_time, matmul_time, softmax_time = 0, 0, 0, 0, 0\n #from torch.cuda.amp import autocast as autocast\n time_t=6\n for i in range(time_t):\n comm.get().reset_communication_stats()\n tic = time.perf_counter()\n #with autocast():\n output = model(input)\n\n loss = criterion(output, labels)\n\n #model.zero_grad()\n loss.backward()\n model.update_parameters(learning_rate=0.1)\n\n toc = time.perf_counter()\n\n if i != 0:\n total_time += toc - tic\n comm_time += comm.get().comm_time\n conv_time += comm.get().time_conv\n relu_time += comm.get().time_relu\n pool_time += comm.get().time_pool\n matmul_time += comm.get().time_matmul\n softmax_time += comm.get().time_softmax\n\n # if comm.get().get_rank() == 0:\n # print(f\"Iteration {i} runtime: {toc - tic}\")\n\n comm.get().print_total_communication()\n\n if comm.get().get_rank() == 0:\n print(\"----------- Statistics ----------------\")\n print(f\"Total Communication: {comm.get().total_comm_bytes}\")\n print(f\"Avg Runtime: {total_time / 5}\")\n print(f\"Avg Comm: {comm_time / 5}\")\n print(f\"Avg Linear: {(conv_time + matmul_time )/ 5}\")\n print(f\"Avg ReLU: {relu_time / 5}\")\n print(f\"Avg Pool: {pool_time / 5}\")\n print(f\"Avg Softmax: {softmax_time / 5}\")\n\n\n\ndef inference_plaintext(model, input_size, device=\"cuda\"):\n\n c, w, h = input_size\n x = torch.rand((1, c, w, h), device=device, requires_grad=False)\n\n model = model.to(device) \n model.eval()\n\n total_time = 0\n for i in range(101):\n comm.get().reset_communication_stats()\n \n tic = time.perf_counter()\n model(x)\n toc = time.perf_counter()\n\n if i != 0:\n total_time += toc - tic\n\n comm.get().print_total_communication()\n\n if comm.get().get_rank() == 0:\n print(\"----------- Statistics ----------------\")\n print(f\"Avg Runtime: {total_time / 100}\")\n\n\ndef training_plaintext(model, input_size, batch_size, num_classes, device=\"cuda\"):\n\n c, h, w = input_size\n bs = batch_size\n\n model = model.to(device)\n model.train()\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\n\n input = torch.randn([bs,c,w,h], requires_grad=True).to(device)\n labels = torch.ones(bs, requires_grad=False).long().to(device)\n\n total_time = 0\n comm_time = 0\n conv_time, pool_time, relu_time, matmul_time = 0, 0, 0, 0\n for i in range(101):\n tic = time.perf_counter()\n\n output = model(input)\n optimizer.zero_grad()\n \n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n toc = time.perf_counter()\n\n if i != 0:\n total_time += toc - tic\n\n # if comm.get().get_rank() == 0:\n # print(f\"Iteration {i} runtime: {toc - tic}\")\n\n if comm.get().get_rank() 
== 0:\n print(\"----------- Statistics ----------------\")\n print(f\"Avg Runtime: {total_time / 100}\")\n\n\ndef select_model(dataset, network):\n if dataset == \"mnist\":\n input_size = (1,28,28)\n num_classes = 10\n if network == \"lenet\":\n model = LeNet()\n elif dataset == \"cifar10\":\n input_size = (3,32,32)\n num_classes = 10\n if network == \"alexnet\":\n model = AlexNet(num_classes=10)\n elif network == \"vgg16\":\n model = VGG16(num_classes=10)\n elif dataset == 'tinyin':\n input_size = (3,64,64)\n num_classes = 200\n if network == 'alexnet':\n model = AlexNet(num_classes=200)\n elif network == \"vgg16\":\n model = VGG16(num_classes=200)\n elif dataset == 'imagenet':\n input_size = (3, 224, 224)\n num_classes = 1000\n if network == 'alexnet':\n model = AlexNet(num_classes=1000)\n elif network == \"vgg16\":\n model = VGG16(num_classes=1000)\n elif network == \"resnet34\":\n model = models.resnet34()\n model.maxpool = nn.AvgPool2d(kernel_size=3, stride=2)\n elif network == \"resnet50\":\n model = models.xin()\n model.maxpool = nn.AvgPool2d(kernel_size=3, stride=2)\n elif network == \"resnet101\":\n model = models.resnet101()\n model.maxpool = nn.AvgPool2d(kernel_size=3, stride=2)\n elif network == \"resnet152\":\n model = models.resnet152()\n model.maxpool = nn.AvgPool2d(kernel_size=3, stride=2)\n\n return model, input_size, num_classes\n\ndef train_all():\n train_config = [\n [\"mnist\", \"lenet\", 128],\n [\"cifar10\", \"alexnet\", 128],\n [\"cifar10\", \"vgg16\", 32],\n [\"tinyin\", \"alexnet\", 128],\n [\"tinyin\", \"vgg16\", 8],\n ]\n for dataset, network, bs in train_config:\n model, input_size, num_classes = select_model(dataset, network)\n if comm.get().get_rank() == 0:\n print(f\"Training on {dataset} dataset with {network} network\")\n training(model, input_size, bs, num_classes, device=\"cuda\")\n\n\ndef inference_all():\n inference_config = [\n [\"mnist\", \"lenet\"],\n [\"cifar10\", \"alexnet\"],\n [\"cifar10\", \"vgg16\"],\n [\"tinyin\", \"alexnet\"],\n [\"tinyin\", \"vgg16\"],\n [\"imagenet\", \"alexnet\"],\n [\"imagenet\", \"vgg16\"],\n [\"imagenet\", \"resnet50\"],\n [\"imagenet\", \"resnet101\"],\n [\"imagenet\", \"resnet152\"]\n ]\n for dataset, network in inference_config:\n model, input_size, num_classes = select_model(dataset, network)\n if comm.get().get_rank() == 0:\n print(f\"Running inference on {dataset} dataset with {network} network\")\n inference(model, input_size, device=\"cuda\")\n\n\ndef train_all_plaintext():\n train_config = [\n [\"mnist\", \"lenet\", 128],\n [\"cifar10\", \"alexnet\", 128],\n [\"cifar10\", \"vgg16\", 32],\n [\"tinyin\", \"alexnet\", 128],\n [\"tinyin\", \"vgg16\", 8],\n ]\n for dataset, network, bs in train_config:\n model, input_size, num_classes = select_model(dataset, network)\n if comm.get().get_rank() == 0:\n print(f\"Training on {dataset} dataset with {network} network\")\n training_plaintext(model, input_size, bs, num_classes, device=\"cuda\")\n\n\ndef inference_all_plaintext():\n inference_config = [\n [\"mnist\", \"lenet\"],\n [\"cifar10\", \"alexnet\"],\n [\"cifar10\", \"vgg16\"],\n [\"tinyin\", \"alexnet\"],\n [\"tinyin\", \"vgg16\"],\n [\"imagenet\", \"alexnet\"],\n [\"imagenet\", \"vgg16\"],\n [\"imagenet\", \"resnet50\"],\n [\"imagenet\", \"resnet101\"],\n [\"imagenet\", \"resnet152\"]\n ]\n for dataset, network in inference_config:\n model, input_size, num_classes = select_model(dataset, network)\n if comm.get().get_rank() == 0:\n print(f\"Running inference on {dataset} dataset with {network} network\")\n 
inference_plaintext(model, input_size, device=\"cuda\")\n\n\ndef batch_inference():\n inference_config = [\n [\"cifar10\", \"alexnet\", 64],\n [\"cifar10\", \"vgg16\", 64],\n [\"imagenet\", \"resnet50\", 8],\n [\"imagenet\", \"resnet101\", 8],\n [\"imagenet\", \"resnet152\", 8]\n ]\n\n for dataset, network, bs in inference_config:\n model, input_size, num_classes = select_model(dataset, network)\n inference(model, input_size, bs, device='cuda')\n\n# A playground to test different network and dataset combinations\ndef test():\n dataset = \"mnist\"\n\n network = \"lenet\"\n device = \"cuda\"\n train = False\n batch_size = 128\n \n model, input_size, num_classes = select_model(dataset, network)\n\n\n if train:\n training(model, input_size, batch_size, num_classes, device)\n else:\n inference(model, input_size, batch_size, device)\n\ndef acc():\n dataset = \"cifar10\"\n\n network = \"alexnet\"\n device = \"cuda\"\n train = True\n batch_size = 128\n \n model, input_size, num_classes = select_model(dataset, network)\n\n test_accuracy(model, input_size, batch_size, num_classes, device)\n\nparser = argparse.ArgumentParser()\nexperiments = ['test', 'train_all', 'inference_all', 'train_all_plaintext', 'inference_all_plaintext', 'batch_inference','acc','training_plaintext']\nparser.add_argument(\n \"--exp\",\n \"-e\",\n required=False,\n default=\"test\",\n help=\"Experiment to run\",\n)\n\nif __name__ == '__main__':\n #测试relu\n '''\n import re_lu\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n a=torch.randint(1,2,[1500])\n b=torch.randint(1,1000,[1500])\n c=torch.randint(1,10,[1500])\n\n a=a.to(device)\n b=b.to(device)\n c=c.to(device)\n\n tic = time.perf_counter()\n re_lu.torch_launch_relu(c,a,b,b,1500)\n print(c)\n toc = time.perf_counter()\n print(toc-tic)\n \n import mat_mul\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n a=torch.randint(-10000,10000,[128,500])\n b=torch.randint(-100000,1000000,[500,20])\n c=torch.randint(1,2,[128,20])\n\n a=a.to(device)\n b=b.to(device)\n c=c.to(device)\n import crypten\n crypten.init()\n x=crypten.cuda.CUDALongTensor(a)\n y=crypten.cuda.CUDALongTensor(b) \n tic = time.perf_counter()\n z=crypten.cuda.CUDALongTensor.matmul(x, y)\n toc = time.perf_counter()\n print(toc-tic)\n print(z)\n tic = time.perf_counter()\n mat_mul.torch_launch_matmul(c,a,b,128, 500, 20)\n print(c)\n toc = time.perf_counter()\n print(toc-tic)\n\n import mat_mul\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n a = torch.LongTensor([(1<<62)-1])\n b = torch.LongTensor([-((1<<62)-2)])\n\n c=a*1024*1024\n d=b*1024*1024\n print(c+d)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n a=torch.randint(0,10,[2,3,3,3],device=device)\n print(a)\n b = a.permute(1,0,2,3)\n print(b.reshape(b.shape[0],-1))\n \n \n import torchvision\n from torch.utils.data import DataLoader\n\n train_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True, # this is training data\n transform=torchvision.transforms.ToTensor(), # Converts a PIL.Image or numpy.ndarray to\n # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]\n download=True,\n )\n train_dataloader = DataLoader(train_data, batch_size=10)\n for data in train_dataloader:\n img, targets=data\n print(img.shape,targets.shape)\n \n \n from crypten.common.util import im2col_indices, col2im_indices, im2col\n input = torch.randint(0,10,[2,4])\n bias = torch.randint(1,2,[2])\n print(input)\n 
print(input.transpose(0,1)-bias)\n    #input = input.reshape(1,-1)\n    #a = im2col(input, 2, 2, padding=0, stride=2)\n    #a = a.reshape(2,2,-1)\n    #print(a)\n    '''\n    import os\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n    import multiprocess_launcher\n    args = parser.parse_args()\n    assert args.exp in experiments\n    func = globals()[args.exp]\n\n    launcher = multiprocess_launcher.MultiProcessLauncher(\n        3, func,\n    )\n    launcher.start()\n    launcher.join()\n    launcher.terminate()\n    \n","repo_name":"yh9-4/CryptGPU","sub_path":"scripts/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":17016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
{"seq_id":"27418749361","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom sklearn.model_selection import KFold\nfrom preprocess import dataset\n\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\nfrom model import *\n\ndef predict(model, data_loader):\n    predictions = []\n    model.eval()  # Set the model to evaluation mode\n    with torch.no_grad():\n        for inputs, _ in data_loader:\n            inputs = inputs.to(device)  # Move the inputs to the device (if using a GPU)\n            outputs = model(inputs)  # Forward pass\n            _, predicted_labels = torch.max(outputs, dim=1)  # Take the predicted labels\n            predictions.extend(predicted_labels.tolist())\n    return predictions\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(device)\n\nmodel=Net2().to(device)\n# Define the loss function and the optimizer\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.Adam(model.parameters(), lr=0.001)\n\n# Define the training function\ndef train(model, train_loader, optimizer, criterion):\n    model.train()\n    for inputs, targets in train_loader:\n        inputs, targets=inputs.to(device),targets.to(device)\n        optimizer.zero_grad()\n\n        # Forward pass\n        outputs = model(inputs)\n        loss = criterion(outputs, targets)\n\n        # Backward pass and optimization\n        loss.backward()\n        optimizer.step()\n\n# Define the test function\ndef test(model, test_loader):\n    model.eval()\n    correct = 0\n    total = 0\n    with torch.no_grad():\n        for inputs, targets in test_loader:\n            inputs, targets=inputs.to(device),targets.to(device)\n            outputs = model(inputs)\n            _, predicted = torch.max(outputs.data, 1)\n            total += targets.size(0)\n            correct += (predicted == targets).sum().item()\n    accuracy = correct / total\n    return accuracy\n\n# Assumes the datasets train_dataset / test_dataset are already prepared\n\n# Define the data loaders\n\n\n# Define 5-fold cross-validation\nkfold = KFold(n_splits=5, shuffle=True,random_state=43)\n\n\n\nacc_count = 0.0\nprecision_count = 0.0\nrecall_count = 0.0\nf1_count = 0.0\nnum_folds = 5  # Number of folds\n\nfor fold, (train_indices, test_indices) in enumerate(kfold.split(dataset)):\n    # Create subset loaders for this fold's training and validation splits\n    train_subset = torch.utils.data.Subset(dataset, train_indices)\n    test_subset = torch.utils.data.Subset(dataset, test_indices)\n    train_loader = torch.utils.data.DataLoader(train_subset, batch_size=32, shuffle=True)\n    test_loader = torch.utils.data.DataLoader(test_subset, batch_size=32, shuffle=False)\n\n    # Create a fresh model for this fold, then train and test it\n    model = Net2().to(device)  # Re-initialize so folds do not share trained weights\n    optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n    for epoch in range(30):\n        train(model, train_loader, optimizer, criterion)\n\n    accuracy = test(model, test_loader)\n    predictions = predict(model, test_loader)\n\n    # Compute precision, recall and the F1 score\n    targets = [label for _, label in test_subset]\n    precision = precision_score(targets, predictions, average='weighted', zero_division=0)\n    recall = recall_score(targets, predictions, average='weighted', zero_division=0)\n    f1 = f1_score(targets, predictions, average='weighted', zero_division=0)\n\n    acc_count += accuracy\n    precision_count += precision\n    
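The fold loop above reports precision, recall and F1 with average='weighted'. A tiny sketch of what that choice means, using the same sklearn call as the script; 'weighted' scales each per-class score by that class's frequency in the true labels, while 'macro' gives every class equal weight:

from sklearn.metrics import precision_score

y_true = [0, 0, 0, 1]
y_pred = [0, 0, 1, 1]
# class 0 precision = 1.0 (support 3), class 1 precision = 0.5 (support 1)
print(precision_score(y_true, y_pred, average="weighted", zero_division=0))  # 0.875
print(precision_score(y_true, y_pred, average="macro", zero_division=0))     # 0.75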
recall_count += recall\n f1_count += f1\n print(f\"Fold {fold + 1}:\")\n print(f\"Accuracy: {accuracy:.4f}\")\n print(f\"Precision: {precision:.4f}\")\n print(f\"Recall: {recall:.4f}\")\n print(f\"F1-Score: {f1:.4f}\")\n\naverage_accuracy = acc_count / num_folds\naverage_precision = precision_count / num_folds\naverage_recall = recall_count / num_folds\naverage_f1 = f1_count / num_folds\n\nprint(f\"Average Accuracy: {average_accuracy:.4f}\")\nprint(f\"Average Precision: {average_precision:.4f}\")\nprint(f\"Average Recall: {average_recall:.4f}\")\nprint(f\"Average F1-Score: {average_f1:.4f}\")","repo_name":"FuWenda/datamining","sub_path":"NN_Classification.py","file_name":"NN_Classification.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34285022780","text":"lst=[4,2,1,6,7,8] #output 3,1,0,7,8,9\n\n\nresult=list(map(lambda num:num-1 if num<5 else num+1,lst))\nprint(result)\n\n# if num<5:\n# num-1\n# else:\n# num+1","repo_name":"vivekvgsk/pythonallpgm","sub_path":"functionalprogramming/listmap.py","file_name":"listmap.py","file_ext":"py","file_size_in_byte":157,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73087942614","text":"import base64\nimport argparse\nimport os\nimport time\nimport random\nimport string\nimport googleapiclient.discovery\nfrom google.cloud import pubsub_v1\nimport json\nfrom google.cloud import error_reporting\nimport logging\nimport sys\nimport os\nimport firebase_admin\nfrom firebase_admin import credentials, firestore, storage\nimport yaml\n\ncred = credentials.ApplicationDefault()\nprint(cred)\nfirebase_admin.initialize_app(cred)\ndb = firestore.Client()\nsubscriber = pubsub_v1.SubscriberClient()\n\ndef spawn_from_subscription():\n # TODO: use the firebase default bucket, not manually specified\n # TODO: ack messages that cause failure, otherwise system will get stuck\n bucket = storage.bucket(os.environ['OCQ_BUCKET'])\n evproject = os.environ['GCP_PROJECT']\n sub_name = os.environ['OCQ_JOB_START_SUB']\n subscription_path = subscriber.subscription_path(evproject, sub_name)\n print(\"Subscription:\" + subscription_path)\n response = subscriber.pull({\n 'subscription':subscription_path,\n 'max_messages':1,\n })\n if not response.received_messages:\n print('No jobs to launch')\n return\n msg = response.received_messages[0]\n subscriber.modify_ack_deadline(request={\n 'subscription':subscription_path,\n 'ack_ids': [msg.ack_id],\n 'ack_deadline_seconds':60\n })\n ack_id = msg.ack_id\n job_id = msg.message.data.decode('utf8')\n print(\"AckID:\" + str(ack_id))\n print(f'JobID:{job_id}')\n job_document = db.collection('jobs').document(job_id)\n job_data = job_document.get().to_dict()\n run_config = {'run':{\n 'genome': job_data['genome'],\n 'skip': ['reporter'],\n 'annotators': job_data['annotators'],\n }}\n config_blob = bucket.blob(f'jobs/{job_id}/config.yml')\n config_blob.upload_from_string(yaml.dump(run_config))\n filepath = job_data['inputPaths'][job_data['inputNames'][0]]\n configfilepath = config_blob.name\n basefilepath = configfilepath.rsplit('/', 1)[0]\n ocinput = 'gs://'+bucket.name + '/' + filepath\n occonfig = 'gs://'+bucket.name + '/' + configfilepath\n filename = filepath.rsplit('/', 1)[1]\n configfilename = configfilepath.rsplit('/', 1)[1]\n print(\"Bucket:\" + bucket.name)\n print(\"Basefilepath:\" + basefilepath)\n print(\"Inputs:\" + ocinput)\n print(\"Filename:\" + filename)\n 
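spawn_from_subscription above pulls a single job message and then extends its ack deadline so Pub/Sub does not redeliver it while the worker VM boots. A stripped-down sketch of that pull/extend pattern, mirroring the same client calls the function uses (project and subscription names here are placeholders):

from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()
path = subscriber.subscription_path("my-project", "my-sub")  # placeholder names
resp = subscriber.pull({"subscription": path, "max_messages": 1})
if resp.received_messages:
    msg = resp.received_messages[0]
    # Hold the message for another 60s; ack only once the job is safely handed off.
    subscriber.modify_ack_deadline({
        "subscription": path,
        "ack_ids": [msg.ack_id],
        "ack_deadline_seconds": 60,
    })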
print(\"Config:\" + occonfig)\n print(\"Config Filename:\" + configfilename)\n compute = googleapiclient.discovery.build('compute', 'v1')\n image_check = compute.images().getFromFamily(\n project=evproject, \n family=os.environ['OCQ_INSTANCE_FAMILY'],\n ).execute()\n region = os.environ['FUNCTION_REGION']\n zone = 'a'\n evzone = f'{region}-{zone}'\n source_disk_image = image_check['selfLink']\n machine_type = 'zones/' + evzone + '/machineTypes/n1-highcpu-8' \n instance_name = \"oc-compute-instance-\" + job_id.lower()\n service_acct = os.environ['OCQ_SERVICE_ACCOUNT_EMAIL']\n startup = open(os.path.join(os.path.dirname(__file__), 'startup.sh'), 'r').read()\n done_topic = os.environ['OCQ_JOB_DONE_TOPIC']\n worker_label = os.environ['OCQ_WORKER_LABEL']\n config = {\n 'name': instance_name,\n \"serviceAccounts\": [\n {\n \"email\": service_acct,\n \"scopes\": [\"https://www.googleapis.com/auth/compute\",\n \"https://www.googleapis.com/auth/devstorage.read_write\",\n \"https://www.googleapis.com/auth/pubsub\",\n \"https://www.googleapis.com/auth/cloud-platform\"\n ]\n }\n ],\n 'machineType': machine_type,\n 'disks': [\n {\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': source_disk_image,\n }\n }\n ],\n 'networkInterfaces': [{\n 'network': 'global/networks/default',\n 'accessConfigs': [\n {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}\n ]\n }],\n 'labels': {\n 'instance_type': worker_label,\n },\n 'metadata': {\n 'items': [\n {\n 'key': 'ocinput',\n 'value': ocinput\n },\n {\n 'key': 'occonfig',\n 'value': occonfig\n },\n {\n 'key': 'bucket',\n 'value': bucket.name\n },\n { \n 'key': 'filename',\n 'value': filename\n },\n { \n 'key': 'configfilename',\n 'value': configfilename\n },\n {\n 'key': 'basefilepath',\n 'value': basefilepath\n },\n {\n 'key': 'subscription',\n 'value': subscription_path\n },\n {\n 'key': 'ack_id',\n 'value': ack_id\n },\n {\n 'key': 'startup-script',\n 'value': startup\n },\n {\n 'key': 'done_topic',\n 'value': done_topic\n },\n {\n 'key': 'job_id',\n 'value':job_id\n }\n ]\n }\n }\n ret = compute.instances().insert(project=evproject,zone=evzone,body=config).execute()\n print(\"Launch:\" + str(ret))\n job_document.update({'status':{'code':20,'display':'Provisioning'}})\n return ret\n\ndef worker_space_available():\n # Return True if num workers is below limit\n evproject = os.environ['GCP_PROJECT']\n region = os.environ['FUNCTION_REGION']\n zone = 'a'\n evzone = f'{region}-{zone}'\n worker_label = os.environ['OCQ_WORKER_LABEL']\n compute = googleapiclient.discovery.build('compute', 'v1')\n cur_instances = compute.instances().list(project=evproject, zone=evzone).execute()\n if 'items' not in cur_instances:\n # No instances running of any type\n return True\n else:\n matching_instances = 0\n instance_limit = int(os.environ['OCQ_WORKER_LIMIT'])\n for instance in cur_instances['items']:\n instance_type = instance.get('labels',{}).get('instance_type')\n instance_status = instance.get('status')\n if instance_type == worker_label and instance_status in ('PROVISIONING', 'STAGING', 'RUNNING'):\n matching_instances += 1\n return matching_instances < instance_limit\n\ndef job_start(event, context):\n if worker_space_available():\n return spawn_from_subscription()\n else:\n print('Too many instances running')\n return\n\ndef job_done(event, context):\n evproject = os.environ['GCP_PROJECT']\n sub_name = os.environ['OCQ_JOB_DONE_SUB']\n subscription_path = subscriber.subscription_path(evproject, sub_name)\n print(\"Subscription:\" + 
subscription_path)\n response = subscriber.pull({\n 'subscription':subscription_path,\n 'max_messages':1,\n })\n if not response.received_messages:\n print('No message to pull')\n return\n msg = response.received_messages[0]\n print(msg.message.data.decode('utf8'))\n msgd = json.loads(msg.message.data.decode('utf8'))\n job_id = msgd['jobId']\n db_path = msgd['dbPath']\n csv_path = msgd['csvPath']\n job_doc = db.collection('jobs').document(job_id)\n job_doc.update({\n 'status':{\n 'code':40,\n 'display':'Done'\n },\n 'output':{\n 'database':db_path,\n 'csv':csv_path\n }\n })\n subscriber.acknowledge(request={\n \"subscription\": subscription_path,\n \"ack_ids\": [msg.ack_id],\n })\n time.sleep(10)\n if worker_space_available():\n return spawn_from_subscription()\n else:\n print('Too many instances running')\n return\n\nif __name__ == '__main__':\n import yaml\n with open('C:/a/gcloud/oc-cloudqueue/env.yml') as f:\n d = yaml.safe_load(f.read())\n os.environ.update(d)\n # spawn_from_subscription()\n # worker_space_available()\n job_done(None,None)\n","repo_name":"KarchinLab/open-cravat-cloudqueue","sub_path":"cloud-functions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71921929812","text":"import functools\nfrom typing import Tuple\n\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt # type: ignore[import]\nimport numpy as onp\nfrom skimage import measure # type: ignore[import]\n\nfrom fmmax import basis, beams, fields, fmm, scattering, sources\n\nPERMITTIVITY_AMBIENT: complex = (1.0 + 0.0j) ** 2\nPERMITTIVITY_SLAB: complex = (1.5 + 0.0j) ** 2\nTHICKNESS_AMBIENT: float = 2.0\nTHICKNESS_SLAB: float = 0.8\nPITCH: float = 1.0\nDIAMETER: float = 0.7\nRESOLUTION: float = 0.01\nRESOLUTION_FIELDS: float = 0.01\nWAVELENGTH: float = 0.63\nMULTIPLE_WAVELENGTHS: jnp.ndarray = jnp.asarray([0.62, 0.63, 0.64])\nAPPROXIMATE_NUM_TERMS: int = 50\nBRILLOUIN_GRID_SHAPE: Tuple[int, int] = (9, 9)\nWAVELENGTH_AXIS: int = 0\n\n\ndef simulate_crystal_with_internal_source(\n permittivity_ambient: complex = PERMITTIVITY_AMBIENT,\n permittivity_slab: complex = PERMITTIVITY_SLAB,\n thickness_ambient: float = THICKNESS_AMBIENT,\n thickness_slab: float = THICKNESS_SLAB,\n pitch: float = PITCH,\n diameter: float = DIAMETER,\n resolution: float = RESOLUTION,\n resolution_fields: float = RESOLUTION_FIELDS,\n wavelength: float = WAVELENGTH,\n approximate_num_terms: int = APPROXIMATE_NUM_TERMS,\n brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,\n) -> Tuple[\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (ex, ey, ez)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (hx, hy, hz)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (x, y, z)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (xy, xz, yz) cross sections\n]:\n \"\"\"Simulates a dipole source inside a photonic crystal slab.\n\n The crystal has a square unit cell with circular holes, having cross section\n and dipole position as illustrated below. The dipole is located the lower-left\n corner of the unit cell centered in the supercell defined by the Brillouin grid\n shape. 
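The docstring above places the dipole at the lower-left corner of the unit cell centered in the supercell. With the module defaults PITCH = 1.0 and BRILLOUIN_GRID_SHAPE = (9, 9), the supercell spans 9 x 9 unit cells, so the dipole lands at (4.0, 4.0); a one-line check of the arithmetic the function uses:

pitch, grid = 1.0, (9, 9)
dipole_x = pitch * grid[0] // 2  # 4.0
dipole_y = pitch * grid[1] // 2  # 4.0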
The dipole is x-oriented and centered vertically within the photonic\n crystal slab.\n ________________\n | |\n |XX XX|\n |XXXX XXXX|\n |XXXX XXXX|\n |XX XX|\n x-dipole -> o________________|\n\n Args:\n permittivity_ambient: Permittivity of the region above and below the slab, and\n of the holes in the slab.\n permittivity_slab: Permittivity of the slab.\n thickness_ambient: Thickness of the ambient layers above and below the slab.\n thickness_slab: Thickness of the photonic crystal slab.\n pitch: The unit cell pitch.\n diameter: The diameter of the holes in the photonic crystal.\n resolution: The size of a pixel in permittivity arrays.\n resolution_fields: The size of a pixel in field arrays.\n wavelength: The wavelength, of the dipole emission.\n approximate_num_terms: The number of terms in the Fourier expansion.\n brillouin_grid_shape: The shape of the grid used for Brillouin zone integration.\n\n Returns:\n The electric and magnetic fields, and the grid coordinates, `((ex, ey, ez),\n (hx, hy, hz), (x, y, z))`. The fields are returned for an xz slice centered\n on the dipole.\n \"\"\"\n thickness_ambient_ = jnp.asarray(thickness_ambient)\n thickness_slab_ = jnp.asarray(thickness_slab)\n del thickness_ambient, thickness_slab\n\n primitive_lattice_vectors = basis.LatticeVectors(\n u=pitch * basis.X, v=pitch * basis.Y\n )\n expansion = basis.generate_expansion(\n primitive_lattice_vectors=primitive_lattice_vectors,\n approximate_num_terms=approximate_num_terms,\n truncation=basis.Truncation.CIRCULAR,\n )\n\n # Brillouin zone integration creates a batch of in-plane wavevectors which are\n # distributed throughout the first Brillouin zone.\n in_plane_wavevector = basis.brillouin_zone_in_plane_wavevector(\n brillouin_grid_shape, primitive_lattice_vectors\n )\n assert in_plane_wavevector.shape[-1] == 2\n assert in_plane_wavevector.ndim == 3\n\n eigensolve = functools.partial(\n fmm.eigensolve_isotropic_media,\n wavelength=jnp.asarray(wavelength),\n in_plane_wavevector=in_plane_wavevector,\n primitive_lattice_vectors=primitive_lattice_vectors,\n expansion=expansion,\n formulation=fmm.Formulation.FFT,\n )\n\n mask = unit_cell_pattern(pitch, diameter, resolution)\n permittivity_crystal = jnp.where(mask, permittivity_ambient, permittivity_slab)\n solve_result_crystal = eigensolve(permittivity=permittivity_crystal)\n solve_result_ambient = eigensolve(\n permittivity=jnp.asarray(permittivity_ambient)[jnp.newaxis, jnp.newaxis]\n )\n\n # First, we model a dipole inside the photonic crystal. For this, we must break\n # the stack into two, and compute scattering matrices for the stacks above and\n # below the plane containing the dipole. 
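Before the stack is split around the source plane, it may help to see the function's overall contract. A reduced-size usage sketch, with a coarser basis and Brillouin grid than the module defaults purely to keep runtime manageable (the return structure follows the signature above):

(ex, ey, ez), (hx, hy, hz), (x, y, z), sections = simulate_crystal_with_internal_source(
    approximate_num_terms=20,     # coarser Fourier basis than the default 50
    brillouin_grid_shape=(3, 3),  # coarser BZ sampling than the default (9, 9)
)
print(ex.shape)  # fields on the xz slice through the dipole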
Since we want to visualize fields, we\n # also need the interior scattering matrices.\n s_matrices_interior_before_source = scattering.stack_s_matrices_interior(\n layer_solve_results=[solve_result_ambient, solve_result_crystal],\n layer_thicknesses=[thickness_ambient_, thickness_slab_ / 2],\n )\n s_matrices_interior_after_source = scattering.stack_s_matrices_interior(\n layer_solve_results=[solve_result_crystal, solve_result_ambient],\n layer_thicknesses=[thickness_slab_ / 2, thickness_ambient_],\n )\n # Extract the scattering matrices relating fields at the two ends of each substack.\n s_matrix_before_source = s_matrices_interior_before_source[-1][0]\n s_matrix_after_source = s_matrices_interior_after_source[-1][0]\n\n # Generate the Fourier representation of a point dipole.\n dipole_x = pitch * brillouin_grid_shape[0] // 2\n dipole_y = pitch * brillouin_grid_shape[1] // 2\n dipole = sources.dirac_delta_source(\n location=jnp.asarray([[dipole_x, dipole_y]]),\n in_plane_wavevector=in_plane_wavevector,\n primitive_lattice_vectors=primitive_lattice_vectors,\n expansion=expansion,\n )\n # Compute backward eigenmode amplitudes at the end of the layer before the\n # source, and the forward amplitudes the start of the layer after the source.\n (\n _,\n _,\n bwd_amplitude_before_end,\n fwd_amplitude_after_start,\n _,\n _,\n ) = sources.amplitudes_for_source(\n jx=dipole,\n jy=jnp.zeros_like(dipole),\n jz=jnp.zeros_like(dipole),\n s_matrix_before_source=s_matrix_before_source,\n s_matrix_after_source=s_matrix_after_source,\n )\n\n # Compute the fields inside the structure.\n amplitudes_interior = fields.stack_amplitudes_interior_with_source(\n s_matrices_interior_before_source=s_matrices_interior_before_source,\n s_matrices_interior_after_source=s_matrices_interior_after_source,\n backward_amplitude_before_end=bwd_amplitude_before_end,\n forward_amplitude_after_start=fwd_amplitude_after_start,\n )\n # Coordinates where fields are to be evaluated.\n x = jnp.arange(0, pitch * brillouin_grid_shape[0], resolution_fields)\n y = jnp.ones_like(x) * pitch * brillouin_grid_shape[1] // 2\n (ex, ey, ez), (hx, hy, hz), (x, y, z) = fields.stack_fields_3d_on_coordinates(\n amplitudes_interior=amplitudes_interior,\n layer_solve_results=[\n solve_result_ambient,\n solve_result_crystal,\n solve_result_crystal,\n solve_result_ambient,\n ],\n layer_thicknesses=[\n thickness_ambient_,\n thickness_slab_ / 2,\n thickness_slab_ / 2,\n thickness_ambient_,\n ],\n layer_znum=[\n int(thickness_ambient_ / resolution_fields),\n int(thickness_slab_ / resolution_fields / 2),\n int(thickness_slab_ / resolution_fields / 2),\n int(thickness_ambient_ / resolution_fields),\n ],\n x=x,\n y=y,\n )\n\n # Perform the Brillouin zone integration by averaging over the Brillouin zone\n # grid batch axes.\n ex, ey, ez, hx, hy, hz = [\n jnp.mean(field, axis=(0, 1)) for field in (ex, ey, ez, hx, hy, hz)\n ]\n\n # Compute some cross sections for visualizing the structure.\n section_xy, section_xz, section_yz = crystal_cross_sections(\n thickness_ambient=float(thickness_ambient_),\n thickness_slab=float(thickness_slab_),\n pitch=pitch,\n diameter=diameter,\n resolution=resolution,\n num_unit_cells=brillouin_grid_shape,\n )\n return (ex, ey, ez), (hx, hy, hz), (x, y, z), (section_xy, section_xz, section_yz)\n\n\ndef simulate_crystal_with_gaussian_beam(\n polar_angle: float = 0.15 * jnp.pi,\n azimuthal_angle: float = 0.0,\n polarization_angle: float = 0.0,\n beam_waist: float = 1.0,\n beam_focus_offset: float = 0.0,\n permittivity_ambient: 
complex = PERMITTIVITY_AMBIENT,\n permittivity_slab: complex = PERMITTIVITY_SLAB,\n thickness_ambient: float = THICKNESS_AMBIENT,\n thickness_slab: float = THICKNESS_SLAB,\n pitch: float = PITCH,\n diameter: float = DIAMETER,\n resolution: float = RESOLUTION,\n resolution_fields: float = RESOLUTION_FIELDS,\n wavelengths: jnp.ndarray = MULTIPLE_WAVELENGTHS,\n approximate_num_terms: int = APPROXIMATE_NUM_TERMS,\n brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,\n) -> Tuple[\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (ex, ey, ez)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (hx, hy, hz)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (x, y, z)\n Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (xy, xz, yz) cross sections\n]:\n \"\"\"Simulates a \"broadband\" Gaussian beam incident on photonic crystal slab.\n\n The crystal has a square unit cell with circular holes as illustrated below.\n ________________\n | |\n |XX XX|\n |XXXX XXXX|\n |XXXX XXXX|\n |XX XX|\n |________________|\n\n Args:\n polar_angle: The polar angle of the incident beam.\n azimuthal_angle: The azimuthal angle of the incident beam.\n polarization_angle: The angle giving the polarization rotation about the\n propagation axis.\n beam_waist: The Gaussian beam waist.\n beam_focus_offset: The offset of the Gaussian beam focus from the top of the\n photonic crystal slab.\n permittivity_ambient: Permittivity of the region above and below the slab, and\n of the holes in the slab.\n permittivity_slab: Permittivity of the slab.\n thickness_ambient: Thickness of the ambient layers above and below the slab.\n thickness_slab: Thickness of the photonic crystal slab.\n pitch: The unit cell pitch.\n diameter: The diameter of the holes in the photonic crystal.\n resolution: The size of a pixel in permittivity arrays.\n resolution_fields: The size of a pixel in field arrays.\n wavelengths: The wavelengths, of the gaussian beam.\n approximate_num_terms: The number of terms in the Fourier expansion.\n brillouin_grid_shape: The shape of the grid used for Brillouin zone integration.\n\n Returns:\n The electric and magnetic fields, and the grid coordinates, `((ex, ey, ez),\n (hx, hy, hz), (x, y, z))`. The fields are returned for an xz slice centered\n on the incident beam.\n \"\"\"\n wavelengths = jnp.expand_dims(jnp.atleast_1d(wavelengths), axis=(1, 2))\n assert wavelengths.ndim == 3\n\n thickness_ambient_ = jnp.asarray(thickness_ambient)\n thickness_slab_ = jnp.asarray(thickness_slab)\n del thickness_ambient, thickness_slab\n\n primitive_lattice_vectors = basis.LatticeVectors(\n u=pitch * basis.X, v=pitch * basis.Y\n )\n expansion = basis.generate_expansion(\n primitive_lattice_vectors=primitive_lattice_vectors,\n approximate_num_terms=approximate_num_terms,\n truncation=basis.Truncation.CIRCULAR,\n )\n\n # Brillouin zone integration creates a batch of in-plane wavevectors which are\n # distributed throughout the first Brillouin zone. 
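The beam simulation below shifts every Brillouin-zone sample by the incident beam's in-plane wavevector, which basis.plane_wave_in_plane_wavevector presumably computes from the standard relation k_par = (2*pi/wavelength) * n * sin(polar_angle). A quick magnitude check for the defaults used here:

import numpy as np

wavelength, polar_angle, n_ambient = 0.63, 0.15 * np.pi, 1.0
k_par = 2 * np.pi / wavelength * n_ambient * np.sin(polar_angle)
print(k_par)  # ~4.53, the Bloch offset added to every Brillouin-zone sample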
We shift the expansion so\n # that it is centered on the direction of the incident beam.\n in_plane_wavevector = basis.brillouin_zone_in_plane_wavevector(\n brillouin_grid_shape, primitive_lattice_vectors\n )\n in_plane_wavevector += basis.plane_wave_in_plane_wavevector(\n wavelength=jnp.asarray(wavelengths),\n polar_angle=jnp.asarray(polar_angle),\n azimuthal_angle=jnp.asarray(azimuthal_angle),\n permittivity=jnp.asarray(permittivity_ambient),\n )\n\n assert in_plane_wavevector.shape[0] == wavelengths.size\n assert in_plane_wavevector.shape[1] == brillouin_grid_shape[0]\n assert in_plane_wavevector.shape[2] == brillouin_grid_shape[1]\n assert in_plane_wavevector.shape[-1] == 2\n assert in_plane_wavevector.ndim == 4\n\n eigensolve = functools.partial(\n fmm.eigensolve_isotropic_media,\n wavelength=jnp.asarray(wavelengths),\n in_plane_wavevector=in_plane_wavevector,\n primitive_lattice_vectors=primitive_lattice_vectors,\n expansion=expansion,\n formulation=fmm.Formulation.FFT,\n )\n\n mask = unit_cell_pattern(pitch, diameter, resolution)\n permittivity_crystal = jnp.where(mask, permittivity_ambient, permittivity_slab)\n solve_result_crystal = eigensolve(permittivity=permittivity_crystal)\n solve_result_ambient = eigensolve(\n permittivity=jnp.asarray(permittivity_ambient)[jnp.newaxis, jnp.newaxis]\n )\n\n s_matrices_interior = scattering.stack_s_matrices_interior(\n layer_solve_results=[\n solve_result_ambient,\n solve_result_crystal,\n solve_result_ambient,\n ],\n layer_thicknesses=[thickness_ambient_, thickness_slab_, thickness_ambient_],\n )\n\n # Now compute the eigenmode amplitudes for an incident Gaussian beam.\n # This is done by first obtaining the electric and magnetic fields for the\n # beam, and then solving for the eigenmodes.\n # TODO: replace paraxial Gaussian with a more rigorous expression.\n\n def _paraxial_gaussian_field_fn(x, y, z):\n # Returns the fields of a z-propagating, x-polarized Gaussian beam.\n # See https://en.wikipedia.org/wiki/Gaussian_beam\n\n # Adjust array dimensions for proper batching\n wavelengths_padded = wavelengths[..., jnp.newaxis, jnp.newaxis]\n\n k = 2 * jnp.pi / wavelengths_padded\n z_r = (\n jnp.pi\n * beam_waist**2\n * jnp.sqrt(permittivity_ambient)\n / wavelengths_padded\n )\n w_z = beam_waist * jnp.sqrt(1 + (z / z_r) ** 2)\n r = jnp.sqrt(x**2 + y**2)\n ex = (\n beam_waist\n / w_z\n * jnp.exp(-(r**2) / w_z**2)\n * jnp.exp(\n 1j\n * (\n (k * z) # Phase\n + k * r**2 / 2 * z / (z**2 + z_r**2) # Wavefront curvature\n - jnp.arctan(z / z_r) # Gouy phase\n )\n )\n )\n ey = jnp.zeros_like(ex)\n ez = jnp.zeros_like(ex)\n hx = jnp.zeros_like(ex)\n hy = ex / jnp.sqrt(permittivity_ambient)\n hz = jnp.zeros_like(ex)\n return (ex, ey, ez), (hx, hy, hz)\n\n # Solve for the fields of the beam with the desired rotation and shift.\n x, y = basis.unit_cell_coordinates(\n primitive_lattice_vectors=primitive_lattice_vectors,\n shape=permittivity_crystal.shape[-2:], # type: ignore[arg-type]\n num_unit_cells=brillouin_grid_shape,\n )\n (beam_ex, beam_ey, _), (beam_hx, beam_hy, _) = beams.shifted_rotated_fields(\n field_fn=_paraxial_gaussian_field_fn,\n x=x,\n y=y,\n z=jnp.zeros_like(x),\n beam_origin_x=jnp.amax(x) / 2,\n beam_origin_y=jnp.amax(y) / 2,\n beam_origin_z=thickness_ambient_ - beam_focus_offset,\n polar_angle=jnp.asarray(polar_angle),\n azimuthal_angle=jnp.asarray(azimuthal_angle),\n polarization_angle=jnp.asarray(polarization_angle),\n )\n\n brillouin_grid_axes = (1, 2)\n # Add an additional axis for the number of sources\n fwd_amplitude, _ = 
sources.amplitudes_for_fields(\n ex=beam_ex[..., jnp.newaxis],\n ey=beam_ey[..., jnp.newaxis],\n hx=beam_hx[..., jnp.newaxis],\n hy=beam_hy[..., jnp.newaxis],\n layer_solve_result=solve_result_ambient,\n brillouin_grid_axes=brillouin_grid_axes,\n )\n\n # Compute the fields inside the structure.\n amplitudes_interior = fields.stack_amplitudes_interior(\n s_matrices_interior=s_matrices_interior,\n forward_amplitude_0_start=fwd_amplitude,\n backward_amplitude_N_end=jnp.zeros_like(fwd_amplitude),\n )\n # Coordinates where fields are to be evaluated.\n x = jnp.arange(0, pitch * brillouin_grid_shape[0], resolution_fields)\n y = jnp.ones_like(x) * pitch * brillouin_grid_shape[1] / 2\n (ex, ey, ez), (hx, hy, hz), (x, y, z) = fields.stack_fields_3d_on_coordinates(\n amplitudes_interior=amplitudes_interior,\n layer_solve_results=[\n solve_result_ambient,\n solve_result_crystal,\n solve_result_ambient,\n ],\n layer_thicknesses=[\n thickness_ambient_,\n thickness_slab_,\n thickness_ambient_,\n ],\n layer_znum=[\n int(thickness_ambient_ / resolution_fields),\n int(thickness_slab_ / resolution_fields),\n int(thickness_ambient_ / resolution_fields),\n ],\n x=x,\n y=y,\n )\n\n # Perform the Brillouin zone integration by averaging over the Brillouin zone\n # grid batch axes.\n ex, ey, ez, hx, hy, hz = [\n jnp.mean(field, axis=brillouin_grid_axes) for field in (ex, ey, ez, hx, hy, hz)\n ]\n\n # Compute some cross sections for visualizing the structure.\n section_xy, section_xz, section_yz = crystal_cross_sections(\n thickness_ambient=float(thickness_ambient_),\n thickness_slab=float(thickness_slab_),\n pitch=pitch,\n diameter=diameter,\n resolution=resolution,\n num_unit_cells=brillouin_grid_shape,\n )\n return (ex, ey, ez), (hx, hy, hz), (x, y, z), (section_xy, section_xz, section_yz)\n\n\ndef unit_cell_pattern(\n pitch: float,\n diameter: float,\n resolution: float,\n) -> jnp.ndarray:\n \"\"\"Defines the pattern of the photonic crystal.\"\"\"\n x, y = jnp.meshgrid(\n jnp.arange(0, pitch, resolution),\n jnp.arange(0, pitch, resolution),\n indexing=\"ij\",\n )\n return (jnp.sqrt((x - pitch / 2) ** 2 + y**2) < diameter / 2) | (\n jnp.sqrt((x - pitch / 2) ** 2 + (y - pitch) ** 2) < diameter / 2\n )\n\n\ndef crystal_cross_sections(\n thickness_ambient: float,\n thickness_slab: float,\n pitch: float,\n diameter: float,\n resolution: float,\n num_unit_cells: Tuple[int, int],\n) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes cross sections of the photonic crystal structure.\"\"\"\n mask = unit_cell_pattern(pitch, diameter, resolution)\n\n xy_section = jnp.tile(mask, num_unit_cells)\n\n xz_slab = mask[:, 0]\n xz_section = jnp.stack(\n (\n [jnp.ones_like(xz_slab)] * int(thickness_ambient / resolution)\n + [xz_slab] * int(thickness_slab / resolution)\n + [jnp.ones_like(xz_slab)] * int(thickness_ambient / resolution)\n ),\n axis=-1,\n )\n xz_section = jnp.tile(xz_section, (num_unit_cells[0], 1))\n\n yz_slab = mask[0, :]\n yz_section = jnp.stack(\n (\n [jnp.ones_like(yz_slab)] * int(thickness_ambient / resolution)\n + [yz_slab] * int(thickness_slab / resolution)\n + [jnp.ones_like(yz_slab)] * int(thickness_ambient / resolution)\n ),\n axis=-1,\n )\n yz_section = jnp.tile(yz_section, (num_unit_cells[1], 1))\n\n return xy_section, xz_section, yz_section\n\n\ndef plot_dipole_fields(\n pitch: float = PITCH,\n resolution: float = RESOLUTION,\n resolution_fields: float = RESOLUTION_FIELDS,\n brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,\n **sim_kwargs,\n) -> None:\n \"\"\"Plots an electric 
field slice for the crystal with embedded source.\"\"\"\n    sim_kwargs.update(\n        {\n            \"pitch\": pitch,\n            \"brillouin_grid_shape\": brillouin_grid_shape,\n            \"resolution\": resolution,\n            \"resolution_fields\": resolution_fields,\n        }\n    )\n    (\n        (ex, ey, ez),\n        (hx, hy, hz),\n        (x, y, z),\n        (section_xy, section_xz, section_yz),\n    ) = simulate_crystal_with_internal_source(**sim_kwargs)\n\n    xplot, zplot = jnp.meshgrid(x, z, indexing=\"ij\")\n    field_plot = ex[:, :, 0].real\n\n    plt.figure(figsize=(float(jnp.amax(xplot)), float(jnp.amax(zplot))), dpi=80)\n    ax = plt.subplot(111)\n    im = plt.pcolormesh(xplot, zplot, field_plot, shading=\"nearest\", cmap=\"bwr\")\n\n    im.set_clim((-float(jnp.amax(field_plot)), float(jnp.amax(field_plot))))\n\n    contours = measure.find_contours(onp.array(section_xz))\n    scale_factor = pitch / resolution\n    for c in contours:\n        ax.plot(c[:, 0] / scale_factor, c[:, 1] / scale_factor, \"k\")\n\n    ax.axis(\"equal\")\n    ax.axis(\"off\")\n    ax.set_ylim(ax.get_ylim()[::-1])\n\n    plt.subplots_adjust(left=0, bottom=0, right=1, top=1)\n\n    plt.savefig(\"crystal_dipole.png\", bbox_inches=\"tight\")\n\n\ndef plot_gaussian_fields(\n    pitch: float = PITCH,\n    resolution: float = RESOLUTION,\n    resolution_fields: float = RESOLUTION_FIELDS,\n    brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,\n    wavelength_idx: int = 0,\n    **sim_kwargs,\n) -> None:\n    \"\"\"Plots an electric field slice for the crystal with Gaussian beam.\"\"\"\n    sim_kwargs.update(\n        {\n            \"pitch\": pitch,\n            \"brillouin_grid_shape\": brillouin_grid_shape,\n            \"resolution\": resolution,\n            \"resolution_fields\": resolution_fields,\n        }\n    )\n    (\n        (ex, ey, ez),\n        (hx, hy, hz),\n        (x, y, z),\n        (section_xy, section_xz, section_yz),\n    ) = simulate_crystal_with_gaussian_beam(**sim_kwargs)\n\n    xplot, zplot = jnp.meshgrid(x, z, indexing=\"ij\")\n    field_plot = ex[wavelength_idx, :, :, 0].real\n\n    plt.figure(figsize=(float(jnp.amax(xplot)), float(jnp.amax(zplot))), dpi=80)\n    ax = plt.subplot(111)\n    im = plt.pcolormesh(xplot, zplot, field_plot, shading=\"nearest\", cmap=\"bwr\")\n\n    im.set_clim((-float(jnp.amax(field_plot)), float(jnp.amax(field_plot))))\n\n    contours = measure.find_contours(onp.array(section_xz))\n    scale_factor = pitch / resolution\n    for c in contours:\n        ax.plot(c[:, 0] / scale_factor, c[:, 1] / scale_factor, \"k\")\n\n    ax.axis(\"equal\")\n    ax.axis(\"off\")\n    ax.set_ylim(ax.get_ylim()[::-1])\n\n    plt.subplots_adjust(left=0, bottom=0, right=1, top=1)\n    plt.savefig(\"crystal_gaussian.png\", bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n    plot_dipole_fields()\n    plot_gaussian_fields()\n","repo_name":"facebookresearch/fmmax","sub_path":"examples/crystal.py","file_name":"crystal.py","file_ext":"py","file_size_in_byte":23094,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"67"}
{"seq_id":"20615005058","text":"from flask import Flask\nfrom flask_restful import Resource, Api, reqparse\nimport database \nimport os\n\nfrom colorama import init, Fore\n\ninit(autoreset=True)\n\napp = Flask(__name__)\napi = Api(app)\n\n# uri : /problems\nclass ProblemsIndex(Resource):\n    def get(self)->list:\n        \"\"\" ProblemsIndex.get : fetches the list of problems from the db and returns brief info as JSON \"\"\"\n        problemsList = database.get_problems_list()\n        problemsCount = len(problemsList)\n        problemsJsonList = list(map(lambda p:\n            {\n                \"problemId\" : p[0],\n                \"problemTitle\" : p[1],\n                \"problemAuthor\" : p[2],\n                \"problemTime\" : p[3],\n                \"problemContent\" : p[4][0:50]\n            } ,problemsList))\n\n        return {\n            \"StatusCode\" : 400,\n            
\"Message\" : \"Get Successful\", \n \"Counts\" : problemsCount,\n \"Problems\" : problemsJsonList\n }\n\n def post(self):\n \"\"\" ProblemsIndex.post : post로 json형식으로 보낸 나의 고민 정보를 데이터베이스에 추가시켜줌 \"\"\"\n\n try:\n # argument 값이 이상하게 들어오면 400 에러를 표시\n parser = reqparse.RequestParser()\n parser.add_argument('problemTitle', type=str)\n parser.add_argument('problemAuthor', type=str)\n parser.add_argument('problemTime', type=str)\n parser.add_argument('problemContent', type=str)\n args = parser.parse_args()\n \n resultId = database.insert_problem(\n args['problemTitle'],\n args['problemAuthor'],\n args['problemTime'],\n args['problemContent']\n )\n\n return {'StatusCode' : '400', 'Message' : 'Post Successful', 'ResultId' : resultId}\n\n except Exception as e:\n return {'StatusCode' : '1000', 'Message' : f'Post Failed : {e}', 'ResultId' : -1}\n \n\n# uri : /problems//problem\nclass Problems(Resource) :\n def get(self, problem_id):\n \"\"\" Problems.get : db에서 problemId == problem_id 인 고민의 정보 전체를 json으로 반환함 \"\"\"\n problemData = database.get_problem(problem_id)\n return {\n \"StatusCode\" : 400,\n \"Message\" : \"Get Successful\",\n \"ProblemData\" : { \n \"problemId\" : problemData[0],\n \"problemTitle\" : problemData[1],\n \"problemAuthor\" : problemData[2],\n \"problemTime\" : problemData[3],\n \"problemContent\" : problemData[4]\n }\n }\n\n# uri : /problems//replys\nclass ReplysIndex(Resource):\n def get(self, problem_id):\n \"\"\" problemId == id 인 고민 정보를 최신순으로 모두 json형태로 반환한다. \"\"\"\n replysList = database.get_replys_list(problem_id)\n replysCount = len(replysList)\n replysJsonList = list(map(lambda reply:\n {\n \"replyId\" : reply[0],\n \"problemId\" : reply[1],\n \"replyTitle\" : reply[2],\n \"replyAuthor\" : reply[3],\n \"replyTime\" : reply[4],\n \"replyContent\" : reply[5][0:50]\n } ,replysList))\n\n return { \n \"StatusCode\" : 400,\n \"Message\" : \"Get Successful\",\n \"Counts\" : replysCount,\n \"Replys\" : replysJsonList\n }\n\n def post(self, problem_id):\n \"\"\" problemId == id인 고민에 대한 답장 정보를 받아 DB에 추가한다. 그 후 응답을 한다. \"\"\"\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('replyTitle', type=str)\n parser.add_argument('replyAuthor', type=str)\n parser.add_argument('replyTime', type=str)\n parser.add_argument('replyContent', type=str)\n args = parser.parse_args()\n \n database.insert_reply(\n problem_id,\n args['replyTitle'],\n args['replyAuthor'],\n args['replyTime'],\n args['replyContent']\n )\n\n return {'StatusCode' : '400', 'Message' : 'Post Successful'}\n\n except Exception as e:\n return {'StatusCode' : '1000', 'Message' : f'Post Failed : {e}'}\n\n#uri : /problems//replys/\nclass Replys(Resource):\n def get(self, problem_id, reply_id):\n \"\"\" Replys.get : replyId == reply_id 인 답장의 전체 내용을 json파일로 보내준다. 
\"\"\"\n replyData = database.get_reply(reply_id)\n return {\n \"StatusCode\" : 400,\n \"Message\" : \"Get Successful\",\n \"ReplyData\":{\n \"problemId\" : replyData[0],\n \"replyTitle\" : replyData[1],\n \"replyAuthor\" : replyData[2],\n \"problemTime\" : replyData[3],\n \"replyTime\" : replyData[4],\n \"replyContent\" : replyData[5]\n }\n }\n\n# uri : /lights/setLevel\nclass Lights(Resource):\n def get(self, level):\n print(f\"[api.py /lights/setLevel/] : setting Level to {level}\")\n try :\n print(f\"Successfully set light level to {level}\")\n return {\"Message\": f\"Successfully set light level to {level}\"}\n\n except Exception as e:\n print(f\"Failed to set light level to {level}\")\n return {\"Message\": f\"Failed to Set light level to {level} | {e}\" }\n\n\napi.add_resource(ProblemsIndex, '/problems/')\napi.add_resource(Problems, '/problems//problem')\napi.add_resource(ReplysIndex, '/problems//reply')\napi.add_resource(Replys, '/problems//reply/')\napi.add_resource(Lights, '/lights/setLevel')\n\ndef runServer(f_debug: bool= False) : \n database.create_table()\n app.run(debug=True)\n\nif __name__ == '__main__':\n runServer(f_debug=True)\n","repo_name":"Rescura/Your_Problem","sub_path":"FlaskPart/api_windows.py","file_name":"api_windows.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41600999569","text":"import functools\n\nimport embodied\n\n\ndef load_env(\n task, amount=1, parallel='none', daemon=False, restart=False, seed=None,\n **kwargs):\n ctors = []\n for index in range(amount):\n ctor = functools.partial(load_single_env, task, **kwargs)\n if seed is not None:\n ctor = functools.partial(ctor, seed=hash((seed, index)) % (2 ** 31 - 1))\n if parallel != 'none':\n ctor = functools.partial(embodied.Parallel, ctor, parallel, daemon)\n if restart:\n ctor = functools.partial(embodied.wrappers.RestartOnException, ctor)\n ctors.append(ctor)\n envs = [ctor() for ctor in ctors]\n return embodied.BatchEnv(envs, parallel=(parallel != 'none'))\n\n\ndef load_single_env(\n task, size=(64, 64), repeat=1, mode='train', camera=-1, gray=False,\n length=0, logdir='/dev/null', discretize=0, sticky=True, lives=False,\n episodic=True, again=False, termination=False, weaker=1.0, checks=False,\n seed=None):\n suite, task = task.split('_', 1)\n if suite == 'dummy':\n from . import dummy\n env = dummy.Dummy(task, size, length or 100)\n elif suite == 'gym':\n from . import gym\n env = gym.Gym(task)\n elif suite == 'bsuite':\n import bsuite\n from . import dmenv\n env = bsuite.load_from_id(task)\n env = dmenv.DMEnv(env)\n env = embodied.wrappers.FlattenTwoDimObs(env)\n elif suite == 'dmc':\n from . import dmc\n env = dmc.DMC(task, repeat, size, camera)\n elif suite == 'atari':\n from . import atari\n env = atari.Atari(task, repeat, size, gray, lives=lives, sticky=sticky)\n elif suite == 'crafter':\n from . import crafter\n assert repeat == 1\n # outdir = embodied.Path(logdir) / 'crafter' if mode == 'train' else None\n outdir = None\n env = crafter.Crafter(task, size, outdir)\n elif suite == 'dmlab':\n from . import dmlab\n env = dmlab.DMLab(task, repeat, size, mode, seed=seed, episodic=episodic)\n elif suite == 'robodesk':\n from . import robodesk\n env = robodesk.RoboDesk(task, mode, repeat, length or 2000)\n elif suite == 'minecraft':\n from . import minecraft\n env = minecraft.Minecraft(task, repeat, size, length or 24000)\n elif suite == 'loconav':\n from . 
import loconav\n env = loconav.LocoNav(\n task, repeat, size, camera,\n again=again, termination=termination, weaker=weaker)\n elif suite == 'pinpad':\n from . import pinpad\n assert repeat == 1\n assert size == (64, 64)\n env = pinpad.PinPad(task, length or 2000)\n else:\n raise NotImplementedError(suite)\n for name, space in env.act_space.items():\n if name == 'reset':\n continue\n if space.discrete:\n env = embodied.wrappers.OneHotAction(env, name)\n elif discretize:\n env = embodied.wrappers.DiscretizeAction(env, name, discretize)\n else:\n env = embodied.wrappers.NormalizeAction(env, name)\n if length:\n env = embodied.wrappers.TimeLimit(env, length)\n env = embodied.wrappers.ExpandScalars(env)\n if checks:\n env = embodied.wrappers.CheckSpaces(env)\n return env\n\n\n__all__ = [\n k for k, v in list(locals().items())\n if type(v).__name__ in ('type', 'function') and not k.startswith('_')]\n","repo_name":"danijar/director","sub_path":"embodied/envs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"67"} +{"seq_id":"22046657889","text":"import math\nfrom itertools import accumulate\nimport folding_utils\nfrom ComplexNumber import ComplexNumber\nfrom folding_utils import read_from_file\n\n\nclass BaseFolding:\n def __init__(self):\n self.folding = []\n\n def fold(self, file):\n for seq in read_from_file(file):\n self.folding.append(self.find_folding(seq))\n\n def find_folding(self, seq):\n return [1] * (len(seq) - 1)\n\n def get_folding_list(self):\n return self.folding\n\n\nclass SpiralFolding(BaseFolding):\n def find_folding(self, seq):\n seq_len = len(seq)\n configuration = [None] * (seq_len - 1)\n conf = ComplexNumber(1, 0)\n left_turn = ComplexNumber(0, 1)\n increase = False\n steps = 1\n step = 0\n for i in range(seq_len - 1):\n step += 1\n configuration[i] = conf\n if step == steps:\n step = 0\n conf *= left_turn\n if increase:\n steps += 1\n increase = False\n else:\n increase = True\n\n return configuration\n\n\nclass BedSheetFolding(BaseFolding):\n def find_folding(self, seq):\n seq_len = len(seq)\n seq_parts = self.separate_seq_into_parts(seq)\n conf_dict = {}\n max_row = 20 if seq_len > 20 else seq_len\n for row in range(1, max_row):\n conf_dict[row] = self.free_energy(seq, self.fold_sheet(seq_parts, row, seq_len))\n best_row = min(conf_dict.keys(), key=(lambda k: conf_dict[k]))\n return self.fold_sheet(seq_parts, best_row, seq_len)\n\n @staticmethod\n def fold_sheet(parts, row, length):\n iter_parts = iter(parts)\n part = next(iter_parts)\n tmp_len = length - 1 - part\n configuration = [ComplexNumber(1, 0)] * part # Skip leading zeroes\n tmp = [ComplexNumber(1, 0)] * tmp_len\n part = next(iter_parts)\n next_part = part\n color, first_row, loop, step, back = True, True, False, 0, 0\n direction = ComplexNumber(1, 0)\n for i in range(tmp_len):\n if i + 1 == next_part:\n color = not color\n part = next(iter_parts)\n next_part += part\n if not color and first_row and part > 1:\n loop = True\n loop_step = i - next_part + part + 1\n if loop_step < part // 2:\n tmp[i] *= ComplexNumber(0, -1)\n elif loop_step > part // 2:\n tmp[i] *= ComplexNumber(0, 1)\n else:\n step += 1\n else:\n loop = False\n step += 1\n if step == row:\n first_row = False\n overhang = next_part - i - 1\n if not color and overhang >= 2 - back:\n tmp[i] = direction\n step -= 1\n back -= 1\n elif loop:\n tmp[i] = direction\n tmp = [direction if k > i else c for k, c in enumerate(tmp)]\n 
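SpiralFolding above encodes step directions as unit complex numbers, so a turn is just multiplication by i. A two-line check of that convention with Python's built-in complex type, which presumably mirrors the repo's ComplexNumber class:

direction = 1 + 0j  # facing +x
left_turn = 0 + 1j  # multiplying by i rotates a quarter turn (the repo's "left turn")
print(direction * left_turn)       # 1j      -> facing +y
print(direction * left_turn ** 2)  # (-1+0j) -> facing -x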
print(\"dodged it\")\n step += 1\n else:\n tmp[i] = ComplexNumber(0, 1)\n direction *= ComplexNumber(-1, 0)\n tmp = [direction if k > i else c for k, c in enumerate(tmp)]\n step = back\n back = 0\n\n configuration.extend(tmp)\n return configuration\n\n @staticmethod\n def separate_seq_into_parts(seq):\n color = '0'\n parts = []\n part = 0\n for am in seq:\n if am != color:\n color = am\n parts.append(part)\n part = 1\n else:\n part += 1\n if part != 0:\n parts.append(part)\n return parts\n\n @staticmethod\n def free_energy(seq, conf):\n return folding_utils.compute_free_energy(seq, conf)\n\n\nclass BedSheetFoldingOld(BedSheetFolding):\n @staticmethod\n def fold_sheet(parts, row, length):\n iter_parts = iter(parts)\n part = next(iter_parts)\n configuration = [ComplexNumber(1, 0)] * part # Skip leading zeroes\n tmp = [ComplexNumber(1, 0)] * (length - 1 - part)\n bend = ComplexNumber(0, 1)\n color, step, back = True, 0, 0\n part = next(iter_parts)\n next_part = part\n tmp_len = len(tmp)\n for i in range(tmp_len):\n step += 1\n if i + 1 == next_part:\n color = not color\n part = next(iter_parts)\n next_part += part\n if step == row:\n overhang = next_part - i - 1\n if not color and overhang >= 2 - back:\n step -= 1\n back -= 1\n else:\n tmp[i] *= bend\n tmp = [c * ComplexNumber(-1, 0) if k > i else c for k, c in enumerate(tmp)]\n bend *= ComplexNumber(-1, 0)\n step = back\n back = 0\n configuration.extend(tmp)\n return configuration\n","repo_name":"Vojtasii/protein-folding","sub_path":"simfold.py","file_name":"simfold.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"47680289348","text":"import sys\nsys.stdin = open(\"hcn.inp\",\"r\")\nsys.stdout = open(\"hcn.out\",\"w\")\nm, n, k = [int(x) for x in input().split()]\na = []\nb = [0]*1001\nfor i in range(k):\n a.append(tuple([int(x) for x in input().split()]))\na.append((0,0))\na.append((m+1,n+1))\na.sort()\nsmax = 0\nk+=2\nfor i in range(k-2):\n b[0] = 0 \n b[1] = a[i+1][1]\n b[2] = n+1\n p = 3\n for j in range(i+2, k):\n ymax = 0 \n for t in range(p-2):\n ymax = max(ymax, b[t+2] - b[t] - 1)\n smax = max(smax, ymax * (a[j][0] - a[i][0] - 1))\n t = p-1;\n while b[t] >= a[j][1]:\n b[t+1] = b[t] \n t-=1 \n b[t+1] = a[j][1]\n p+=1 \nprint(smax)\n","repo_name":"hoclentop/hoclentop.github.io","sub_path":"mau/b221d4.py","file_name":"b221d4.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28859400447","text":"import matplotlib.pyplot as plt\nfrom random import uniform, seed\nimport numpy as np\nimport pandas as pd\nimport time\nfrom igraph import *\nimport random\n\nfrom IMTools.MonteCarloIC_greedy import IC\n\n\ndef celf(g, k, p=0.1, mc=1000):\n start_time = time.time()\n margin_gain = [IC(g, [v], p, mc) for v in range(g.vcount())]\n\n Q = sorted(zip(range(g.vcount()), margin_gain), key=lambda x: x[1], reverse=True)\n\n S, spread, SPREAD = [Q[0][0]], Q[0][1], [Q[0][1]]\n Q = Q[1:]\n LOOKUPS = [g.vcount()]\n timelapse = [time.time() - start_time]\n\n for _ in range(k - 1):\n check, node_lookup = False, 0\n\n while not check:\n node_lookup += 1\n\n current = Q[0][0]\n\n Q[0] = (current, IC(g, S + [current], p, mc) - spread)\n\n Q = sorted(Q, key=lambda x: x[1], reverse=True)\n\n check = (Q[0][0] == current)\n\n spread = Q[0][1]\n S.append(Q[0][0])\n SPREAD.append(spread)\n LOOKUPS.append(node_lookup)\n timelapse.append(time.time() - 
start_time)\n\n        Q = Q[1:]\n\n    return (S, SPREAD, timelapse, LOOKUPS)","repo_name":"Braylon1002/IMTool","sub_path":"IMTools/CELF.py","file_name":"CELF.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"67"}
{"seq_id":"2786728784","text":"from matplotlib import animation, rc\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport math\nimport os\nimport itertools\n\n# get coordinates of all points on hull\ndef video_to_points(hull_data):\n    \"\"\"\n    Function to find coordinates of all points on a hull\n    Args\n        hull_data np.ndarray float\n    Returns\n        coord_list np.ndarray float\n\n    This function returns a list of coordinates of all the \n    points on the input hull data.\n\n    \"\"\"\n\n    return np.vstack(np.nonzero(hull_data)).transpose(1,0)\n\ndef from_hull_to_ct_coords(ct_coords, nifti):\n    \"\"\"\n    Converts the coordinates from the hull to the coordinates of the CT scan\n    Args:\n        ct_coords np.ndarray float (N,3)\n        nifti nibabel.nifti1.Nifti1Image \n    Returns\n        ct_coords np.ndarray float (N,3)\n    \"\"\"\n    Tmatrix = np.transpose(np.linalg.inv(nifti.affine))\n    ct_coords = np.hstack((ct_coords, np.ones((ct_coords.shape[0],1)))) @ Tmatrix\n    return ct_coords[:,0:3]\n    \ndef dist(x,y):\n    return np.sqrt(np.sum( (x-y)**2))\n\ndef euclidean_distance_coords(predictions, ground_truth):\n    \"\"\"\n    Given two sets of points, find the euclidean distance between them.\n    This function is invariant to the order of the points.\n    We assume that the points closest to each other are the same points. i.e. represent the same object.\n    predictions nd.array nx3\n    ground_truth nd.array nx3\n    \"\"\"\n    pred_perm = np.zeros_like(predictions)\n    dsts = []\n    perms = list(itertools.permutations([0, 1, 2, 3]))\n    for perm in perms:\n        pred_perm[0], pred_perm[1], pred_perm[2], pred_perm[3] = \\\n            predictions[perm[0]], predictions[perm[1]], predictions[perm[2]], predictions[perm[3]]\n        dsts.append(np.mean(np.sqrt( np.sum((pred_perm-ground_truth)**2, axis=1 ))))\n    return np.min(dsts) \n    \ndef create_video(orig_image, dim=0):\n    \"\"\"\n    Creates a video given the numpy array along dim\n    Args\n        orig_image np.ndarray float\n        dim int\n    Return\n        anim matplotlib.animation.FuncAnimation\n    \"\"\"\n    fig, ax = plt.subplots()\n    plt.close()\n    def animator(N): # N is the animation frame number\n        if dim == 0:\n            ax.imshow(orig_image[N,:,:], cmap='gray') \n        elif dim==1:\n            ax.imshow(orig_image[:,N,:], cmap='gray') \n        else:\n            ax.imshow(orig_image[:,:,N], cmap='gray')  # slice along the last axis for dim == 2\n        \n        ax.axis('off')\n        return ax\n    PlotFrames = range(0, orig_image.shape[0], 1)\n    anim = animation.FuncAnimation(fig, animator,frames=PlotFrames,interval=100)\n    rc('animation', html='jshtml') # embed in the HTML for Google Colab\n    return anim\n\ndef rotate(img, angle):\n    \"\"\"\n    Function to rotate an image\n    Args\n        img np.ndarray float \n        angle float\n    Returns\n        dst np.ndarray float\n    \"\"\"\n    rows,cols = img.shape\n    M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n    dst = cv2.warpAffine(img,M,(cols,rows))\n    return dst \n\ndef procrustes(X, Y):\n    \"\"\"\n    Ya = procrustes(X, Y)\n\n    Returns Ya = alpha * (Y - muY) * Q + muX, where muX and muY are the m x n\n    matrices whose rows contain copies of the centroids of X and Y, and alpha\n    (scalar) and Q (m x m orthogonal matrix) are the solutions to the Procrustes\n    + scaling problem\n\n    Inputs: `X` and `Y` are m x n matrices\n\n    Output: `Ya` is an m x n matrix containing the Procrustes-aligned version\n    of Y aligned to X and Q the optimal 
orthogonal matrix\n\n    min_{alpha, Q: Q^T Q = I} |(X - muX) - alpha * (Y - muY) Q|_F\n    \"\"\"\n    muX = np.mean(X, axis=0)\n    muY = np.mean(Y, axis=0)\n    \n    X0 = X - muX \n    Y0 = Y - muY \n    # Procrustes rotation\n    U, _, V = np.linalg.svd(np.transpose(X0) @ Y0, full_matrices=False)\n    V=np.transpose(V)\n    Q = V @ np.transpose(U)\n    # Optimal scaling\n    alpha = np.trace(np.transpose(X0) @ Y0 @ Q) / np.trace(np.transpose(Y0) @ Y0)\n\n    # Align data\n    Ya = alpha * (Y0 @ Q) + muX\n\n    return Ya, Q, muX, muY, alpha \n\ndef naive_project(coord_list,points_hull):\n    \"\"\"\n    Function to get the coordinates of 2d points on the 3d surface\n    It simulates projection of ray along x axis stepwise and uses integers to find intersection\n    Args \n        coord_list np.ndarray float\n        points_hull np.ndarray float\n    Returns\n        projected_coord_list np.ndarray int\n    \"\"\"\n    coord_list = coord_list[:,1:3]\n    coord_list = coord_list.astype(int)\n    points_hull = points_hull.astype(int)\n    projected_coord_list = []\n\n    for coord in coord_list:\n        # find where the last two coordinates are equal\n        idx = np.where( ( points_hull[:, 1:3] == np.array(coord)).all(axis=1))\n        if len(idx[0]) == 0:  # np.where returns a tuple; check the index array itself\n            continue\n        else:\n            projected_coord_list.append(points_hull[idx][np.argmax(points_hull[idx][:,0])])\n\n    return projected_coord_list\n\ndef naive_project2(coord_list,points_hull):\n    \"\"\"\n    Function to get the coordinates of 2d points on the 3d surface\n    It simulates projection of ray along x axis stepwise and uses integers to find intersection\n    Args \n        coord_list np.ndarray float\n        points_hull np.ndarray float\n    Returns\n        projected_coord_list np.ndarray int\n    \"\"\"\n    coord_list = coord_list[:,1:3]\n    projected_coord_list = []\n\n    for coord in coord_list:\n        coord = [100, coord[0], coord[1]]\n        ## find the point on the hull closest to coord\n        dist = np.linalg.norm(points_hull - coord, axis=1)\n        idx = np.argmin(dist)\n        projected_coord_list.append(points_hull[idx])\n\n    return projected_coord_list\ndef resize_coords(old_coords, old_size, new_size):\n    new_coords = []\n    Rx = new_size[0]/old_size[0]\n    Ry = new_size[1]/old_size[1]\n    for old_coord in old_coords:\n        new_coords.append([round(Rx*old_coord[0]), round(Ry*old_coord[1])])  # append instead of indexing an empty list\n    return new_coords\n\ndef calc_arc_length(p1, p2, c):\n    \"\"\"\n    Calculate the clockwise arc length from p1 to p2 with center c\n    \"\"\"\n    x1 = p1[0]; y1 = p1[1] # Start point\n    xc = c[0]; yc = c[1] # Center point\n    r = math.sqrt((x1-xc)**2 + (y1-yc)**2)\n    # End point\n    x2 = xc+r*(p2[0]-xc)/math.sqrt((p2[0]-xc)**2+(p2[1]-yc)**2)\n    y2 = yc+r*(p2[1]-yc)/math.sqrt((p2[1]-yc)**2+(p2[0]-xc)**2)\n    x3 = 2*xc-x1; y3 = 2*yc-y1 # Point opposite start point\n    d = math.sqrt((x1-x2)**2 + (y1-y2)**2)\n    theta = math.acos(1 - (d**2)/(2*r**2))\n    if x2 > x3: # This only works if p1 is bottom point now\n        theta = 2*math.pi-theta\n    arc_length = r*theta\n    return arc_length\n\ndef coord_from_arc_length(p1, c, arc_length):\n    \"\"\"\n    Calculate the coordinate p2 with arc length from p1\n    \"\"\"\n    x1 = p1[0]; y1 = p1[1] # Start point\n    xc = c[0]; yc = c[1] # Center point\n    r = math.sqrt((x1-xc)**2 + (y1-yc)**2)\n    circum = 2*math.pi*r\n    theta = 2*math.pi*arc_length/circum\n    coord = [x1-r*math.sin(theta), y1-r*(1-math.cos(theta))]\n    return coord\n\ndef detect(c):\n    # From https://pyimagesearch.com/2016/02/08/opencv-shape-detection/\n    # initialize the shape name and approximate the contour\n    shape = \"unidentified\"\n    peri = cv2.arcLength(c, True)\n    approx = cv2.approxPolyDP(c, 0.04 * peri, True)\n    \n    # if the shape is a triangle, it will have 3 
vertices\n if len(approx) == 3:\n shape = \"triangle\"\n # if the shape has 4 vertices, it is either a square or\n # a rectangle\n elif len(approx) == 4:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n # a square will have an aspect ratio that is approximately\n # equal to one, otherwise, the shape is a rectangle\n shape = \"square\" if ar >= 0.95 and ar <= 1.05 else \"rectangle\"\n # if the shape is a pentagon, it will have 5 vertices\n elif len(approx) == 5:\n shape = \"pentagon\"\n # otherwise, we assume the shape is a circle\n else:\n shape = \"circle\"\n # return the name of the shape\n return shape\n\ndef create_circular_mask(h, w, center=None, radius=None):\n # From https://stackoverflow.com/a/44874588\n if center is None: # use the middle of the image\n center = (int(w/2), int(h/2))\n if radius is None: # use the smallest distance between the center and image walls\n radius = min(center[0], center[1], w-center[0], h-center[1])\n\n Y, X = np.ogrid[:h, :w]\n dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)\n\n mask = dist_from_center <= radius\n return mask\n\nif __name__ ==\"__main__\":\n import ex\n ex.hey()","repo_name":"vukadinovic936/BrainReg","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17147126935","text":"import gtk\nimport gobject\nfrom draw import *\nfrom utils import *\nimport copy\n\nclass ListView(gtk.DrawingArea):\n '''List view.'''\n \n SORT_DESCENDING = True\n SORT_ASCENDING = False\n SORT_PADDING_X = 5\n\t\n def __init__(self):\n '''Init list view.'''\n # Init.\n gtk.DrawingArea.__init__(self)\n self.add_events(gtk.gdk.POINTER_MOTION_MASK)\n self.add_events(gtk.gdk.BUTTON_PRESS_MASK)\n self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)\n self.add_events(gtk.gdk.ENTER_NOTIFY_MASK)\n self.add_events(gtk.gdk.LEAVE_NOTIFY_MASK)\n self.items = []\n self.cell_widths = []\n self.cell_min_widths = []\n self.cell_min_heights = []\n self.button_press = False\n self.hover_row = None\n self.titles = None\n self.title_offset_y = 0\n self.item_height = 0\n \n # Signal.\n self.connect(\"expose-event\", self.expose_list_view) \n self.connect(\"motion-notify-event\", self.motion_list_view)\n self.connect(\"button-press-event\", self.button_press_list_view)\n self.connect(\"button-release-event\", self.button_release_list_view)\n self.connect(\"leave-notify-event\", self.leave_list_view)\n \n def add_titles(self, titles, title_height=24):\n '''Add titles.'''\n self.titles = titles\n self.title_select_column = None\n self.title_adjust_column = None\n self.title_separator_width = 2\n self.title_clicks = map_value(self.titles, lambda _: False)\n self.title_sorts = map_value(self.titles, lambda _: self.SORT_DESCENDING)\n self.set_title_height(title_height)\n \n def add_items(self, items):\n '''Add items in list.'''\n # Add new items.\n self.items += items\n\n # Re-calcuate.\n title_sizes = map_value(self.titles, lambda title: get_content_size(title, DEFAULT_FONT_SIZE))\n sort_pixbuf = theme.get_dynamic_pixbuf(\"listview/sort_descending.png\").get_pixbuf()\n sort_icon_width = sort_pixbuf.get_width() + self.SORT_PADDING_X * 2\n sort_icon_height = sort_pixbuf.get_height()\n \n cell_min_sizes = []\n for item in items:\n sizes = item.get_column_sizes()\n if cell_min_sizes == []:\n cell_min_sizes = sizes\n else:\n for 
(index, (width, height)) in enumerate(sizes):\n if self.titles == None:\n max_width = max([cell_min_sizes[index][0], width])\n max_height = max([cell_min_sizes[index][1], sort_icon_height, height])\n else:\n max_width = max([cell_min_sizes[index][0], title_sizes[index][0] + sort_icon_width * 2, width])\n max_height = max([cell_min_sizes[index][1], title_sizes[index][1], sort_icon_height, height])\n \n cell_min_sizes[index] = (max_width, max_height)\n \n # Get value.\n (cell_min_widths, cell_min_heights) = unzip(cell_min_sizes)\n self.cell_min_widths = mix_list_max(self.cell_min_widths, cell_min_widths)\n self.cell_min_heights = mix_list_max(self.cell_min_heights, cell_min_heights)\n self.cell_widths = mix_list_max(self.cell_widths, copy.deepcopy(cell_min_widths))\n \n self.item_height = max(self.item_height, max(copy.deepcopy(cell_min_heights))) \n \n # Set size request.\n if len(self.items) > 0:\n self.set_size_request(sum(self.cell_min_widths), self.item_height * len(self.items) + self.title_offset_y)\n \n def set_title_height(self, title_height):\n '''Set title height.'''\n self.title_height = title_height\n if self.titles:\n self.title_offset_y = self.title_height\n else:\n self.title_offset_y = 0\n\n def get_column_sort_type(self, column):\n '''Get sort type.'''\n if 0 <= column <= len(self.title_sorts) - 1:\n return self.title_sorts[column]\n else:\n return None\n \n def set_column_sort_type(self, column, sort_type):\n '''Set sort type.'''\n if 0 <= column <= len(self.title_sorts) - 1:\n self.title_sorts[column] = sort_type\n \n def get_cell_widths(self):\n '''Get cell widths.'''\n return self.cell_widths\n \n def set_cell_width(self, column, width):\n '''Set cell width.'''\n if column <= len(self.cell_min_widths) - 1 and width >= self.cell_min_widths[column]:\n self.cell_widths[column] = width\n \n def set_adjust_cursor(self):\n '''Set adjust cursor.'''\n set_cursor(self, gtk.gdk.SB_H_DOUBLE_ARROW)\n self.adjust_cursor = True \n \n def reset_cursor(self):\n '''Reset cursor.'''\n set_cursor(self, None)\n self.adjust_cursor = False\n \n def get_offset_coordinate(self, widget):\n '''Get offset coordinate.'''\n # Init.\n rect = widget.allocation\n\n # Get coordinate.\n viewport = get_match_parent(widget, \"Viewport\")\n if viewport: \n (offset_x, offset_y) = widget.translate_coordinates(viewport, rect.x, rect.y)\n return (-offset_x, -offset_y, viewport)\n else:\n return (0, 0, viewport)\n \n def expose_list_view(self, widget, event):\n '''Expose list view.'''\n # Init.\n cr = widget.window.cairo_create()\n rect = widget.allocation\n cell_widths = self.get_cell_widths()\n \n # Get offset.\n (offset_x, offset_y, viewport) = self.get_offset_coordinate(widget)\n \n # Draw background.\n pixbuf = theme.get_dynamic_pixbuf(BACKGROUND_IMAGE).get_pixbuf().subpixbuf(\n viewport.allocation.x,\n viewport.allocation.y,\n viewport.allocation.width,\n viewport.allocation.height)\n draw_pixbuf(cr, pixbuf, offset_x, offset_y)\n \n # Draw mask.\n draw_vlinear(cr, offset_x, offset_y, viewport.allocation.width, viewport.allocation.height,\n theme.get_dynamic_shadow_color(\"linearBackground\").get_color_info())\n \n if len(self.items) > 0:\n # Save cairo status.\n cr.save()\n \n # Don't draw any item under title area.\n cr.rectangle(offset_x, offset_y + self.title_offset_y,\n viewport.allocation.width, viewport.allocation.height - self.title_offset_y) \n cr.clip()\n \n # Draw hover row.\n if self.hover_row != None:\n draw_vlinear(cr, offset_x, self.title_offset_y + self.hover_row * self.item_height,\n 
viewport.allocation.width, self.item_height,\n theme.get_dynamic_shadow_color(\"listviewHover\").get_color_info())\n \n \n # Get viewport index.\n start_y = offset_y - self.title_offset_y\n end_y = offset_y + viewport.allocation.height - self.title_offset_y\n start_index = max(start_y / self.item_height, 0)\n if (end_y - end_y / self.item_height * self.item_height) == 0:\n end_index = min(end_y / self.item_height + 1, len(self.items))\n else:\n end_index = min(end_y / self.item_height + 2, len(self.items)) \n \n # Draw list item.\n for (row, item) in enumerate(self.items[start_index:end_index]):\n renders = item.get_renders()\n for (column, render) in enumerate(renders):\n cell_width = cell_widths[column]\n cell_x = sum(cell_widths[0:column])\n render(cr, gtk.gdk.Rectangle(\n rect.x + cell_x,\n rect.y + (row + start_index) * self.item_height + self.title_offset_y,\n cell_width, \n self.item_height\n ))\n \n # Restore cairo status to draw title area.\n cr.restore() \n \n # Draw titles.\n if self.titles:\n for (column, width) in enumerate(cell_widths):\n # Get offset x coordinate.\n cell_offset_x = sum(cell_widths[0:column])\n \n # Calcuate current cell width.\n if column == len(cell_widths) - 1:\n if sum(cell_widths) < rect.width:\n cell_width = rect.width - cell_offset_x\n else:\n cell_width = width\n else:\n cell_width = width\n \n # Draw title column background.\n if self.title_select_column == column:\n if self.button_press:\n shadow_color = \"listviewHeaderPress\"\n else:\n shadow_color = \"listviewHeaderSelect\"\n else:\n shadow_color = \"listviewHeader\"\n draw_vlinear(cr, cell_offset_x, offset_y, cell_width, self.title_height,\n theme.get_dynamic_shadow_color(shadow_color).get_color_info())\n \n # Draw sort icon.\n sort_type = self.get_column_sort_type(column) \n if sort_type == self.SORT_DESCENDING:\n sort_pixbuf = theme.get_dynamic_pixbuf(\"listview/sort_descending.png\").get_pixbuf()\n elif sort_type == self.SORT_ASCENDING:\n sort_pixbuf = theme.get_dynamic_pixbuf(\"listview/sort_ascending.png\").get_pixbuf()\n \n draw_pixbuf(cr, sort_pixbuf,\n cell_offset_x + cell_width - sort_pixbuf.get_width() - self.SORT_PADDING_X,\n offset_y + (self.title_height - sort_pixbuf.get_height()) / 2) \n \n for (column, title) in enumerate(self.titles):\n # Draw title split line.\n cell_x = sum(cell_widths[0:column])\n \n if cell_x != 0:\n draw_vlinear(cr, cell_x, offset_y, 1, self.title_height,\n theme.get_dynamic_shadow_color(\"listviewHeaderSplit\").get_color_info())\n \n # Draw title.\n draw_font(cr, title, DEFAULT_FONT_SIZE, \n theme.get_dynamic_color(\"listItemText\").get_color(),\n cell_x, offset_y, cell_widths[column], self.title_height) \n \n return False\n \n def motion_list_view(self, widget, event):\n '''Motion list view.'''\n if self.titles:\n # Get offset.\n (offset_x, offset_y, viewport) = self.get_offset_coordinate(widget)\n \n if self.title_adjust_column != None:\n # Set column width.\n cell_min_end_x = sum(self.cell_widths[0:self.title_adjust_column]) + self.cell_min_widths[self.title_adjust_column]\n (ex, ey) = get_event_coords(event)\n if ex >= cell_min_end_x:\n self.set_cell_width(self.title_adjust_column, ex - sum(self.cell_widths[0:self.title_adjust_column]))\n else:\n if offset_y <= event.y <= offset_y + self.title_height:\n cell_widths = self.get_cell_widths()\n for (column, _) in enumerate(cell_widths):\n if column == len(cell_widths) - 1:\n cell_start_x = widget.allocation.width\n cell_end_x = widget.allocation.width\n else:\n cell_start_x = sum(cell_widths[0:column + 1]) 
- self.title_separator_width\n cell_end_x = sum(cell_widths[0:column + 1]) + self.title_separator_width\n \n if event.x < cell_start_x:\n self.title_select_column = column\n self.reset_cursor()\n break\n elif cell_start_x <= event.x <= cell_end_x:\n self.title_select_column = None\n self.set_adjust_cursor()\n break\n elif len(self.items) > 0:\n # Rest cursor and title select column.\n self.title_select_column = None\n self.reset_cursor()\n \n # Set hover row.\n (event_x, event_y) = get_event_coords(event)\n self.hover_row = (event_y - self.title_offset_y) / self.item_height\n \n # Redraw after motion.\n self.queue_draw()\n elif len(self.items) > 0:\n # Rest cursor and title select column.\n self.title_select_column = None\n self.reset_cursor()\n \n # Set hover row.\n (event_x, event_y) = get_event_coords(event)\n self.hover_row = (event_y - self.title_offset_y) / self.item_height\n \n # Redraw after motion.\n self.queue_draw()\n \n def button_press_list_view(self, widget, event):\n '''Button press event handler.'''\n self.button_press = True \n if self.titles:\n # Get offset.\n (offset_x, offset_y, viewport) = self.get_offset_coordinate(widget)\n if offset_y <= event.y <= offset_y + self.title_height:\n cell_widths = self.get_cell_widths()\n for (column, _) in enumerate(cell_widths):\n if column == len(cell_widths) - 1:\n cell_end_x = widget.allocation.width\n else:\n cell_end_x = sum(cell_widths[0:column + 1]) - self.title_separator_width\n \n if column == 0:\n cell_start_x = 0\n else:\n cell_start_x = sum(cell_widths[0:column]) + self.title_separator_width\n \n if cell_start_x < event.x < cell_end_x:\n self.title_clicks[column] = True\n break\n elif cell_end_x <= event.x <= cell_end_x + self.title_separator_width * 2:\n self.title_adjust_column = column\n break\n \n self.queue_draw()\n\n def button_release_list_view(self, widget, event):\n '''Button release event handler.'''\n self.button_press = False\n if self.titles:\n # Get offset.\n (offset_x, offset_y, viewport) = self.get_offset_coordinate(widget)\n if offset_y <= event.y <= offset_y + self.title_height:\n cell_widths = self.get_cell_widths()\n for (column, _) in enumerate(cell_widths):\n if column == len(cell_widths) - 1:\n cell_end_x = widget.allocation.width\n else:\n cell_end_x = sum(cell_widths[0:column + 1]) - self.title_separator_width\n \n if column == 0:\n cell_start_x = 0\n else:\n cell_start_x = sum(cell_widths[0:column]) + self.title_separator_width\n \n if cell_start_x < event.x < cell_end_x:\n if self.title_clicks[column]:\n self.title_sorts[column] = not self.title_sorts[column]\n self.title_clicks[column] = False\n break\n \n self.title_adjust_column = None\n self.queue_draw()\n \n def leave_list_view(self, widget, event):\n '''leave-notify-event signal handler.'''\n self.title_select_column = None\n self.title_adjust_column = None\n self.reset_cursor()\n\n self.queue_draw()\n \ngobject.type_register(ListView)\n\nclass ListItem(object):\n '''List item.'''\n \n def __init__(self, icon_path, title, artist, length, listen_button):\n '''Init list item.'''\n self.update(icon_path, title, artist, length, listen_button)\n \n def update(self, icon_path, title, artist, length, listen_button):\n '''Update.'''\n # Update.\n self.icon_path = icon_path\n self.title = title\n self.artist = artist\n self.length = length\n self.listen_button = listen_button\n \n # Calculate item size.\n self.icon_padding_x = 10\n self.icon_padding_y = 5\n pixbuf = theme.get_dynamic_pixbuf(icon_path).get_pixbuf()\n self.icon_width = 
pixbuf.get_width()\n        self.icon_height = pixbuf.get_height()\n\n        self.title_padding_x = 10\n        self.title_padding_y = 5\n        (self.title_width, self.title_height) = get_content_size(self.title, DEFAULT_FONT_SIZE)\n\n        self.artist_padding_x = 10\n        self.artist_padding_y = 5\n        (self.artist_width, self.artist_height) = get_content_size(self.artist, DEFAULT_FONT_SIZE)\n\n        self.length_padding_x = 10\n        self.length_padding_y = 5\n        (self.length_width, self.length_height) = get_content_size(self.length, DEFAULT_FONT_SIZE)\n\n        pixbuf = theme.get_dynamic_pixbuf(listen_button).get_pixbuf()\n        self.listen_width = pixbuf.get_width()\n        self.listen_height = pixbuf.get_height()\n\n    def render_icon(self, cr, rect):\n        '''Render icon.'''\n        render_image(cr, rect, self.icon_path,\n                     rect.x + (rect.width - self.icon_width) / 2,\n                     rect.y + (rect.height - self.icon_height) / 2)\n\n    def render_title(self, cr, rect):\n        '''Render title.'''\n        rect.x += self.title_padding_x\n        render_text(cr, rect, self.title)\n\n    def render_artist(self, cr, rect):\n        '''Render artist.'''\n        rect.x += self.artist_padding_x\n        render_text(cr, rect, self.artist)\n\n    def render_length(self, cr, rect):\n        '''Render length.'''\n        rect.width -= self.length_padding_x\n        render_text(cr, rect, self.length, ALIGN_END)\n\n    def render_listen(self, cr, rect):\n        '''Render listen.'''\n        render_image(cr, rect, self.listen_button,\n                     rect.x + (rect.width - self.listen_width) / 2,\n                     rect.y + (rect.height - self.listen_height) / 2)\n\n    def get_column_sizes(self):\n        '''Get sizes.'''\n        return [(self.icon_width + self.icon_padding_x * 2,\n                 self.icon_height + self.icon_padding_y * 2),\n                (self.title_width + self.title_padding_x * 2,\n                 self.title_height + self.title_padding_y * 2),\n                (self.artist_width + self.artist_padding_x * 2,\n                 self.artist_height + self.artist_padding_y * 2),\n                (self.length_width + self.length_padding_x * 2,\n                 self.length_height + self.length_padding_y * 2),\n                (self.listen_width + self.icon_padding_x * 2,\n                 self.listen_height + self.icon_padding_y * 2)]\n\n    def get_renders(self):\n        '''Get render callbacks.'''\n        return [self.render_icon,\n                self.render_title,\n                self.render_artist,\n                self.render_length,\n                self.render_listen]\n\ndef render_text(cr, rect, content, align=ALIGN_START, font_size=DEFAULT_FONT_SIZE):\n    '''Render text.'''\n    draw_font(cr, content, font_size,\n              theme.get_dynamic_color(\"listItemText\").get_color(),\n              rect.x, rect.y, rect.width, rect.height, align)\n\ndef render_image(cr, rect, image_path, x, y):\n    '''Render image.'''\n    draw_pixbuf(cr, theme.get_dynamic_pixbuf(image_path).get_pixbuf(), x, y)\n","repo_name":"iiiyu/old-oh-my-password","sub_path":"linux/gui/listview.py","file_name":"listview.py","file_ext":"py","file_size_in_byte":20175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6370656127","text":"def my():\n    n, m = map(int, input().split())\n    in_list = list(map(int, input().split()))\n    in_list.sort()\n    lt = 0\n    rt = len(in_list) - 1\n    mid = 0\n\n    while lt != rt:\n        mid = (lt + rt) // 2\n        print(mid, lt, rt)\n        if in_list[mid] > m:\n            rt = mid - 1\n        elif in_list[mid] < m:\n            lt = mid + 1\n        else:\n            print(mid+1)\n\n\n\ndef solution():\n    n, m = map(int, input().split())\n    a = list(map(int, input().split()))\n    a.sort()\n\n    lt = 0\n    rt = n-1\n\n    while lt <= rt:\n        mid = (lt + rt) // 2\n        print(mid, lt, rt)\n        if a[mid] == m:\n            print(mid + 1)\n            break\n        elif a[mid] > m:\n            rt = mid - 1\n        elif a[mid] < m:\n            lt = mid + 1\n\nsolution()\n
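\n# A fixed take on my() above, left as a minimal commented sketch (my_fixed is a new name):\n# keep searching while lt <= rt and stop on a match, the same shape as solution().\n# def my_fixed():\n#     n, m = map(int, input().split())\n#     a = sorted(map(int, input().split()))\n#     lt, rt = 0, n - 1\n#     while lt <= rt:\n#         mid = (lt + rt) // 2\n#         if a[mid] == m:\n#             print(mid + 1)\n#             break\n#         elif a[mid] > m:\n#             rt = mid - 1\n#         else:\n#             lt = mid + 1\n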
\n# Take the input, append it to the existing list, then use a find function; that should work\n# Binary search\n# Repeatedly probe the middle value until the answer is reached\n# Oops, I solved it wrong: my code runs the loop fewer times than it should.\n\n# Just remember this: solve it using lt, rt and mid,\n# and the while loop runs until lt becomes greater than or equal to rt\n\n# From here on, these problems have to be solved with the algorithm","repo_name":"poeynus/algorithm_study","sub_path":"2022/New/이분 탐색 & 그리디/이분검색.py","file_name":"이분검색.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9357333402","text":"class Animal:\r\n    zoo_name = \"Hayaton\"\r\n\r\n    def __init__(self, name):\r\n        self.name_ = name\r\n        self.hunger_ = 0\r\n\r\n    def get_name(self):\r\n        return self.name_\r\n\r\n    def is_hungry(self):\r\n        return self.hunger_ > 0\r\n\r\n    def feed(self):\r\n        if self.hunger_ > 0:\r\n            self.hunger_ -= 1\r\n\r\n    def talk(self):\r\n        print(\"Animal talk\")\r\n\r\n\r\nclass Dog(Animal):\r\n    def talk(self):\r\n        print(\"Woof woof\")\r\n\r\n    def fetch_stick(self):\r\n        print(\"There you go, sir!\")\r\n\r\n\r\nclass Cat(Animal):\r\n    def talk(self):\r\n        print(\"Meow\")\r\n\r\n    def chase_laser(self):\r\n        print(\"Meeeeow\")\r\n\r\n\r\nclass Skunk(Animal):\r\n    def __init__(self, name):\r\n        super().__init__(name)\r\n        self.stink_count_ = 6\r\n\r\n    def talk(self):\r\n        print(\"Tsssss\")\r\n\r\n    def stink(self):\r\n        print(\"Dear Lord!\")\r\n\r\n\r\nclass Unicorn(Animal):\r\n    def talk(self):\r\n        print(\"Good morning, darling\")\r\n\r\n    def sing(self):\r\n        print(\"I'm not your toy...\")\r\n\r\n\r\nclass Dragon(Animal):\r\n    def __init__(self, name):\r\n        super().__init__(name)\r\n        self.color_ = \"Green\"\r\n\r\n    def talk(self):\r\n        print(\"Raaaawr\")\r\n\r\n    def breath_fire(self):\r\n        print(\"$@#$#@$\")\r\n\r\n\r\ndef main():\r\n    zoo_lst = []\r\n\r\n    dog = Dog(\"Brownie\")\r\n    zoo_lst.append(dog)\r\n\r\n    cat = Cat(\"Zelda\")\r\n    zoo_lst.append(cat)\r\n\r\n    skunk = Skunk(\"Stinky\")\r\n    zoo_lst.append(skunk)\r\n\r\n    unicorn = Unicorn(\"Keith\")\r\n    zoo_lst.append(unicorn)\r\n\r\n    dragon = Dragon(\"Lizzy\")\r\n    zoo_lst.append(dragon)\r\n\r\n    for animal in zoo_lst:\r\n        print(animal.get_name())\r\n        animal.talk()\r\n\r\n        while animal.is_hungry():\r\n            animal.feed()\r\n\r\n        if isinstance(animal, Dog):\r\n            animal.fetch_stick()\r\n        elif isinstance(animal, Cat):\r\n            animal.chase_laser()\r\n        elif isinstance(animal, Skunk):\r\n            animal.stink()\r\n        elif isinstance(animal, Unicorn):\r\n            animal.sing()\r\n        elif isinstance(animal, Dragon):\r\n            animal.breath_fire()\r\n\r\n        print()\r\n\r\n    doggo = Dog(\"Doggo\")\r\n    zoo_lst.append(doggo)\r\n\r\n    kitty = Cat(\"Kitty\")\r\n    zoo_lst.append(kitty)\r\n\r\n    stinky_jr = Skunk(\"Stinky Jr.\")\r\n    zoo_lst.append(stinky_jr)\r\n\r\n    clair = Unicorn(\"Clair\")\r\n    zoo_lst.append(clair)\r\n\r\n    mc_fly = Dragon(\"McFly\")\r\n    zoo_lst.append(mc_fly)\r\n\r\n    for animal in zoo_lst:\r\n        print(animal.get_name())\r\n        animal.talk()\r\n\r\n        while animal.is_hungry():\r\n            animal.feed()\r\n\r\n        if isinstance(animal, Dog):\r\n            animal.fetch_stick()\r\n        elif isinstance(animal, Cat):\r\n            animal.chase_laser()\r\n        elif isinstance(animal, Skunk):\r\n            animal.stink()\r\n        elif isinstance(animal, Unicorn):\r\n            animal.sing()\r\n        elif isinstance(animal, Dragon):\r\n            animal.breath_fire()\r\n\r\n        print()\r\n\r\n    print(\"Zoo name:\", Animal.zoo_name)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"TavorTheBeast/advancedPY","sub_path":"Unit_2/2.5.py","file_name":"2.5.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2445171344","text":"import 
argparse\nimport asyncio\nimport atexit\nimport logging\nimport signal\n\nfrom concurrent import futures\n\nimport gbulb\n\nfrom uchroma.log import Log, LOG_PROTOCOL_TRACE, LOG_TRACE\nfrom uchroma.util import ensure_future\n\nfrom .dbus import DeviceManagerAPI\nfrom .device_manager import UChromaDeviceManager\nfrom .power import PowerMonitor\n\n\nclass UChromaServer:\n\n    def __init__(self):\n        gbulb.install()\n\n        parser = argparse.ArgumentParser(description='UChroma daemon')\n        parser.add_argument('-v', \"--version\", action='version', version='self.version')\n        parser.add_argument('-d', \"--debug\", action='append_const', const=True,\n                            help=\"Increase logging verbosity\")\n        parser.add_argument('-C', \"--colorlog\", action='store_true',\n                            help=\"Use colored log output\")\n\n        args = parser.parse_args()\n\n\n        self._loop = asyncio.get_event_loop()\n\n        level = logging.INFO\n        asyncio_debug = False\n        colorlog = False\n        if args.colorlog is not None:\n            colorlog = args.colorlog\n\n        Log.enable_color(colorlog)\n        self._logger = Log.get('uchroma.server')\n\n        if args.debug is not None:\n            if len(args.debug) > 2:\n                level = LOG_PROTOCOL_TRACE\n                asyncio_debug = True\n            elif len(args.debug) == 2:\n                level = LOG_TRACE\n                asyncio_debug = True\n            elif len(args.debug) == 1:\n                level = logging.DEBUG\n\n        logging.getLogger().setLevel(level)\n        self._loop.set_debug(asyncio_debug)\n\n\n    def _shutdown_callback(self):\n        self._logger.info(\"Shutting down\")\n        self._loop.stop()\n\n\n    def run(self):\n        try:\n            self._run()\n        except KeyboardInterrupt:\n            pass\n\n\n    def _run(self):\n        dm = UChromaDeviceManager()\n\n        atexit.register(UChromaServer.exit, self._loop)\n\n        dbus = DeviceManagerAPI(dm, self._logger)\n        power = PowerMonitor()\n\n        for sig in (signal.SIGINT, signal.SIGTERM):\n            self._loop.add_signal_handler(sig, self._shutdown_callback)\n\n        try:\n            dbus.run()\n            power.start()\n\n            ensure_future(dm.monitor_start(), loop=self._loop)\n\n            self._loop.run_forever()\n\n        except KeyboardInterrupt:\n            pass\n\n        finally:\n            for sig in (signal.SIGTERM, signal.SIGINT):\n                self._loop.remove_signal_handler(sig)\n\n            power.stop()\n\n            self._loop.run_until_complete(asyncio.wait( \\\n                [dm.close_devices(), dm.monitor_stop()],\n                return_when=futures.ALL_COMPLETED))\n\n\n    @staticmethod\n    def exit(loop):\n        try:\n            loop.run_until_complete(asyncio.wait( \\\n                list(asyncio.Task.all_tasks()),\n                return_when=futures.ALL_COMPLETED))\n            loop.close()\n\n        except KeyboardInterrupt:\n            pass\n\n\ndef run_server():\n    UChromaServer().run()\n","repo_name":"cyanogen/uchroma","sub_path":"uchroma/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"} +{"seq_id":"4677424804","text":"import urllib.request\nimport json\n\n\napi = 'https://elevation-api.io/api/elevation?points='\n\n\ndef elevation(long, lati):\n    # Elevation calculation using the external API elevation-api.io\n    elev_result = None  # default so the function still returns a value if the request fails\n    try:\n        elev_request = '({},{})'.format(long, lati)\n        request = api + elev_request\n        response = urllib.request.urlopen(request).read()\n        elevation = json.loads(response)\n        elev_result = elevation['elevations'][0]['elevation']\n    except:\n        print(\"Problem with collecting your elevation\")\n    return elev_result\n","repo_name":"IzaGr/LocationAPI","sub_path":"locapi/funcmodule.py","file_name":"funcmodule.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"6585180876","text":"num=int(input(\"Digite el numero que quiera covertir: \"))\nbas=int(input(\"Digite la base que quiera convertir: \"))\ndef convertir_Entero_Base(numero, base):\n conversion_cadena=\"0123456789ABCDEF\"\n if numero < base:\n resultado = conversion_cadena[numero]\n else:\n resultado=convertir_Entero_Base(numero//base, base)+conversion_cadena[numero % base]\n return resultado\n\nejecutar=convertir_Entero_Base(num,bas)\nprint(f\"El numero {num} a base {bas} es: {ejecutar}\")\n","repo_name":"EmersonElvis/Laboratorio_semana_05","sub_path":"Ejercicio_03.py","file_name":"Ejercicio_03.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34777489223","text":"import argparse\r\n\r\nif __name__ == '__main__':\r\n ''' \r\n python analysis3.py --inputData /usr1/public/yifeng/Github/inputData --outputData /usr1/public/yifeng/Github/outputData\r\n '''\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--inputData', help = 'directory of input data', type = str)\r\n parser.add_argument('--outputData', help = 'directory of output data', type = str)\r\n args = parser.parse_args()\r\n\r\n path_inputData = args.inputData \r\n path_outputData = args.outputData\r\n\r\n f = open(path_inputData+'/go-basic.obo', 'r')\r\n # Format: isA GoTermX GoTermY\r\n # Meaning: GoTermX is GoTermY\r\n fo = open(path_outputData+'/isA.cfacts', 'w')\r\n\r\n flag_term = False\r\n #flag_biopro = true\r\n src_id = ''\r\n name_space = ''\r\n dst_id = list()\r\n for line in f:\r\n line = line.strip()\r\n if line == '[Term]': flag_term = True\r\n\r\n if flag_term == False: continue\r\n\r\n # Switch...case...\r\n if line.startswith('id: GO:'):\r\n src_id = line\r\n continue\r\n if line.startswith('namespace:'):\r\n name_space = line\r\n continue\r\n if line.startswith('is_a: GO:'):\r\n line = line.split(':')[2].split(' ! 
')[0]\r\n dst_id.append('go'+line)\r\n # if dst_id != '': dst_id += '\\n' \r\n # dst_id += line\r\n continue\r\n if line == '':\r\n \r\n if name_space == 'namespace: biological_process':\r\n #print >> fo, '======================='\r\n src = 'go'+src_id.split(':')[2]\r\n\r\n for dst in dst_id:\r\n print >> fo, 'isA\\t'+src+'\\t'+dst\r\n # print >> fo, src\r\n # #print >> fo, name_space\r\n # print >> fo, ' '.join(dst_id)\r\n #for dst in dst_id:\r\n # print >> fo, dst\r\n # if dst_id == '':\r\n # dst_id = 'terminal'\r\n # else:\r\n # dst_id = dst_id.split(':')[2]\r\n # print >> fo, dst_id\r\n dst_id = list()\r\n flag_term = False\r\n continue\r\n #print line\r\n f.close()\r\n fo.close()\r\n#EOF.\r\n","repo_name":"cwt1/OncoExplorer","sub_path":"src_analysis/analysis3.py","file_name":"analysis3.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22546936158","text":"from utils import input_num\n\ndef question_8():\n hourly_wage = input_num('Enter hourly wage: ')\n total_reg_hours = input_num('Enter total regular hours: ')\n total_overtime_hours = input_num('Enter total overtime hours: ')\n\n weekly_wage = hourly_wage * total_reg_hours + \\\n 1.5 * hourly_wage * total_overtime_hours\n\n print(f'Total weekly pay of an employee: RM{weekly_wage}')\n","repo_name":"Spimy/CSC1024-Practicals","sub_path":"01_Practical/q8.py","file_name":"q8.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26174914807","text":"from distutils.util import convert_path\nfrom setuptools import setup, find_packages\n\nmodule = 'nb2xls'\n\n# get version from __meta__\nmeta_ns = {}\npath = convert_path(module+'/__meta__.py')\nwith open(path) as meta_file:\n exec(meta_file.read(), meta_ns)\n\n# read requirements.txt\nwith open('requirements.txt', 'r') as f:\n content = f.read()\nli_req = content.split('\\n')\ninstall_requires = [e.strip() for e in li_req if len(e)]\n\n\nname = module\nname_url = name.replace('_', '-')\n\npackages = [module]\nversion = meta_ns['__version__']\ndescription = 'Export Jupyter notebook as an Excel xls file.'\nlong_description = 'Export Jupyter notebook as an Excel xls file.'\nauthor = 'ideonate'\nauthor_email = 'dan@ideonate.com'\n# github template\nurl = 'https://github.com/{}/{}'.format(author,\n name_url)\ndownload_url = 'https://github.com/{}/{}/tarball/{}'.format(author,\n name_url,\n version)\nkeywords = ['jupyter',\n 'nbconvert',\n ]\nlicense = 'MIT'\nclassifiers = ['Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ]\ninclude_package_data = True\n\nzip_safe = False\n\nextra_requirements = {\n 'test': ['pytest', 'testpath', 'openpyxl', 'matplotlib']\n}\n\n# ref https://packaging.python.org/tutorials/distributing-packages/\nsetup(\n name=name,\n version=version,\n packages=packages,\n author=author,\n author_email=author_email,\n description=description,\n long_description=long_description,\n url=url,\n download_url=download_url,\n keywords=keywords,\n license=license,\n classifiers=classifiers,\n include_package_data=include_package_data,\n install_requires=install_requires,\n extras_require=extra_requirements,\n zip_safe=zip_safe,\n\n entry_points = {\n 'nbconvert.exporters': [\n 'xls = nb2xls:XLSExporter'\n ],\n 
}\n)\n\n","repo_name":"ideonate/nb2xls","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"67"} +{"seq_id":"73490578773","text":"try:\n from .Ui import Ui_Timer\n from .timer_fnxs import Time_calc\n from .record_handler import RecordHandler\n from .Records import Ui_Records\nexcept:\n from Ui import Ui_Timer\n from timer_fnxs import Time_calc\n from record_handler import RecordHandler\n from Records import Ui_Records\nfrom time import time_ns, sleep\nfrom PyQt5.QtWidgets import QWidget,QApplication\nfrom PyQt5.QtCore import QTimer\nfrom math import inf\n\nclass Timer(QWidget):\n def __init__(self, parent: QWidget):\n super().__init__(parent)\n \n self.ui = Ui_Timer(parent)\n self._time_measurer = Time_calc()\n self._recorder = RecordHandler()\n self.SetupUI()\n\n lcd_updater = QTimer(self)\n lcd_updater.setInterval(10)\n lcd_updater.timeout.connect(self.update_lcd)\n lcd_updater.start()\n \n def SetupUI(self):\n self.ui.deck.buttons[0].clicked.connect(self.start_clicked)\n self.ui.deck.buttons[1].clicked.connect(self.pause_clicked)\n self.ui.deck.buttons[2].clicked.connect(self.reset_clicked)\n self.ui.deck.buttons[3].clicked.connect(self.records_clicked)\n self.ui.deck.buttons[4].clicked.connect(self.del_clicked)\n self.ui.deck.buttons[5].clicked.connect(self.dnf_clicked)\n self.ui.deck.buttons[6].clicked.connect(self.ok_clicked)\n self.ui.deck.buttons[7].clicked.connect(self.plus2_clicked)\n \n def start_clicked(self, foo):\n self._time_measurer.start()\n\n def pause_clicked(self, foo):\n self._time_measurer.pause()\n\n def reset_clicked(self, foo):\n self._time_measurer.reset()\n self.reset_disp()\n\n def ok_clicked(self, foo):\n self._recorder.update(\"3x3\", self._time_measurer.elapsed, time_ns())\n self._time_measurer.reset()\n self.reset_disp()\n\n def del_clicked(self, foo):\n self._time_measurer.reset()\n self.reset_disp()\n\n def dnf_clicked(self, foo):\n self._recorder.update(\"3x3\", inf, time_ns(), \"dnf\")\n self._time_measurer.reset()\n self.reset_disp()\n\n def plus2_clicked(self, foo):\n self._recorder.update(\"3x3\", self._time_measurer.elapsed + 2000000000, time_ns(), comment=\"+2'ed\")\n self._time_measurer.reset()\n self.reset_disp()\n\n def records_clicked(self, foo):\n rec_win = Ui_Records()\n rec_win.set_modes(self._recorder.datas)\n rec_win.set_table_datas(self._recorder.datas)\n rec_win.reload_datas.clicked.connect(lambda x: rec_win.set_table_datas(self._recorder.datas))\n \n def reset_disp(self):\n for i in range(6):\n self.ui.disp.lcd[i].display(0)\n \n def update_lcd(self):\n if self._time_measurer.state == 1:\n el = self._time_measurer.parser(str(time_ns() - self._time_measurer.initial))\n for i in range(6):\n self.ui.disp.lcd[i].display(int(el[i]))\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n main_window = QWidget()\n ui = Timer(main_window)\n main_window.show()\n app.exec_()","repo_name":"BijanRegmi/Cuber","sub_path":"timer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20803746219","text":"import random\n\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom .models import UserPost, UserImage, CustomUser, UserFollowing, LikesTable, dog, userDogPreference\nfrom .forms import PostForm, CustomUserCreationForm, 
CustomUserChangeForm, YourModelForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_http_methods\n\n# Here we begin using class based views:\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import CreateView, DeleteView\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\n\nfrom django.template.defaulttags import register\nfrom django.http import HttpResponse\n\n# HTMX Imports\nfrom django_htmx.http import trigger_client_event\n\n\n# Create your views here.\n\n\ndef new_post(request):\n    form = PostForm\n    context = {'form': form}\n    return render(request, 'postitApp/new_post.html', context)\n\n\ndef new_post_submit(request):\n    if request.method == \"POST\":\n        user = request.user\n        caption = request.POST.get('caption')\n        form = PostForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            form.instance.created_by = request.user\n            just_posted = form.save(commit=True)\n\n            for img in request.FILES.getlist('images'):\n                # TODO \"File Validator\"\n                image = UserImage.objects.create(\n                    post=just_posted,\n                    image=img,\n                )\n\n        else:\n            print('no save')\n\n    return redirect('index')\n\n\ndef delete_post(request, pk):\n    try:\n        record = UserPost.objects.get(id=pk)\n        # record.image.delete()  # This deletes the actual file stored at the path of record.image\n        # record.video.delete()  # This deletes the actual file stored at the path of record.video\n        record.delete()\n    except:\n        print('record does not exist')\n\n    return redirect('index')\n\n\ndef queryUserPostFeed(request):\n    \"\"\"return the last fifty posts of the users that the request user follows\n    and return to the context object name\"\"\"\n    user = request.user\n    following = user.following.filter(user=user)\n    \"\"\"Build list of users for query filter\"\"\"\n    users = []\n    for u in following:\n        users.append(u.following)\n    \"\"\"Create query object and build it at runtime\"\"\"\n\n    if len(users) != 0:\n        query = Q()\n        for name in users:\n            query = query | Q(user=name)\n        \"\"\"Chain and filter results\"\"\"\n        posts = UserPost.objects.filter(query).order_by('-publish_date')[:50]\n    else:\n        posts = None\n    return posts\n\n\n@login_required()\ndef generateNotFollowingPool(request):\n    \"\"\" Generate a pool of unfollowed users for creating dynamic suggested following list. Store
Store\n on session object \"\"\"\n number_results = 50\n all_user_id_list = list(CustomUser.objects.all().values_list('id', flat=True))\n followed_user_id_list = list(UserFollowing.objects.filter(user=request.user).values_list('following',\n flat=True))\n all_user_not_followed_id_set = set(all_user_id_list) - set(followed_user_id_list)\n all_user_not_followed_id_list = list(all_user_not_followed_id_set)\n sample_size = len(all_user_not_followed_id_list) if len(\n all_user_not_followed_id_list) < number_results else number_results\n suggested_unfollowed_users_list = random.sample(all_user_not_followed_id_list, sample_size)\n\n request.session['unfollowed_pool'] = suggested_unfollowed_users_list\n return None\n\n\n@login_required()\ndef generateNotFollowingList(request, number_results):\n \"\"\" Generate a random set of people that are not followed for the suggested users to follow\n do this by getting all users as list, then getting all followed users, convert them to a set\n then do set subtraction operation, convert the result to a list, and use random to pick 5 from that list\n if the set size is less than 10 users, this is handled by sample_size one line conditional\"\"\"\n if not number_results:\n number_results = 5\n\n unfollowed_pool = request.session.get('unfollowed_pool')\n number_results = len(unfollowed_pool) if len(unfollowed_pool) < number_results else number_results\n suggested_unfollowed_users_list, unfollowed_pool_remaining = unfollowed_pool[:number_results], unfollowed_pool[\n number_results:]\n request.session['unfollowed_pool'] = unfollowed_pool_remaining\n\n context_dict = {'not_following': CustomUser.objects.filter(pk__in=suggested_unfollowed_users_list)}\n u_list = context_dict.get('not_following')\n fq = {} # following query dictionary creation for showing the number of followers that an unfollowed user has\n \"\"\" Create the following query dictionary for all the suggested users \"\"\"\n for u in u_list:\n following_query = UserFollowing.objects.filter(following=u)\n fq[u.username] = following_query\n context_dict['fq'] = fq # add the feature query dictionary to the context data dictionary\n return context_dict\n\n@login_required()\ndef likesQuery(request, post):\n likes_query = LikesTable.objects.filter(post=post)\n return likes_query\n\n@login_required()\ndef queryIfUserLikedPost(request, posts):\n lq = {}\n request_user_liked_post = {}\n if not posts is None:\n for post in posts:\n likes_query = likesQuery(request, post)\n lq[post] = likes_query\n users_who_liked_post = likes_query.values('user_id')\n for user in users_who_liked_post:\n if request.user.id in user.values():\n request_user_liked_post[post] = True\n\n return lq, request_user_liked_post\n\n\n# Class based views:\n\nclass IndexView(LoginRequiredMixin, ListView):\n # model = UserPost\n login_url = 'login'\n redirect_field_name = 'redirect_to'\n queryset = UserPost.objects.order_by('-publish_date')\n context_object_name = 'user_posts'\n template_name = 'postitApp/index.html'\n\n def get_queryset(self):\n posts = queryUserPostFeed(self.request)\n return posts\n\n def get_context_data(self, **kwargs):\n \"\"\" need to have multiple queries beyond the get_queryset call. Construct a dictionary and call\n the super class get_context_data to get the 'user_posts' context as queried above in get_queryset\n this is returned as a dict. 
Then add additional items as needed to the dict before returning it\"\"\"\n generateNotFollowingPool(self.request)\n cd = super(IndexView, self).get_context_data(**kwargs)\n not_following = generateNotFollowingList(self.request, 5)\n cd['not_following'] = not_following.get('not_following')\n cd['fq'] = not_following.get('fq') # add the feature query dictionary to the context data dictionary\n \"\"\" Get the likes and comments for each post. Place this in context data. \"\"\"\n\n if not cd.get('user_posts') is None:\n cd['lq'], cd['request_user_liked_post'] = queryIfUserLikedPost(self.request, cd.get('user_posts'))\n return cd\n\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n\nclass NewPost(LoginRequiredMixin, CreateView):\n login_url = 'login'\n redirect_field_name = 'redirect_to'\n template_name = 'postitApp/new_post.html'\n # model = UserPost\n form_class = PostForm\n # fields = ['caption']\n\n\nclass DeletePost(LoginRequiredMixin, DeleteView):\n login_url = 'login'\n redirect_field_name = 'redirect_to'\n model = UserPost\n success_url = '/'\n template_name = 'postitApp/delete_object.html'\n\n\ndef aboutPage(request):\n return render(request, 'postitApp/about.html', context={})\n\n\ndef landingPage(request):\n return render(request, 'postitApp/registration/landing_page.html', context={})\n\n\ndef loginUser(request):\n if request.method == 'POST':\n email = request.POST.get('email')\n password = request.POST.get('password')\n try:\n username = CustomUser.objects.get(email=email.lower()).username\n user = authenticate(username=username, password=password)\n except:\n user = None\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, \"User or Password not found\")\n\n return render(request, 'postitApp/registration/login.html', context={})\n\n\ndef signup(request):\n form = CustomUserCreationForm()\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n email = request.POST.get('email')\n user = form.save(commit=False)\n user.username = email.lower()\n try:\n user.save()\n login(request, user)\n return redirect('index')\n except:\n messages.error(request, \"Could not save user\")\n\n else:\n messages.error(request, \"Could not register user\")\n\n return render(request, 'postitApp/registration/signup.html', context={'form': form})\n\n\n@login_required()\ndef editUser(request):\n user = request.user\n form = CustomUserChangeForm(instance=user)\n\n following = user.following.filter(user=user)\n followed_by = user.followed_by.filter(following=user)\n\n if request.method == 'POST':\n form = CustomUserChangeForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n form.save()\n return redirect('edit-user')\n\n else:\n return render(request, 'postitApp/registration/settings.html', context={'form': form,\n 'following': following,\n 'followed_by': followed_by})\n\n\n@login_required()\ndef logoutUser(request):\n logout(request)\n return redirect('index')\n\n\n\"\"\" This method called when the logged in user wishes to view their own public profile page \"\"\"\n\n\n@login_required()\ndef publicProfile(request, pk):\n context = {}\n return render(request, 'postitApp/profile/public_profile.html', context)\n\n\n\"\"\" This method called when user selects to unfollow another user pk=user_id to unfollow \"\"\"\n\n\n@login_required()\ndef unfollowUser(request, pk):\n try:\n relationsip = UserFollowing.objects.get(user=request.user, following=pk)\n 
relationsip.delete()\n except:\n pass\n return redirect('index')\n\n\n\"\"\" This method called when user selects to follow another user's public profile pk=user_id to follow \"\"\"\n\n\n@login_required()\ndef followUser(request, pk):\n requesting_user = CustomUser.objects.get(pk=request.user.id)\n followed_user = CustomUser.objects.get(pk=pk)\n\n \"\"\" SQL Lite does not enforce unique_together uniqueness, therefore have to \n implement in logic. Try to find the relationhip. If it exists then there's no error, don't save\n if the relationship search does throw an error, then it does not exist. Create the relationship.\"\"\"\n\n try:\n UserFollowing.objects.get(user=requesting_user, following=followed_user)\n print('following already exists')\n # TODO generate error message to template\n except:\n relationship = UserFollowing(user=requesting_user, following=followed_user)\n relationship.save()\n\n return HttpResponse('')\n\n\n\"\"\" This method called if the user performs a like on a post. Check to see if like exists, \n if not then generate instance and save in database. If the like already exists, then unlike\n by deleting the existing entry \"\"\"\n\n\n@login_required()\ndef likePost(request, post_id):\n post_instance = UserPost.objects.get(id=post_id)\n like = LikesTable(user=request.user, post=post_instance)\n \"\"\" see if this instance already exists \"\"\"\n existing_like = LikesTable.objects.filter(user=request.user, post=post_instance)\n if not existing_like:\n \"\"\" Like the post by saving the new instance in the database \"\"\"\n like.save()\n else:\n \"\"\" Unlike the post by deleting the existing database instance \"\"\"\n existing_like.delete()\n\n queryset = UserPost.objects.order_by('-publish_date')\n\n cd = {'lq': (queryIfUserLikedPost(request, queryset))[0],\n 'request_user_liked_post': (queryIfUserLikedPost(request, queryset))[1], 'post': post_instance}\n\n response = render(request, 'postitApp/HTMX/Partials/like_fire_icon.html', cd)\n trigger_client_event(response, 'updated_likes' + str(post_id), {})\n return response\n\n\n\"\"\" HTMX Playground \"\"\"\n\n\n@login_required()\ndef htmxPlay(request):\n form = CustomUserCreationForm()\n emoji_form = YourModelForm()\n posts = request.user.posts.all()\n context = {'form': form, 'posts': posts, 'emoji_form': emoji_form}\n return render(request, 'postitApp/HTMX/htmx_play.html', context)\n\n\n\"\"\" htmx called method to determine if a user email exists \"\"\"\n\n\n@login_required()\n@require_http_methods(['POST'])\ndef checkUsername(request):\n email = request.POST.get('email')\n if CustomUser.objects.filter(email=email).exists():\n return HttpResponse(\"
This email already exists
\")\n else:\n return HttpResponse(\"
Available
\")\n\n\n\"\"\" htmx create a non image user post \"\"\"\n\n\n@login_required()\n@require_http_methods(['POST'])\ndef createnoimagepost(request):\n caption = request.POST.get('captiontext')\n post = UserPost.objects.get_or_create(caption=caption)[0]\n request.user.posts.add(post)\n posts = request.user.posts.all()\n return render(request, 'postitApp/HTMX/user_posts.html', {'posts': posts})\n\n\n\"\"\" htmx delete a non image user post \"\"\"\n\n\n@login_required()\n@require_http_methods(['DELETE'])\ndef deletepost(request, pk):\n # request.user.posts.remove(pk)\n post = UserPost.objects.get(pk=pk)\n post.delete()\n # request.user.posts.remove(post)\n posts = request.user.posts.all()\n return render(request, 'postitApp/HTMX/user_posts.html', {'posts': posts})\n\n\n\"\"\" Dynamic Searchbar for User Search \"\"\"\n\n\n@login_required()\ndef searchuser(request):\n search_text = request.POST.get('search')\n if not search_text:\n results = \"\"\n searchbool = False # Used in order to display 'no results' or not on the template\n else:\n results = CustomUser.objects.filter(username__startswith=search_text)\n searchbool = True\n return render(request, 'postitApp/HTMX/dynamic_user_search_results.html', {'results': results,\n 'searchbool': searchbool})\n\n\n\"\"\" Dogs List - filtered by dogs that don't exist in the user's preferred dogs \"\"\"\n\n\ndef dogsList(request):\n dogs = dog.objects.filter(user_owns=request.user)\n return render(request, 'postitApp/HTMX/DogsSortedList.html', {'dogs': dogs})\n\n\ndef dogsNotPreferredList(request):\n not_preferred = dog.objects.exclude(user_owns=request.user)\n context = {'not_preferred': not_preferred}\n return render(request, 'postitApp/HTMX/DogsNotPreferred.html', context)\n\n\ndef addDog(request, pk):\n dog_instance = get_object_or_404(dog, id=pk)\n if not userDogPreference.objects.filter(user=request.user, dog=dog_instance).exists():\n userDogPreference.objects.create(user=request.user, dog=dog_instance, order=1)\n response = dogsList(request)\n trigger_client_event(response, 'edited_dog_list', {})\n return response\n\n\ndef deleteAllDogs(request):\n userDogPreference.objects.all().delete()\n response = dogsList(request)\n trigger_client_event(response, 'edited_dog_list', {})\n return response\n\n\ndef suggestedUsers(request, number_results):\n context = generateNotFollowingList(request, number_results=number_results)\n\n return render(request, 'postitApp/suggested_following.html', context)\n\n\ndef followAndGetNewSuggestedUser(request, pk):\n response = followUser(request, pk)\n context = generateNotFollowingList(request, number_results=1)\n single_user = {'u': context.pop('not_following').first(), 'fq': context.pop('fq')}\n return render(request, 'postitApp/HTMX/Partials/suggested_user.html', single_user)\n\n\ndef updateLikesDisplayed(request, post_id):\n post = UserPost.objects.get(id=post_id)\n t_dict = {post: likesQuery(request, post)}\n context = {'lq': t_dict, 'post': post}\n return render(request, 'postitApp/HTMX/Partials/likes_count.html', context)\n\n\ndef getCommentsForPost(request, post_id):\n post = UserPost.objects.get(id=post_id)\n comments = post.comments.all().order_by('-created')\n context = {'comments': comments}\n return render(request, 'postitApp/HTMX/Partials/post_comments.html', context)\n\n\n\"\"\" ***************************** 
\"\"\"\n","repo_name":"gregrell/Django2ndProject","sub_path":"EcomBuddy/postitApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3262040882","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect\n\nimport os\nimport json\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\ncsrf = CSRFProtect(app)\nlogin = LoginManager(app)\nlogin.login_view = 'login'\n\nfrom app import routes, models\n\n# CLI\nfrom app.etl import teardown, setup, mongo, users, inferencing\nfrom app.etl import denormalize\nimport click\n\n@app.cli.command()\n@click.option('--tables','-t', multiple=True)\ndef empty_tables(tables=[]):\n teardown.clear_data(tables)\n\n@app.cli.command()\ndef seed():\n setup.load_multivalued_attributes()\n setup.load_many_to_many()\n\n@app.cli.command()\n@click.argument('datafile')\ndef mongo_migration(datafile):\n mongo.load_data(datafile)\n\n@app.cli.command()\ndef rebuild():\n teardown.clear_data()\n users.add_users('data/disa_users.json')\n setup.load_multivalued_attributes()\n setup.load_many_to_one()\n setup.load_many_to_many()\n setup.load_many_to_many_with_attr()\n setup.load_role_relationships()\n mongo.load_data(os.path.join(\n app.config['APP_DIR'], 'data/mongo/entries_01_31.json') )\n inferencing.extract_information()\n\n@app.cli.command()\ndef browse_data():\n with open('app/static/data/denormalized.json','w') as f:\n data = denormalize.json_for_browse()\n json.dump(data, f)\n\n# END CLI\n\n# TEMPLATES\n@app.template_filter('century')\ndef get_century_from_year(yearInt):\n\treturn yearInt // 100\n\n@app.template_filter('decade')\ndef get_decade_from_year(yearInt):\n\treturn yearInt % 100 // 10\n\n@app.template_filter('year')\ndef get_year_from_datetime(yearInt):\n\treturn yearInt % 10","repo_name":"prashleigh/disa","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"26454088464","text":"import requests\t #请求模块\nimport re #正则模块\nimport os\t #文件模块\n\n\nif __name__ == \"__main__\":\n if not os.path.exists('./img'):\n os.mkdir('./img')\n\n headers={\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'\n }\n\n url = 'https://movie.douban.com/top250?start=%d&filter=' \n\n\n for pageNum in range(1,10): #page是按什么方式递增要自己注意\n\n pageStart = (pageNum-1)*25\n\n new_url = format(url%pageStart)\n\n respone = requests.get(url=new_url,headers=headers).text\n\n ex = '
.*?\".*?\"' #正则表达式,不难,为了找出括号里的所有链接\n\n img_list = re.findall(ex,respone,re.S) #re.S是把整个数据看出一个字符串来处理,返回了一个符合条件的所有链接的列表\n for imgSrc in img_list:\n img = requests.get(url=imgSrc,headers=headers).content\n imgName=imgSrc.split('/')[-1] #字符串的split方法可以以某个字符为界,取其之后或之前的所有内容 \n with open('./img/'+imgName,'wb') as fp:\n fp.write(img)\n print(imgName+\"下载成功\")\n\n","repo_name":"chandlerye/spider-set","sub_path":"Python/re_used.py","file_name":"re_used.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7038553474","text":"from .....account.models import User\nfrom ....core.utils import to_global_id_or_none\nfrom ....tests.utils import assert_no_permission, get_graphql_content\n\nMUTATION_CHECKOUT_CUSTOMER_DETACH = \"\"\"\n mutation checkoutCustomerDetach($id: ID) {\n checkoutCustomerDetach(id: $id) {\n checkout {\n token\n }\n errors {\n field\n message\n }\n }\n }\n \"\"\"\n\n\ndef test_checkout_customer_detach(user_api_client, checkout_with_item, customer_user):\n checkout = checkout_with_item\n checkout.user = customer_user\n checkout.save(update_fields=[\"user\"])\n previous_last_change = checkout.last_change\n\n variables = {\"id\": to_global_id_or_none(checkout)}\n\n # Mutation should succeed if the user owns this checkout.\n response = user_api_client.post_graphql(\n MUTATION_CHECKOUT_CUSTOMER_DETACH, variables\n )\n content = get_graphql_content(response)\n data = content[\"data\"][\"checkoutCustomerDetach\"]\n assert not data[\"errors\"]\n checkout.refresh_from_db()\n assert checkout.user is None\n assert checkout.last_change != previous_last_change\n\n # Mutation should fail when user calling it doesn't own the checkout.\n other_user = User.objects.create_user(\"othercustomer@example.com\", \"password\")\n checkout.user = other_user\n checkout.save()\n response = user_api_client.post_graphql(\n MUTATION_CHECKOUT_CUSTOMER_DETACH, variables\n )\n assert_no_permission(response)\n\n\ndef test_checkout_customer_detach_by_app(\n app_api_client, checkout_with_item, customer_user, permission_impersonate_user\n):\n checkout = checkout_with_item\n checkout.user = customer_user\n checkout.save(update_fields=[\"user\"])\n previous_last_change = checkout.last_change\n\n variables = {\"id\": to_global_id_or_none(checkout)}\n\n # Mutation should succeed if the user owns this checkout.\n response = app_api_client.post_graphql(\n MUTATION_CHECKOUT_CUSTOMER_DETACH,\n variables,\n permissions=[permission_impersonate_user],\n )\n content = get_graphql_content(response)\n data = content[\"data\"][\"checkoutCustomerDetach\"]\n assert not data[\"errors\"]\n checkout.refresh_from_db()\n assert checkout.user is None\n assert checkout.last_change != previous_last_change\n\n\ndef test_checkout_customer_detach_by_app_without_permissions(\n app_api_client, checkout_with_item, customer_user\n):\n checkout = checkout_with_item\n checkout.user = customer_user\n checkout.save(update_fields=[\"user\"])\n previous_last_change = checkout.last_change\n\n variables = {\"id\": to_global_id_or_none(checkout)}\n\n # Mutation should succeed if the user owns this checkout.\n response = app_api_client.post_graphql(MUTATION_CHECKOUT_CUSTOMER_DETACH, variables)\n\n assert_no_permission(response)\n checkout.refresh_from_db()\n assert checkout.last_change == previous_last_change\n\n\ndef test_with_active_problems_flow(user_api_client, checkout_with_problems):\n # given\n checkout_with_problems.user = user_api_client.user\n 
checkout_with_problems.save(update_fields=[\"user\"])\n    channel = checkout_with_problems.channel\n    channel.use_legacy_error_flow_for_checkout = False\n    channel.save(update_fields=[\"use_legacy_error_flow_for_checkout\"])\n\n    variables = {\"id\": to_global_id_or_none(checkout_with_problems)}\n\n    response = user_api_client.post_graphql(\n        MUTATION_CHECKOUT_CUSTOMER_DETACH, variables\n    )\n    content = get_graphql_content(response)\n\n    # then\n    assert not content[\"data\"][\"checkoutCustomerDetach\"][\"errors\"]\n","repo_name":"saleor/saleor","sub_path":"saleor/graphql/checkout/tests/mutations/test_checkout_customer_detach.py","file_name":"test_checkout_customer_detach.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"15924021664","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, Http404\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom .forms import *\n\n\n@login_required(login_url='login')\ndef receiving_data(request):\n    \"\"\"Page for entering the lecturer's data\"\"\"\n    if request.method == 'GET':\n        form = LectureForm()\n        return render(request, 'lecturer/receiving_data.html', context={'form': form})\n    elif request.method == 'POST':\n        bound_form = LectureForm(request.POST)\n        if bound_form.is_valid():\n            bound_form.save()\n            id = Lecture.objects.last().id\n            return redirect('qr_generator_url', id)  # Redirect to the QR code for this id once the record is added\n\n\ndef qr_generator(request, id):\n    \"\"\"QR code generator\"\"\"\n    try:\n        if Lecture.objects.get(id__iexact=id):\n            response = render(\n                request,\n                'lecturer/QR.html',\n                context={\n                    'link': request.build_absolute_uri('check_your_self/')\n                }\n            )\n    except Lecture.DoesNotExist:\n        raise Http404\n    return response\n\n\ndef check(request, id):\n    \"\"\"Generate the QR-code link to the template for selecting\n    students for the given lecture id, then create a record\n    for the student who is present\"\"\"\n    if 'name' in request.COOKIES:\n        return HttpResponse('You have already checked in today!')\n    if request.method == 'GET':\n        # If a lecture with this id exists, generate the QR code, otherwise 404\n        try:\n            if Lecture.objects.get(id__iexact=id):\n                context = Student.objects.all()\n                response = render(request, 'lecturer/check_your_self.html', context={'name': context, 'id': id})\n        except Lecture.DoesNotExist:\n            raise Http404\n        return response\n    elif request.method == 'POST':\n        # Cap record creation at the students_count set by the lecturer\n        if Lecture.objects.get(id=id).student.count() < Lecture.objects.get(id=id).students_count:\n            # Do not create a record if this student has already checked in\n            stud = Student.objects.get(name=request.POST['Student'])\n            if Student.objects.get(id=stud.pk) in Lecture.objects.get(id=id).student.all():\n                return HttpResponse('This student has already been marked!')\n            else:\n                Lecture.objects.get(id=id).student.add(Student.objects.get(id=stud.pk))\n                response = HttpResponse('Success!')\n                response.set_cookie('name', max_age=30)  # max_age=86400 is one day\n                return response\n        else:\n            return HttpResponse('The limit of lecture attendees has been exceeded!')\n\n\ndef register(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('login')\n        else:\n            return redirect('registration_url')\n    else:\n        form = UserCreationForm()\n    context = {'form': 
form}\n    return render(request, 'registration/registration.html', context=context)\n\n\ndef logout_view(request):\n    if request.user.is_authenticated:\n        logout(request)\n        return render(request, 'registration/logout.html')\n    else:\n        return redirect('login')\n\n\ndef lecture(request):\n    return render(request, 'lecturer/lecturer.html', {\n        'lectures': Lecture.objects.all(),\n    })\n\n\ndef student(request):\n    return render(request, 'lecturer/students.html', {\n        'students': Student.objects.all(),\n    })\n","repo_name":"Layts/QR_code","sub_path":"lecturer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23083424243","text":"# TO CALCULATE EUCLIDEAN DISTANCE BETWEEN TWO DATA SETS AND MAKE A PLOT\r\n# AUTHOR: QI LIU\r\n# DATE: 07/02/2022\r\n\r\n# TODO extract Correlation coefficient between two data sets\r\n# TODO 3D PLOT X ENERGY, Y ARRAY RATIO, Z\r\nimport nextnanopy as nn\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef euclidean_dist(array1, array2):\r\n    # accumulates squared element-wise differences, then takes the square root\r\n    # (equivalent to np.linalg.norm(array1 - array2) for same-sized arrays)\r\n    dummy_array = []\r\n    if array1.size != array2.size:\r\n        return False\r\n    for i in range(len(array1)):\r\n        dist_sqr = (array1[i] - array2[i]) ** 2\r\n        dummy_array = np.append(dummy_array, dist_sqr)\r\n    dist = np.sqrt(np.sum(dummy_array))\r\n    return dist\r\n\r\n\r\n# edit input file\r\nmy_input = nn.InputFile(r'F:\\NextNano Data\\TLL\\C2617_adoptedyiqing.in')\r\nBarrier_factor = 10  # import from input file\r\n\r\nC2617_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n                         r'Cambridge\\Documents\\nextnano\\Output\\C2617_adoptedyiqing(30)'\r\n                         r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0938_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n                         r'Cambridge\\Documents\\nextnano\\Output\\W0938full(1)'\r\n                         r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0939_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n                         r'Cambridge\\Documents\\nextnano\\Output\\W0939full(4)'\r\n                         r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0940_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n                         r'Cambridge\\Documents\\nextnano\\Output\\W0940full'\r\n                         r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0941_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n                         r'Cambridge\\Documents\\nextnano\\Output\\W0941full'\r\n                         r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\n\r\nC2617 = C2617_file.variables['1->2'].value\r\nW0938 = W0938_file.variables['1->2'].value\r\nW0939 = W0939_file.variables['1->2'].value\r\nW0940 = W0940_file.variables['1->2'].value\r\nW0941 = W0941_file.variables['1->2'].value\r\n\r\nvar1_name = 'alloy1_x'\r\n# my_input.set_variable(var1_name, value=var)\r\n# Remember to delete duplicate files after this step\r\n# Sweep variable\r\ndist_array_C2617 = []\r\ndist_array_W0938 = []\r\ndist_array_W0939 = []\r\ndist_array_W0940 = []\r\ndist_array_W0941 = []\r\n\r\nlimit = np.linspace(0, 1, 51)\r\nfor var in limit:  # remember the 3rd argument needs a +1\r\n    my_input.set_variable(var1_name, value=var)\r\n    var1 = my_input.variables[var1_name].value\r\n    my_input.save(r'F:\\NextNano Data\\TLL'\r\n                  r'\\C2617_f={factor}_{var1}.in'.format(factor=Barrier_factor, var1=var1_name + '=' + str(var1)),\r\n                  
\r\n\r\n# edit input file\r\nmy_input = nn.InputFile(r'F:\\NextNano Data\\TLL\\C2617_adoptedyiqing.in')\r\nBarrier_factor = 10 # import from input file\r\n\r\nC2617_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n r'Cambridge\\Documents\\nextnano\\Output\\C2617_adoptedyiqing(30)'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0938_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n r'Cambridge\\Documents\\nextnano\\Output\\W0938full(1)'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0939_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n r'Cambridge\\Documents\\nextnano\\Output\\W0939full(4)'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0940_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n r'Cambridge\\Documents\\nextnano\\Output\\W0940full'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\nW0941_file = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of '\r\n r'Cambridge\\Documents\\nextnano\\Output\\W0941full'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat', product='nextnano++')\r\n\r\nC2617 = C2617_file.variables['1->2'].value\r\nW0938 = W0938_file.variables['1->2'].value\r\nW0939 = W0939_file.variables['1->2'].value\r\nW0940 = W0940_file.variables['1->2'].value\r\nW0941 = W0941_file.variables['1->2'].value\r\n\r\nvar1_name = 'alloy1_x'\r\n# my_input.set_variable(var1_name, value=var)\r\n# Remember to delete duplicate files after this step\r\n# Sweep variable\r\ndist_array_C2617 = []\r\ndist_array_W0938 = []\r\ndist_array_W0939 = []\r\ndist_array_W0940 = []\r\ndist_array_W0941 = []\r\n\r\nlimit = np.linspace(0, 1, 51)\r\nfor var in limit: # remember the 3rd argument needs to +1\r\n my_input.set_variable(var1_name, value=var)\r\n var1 = my_input.variables[var1_name].value\r\n my_input.save(r'F:\\NextNano Data\\TLL'\r\n r'\\C2617_f={factor}_{var1}.in'.format(factor=Barrier_factor, var1=var1_name + '=' + str(var1)),\r\n automkdir=False,\r\n overwrite=True)\r\n my_input.execute()\r\n time.sleep(0.5)\r\n print(f\"New variable: {my_input.get_variable('{inputvar}'.format(inputvar=var1_name)).text}\")\r\n output = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of Cambridge'\r\n r'\\Documents\\nextnano\\Output\\C2617_f={factor}_{var1}'\r\n r'\\bias_00000\\transmission_cbr_Gamma1.dat'\r\n .format(factor=Barrier_factor, var1=var1_name + '=' + str(var1)), product='nextnano++')\r\n output_trans = output.variables['1->2'].value\r\n dist_C2617 = euclidean_dist(C2617, output_trans)\r\n dist_W0938 = euclidean_dist(W0938, output_trans)\r\n dist_W0939 = euclidean_dist(W0939, output_trans)\r\n dist_W0940 = euclidean_dist(W0940, output_trans)\r\n dist_W0941 = euclidean_dist(W0941, output_trans)\r\n dist_array_C2617 = np.append(dist_array_C2617, dist_C2617)\r\n dist_array_W0938 = np.append(dist_array_W0938, dist_W0938)\r\n dist_array_W0939 = np.append(dist_array_W0939, dist_W0939)\r\n dist_array_W0940 = np.append(dist_array_W0940, dist_W0940)\r\n dist_array_W0941 = np.append(dist_array_W0941, dist_W0941)\r\n\r\nfig, axs = plt.subplots(2, 3)\r\naxs[0, 0].plot(limit, dist_array_C2617)\r\naxs[0, 0].set_title(r'R to C2617')\r\naxs[0, 1].plot(limit, dist_array_W0940)\r\naxs[0, 1].set_title(r'R to W0940')\r\naxs[0, 2].plot(limit, dist_array_W0941)\r\naxs[0, 2].set_title(r'R to W0941')\r\naxs[1, 0].plot(limit, dist_array_W0938)\r\naxs[1, 0].set_title(r'R to W0938')\r\naxs[1, 1].plot(limit, dist_array_W0939)\r\naxs[1, 1].set_title(r'R to W0939')\r\n\r\nfig.text(0.55, 0.0015, 'Alloy Ratio', ha='center')\r\nfig.text(0.0015, 0.5, 'Euclidean Dist R', va='center', rotation='vertical')\r\nfig.suptitle('Barrier factor = {fac}'.format(fac=Barrier_factor))\r\n# plt.title('Barrier factor = 20')\r\nplt.tight_layout()\r\nfig.savefig(r'E:\\OneDrive--University of Cambridge'\r\n r'\\OneDrive - University of Cambridge\\Desktop\\Research\\MBE Wafer Growth'\r\n r'\\Python figs\\RVsAlloy_f={factor}2.png'.format(factor=Barrier_factor))\r\nplt.show()\r\n\r\n## contour plot\r\n\r\n# print(C2617_file.coords)\r\n# C2617 = C2617_file.variables['1->2'].value\r\n# x = C2617_file.coords['Energy']\r\n# var1_name = 'alloy1_x'\r\n# # my_input.set_variable(var1_name, value=var)\r\n# # Remember to delete duplicate files after this step\r\n# # Sweep variable\r\n# dist_array = []\r\n#\r\n# limit = np.linspace(0.46, 0.47, 6)\r\n# trans_stack = np.zeros((limit.size, C2617.size))\r\n# count = 0\r\n# for var in limit: # remember the 3rd argument needs to +1\r\n# my_input.set_variable(var1_name, value=var)\r\n# var1 = my_input.variables[var1_name].value\r\n# my_input.save(r'F:\\NextNano Data\\TLL'\r\n# r'\\C2617_adoptedyiqing_{var1}.in'.format(var1=var1_name + '=' + str(var1)), automkdir=False,\r\n# overwrite=True)\r\n# my_input.execute()\r\n# time.sleep(0.5)\r\n# print(f\"New variable: {my_input.get_variable('{inputvar}'.format(inputvar=var1_name)).text}\")\r\n# output = nn.DataFile(r'E:\\OneDrive--University of Cambridge\\OneDrive - University of Cambridge'\r\n# r'\\Documents\\nextnano\\Output\\C2617_adoptedyiqing_{var1}'\r\n# r'\\bias_00000\\transmission_cbr_Gamma1.dat'\r\n# .format(var1=var1_name + '=' + str(var1)), product='nextnano++')\r\n# output_trans = output.variables['1->2'].value\r\n# trans_stack[count, :] = output_trans\r\n# dist0 = euclidean_dist(C2617, output_trans)\r\n# dist_array = np.append(dist_array, dist0)\r\n# count += 1\r\n#\r\n# print(C2617.size, x.size, trans_stack.size)\r\n# plt.plot(limit, dist_array)\r\n# plt.xlabel('Alloy ratio')\r\n# plt.ylabel('R')\r\n# plt.show()\r\n","repo_name":"Kevin-QiLiu/TLL_Python","sub_path":"Wafer_simulation/Correlation.py","file_name":"Correlation.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
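# Editor's note on the script above: its first TODO asks for a correlation
# coefficient between two transmission curves. A minimal sketch with NumPy
# (assumes equal-length 1-D arrays such as C2617 and output_trans):
import numpy as np

def pearson_r(array1, array2):
    # np.corrcoef returns the 2x2 correlation matrix; the off-diagonal
    # element is Pearson's r between the two curves.
    return float(np.corrcoef(array1, array2)[0, 1])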
{"seq_id":"34692471697","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n# read the csv file\ndata = pd.read_csv(\"data.csv\")\n# separate the attributes and the class label\nx = data.drop([\"hasil\"], axis=1)\ny = data[\"hasil\"]\n\n# split into training and testing data\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.20, random_state=1)\n\n# initialize KNN\nknn = KNeighborsClassifier(n_neighbors=3)\nknn.fit(x_train, y_train)\n\n# predictions on the training and testing data\nknn_pred_tr = knn.predict(x_train)\nknn_pred_te = knn.predict(x_test)\n\n# creating a confusion matrix\ncm = confusion_matrix(y_test, knn_pred_te)\nprint(cm)\n\n# compute the training accuracy\nprint('----- Evaluation on Training Data -----')\nscore_tr = knn.score(x_train, y_train)\nprint('Accuracy Score: ', score_tr)\n# classification report on the training data\nprint(classification_report(y_train, knn_pred_tr))\nprint('--------------------------------------------------------')\nprint('----- Evaluation on Test Data -----')\nscore_te = knn.score(x_test, y_test)\nprint('Accuracy Score: ', score_te)\n# Look at classification report to evaluate the model\nprint(classification_report(y_test, knn_pred_te))\nprint('--------------------------------------------------------')\n\n","repo_name":"zainurroziqin/pythonProject","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
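# Editor's sketch (not in the original): the script fixes k=3 up front; a
# 5-fold cross-validation over a small grid is a quick sanity check of that
# choice. It reuses x and y as loaded from data.csv above.
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

for k in (1, 3, 5, 7, 9):
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), x, y, cv=5)
    print(k, round(scores.mean(), 3))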
{"seq_id":"4374775591","text":"class Node:\r\n def __init__(self, value):\r\n self.data = value\r\n self.next = None\r\n\r\n\r\nclass Solution(object):\r\n def mergeTwoLists(self, list1, list2):\r\n # concatenates two plain Python lists and sorts the result;\r\n # the Node class above is never used\r\n list3 = list1 + list2\r\n list3 = list(list3)\r\n list3.sort()\r\n return list3\r\n\r\n \r\na = Solution()\r\nlist1 = [1, 2, 4]\r\nlist2 = [1, 3, 4]\r\nprint(a.mergeTwoLists(list1, list2))\r\n\r\n","repo_name":"priyanshu-73/DSA","sub_path":"merge_two_list.py","file_name":"merge_two_list.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"4928130109","text":"# import all gem5's objects\r\nimport m5\r\n# import all SimObjects\r\nfrom m5.objects import *\r\nfrom caches import *\r\n\r\nfrom optparse import OptionParser\r\n\r\nparser = OptionParser()\r\nparser.add_option('--l1i_size', help=\"L1 instruction cache size\")\r\nparser.add_option('--l1d_size', help=\"L1 data cache size\")\r\nparser.add_option('--l2_size', help=\"Unified L2 cache size\")\r\n\r\n(options, args) = parser.parse_args()\r\n\r\n# instantiate a system (a Python class wrapper for System c++ SimObjects)\r\nsystem = System()\r\n\r\n# Initialize a clock and voltage domain\r\n# clk_domain is a parameter of the System SimObject\r\nsystem.clk_domain = SrcClockDomain()\r\nsystem.clk_domain.clock = '1GHz'\r\n\r\n# gem5 is smart enough to automatically convert units\r\nsystem.clk_domain.voltage_domain = VoltageDomain()\r\n\r\n# Set up the memory system\r\nsystem.mem_mode = 'timing'\r\n\r\n# All systems need memory\r\nsystem.mem_ranges = [AddrRange('512MB')]\r\n\r\n# Create a CPU\r\nsystem.cpu = TimingSimpleCPU()\r\n\r\n# Need a memory bus\r\nsystem.membus = SystemXBar()\r\n\r\n# Hook up the CPU\r\nsystem.cpu.icache = L1ICache(options)\r\nsystem.cpu.dcache = L1DCache(options)\r\nsystem.cpu.icache.connectCPU(system.cpu)\r\nsystem.cpu.dcache.connectCPU(system.cpu)\r\n\r\n# Helper function to connect the L1 caches to the L2 bus\r\nsystem.l2bus = L2XBar()\r\nsystem.cpu.icache.connectBus(system.l2bus)\r\nsystem.cpu.dcache.connectBus(system.l2bus)\r\n\r\n# Create our L2 cache and connect it to the L2 bus and the memory bus\r\nsystem.l2cache = L2Cache(options)\r\nsystem.l2cache.connectCPUSideBus(system.l2bus)\r\nsystem.l2cache.connectMemSideBus(system.membus)\r\n\r\n# Interrupt controller wiring that x86 requires\r\nsystem.cpu.createInterruptController()\r\nsystem.cpu.interrupts[0].pio = system.membus.master\r\nsystem.cpu.interrupts[0].int_master = system.membus.slave\r\nsystem.cpu.interrupts[0].int_slave = system.membus.master\r\n\r\nsystem.system_port = system.membus.slave\r\n\r\n# Finally, let's make the memory controller\r\nsystem.mem_ctrl = DDR3_1600_8x8()\r\n\r\n# Set up physical memory ranges\r\nsystem.mem_ctrl.range = system.mem_ranges[0]\r\n\r\n# connect memory to bus\r\nsystem.mem_ctrl.port = system.membus.master\r\n\r\n# Tell the system what we want to do.\r\nclass Cholesky(Process):\r\n cwd = '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/kernels/cholesky'\r\n executable = '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/kernels/cholesky/CHOLESKY'\r\n cmd = ['CHOLESKY', '-p' + str(1),\r\n '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/kernels/cholesky/inputs/tk23.O']\r\nclass Raytrace(Process):\r\n executable = '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/apps/raytrace/RAYTRACE'\r\n cmd = ['RAYTRACE', '-p' + str(1),\r\n '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/apps/raytrace/inputs/balls4.env']\r\n cwd = '/home/useless2020/Documents/Development/gem5/benchmark/splash2/codes' + '/apps/raytrace'\r\n\r\nprocess = Process()\r\nprocess.cmd = ['tests/test-progs/hello/bin/x86/linux/hello']\r\n# system.cpu.workload = Cholesky()\r\nsystem.cpu.createThreads()\r\n\r\n# Create a root object\r\nroot = Root(full_system = False, system = system)\r\nroot.workload = Raytrace()\r\nsystem.cpu.workload = root.workload\r\n# Instantiate all of the c++\r\nm5.instantiate()\r\n\r\n# Ready to run!\r\nexit_event = m5.simulate()\r\n\r\nprint(\"Exiting\")\r\n","repo_name":"VatsalSin/Gem5_project","sub_path":"configs/test/two_level.py","file_name":"two_level.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
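# Editor's sketch: two_level.py above imports L1ICache/L1DCache/L2Cache and
# their connect* helpers from a local caches.py that is not shown here. In the
# "learning gem5" tutorial this configuration follows, one of those classes
# looks roughly like the following (parameter values are illustrative
# assumptions, not this repo's):
from m5.objects import Cache

class L1ICache(Cache):
    assoc = 2
    tag_latency = 2
    data_latency = 2
    response_latency = 2
    mshrs = 4
    tgts_per_mshr = 20
    size = '16kB'

    def connectCPU(self, cpu):
        # hook this cache up to the CPU-side instruction port
        self.cpu_side = cpu.icache_port

    def connectBus(self, bus):
        # and to the shared L2 crossbar on the memory side
        self.mem_side = bus.slave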
{"seq_id":"21655217304","text":"#!/usr/bin/python\n\nimport os\n\npath = raw_input('Enter path to folder: ')\n\nlistOfFiles = os.listdir(path)\n\ncountOfFiles = len(listOfFiles)\n\nos.chdir(path)\n\nfor i in range(0, countOfFiles):\n # os.path.join avoids breaking when the entered path lacks a trailing separator\n os.rename(os.path.join(path, listOfFiles[i]), str(i+1)+'.csv')","repo_name":"sharaalfa/iam","sub_path":"src/main/python/sharafutdinov/artur/iam/renamer.py","file_name":"renamer.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5731275267","text":"import mindspore as ms\nimport mindspore.nn as nn\nimport mindspore.ops as ops\n\n\nclass TransD(nn.Cell):\n def __init__(self, n_entity, n_relation, n_entity_dim, n_relation_dim, margin=1.0, norm=1):\n super(TransD, self).__init__()\n self.n_entity = n_entity # number of entities in the dataset [train, validation and test sets]\n self.n_relation = n_relation # number of relation types in the dataset [train, validation and test sets]\n self.norm = norm # norm used in the loss function\n self.margin = margin # margin parameter of the algorithm\n\n self.n_entity_dim = n_entity_dim # entity embedding dimension\n self.n_relation_dim = n_relation_dim # relation embedding dimension; only the case equal to the entity dimension is considered\n\n self.normalizer = ops.L2Normalize(axis=-1) # normalizer\n self.abs = ops.Abs()\n self.maximum = ops.Maximum()\n self.square = ops.Square()\n uniformreal = ops.UniformReal(seed=1)\n # entity embeddings\n self.entities_emb = ms.Parameter(self.normalizer(uniformreal((n_entity, n_entity_dim))), name='entities_emb')\n # relation embeddings\n self.relations_emb = ms.Parameter(self.normalizer(uniformreal((n_relation, n_relation_dim))), name=\"relations_emb\")\n # entity projection vectors\n self.entities_proj = ms.Parameter(self.normalizer(uniformreal((n_entity, n_entity_dim))), name=\"entities_proj\")\n # relation projection vectors\n self.relations_proj = ms.Parameter(self.normalizer(uniformreal((n_relation, n_relation_dim))), name=\"relations_proj\")\n\n \n def construct(self, pos_triple, neg_triple):\n \"\"\"\n pos_triple: ms.Tensor : shape=(batch_size, 3, n_dim)\n neg_triple: ms.Tensor : shape=(batch_size, 3, n_dim)\n \"\"\"\n # normalize\n self.entities_emb.set_data(self.normalizer(self.entities_emb))\n self.relations_emb.set_data(self.normalizer(self.relations_emb))\n self.entities_proj.set_data(self.normalizer(self.entities_proj))\n self.relations_proj.set_data(self.normalizer(self.relations_proj))\n\n # fetch the embedding vectors of the positive and negative samples\n pos_head, pos_relation, pos_tail = self.embed(pos_triple) # shape = (batch_size, n_dim)\n neg_head, neg_relation, neg_tail = self.embed(neg_triple)\n \n # compute distances\n pos_distance = self.get_distance(pos_head, pos_relation, pos_tail, self.norm) # shape = (batch_size)\n neg_distance = self.get_distance(neg_head, neg_relation, neg_tail, self.norm)\n \n # compute the loss\n loss = self.maximum(0, pos_distance - neg_distance + self.margin).sum() # sum over all losses\n return loss\n \n def embed(self, triple):\n \"\"\"Fetch the embedding vectors\"\"\"\n head = self.entities_emb[triple[:, 0]]\n relation = self.relations_emb[triple[:, 1]]\n tail = self.entities_emb[triple[:, 2]]\n\n # project head and tail\n head_proj = self.entities_proj[triple[:, 0]] # fetch the projection vectors\n relation_proj = self.relations_proj[triple[:, 1]]\n tail_proj = self.entities_proj[triple[:, 2]]\n head = self._project(head, head_proj, relation_proj) # projection result\n tail = self._project(tail, tail_proj, relation_proj)\n\n return head, relation, tail \n\n\n def _project(self, entity_emb, entity_proj, relation_proj):\n \"\"\"Project the entity vector\n entity_emb: ms.Tensor : shape=(batch_size, n_entity_dim)\n entity_proj: ms.Tensor : shape=(batch_size, n_entity_dim)\n relation_proj: ms.Tensor : shape=(batch_size, n_relation_dim)\n return: ms.Tensor : shape=(batch_size, n_relation_dim==n_entity_dim)\n \"\"\"\n # assert self.n_entity_dim == self.n_relation_dim # only equal embedding sizes are considered\n return entity_emb + ops.batch_dot(entity_emb, entity_proj) * relation_proj\n\n\n def get_distance(self, head, relation, tail, norm=1):\n \"\"\"Compute the distance\n head: ms.Tensor : shape=(batch_size, n_dim)\n relation: ms.Tensor : shape=(batch_size, n_dim)\n tail: ms.Tensor : shape=(batch_size, n_dim)\n return: ms.Tensor : shape=(batch_size)\n \"\"\"\n if norm == 1:\n return self.abs(head + relation - tail).sum(axis=1) # L1 distance\n return self.square(head + relation - tail).sum(axis=1) # L2 distance\n\n","repo_name":"forcekeng/TransX","sub_path":"src/transD.py","file_name":"transD.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
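# Editor's smoke test for the TransD cell above (not from the source repo;
# sizes and random ids are made up, chosen so every index stays in range):
import numpy as np
import mindspore as ms

model = TransD(n_entity=100, n_relation=20, n_entity_dim=16, n_relation_dim=16)
pos = ms.Tensor(np.random.randint(0, 20, size=(8, 3)), ms.int32)
neg = ms.Tensor(np.random.randint(0, 20, size=(8, 3)), ms.int32)
loss = model(pos, neg)  # scalar margin-ranking loss over the batch
print(loss)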
{"seq_id":"33889046642","text":"#coding: utf-8\n\nimport random\nfrom ui import Point\nfrom vpoint import VPoint as V\nfrom objects import *\n\nclass Track():\n active = True\n \nclass Open_Range(Track):\n name = 'Open Range'\n description = 'is just open space - nothing between you and the race buoys, and nothing to blame if you lose.'\n \n ship_distance = 1000\n buoy_area_radius = 800\n number_of_buoys = 5\n \n @staticmethod\n def set_ship_starting_vectors(game):\n angle_gap = 140/len(game.players)\n game.current_turn = {}\n for i, player in enumerate(game.players_in_order):\n ship = player.ship\n ship_vector = V(Point(Open_Range.ship_distance, 0))\n ship_vector.degrees = 20+i*angle_gap\n ship.center = ship_vector\n target_vector = V(Point(0,0) - ship_vector)\n target_vector.magnitude = 100\n ship.rotation = target_vector.degrees\n ship.velocity = target_vector\n game.current_turn[player.id] = {\n 'action': 'move',\n 'data': {\n 'velocity': target_vector,\n 'thrust': target_vector,\n }\n }\n \n @staticmethod\n def get_buoys():\n return [Buoy(i+1) for i in range(Open_Range.number_of_buoys)]\n \n @staticmethod\n def set_buoy_positions(buoys, game):\n # First always in the same position\n buoy = buoys[0]\n buoy_position = Vector(100,0)\n buoy_position.degrees = 270\n buoy.position = tuple(buoy_position)\n # Rest in a spread\n buoy_gap = 140/(2*Open_Range.number_of_buoys)\n buoy_vectors = []\n for i in range(2*Open_Range.number_of_buoys):\n buoy_vector = Vector(200+random.randint(0, Open_Range.buoy_area_radius), 0)\n buoy_vector.degrees = 20+i*buoy_gap\n buoy_vectors.append(buoy_vector)\n random_vectors = random.sample(buoy_vectors, len(buoys))\n for i, buoy in enumerate(buoys[1:]):\n buoy.position = tuple(random_vectors[i])\n \nclass MoonRace(Track):\n name = 'Once In A Blue Moon'\n description = 'is exactly how many tries you have when you fly around the Blue Moon – rare, beautiful, powerful, dangerous.'\n active = False\n \nclass AsteroidField(Track):\n name = 'Cosmic Billiards'\n description = 'has bouncing obstacles, providing just that little bit of extra challenge to seasoned pilots.'\n active = False\n","repo_name":"mikaelho/vod","sub_path":"tracks.py","file_name":"tracks.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"42007816480","text":"# coding: utf-8\nimport sys\nimport os\nfrom math import sqrt\nfrom math import ceil\nimport numpy as np\nfrom PIL import Image\nfrom dataset.mnist.mnist import load_mnist\n\n\nsys.path.append(os.pardir) # to access parent dir\n\n\ndef img_show(img, num_img_show=100):\n grid_size = ceil(sqrt(num_img_show))\n pil_img_size = 28 * grid_size\n pil_img_show = Image.new(mode='P', size=(pil_img_size, pil_img_size), color=255)\n\n count_col = 0\n count_row = 0\n for idx in range(0, num_img_show):\n if idx >= img.shape[0]: # stop once the dataset runs out of images\n break\n\n img_tmp = img[idx]\n img_tmp = img_tmp.reshape(28, 28)\n pil_img_tmp = Image.fromarray(np.uint8(img_tmp))\n\n offset = (28*count_col, 28*count_row)\n pil_img_show.paste(pil_img_tmp, offset)\n\n if count_col < grid_size-1:\n count_col += 1\n else:\n count_col = 0\n count_row += 1\n\n pil_img_show.show()\n\n\nif __name__ == '__main__':\n (train_img, train_label), (test_img, test_label) = load_mnist()\n img_show(train_img, num_img_show=100)\n\n\n\n","repo_name":"carlsj/deep-learning-study","sub_path":"dataset/mnist/mnist_show.py","file_name":"mnist_show.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
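# Editor's sketch of the same mosaic without per-tile pasting (assumes
# num_img_show is a perfect square and img has shape (N, 784), as the
# flattened MNIST loader above provides):
import numpy as np
from PIL import Image

def img_show_numpy(img, num_img_show=100):
    side = int(num_img_show ** 0.5)
    tiles = img[:num_img_show].reshape(side, side, 28, 28)
    # interleave the row/column axes so the tiles land in a (side*28)^2 square
    mosaic = tiles.transpose(0, 2, 1, 3).reshape(side * 28, side * 28)
    Image.fromarray(np.uint8(mosaic)).show()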
{"seq_id":"18524372034","text":"# Preprocess.py\n\nimport cv2\nimport numpy as np\n\n# module level variables ##########################################################################\nCAL_VAL = np.loadtxt(\"calibrated_value.txt\")\n# reuse the values already loaded above instead of reading the file twice\n(w, h, rotationx, rotationy, rotationz, panX, panY, stretchX, dist, G_S_F_W, G_S_F_H, A_T_B, A_T_W, T_V, Xtrans,\n Ytrans) = CAL_VAL\nGAUSSIAN_SMOOTH_FILTER_SIZE = (int(G_S_F_W), int(G_S_F_H)) # last best = 3,3\nADAPTIVE_THRESH_BLOCK_SIZE = int(A_T_B) # 19 , last best = 19\nADAPTIVE_THRESH_WEIGHT = int(A_T_W) # 9, last best = 11\nTHRESHOLD_VALUE = int(T_V)\n\n\n##\n###################################################################################################\ndef preprocess(imgOriginal):\n # coba = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)\n # cv2.imshow(\"hsv\", coba )\n imgGrayscale = extractValue(imgOriginal)\n # cv2.imshow(\"imgGrayscale\", imgGrayscale )\n\n imgGrayscale = np.invert(imgGrayscale) # last best use this\n # cv2.imshow(\"invert\", imgGrayscale )\n imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)\n # cv2.imshow(\"imgMaxContrastGrayscale\", imgMaxContrastGrayscale )\n # imgMaxContrastGrayscale = np.invert(imgMaxContrastGrayscale)\n height, width = imgGrayscale.shape\n\n imgBlurred = np.zeros((height, width, 1), np.uint8)\n # cv2.imshow(\"c_3\", imgBlurred )\n\n imgBlurred = cv2.GaussianBlur(imgMaxContrastGrayscale, GAUSSIAN_SMOOTH_FILTER_SIZE, 0)\n # cv2.imshow(\"imgBlurred\", imgBlurred )\n # imgBlurred = np.invert(imgBlurred)\n imgThresh = cv2.adaptiveThreshold(imgBlurred, THRESHOLD_VALUE, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY_INV, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)\n # imgThresh = np.invert(imgThresh)\n # cv2.imshow(\"cobaaa\", imgThresh)\n\n return imgGrayscale, imgThresh\n\n\n# end function\n\n###################################################################################################\ndef extractValue(imgOriginal):\n height, width, numChannels = imgOriginal.shape\n\n imgHSV = np.zeros((height, width, 3), np.uint8)\n\n imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)\n\n imgHue, imgSaturation, imgValue = cv2.split(imgHSV)\n\n return imgValue\n\n\n# end function\n\n###################################################################################################\ndef maximizeContrast(imgGrayscale):\n height, width = imgGrayscale.shape\n\n imgTopHat = np.zeros((height, width, 1), np.uint8)\n imgBlackHat = np.zeros((height, width, 1), np.uint8)\n\n structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n\n imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)\n imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)\n\n imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)\n imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)\n\n return imgGrayscalePlusTopHatMinusBlackHat\n# end function\n","repo_name":"muchlisinadi/ALPR-Indonesia","sub_path":"Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"}
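# Editor's usage sketch for the module above (file names are placeholders;
# preprocess() returns the grayscale image and its adaptive threshold):
import cv2

img = cv2.imread("plate.png")
imgGrayscale, imgThresh = preprocess(img)
cv2.imwrite("plate_gray.png", imgGrayscale)
cv2.imwrite("plate_thresh.png", imgThresh)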
function\n","repo_name":"muchlisinadi/ALPR-Indonesia","sub_path":"Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"} +{"seq_id":"74422037012","text":"# CH07_07: A baby sitter charges $2.50/hr until 9 pm and $1.75/hr after. Write a program that accepts a starting and\n# ending time in hours and minutes then calculates the total bill.\n# Partial hours are appropriately prorated\n#-----------------------------------------------------------------------------------------------------------------------\n\nimport math\n\ndef main():\n\n start = (input(\"Enter the starting time using 24:00 hour notation: \"))\n finish = (input(\"Enter the finishing time using 24:00 hour notation: \"))\n Shrs, Smins = start.split(\":\")\n Fhrs, Fmins = finish.split(\":\")\n Shrs, Smins, Fhrs, Fmins = float(Shrs), float(Smins), float(Fhrs), float(Fmins)\n\n if Fhrs + (Fmins/60) > 21:\n reg = ((21 - Shrs) + (round(Smins / 60))) * 2.5\n low = ((Fhrs - 21) + (round(Fmins / 60))) * 1.75\n total = str(round((reg + low),2))\n print(\"The post-9pm total bill comes to: ${0}\\n\".format(total))\n\n else:\n bill = (Fhrs - Shrs + abs(round((Fmins - Smins) / 60,2))) * 2.5\n str(round(bill,2))\n print(\"The pre-9pm total bill comes to: ${0}\\n\".format(bill))\n\nmain()","repo_name":"damani-14/supplementary-materials","sub_path":"Python_Exercises/Chapter07/CH07_07.py","file_name":"CH07_07.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16662275953","text":"ti_version = '0.1'\n#\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Can be used alongside static inventory files in the same directory \n#\n# This inventory script expects to find Terraform tags of the form \n# group: ans_group associated with each tf instance to define the \n# host group membership for Ansible. 
{"seq_id":"16662275953","text":"ti_version = '0.1'\n#\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Can be used alongside static inventory files in the same directory \n#\n# This inventory script expects to find Terraform tags of the form \n# group: ans_group associated with each tf instance to define the \n# host group membership for Ansible. Multiple group tags are allowed per host\n# \n# terraform_inv.ini file in the same directory as this script, points to the \n# location of the terraform.tfstate file to be inventoried\n# [TFSTATE]\n# TFSTATE_FILE = /usr/share/terraform/ibm/Demoapp2x/terraform.tfstate\n# \n# Validate correct execution: \n# With the ini file present, run './terraform_hosts.py'\n# Successful execution returns groups with lists of hosts and _meta/hostvars with a detailed\n# host listing.\n# Validate successful operation with ansible:\n# With - 'ansible-inventory -i inventory --list'\n\n\n\nimport json\nimport configparser\nimport os\nfrom os import getenv\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\n\n\ndef parse_params():\n parser = ArgumentParser('IBM Cloud Terraform inventory')\n parser.add_argument('--list', action='store_true', default=True, help='List Terraform hosts')\n parser.add_argument('--tfstate', '-t', action='store', dest='tfstate', help='Terraform state file in current or specified directory (terraform.tfstate default)')\n parser.add_argument('--version', '-v', action='store_true', help='Show version')\n args = parser.parse_args()\n # fall back to terraform.tfstate in the current directory when no state file is specified\n if not args.tfstate:\n args.tfstate = \"terraform.tfstate\"\n return args\n\n\ndef get_tfstate(filename):\n return json.load(open(filename))\n\nclass TerraformInventory:\n def __init__(self):\n self.args = parse_params()\n if self.args.version:\n print(ti_version)\n elif self.args.list:\n print(self.list_all())\n\n def list_all(self):\n hosts_vars = {}\n attributes = {}\n groups = {}\n inv_output = {}\n group_hosts = defaultdict(list)\n hosts = self.get_tf_instances()\n if hosts is not None: \n for host in hosts:\n hosts_vars[host[0]] = host[1]\n groups = host[2]\n if groups is not None: \n for group in groups:\n group_hosts[group].append(host[0])\n\n for group in group_hosts:\n inv_output[group] = {'hosts': group_hosts[group]}\n inv_output[\"_meta\"] = {'hostvars': hosts_vars} \n return json.dumps(inv_output, indent=2) \n #return json.dumps({'all': {'hosts': hosts}, '_meta': {'hostvars': hosts_vars}}, indent=2)\n\n def get_tf_instances(self):\n tfstate = get_tfstate(self.args.tfstate)\n for resource in tfstate['resources']:\n\n if (resource['type'] == 'ibm_is_instance') & (resource['mode'] == 'managed'):\n for instance in resource['instances']:\n tf_attrib = instance['attributes']\n name = tf_attrib['name']\n group = []\n\n attributes = {\n 'id': tf_attrib['id'],\n 'image': tf_attrib['image'],\n #'metadata': tf_attrib['user_data'],\n 'region': tf_attrib['zone'],\n 'ram': tf_attrib['memory'],\n 'cpu': tf_attrib['vcpu'][0]['count'],\n 'ssh_keys': tf_attrib['keys'],\n 'private_ipv4': tf_attrib['primary_network_interface'][0]['primary_ipv4_address'],\n 'ansible_host': tf_attrib['primary_network_interface'][0]['primary_ipv4_address'],\n 'ansible_ssh_user': 'root',\n 'provider': 'provider.ibm',\n 'tags': tf_attrib['tags'],\n }\n \n \n #tag of form ans_group: xxxxxxx is used to define ansible host group\n for value in list(attributes[\"tags\"]):\n try:\n curprefix, rest = value.split(\":\", 1)\n except ValueError:\n continue\n if curprefix != \"ans_group\" :\n continue \n group.append(rest)\n\n yield name, attributes, group\n\n else: \n continue \n\n\nif __name__ == '__main__':\n 
TerraformInventory()\n","repo_name":"stevestrutt/ans_ssh_vpc","sub_path":"terraform_hosts.py","file_name":"terraform_hosts.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"70021651735","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n####################\n\nimport time\nimport requests\nimport logging\nimport json\n\nimport asyncio\n\ntry:\n from pymyq import login\n from pymyq.errors import MyQError, RequestError\n from pymyq.__version__ import __version__\n from aiohttp import ClientSession\nexcept ImportError:\n raise ImportError(\"Required Python libraries missing. Run 'pip3 install pymyq==3.1.5' in Terminal window, then reload plugin.\")\n\nif __version__ != \"3.1.5\":\n raise ImportError(\"Wrong version of MyQ library installed. Run 'pip3 install pymyq==3.1.5' in Terminal window, then reload plugin.\")\n\nkCurDevVersCount = 2 # current version of plugin devices\n\nSTATE_CLOSED = \"closed\"\nSTATE_CLOSING = \"closing\"\nSTATE_OPEN = \"open\"\nSTATE_OPENING = \"opening\"\nSTATE_STOPPED = \"stopped\"\nSTATE_TRANSITION = \"transition\"\nSTATE_AUTOREVERSE = \"autoreverse\"\nSTATE_UNKNOWN = \"unknown\"\n\n\n################################################################################\nclass Plugin(indigo.PluginBase):\n\n ########################################\n # Main Plugin methods\n ########################################\n def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):\n indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)\n\n self.logLevel = int(pluginPrefs.get(\"logLevel\", logging.INFO))\n self.indigo_log_handler.setLevel(self.logLevel)\n log_format = logging.Formatter('%(asctime)s.%(msecs)03d\\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(msg)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n self.plugin_file_handler.setFormatter(log_format)\n self.logger.debug(f\"logLevel = {self.logLevel}\")\n\n self.needsUpdate = False\n self.triggers = {}\n self.myqOpeners = {}\n self.myqLamps = {}\n self.knownOpeners = {}\n self.knownLamps = {}\n self.device_info = {}\n\n self.statusFrequency = float(self.pluginPrefs.get('statusFrequency', \"10\")) * 60.0\n self.logger.debug(f\"statusFrequency = {self.statusFrequency}\")\n self.next_status_check = time.time()\n\n def startup(self): # noqa\n self.logger.info(\"Starting MyQ\")\n indigo.devices.subscribeToChanges() # Watch for changes to sensors associated with an opener\n\n def shutdown(self): # noqa\n self.logger.info(\"Stopping MyQ\")\n\n def runConcurrentThread(self):\n try:\n while True:\n if self.needsUpdate or (time.time() > self.next_status_check):\n self.next_status_check = time.time() + self.statusFrequency\n self.needsUpdate = False\n asyncio.run(self.pymyq_update())\n self.sleep(1.0)\n except self.StopThread:\n self.logger.debug(\"Stopping runConcurrentThread\")\n\n def deviceStartComm(self, device):\n\n self.logger.info(f\"{device.name}: Starting {device.deviceTypeId} Device {device.id}\")\n\n if device.deviceTypeId == 'myqOpener':\n\n myqID = device.pluginProps.get(\"myqID\", None)\n if not device.address and myqID:\n newProps = device.pluginProps\n newProps[\"address\"] = myqID\n device.replacePluginPropsOnServer(newProps)\n self.logger.debug(f\"{device.name}: deviceStartComm: updated address to myqID {myqID}\")\n\n instanceVers = int(device.pluginProps.get('devVersCount', 0))\n if instanceVers >= kCurDevVersCount:\n self.logger.debug(f\"{device.name}: 
deviceStartComm: Device version is up to date ({instanceVers})\")\n elif instanceVers < kCurDevVersCount:\n newProps = device.pluginProps\n newProps['IsLockSubType'] = True\n newProps[\"devVersCount\"] = kCurDevVersCount\n device.replacePluginPropsOnServer(newProps)\n device.stateListOrDisplayStateIdChanged()\n self.logger.debug(f\"{device.name}: deviceStartComm: Updated to device version {kCurDevVersCount}, props = {newProps}\")\n else:\n self.logger.error(f\"{device.name}: deviceStartComm: Unknown device version: {instanceVers}\")\n\n newProps = device.pluginProps\n if device.pluginProps.get(\"use_sensor\", -1) == -1: # old device\n if device.pluginProps.get(\"sensor\", None):\n newProps['use_sensor'] = True\n else:\n newProps['use_sensor'] = False\n elif not device.pluginProps.get(\"use_sensor\"):\n newProps['sensor'] = None\n device.replacePluginPropsOnServer(newProps)\n\n self.logger.debug(f\"{device.name}: deviceStartComm: Adding device ({device.id}) to self.myqOpeners\")\n assert device.id not in self.myqOpeners\n self.myqOpeners[device.id] = device\n self.needsUpdate = True\n\n elif device.deviceTypeId == 'myqLight':\n\n self.logger.debug(f\"{device.name}: deviceStartComm: Adding device ({device.id}) to self.myqLamps\")\n assert device.id not in self.myqLamps\n self.myqLamps[device.id] = device\n self.needsUpdate = True\n\n def deviceStopComm(self, device):\n\n self.logger.info(f\"{device.name}: Stopping {device.deviceTypeId} Device {device.id}\")\n\n if device.deviceTypeId == 'myqOpener':\n self.logger.debug(f\"{device.name}: deviceStopComm: Removing device ({device.id}) from self.myqOpeners\")\n assert device.id in self.myqOpeners\n del self.myqOpeners[device.id]\n\n elif device.deviceTypeId == 'myqLight':\n self.logger.debug(f\"{device.name}: deviceStopComm: Removing device ({device.id}) from self.myqLamps\")\n assert device.id in self.myqLamps\n del self.myqLamps[device.id]\n\n def triggerStartProcessing(self, trigger):\n self.logger.debug(f\"Adding Trigger {trigger.name} ({trigger.id}) - {trigger.pluginTypeId}\")\n assert trigger.id not in self.triggers\n self.triggers[trigger.id] = trigger\n\n def triggerStopProcessing(self, trigger):\n self.logger.debug(f\"Removing Trigger {trigger.name} ({trigger.id})\")\n assert trigger.id in self.triggers\n del self.triggers[trigger.id]\n\n def triggerCheck(self, device):\n try:\n sensor = indigo.devices[int(device.pluginProps[\"sensor\"])]\n except (Exception,):\n self.logger.debug(f\"Skipping triggers, no linked sensor for MyQ device {device.name}\")\n return\n\n for triggerId, trigger in sorted(self.triggers.items()):\n self.logger.debug(f\"Checking Trigger {trigger.name} ({trigger.id}), Type: {trigger.pluginTypeId}\")\n if isinstance(sensor, indigo.SensorDevice):\n sensor_state = sensor.onState\n elif isinstance(sensor, indigo.MultiIODevice):\n sensor_state = not sensor.states[\n \"binaryInput1\"] # I/O devices are opposite from sensors in terms of the state binary\n else:\n sensor_state = None\n if device.onState == sensor_state: # these values are supposed to be opposite due to difference between sensor and lock devices\n indigo.trigger.execute(trigger) # so execute the out of sync trigger when they're not opposite\n\n ########################################\n # Menu Methods\n ########################################\n\n def requestUpdate(self):\n self.needsUpdate = True\n return True\n\n def menuDumpMyQ(self):\n self.logger.info(\n f\"MyQ Devices:\\n{json.dumps(self.device_info, sort_keys=True, indent=4, separators=(',', ': 
'))}\")\n return True\n\n ########################################\n # ConfigUI methods\n ########################################\n\n def validateDeviceConfigUi(self, valuesDict, typeId, devId):\n self.logger.debug(f\"validateDeviceConfigUi, valuesDict = {valuesDict}\")\n errorsDict = indigo.Dict()\n\n if not valuesDict['address']:\n errorsDict['address'] = \"Invalid Device\"\n self.logger.warning(f\"validateDeviceConfigUi: invalid device ID\")\n\n if len(errorsDict) > 0:\n return False, valuesDict, errorsDict\n return True, valuesDict\n\n def closedDeviceConfigUi(self, valuesDict, userCancelled, typeId, devId):\n if not userCancelled:\n try:\n self.logLevel = int(valuesDict[u\"logLevel\"])\n except (Exception,):\n self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(f\"logLevel = {self.logLevel}\")\n\n def validatePrefsConfigUi(self, valuesDict):\n self.logger.debug(\"validatePrefsConfigUi called\")\n errorDict = indigo.Dict()\n\n try:\n self.logLevel = int(valuesDict[u\"logLevel\"])\n except (Exception,):\n self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(u\"logLevel = \" + str(self.logLevel))\n\n if len(valuesDict['myqLogin']) < 5:\n errorDict['myqLogin'] = u\"Enter your MyQ login name (email address)\"\n\n if len(valuesDict['myqPassword']) < 1:\n errorDict['myqPassword'] = u\"Enter your MyQ login password\"\n\n statusFrequency = int(valuesDict['statusFrequency'])\n if (statusFrequency < 5) or (statusFrequency > (24 * 60)):\n errorDict['statusFrequency'] = u\"Status frequency must be at least 5 min and no more than 24 hours\"\n\n if len(errorDict) > 0:\n return False, valuesDict, errorDict\n\n return True, valuesDict\n\n def closedPrefsConfigUi(self, valuesDict, userCancelled):\n if not userCancelled:\n try:\n self.logLevel = int(valuesDict[u\"logLevel\"])\n except (Exception,):\n self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(f\"logLevel = {self.logLevel}\")\n\n self.statusFrequency = float(self.pluginPrefs.get('statusFrequency', \"10\")) * 60.0\n self.logger.debug(f\"statusFrequency = {self.statusFrequency}\")\n self.next_status_check = time.time() + self.statusFrequency\n\n def availableDeviceList(self, dev_filter=\"\", valuesDict=None, typeId=\"\", targetId=0):\n\n in_use = []\n retList = []\n\n if dev_filter == \"garagedoor\":\n for dev in indigo.devices.iter(filter=\"self.myqOpener\"):\n in_use.append(dev.address)\n\n for myqID, myqName in self.knownOpeners.items():\n if myqID not in in_use:\n retList.append((myqID, myqName))\n\n if targetId:\n try:\n dev = indigo.devices[targetId]\n retList.insert(0, (dev.pluginProps[\"address\"], self.knownOpeners[dev.pluginProps[\"address\"]]))\n except (Exception,):\n pass\n\n elif dev_filter == \"lamp\":\n for dev in indigo.devices.iter(filter=\"self.myqLight\"):\n in_use.append(dev.address)\n\n for myqID, myqName in self.knownLamps.items():\n if myqID not in in_use:\n retList.append((myqID, myqName))\n\n if targetId:\n try:\n dev = indigo.devices[targetId]\n retList.insert(0, (dev.pluginProps[\"address\"], self.knownLamps[dev.pluginProps[\"address\"]]))\n except (Exception,):\n pass\n\n self.logger.debug(f\"availableDeviceList for {dev_filter}: retList = {retList}\")\n return retList\n\n ################################################################################\n #\n # delegate methods for indigo.devices.subscribeToChanges()\n #\n 
################################################################################\n\n def deviceDeleted(self, dev):\n indigo.PluginBase.deviceDeleted(self, dev)\n self.logger.debug(f\"deviceDeleted: {dev.name} \")\n\n for myqDeviceId, myqDevice in sorted(self.myqOpeners.items()):\n try:\n sensorDev = myqDevice.pluginProps[\"sensor\"]\n except (Exception,):\n return\n\n try:\n sensorID = int(sensorDev)\n except (Exception,):\n return\n\n if dev.id == sensorID:\n self.logger.info(f\"A device ({dev.name}) that was associated with a MyQ device has been deleted.\")\n newProps = myqDevice.pluginProps\n newProps[\"sensor\"] = \"\"\n myqDevice.replacePluginPropsOnServer(newProps)\n\n def deviceUpdated(self, origDev, newDev):\n indigo.PluginBase.deviceUpdated(self, origDev, newDev)\n\n for myqDeviceId, myqDevice in sorted(self.myqOpeners.items()):\n try:\n sensorDev = int(myqDevice.pluginProps[\"sensor\"])\n except (Exception,):\n pass\n else:\n if origDev.id == sensorDev:\n if isinstance(newDev, indigo.SensorDevice):\n old_sensor_state = origDev.onState\n sensor_state = newDev.onState\n elif isinstance(newDev, indigo.MultiIODevice):\n old_sensor_state = not origDev.states[\n \"binaryInput1\"] # I/O devices are opposite from sensors in terms of the state binary\n sensor_state = not newDev.states[\"binaryInput1\"]\n else:\n self.logger.error(f\"deviceUpdated: unknown device type for {origDev.name}\")\n return\n\n if old_sensor_state == sensor_state:\n self.logger.debug(f\"deviceUpdated: {origDev.name} has not changed\")\n return\n\n self.logger.debug(f\"deviceUpdated: {origDev.name} has changed state: {sensor_state}\")\n # sensor \"On\" means the door's open, which is False for lock type devices (unlocked)\n # sensor \"Off\" means the door's closed, which is True for lock type devices (locked)\n if sensor_state:\n myqDevice.updateStateOnServer(key=\"onOffState\", value=False)\n else:\n myqDevice.updateStateOnServer(key=\"onOffState\", value=True)\n self.triggerCheck(myqDevice)\n\n ########################################\n\n def actionControlDevice(self, action, dev):\n\n if action.deviceAction == indigo.kDeviceAction.Unlock:\n self.logger.debug(f\"actionControlDevice: Unlock {dev.name}\")\n asyncio.run(self.pymyq_open(dev.address))\n\n elif action.deviceAction == indigo.kDeviceAction.Lock:\n self.logger.debug(f\"actionControlDevice: Lock {dev.name}\")\n asyncio.run(self.pymyq_close(dev.address))\n\n elif action.deviceAction == indigo.kDeviceAction.TurnOn:\n self.logger.debug(f\"actionControlDevice: TurnOn {dev.name}\")\n asyncio.run(self.pymyq_turnon(dev.address))\n\n elif action.deviceAction == indigo.kDeviceAction.TurnOff:\n self.logger.debug(f\"actionControlDevice: TurnOff {dev.name}\")\n asyncio.run(self.pymyq_turnoff(dev.address))\n\n elif action.deviceAction == indigo.kDeviceAction.RequestStatus:\n self.logger.debug(\"actionControlDevice: Request Status\")\n asyncio.run(self.pymyq_update())\n\n else:\n self.logger.error(f\"actionControlDevice: Unsupported action requested: {action} for {dev.name}\")\n\n ########################################\n\n def changeDeviceAction(self, pluginAction):\n self.logger.debug(f\"changeDeviceAction, deviceId = {pluginAction.deviceId}, actionId = {pluginAction.pluginTypeId}\")\n\n if pluginAction is not None:\n myqDevice = indigo.devices[pluginAction.deviceId]\n myqActionId = pluginAction.pluginTypeId\n if myqActionId == \"openDoor\":\n asyncio.run(self.pymyq_open(myqDevice.address))\n elif myqActionId == \"closeDoor\":\n 
asyncio.run(self.pymyq_close(myqDevice.address))\n else:\n self.logger.debug(f\"changeDeviceAction, unknown myqActionId = {myqActionId}\")\n return\n\n ################################################################################\n\n async def pymyq_update(self):\n async with ClientSession() as web_session:\n try:\n api = await login(self.pluginPrefs['myqLogin'], self.pluginPrefs['myqPassword'], web_session)\n except MyQError as err:\n self.logger.warning(f\"Error logging into MyQ server: {err}\")\n return\n\n await api.update_device_info()\n\n for device_id in api.devices:\n device_json = api.devices[device_id].device_json\n name = device_json['name']\n myqID = device_json['serial_number']\n family = device_json['device_family']\n self.logger.debug(f\"pymyq_update: got {name} - {family} ({myqID})\")\n self.device_info[myqID] = device_json\n\n if family == 'garagedoor':\n\n state = device_json['state']['door_state']\n self.logger.debug(f\"pymyq_read: door state = {state}\")\n\n if myqID not in self.knownOpeners:\n self.knownOpeners[myqID] = name\n\n for dev in indigo.devices.iter(filter=\"self.myqOpener\"):\n self.logger.debug(f'Checking Opener Device: {dev.name} ({dev.address}) against {myqID}')\n if dev.address == myqID:\n dev.updateStateOnServer(key=\"doorStatus\", value=state)\n if state == STATE_CLOSED:\n dev.updateStateOnServer(key=\"onOffState\", value=True) # closed is True (Locked)\n else:\n dev.updateStateOnServer(key=\"onOffState\",\n value=False) # anything other than closed is \"Unlocked\"\n self.triggerCheck(dev)\n break\n\n elif family == 'lamp':\n state = device_json['state']['lamp_state']\n self.logger.debug(f\"pymyq_read: lamp state = {state}\")\n\n if myqID not in self.knownLamps:\n self.knownLamps[myqID] = name\n\n for dev in indigo.devices.iter(filter=\"self.myqLight\"):\n self.logger.debug(\n f\"Checking Lamp Device: {dev.name} ({dev.address}) against {myqID}\")\n if dev.address == myqID:\n if state == \"on\":\n dev.updateStateOnServer(key=\"onOffState\", value=True)\n else:\n dev.updateStateOnServer(key=\"onOffState\", value=False)\n break\n\n async def pymyq_open(self, myqid):\n async with ClientSession() as web_session:\n try:\n api = await login(self.pluginPrefs['myqLogin'], self.pluginPrefs['myqPassword'], web_session)\n except MyQError as err:\n self.logger.warning(f\"Error logging into MyQ server: {err}\")\n return\n\n device = api.devices[myqid]\n if not device.open_allowed:\n self.logger.warning(f\"Opening of '{device.name}' is not allowed.\")\n return\n\n if device.state == STATE_OPEN:\n self.logger.info(f\"'{device.name}' is already open.\")\n return\n\n try:\n wait_task = await device.open(wait_for_state=False)\n except MyQError as err:\n self.logger.error(f\"Error trying to open '{device.name}': {err}\")\n return\n\n if not await wait_task:\n self.logger.warning(f\"Failed to open '{device.name}'.\")\n self.needsUpdate = True\n return\n\n async def pymyq_close(self, myqid):\n async with ClientSession() as web_session:\n try:\n api = await login(self.pluginPrefs['myqLogin'], self.pluginPrefs['myqPassword'], web_session)\n except MyQError as err:\n self.logger.warning(f\"Error logging into MyQ server: {err}\")\n return\n\n device = api.devices[myqid]\n if not device.close_allowed:\n self.logger.warning(f\"Closing of '{device.name}' is not allowed.\")\n return\n\n if device.state == STATE_CLOSED:\n self.logger.info(f\"'{device.name}' is already closed.\")\n return\n\n try:\n wait_task = await device.close(wait_for_state=False)\n except MyQError as err:\n 
self.logger.error(f\"Error trying to close '{device.name}': {err}\")\n return\n\n if not await wait_task:\n self.logger.warning(f\"Failed to close '{device.name}'.\")\n self.needsUpdate = True\n return\n\n async def pymyq_turnon(self, myqid):\n async with ClientSession() as web_session:\n try:\n api = await login(self.pluginPrefs['myqLogin'], self.pluginPrefs['myqPassword'], web_session)\n except MyQError as err:\n self.logger.warning(f\"Error logging into MyQ server: {err}\")\n return\n\n device = api.devices[myqid]\n try:\n wait_task = await device.turnon(wait_for_state=False)\n except MyQError as err:\n self.logger.error(f\"Error trying to turn on '{device.name}': {err}\")\n return\n\n if not await wait_task:\n self.logger.warning(f\"Failed to turn on '{device.name}'.\")\n self.needsUpdate = True\n return\n\n async def pymyq_turnoff(self, myqid):\n async with ClientSession() as web_session:\n try:\n api = await login(self.pluginPrefs['myqLogin'], self.pluginPrefs['myqPassword'], web_session)\n except MyQError as err:\n self.logger.warning(f\"Error logging into MyQ server: {err}\")\n return\n\n device = api.devices[myqid]\n try:\n wait_task = await device.turnoff(wait_for_state=False)\n except MyQError as err:\n self.logger.error(f\"Error trying to turn off '{device.name}': {err}\")\n return\n\n if not await wait_task:\n self.logger.warning(f\"Failed to turn off '{device.name}'.\")\n self.needsUpdate = True\n return\n","repo_name":"FlyingDiver/Indigo-MyQ","sub_path":"MyQ.indigoPlugin/Contents/Server Plugin/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":23018,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"28338183357","text":"import re\n\n\ndef solution(str1, str2):\n str1, str2 = str.upper(str1), str.upper(str2)\n s1 = re.sub('[^A-Z]', \"\", str1)\n str1 = list(zip(str1, str1[1:]))\n s1 = list(set(zip(s1, s1[1:])) & set(str1))\n s2 = re.sub('[^A-Z]', \"\", str2)\n str2 = list(zip(str2, str2[1:]))\n s2 = list(set(zip(s2, s2[1:])) & set(str2))\n intersection = list(set(s1) & set(s2))\n union = list(set(s1) | set(s2))\n dup = []\n for i in intersection:\n m = min(str1.count(i), str2.count(i))\n for j in range(1, m):\n dup.append(i)\n intersection.extend(dup)\n dup = []\n for i in union:\n m = max(str1.count(i), str2.count(i))\n for j in range(1, m):\n dup.append(i)\n union.extend(dup)\n if len(union) == 0:\n return 65536\n return int(len(intersection) / len(union) * 65536)\n","repo_name":"jojojohhh/python-algorithm-study","sub_path":"programmers/kakao_1st_clustering.py","file_name":"kakao_1st_clustering.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"357438137","text":"\"\"\"USB SD Card MUX controller class to manage the mux and connected cards.\"\"\"\nimport glob\nimport logging\nimport os\nimport random\nimport re\nimport string\nimport time\nfrom pathlib import Path\n\nimport pyudev\nfrom nebula.common import utils\nfrom usbsdmux import usbsdmux\n\nlog = logging.getLogger(__name__)\n\n\nclass usbmux(utils):\n \"\"\"USB SD Card MUX controller and helper methods\"\"\"\n\n search_path = \"/dev/usb-sd-mux/\"\n target_mux = None\n _mux_in_use = None\n _mux = None\n _target_sdcard = None\n\n def __init__(\n self, yamlfilename=None, board_name=None, target_mux=None, search_path=None\n ):\n self.target_mux = target_mux\n if search_path:\n self.search_path = search_path\n\n 
{"seq_id":"357438137","text":"\"\"\"USB SD Card MUX controller class to manage the mux and connected cards.\"\"\"\nimport glob\nimport logging\nimport os\nimport random\nimport re\nimport string\nimport time\nfrom pathlib import Path\n\nimport pyudev\nfrom nebula.common import utils\nfrom usbsdmux import usbsdmux\n\nlog = logging.getLogger(__name__)\n\n\nclass usbmux(utils):\n \"\"\"USB SD Card MUX controller and helper methods\"\"\"\n\n search_path = \"/dev/usb-sd-mux/\"\n target_mux = None\n _mux_in_use = None\n _mux = None\n _target_sdcard = None\n\n def __init__(\n self, yamlfilename=None, board_name=None, target_mux=None, search_path=None\n ):\n self.target_mux = target_mux\n if search_path:\n self.search_path = search_path\n\n self.update_defaults_from_yaml(\n yamlfilename, __class__.__name__, board_name=board_name\n )\n self.find_mux_device()\n self._mux = usbsdmux.UsbSdMux(self._mux_in_use)\n\n def find_mux_device(self):\n \"\"\"Find the mux device itself.\"\"\"\n devs = os.listdir(self.search_path)\n if not devs:\n raise Exception(\"No devices found\")\n\n if self.target_mux:\n if self.target_mux not in devs:\n raise Exception(\"Target mux device not found\")\n self._mux_in_use = os.path.join(self.search_path, self.target_mux)\n else:\n # Pick the first one\n self._mux_in_use = os.path.join(self.search_path, devs[0])\n\n def get_mux_mode(self):\n \"\"\"Get the current mux mode.\"\"\"\n return self._mux.get_mode()\n\n def set_mux_mode(self, mode):\n \"\"\"Set the mux mode.\n\n Args:\n mode (str): The mode to set the mux to. Options are: \"host\", \"dut\", \"off\".\n \"\"\"\n if mode == \"dut\":\n self._mux.mode_DUT()\n elif mode == \"host\":\n self._mux.mode_host()\n elif mode == \"off\":\n self._mux.mode_disconnect()\n else:\n raise Exception(\"Unknown mode: \" + mode)\n\n def find_muxed_sdcard(self):\n \"\"\"Find SD card connected through SD card mux.\n\n Before calling this method PLEASE POWER DOWN THE DUT.\n \"\"\"\n self.set_mux_mode(\"host\")\n time.sleep(5)\n context = pyudev.Context()\n for device in context.list_devices(subsystem=\"block\"):\n if device.get(\"ID_SERIAL_SHORT\") == os.path.basename(\n self._mux_in_use\n ).strip(\"id-\"):\n self._target_sdcard = re.sub(\n r\"[0-9]+\", \"\", os.path.basename(device.get(\"DEVNAME\"))\n )\n break\n\n if not self._target_sdcard:\n raise Exception(\"No muxed SD card found\")\n\n def write_img_file_to_sdcard(self, img_filename):\n \"\"\"Write an image file to the SD card.\n\n Args:\n img_filename (str): The path to the image file to write.\n \"\"\"\n if not os.path.isfile(img_filename):\n raise Exception(\"File not found: \" + img_filename)\n if not self._target_sdcard:\n self.find_muxed_sdcard()\n self.set_mux_mode(\"host\")\n time.sleep(5)\n # Check to make sure SD card is there\n devs = os.listdir(\"/dev\")\n if self._target_sdcard not in devs:\n raise Exception(\"Target SD card not found\")\n log.warning(\n f\"WARNING: Writing image file to SD card. 
Will destroy all data on {self._target_sdcard}\"\n )\n time.sleep(5)\n e = os.system(\n f'dd if=\"{img_filename}\" of=\"/dev/{self._target_sdcard}\" bs=4M conv=fsync status=progress'\n )\n if e != 0:\n raise Exception(\"Error writing image file to SD card\")\n\n def _mount_sd_card(self, include_root_partition=False):\n if not self._target_sdcard:\n self.find_muxed_sdcard()\n self.set_mux_mode(\"host\")\n time.sleep(5)\n # mount the SD card\n devs = os.listdir(\"/dev\")\n boot_p = f\"{self._target_sdcard}1\"\n if boot_p not in devs:\n raise Exception(f\"Target BOOT partition not found {boot_p}\")\n boot_p = os.path.join(\"/dev\", boot_p)\n folder = \"\".join(random.choices(string.ascii_lowercase, k=5))\n os.system(f\"mkdir /tmp/{folder}\")\n time.sleep(1)\n os.system(f\"mount {boot_p} /tmp/{folder}\")\n\n if include_root_partition:\n root_p = f\"{self._target_sdcard}2\"\n if root_p not in devs:\n raise Exception(f\"Target Root FS partition not found {root_p}\")\n root_p = os.path.join(\"/dev\", root_p)\n rootfs_folder = \"\".join(random.choices(string.ascii_lowercase, k=5))\n os.system(f\"mkdir /tmp/{rootfs_folder}\")\n time.sleep(1)\n os.system(f\"mount {root_p} /tmp/{rootfs_folder}\")\n return folder, boot_p, rootfs_folder, root_p\n\n return folder, boot_p\n\n def backup_files_to_external(\n self,\n partition=\"boot\",\n target=[],\n destination=\"backup\",\n subfolder=None,\n ):\n \"\"\"Backup specified files to an external location\n\n Args:\n partition (str): Source partition. Either boot or root\n target (list): Filenames that will be backup'd\n destination (str): Directory name at host to place the backup'd files\n subfolder (str): Directory name under destination to place the backup'd files, random by default\n \"\"\"\n folder, boot_p, rootfs_folder, root_p = self._mount_sd_card(\n include_root_partition=True\n )\n\n target_folder = folder\n if partition == \"root\":\n target_folder = rootfs_folder\n\n back_up_path = Path(os.path.join(destination, target_folder))\n if subfolder:\n back_up_path = Path(os.path.join(destination, subfolder))\n back_up_path.mkdir(parents=True, exist_ok=True)\n\n try:\n for f in target:\n files = glob.glob(os.path.join(f\"/tmp/{target_folder}\", f))\n if not files:\n raise Exception(f\"Cannot enumerate target {f}\")\n for file_path in files:\n log.info(f\"Backing up {file_path} to {str(back_up_path)}\")\n if os.path.exists(file_path):\n os.system(f\"cp -r {file_path} {str(back_up_path)}\")\n else:\n raise Exception(\"File not found \" + file_path)\n except Exception as ex:\n log.error(str(ex))\n raise ex\n finally:\n # unmount sd card\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"umount /tmp/{rootfs_folder}\")\n\n return subfolder if subfolder else target_folder\n\n def update_boot_files_from_external(\n self,\n bootbin_loc=None,\n kernel_loc=None,\n devicetree_loc=None,\n devicetree_overlay_loc=None,\n devicetree_overlay_config_loc=None,\n ):\n \"\"\"Update the boot files from outside SD card itself.\n\n Args:\n bootbin_loc (str): The path to the boot.bin file\n kernel_loc (str): The path to the kernel file\n devicetree_loc (str): The path to the devicetree file\n devicetree_overlay_loc (str): The path to the devicetree overlay file\n devicetree_overlay_config (str): The devicetree overlay configuration to be written on /boot/config.txt\n \"\"\"\n args = locals()\n folder, boot_p = self._mount_sd_card()\n\n try:\n for btfiletype, loc in args.items():\n if loc:\n if not isinstance(loc, (str, bytes, os.PathLike)):\n if isinstance(loc, 
type(self)):\n continue\n raise Exception(f\"Invalid type {type(loc)}\")\n if btfiletype == \"bootbin_loc\":\n outfile = os.path.join(\"/tmp\", folder, \"BOOT.BIN\")\n elif btfiletype == \"devicetree_overlay_loc\":\n outfile = os.path.join(\n \"/tmp\", folder, \"overlays\", os.path.basename(loc)\n )\n else:\n outfile = os.path.join(\"/tmp\", folder, os.path.basename(loc))\n if not os.path.isfile(loc):\n raise Exception(\"File not found: \" + loc)\n log.info(f\"Copying {loc} to {outfile} \")\n os.system(f\"cp -r {loc} {outfile}\")\n\n log.info(\"Updated boot files successfully... unmounting\")\n except Exception as ex:\n log.error(str(ex))\n raise ex\n finally:\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n\n def update_rootfs_files_from_external(self, target, destination):\n \"\"\"Update the root file system from outside SD card itself.\n\n Args:\n target (str): The path to the external target file/folder.\n destination (str): The path to the destination file/folder.\n \"\"\"\n folder, boot_p, rootfs_folder, root_p = self._mount_sd_card(\n include_root_partition=True\n )\n\n try:\n outfile = os.path.join(\"/tmp\", rootfs_folder, destination)\n if not os.path.exists(target):\n raise Exception(\"File/Folder not found: \" + target)\n command = f\"cp -r {target} {outfile}\"\n if os.system(command) != 0:\n raise Exception(f\"{command} failed\")\n log.info(\"Updated rootfs successfully... unmounting\")\n finally:\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n os.system(f\"umount /tmp/{rootfs_folder}\")\n os.system(f\"rm -rf /tmp/{rootfs_folder}\")\n\n def update_boot_files_from_sdcard_itself(\n self, bootbin_loc=None, kernel_loc=None, devicetree_loc=None\n ):\n \"\"\"Update the boot files from the SD card itself.\n\n Args:\n bootbin_loc (str): The path to the boot.bin file on the SD card.\n kernel_loc (str): The path to the kernel file on the SD card.\n devicetree_loc (str): The path to the devicetree file on the SD card.\n \"\"\"\n folder, boot_p = self._mount_sd_card()\n\n if bootbin_loc:\n bootbin_loc = os.path.join(\"/tmp/\", folder, bootbin_loc)\n if not os.path.isfile(bootbin_loc):\n options = os.listdir(f\"/tmp/{folder}\")\n options = [\n folder for o in options if os.path.isdir(f\"/tmp/{folder}/{o}\")\n ]\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n raise Exception(\n \"File not found: \"\n + bootbin_loc\n + \"\\nOptions are: \"\n + \"\\n\".join(options)\n )\n os.system(f\"cp {bootbin_loc} /tmp/{folder}/BOOT.BIN\")\n if kernel_loc:\n kernel_loc = os.path.join(\"/tmp/\", folder, kernel_loc)\n if not os.path.isfile(kernel_loc):\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n raise Exception(\"File not found: \" + kernel_loc)\n image = os.path.basename(kernel_loc)\n os.system(f\"cp {kernel_loc} /tmp/{folder}/{image}\")\n if devicetree_loc:\n devicetree_loc = os.path.join(\"/tmp/\", folder, devicetree_loc)\n if not os.path.isfile(devicetree_loc):\n options = os.listdir(f\"/tmp/{folder}\")\n options = [\n folder for o in options if os.path.isdir(f\"/tmp/{folder}/{o}\")\n ]\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n raise Exception(\n \"File not found: \"\n + devicetree_loc\n + \"\\nOptions are: \"\n + \"\\n\".join(options)\n )\n dt = os.path.basename(devicetree_loc)\n os.system(f\"cp {devicetree_loc} /tmp/{folder}/{dt}\")\n\n log.info(\"Updated boot files successfully... 
unmounting\")\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n\n def update_devicetree_for_mux(self, devicetree_filename=\"system.dtb\"):\n\n folder, boot_p = self._mount_sd_card()\n\n # Update the devicetree\n devicetree_loc = os.path.join(\"/tmp/\", folder, devicetree_filename)\n if not os.path.isfile(devicetree_loc):\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n raise Exception(\"File not found: \" + devicetree_loc)\n\n dts = devicetree_filename.replace(\".dtb\", \".dts\")\n dts_loc = os.path.join(\"/tmp/\", folder, dts)\n # Decompile the devicetree\n os.system(\n f\"dtc -I dtb /tmp/{folder}/{devicetree_filename} \" + f\" -O dts -o {dts_loc}\"\n )\n\n with open(dts_loc, \"r\") as f:\n dt = f.read()\n\n s = \"mmc@ff160000\"\n sn = \"sdc16:mmc@ff160000\"\n if s not in dt:\n log.warn(f\"{s.strip()} not found\")\n if sn not in dt:\n dt = dt.replace(s, sn)\n dt = dt + \"\\n&sdc16 { no-1-8-v ;};\"\n else:\n log.warn(f\"{sn.strip()} already exists\")\n s = \"mmc@ff170000\"\n sn = \"sdc17:mmc@ff170000\"\n if s not in dt:\n log.warn(f\"{s.strip()} not found\")\n if sn not in dt:\n dt = dt.replace(s, sn)\n dt = dt + \"\\n&sdc17 { no-1-8-v ;};\"\n else:\n log.warn(f\"{sn.strip()} already exists\")\n\n with open(dts_loc, \"w\") as f:\n f.write(dt)\n\n # Compile the devicetree\n os.system(\n f\"dtc -I dts {dts_loc} \" + f\" -O dtb -o /tmp/{folder}/{devicetree_filename}\"\n )\n\n log.info(\"Updated devicetree successfully... unmounting\")\n os.system(f\"umount /tmp/{folder}\")\n os.system(f\"rm -rf /tmp/{folder}\")\n","repo_name":"sdgtt/nebula","sub_path":"nebula/usbmux.py","file_name":"usbmux.py","file_ext":"py","file_size_in_byte":14073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"598244238","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom scipy import sparse\r\n\r\nclass Collaborative(object):\r\n # Neighborhood-based Collaborative Filtering (NBCF)\r\n def __init__(self, Y_data, k, dist_func=cosine_similarity, uuCF=1):\r\n self.uuCF = uuCF # user-user Collaborative Filtering\r\n self.Y_data = Y_data if uuCF else Y_data[:, [1, 0, 2]]\r\n self.k = k\r\n self.dist_func = dist_func\r\n self.Ybar_data = None\r\n # Number of users and hotel.\r\n self.n_users = int(np.max(self.Y_data[:, 0])) + 1\r\n self.n_items = int(np.max(self.Y_data[:, 1])) + 1\r\n \r\n \r\n # Update Y_data with new data\r\n def addNewData(self, newRating):\r\n self.Y_data = np.concatenate((self.Y_data, newRating), axis=0)\r\n\r\n # Normalize\r\n def normalize_Y(self):\r\n users = self.Y_data[:, 0] # Get users\r\n self.Ybar_data = self.Y_data.copy() # Make a copy of Y_data\r\n self.utilityMatrix = np.zeros((self.n_users,))\r\n for n in range(self.n_users):\r\n # rating index that was voted by user n\r\n ids = np.where(users == n)[0].astype(np.int32)\r\n # hotelId that rating by user n\r\n item_ids = self.Y_data[ids, 1]\r\n # and the rating\r\n ratings = self.Y_data[ids, 2]\r\n # Calc mean of rating\r\n mean = np.mean(ratings)\r\n if np.isnan(mean):\r\n mean = 0 # to avoid empty array and NaN value\r\n self.utilityMatrix[n] = mean\r\n # Normalize\r\n self.Ybar_data[ids, 2] = ratings - self.utilityMatrix[n]\r\n print(type(self.Ybar_data[ids, 2]))\r\n \r\n # create -sparse matrix\r\n # -> just save values which != 0 and the position\r\n self.Ybar = sparse.coo_matrix((self.Ybar_data[:, 2],\r\n (self.Ybar_data[:, 1], self.Ybar_data[:, 0])), 
(self.n_items, self.n_users))\r\n # print(self.Ybar)\r\n self.Ybar = self.Ybar.tocsr()\r\n \r\n # Similarity function\r\n def similarity(self):\r\n self.S = self.dist_func(self.Ybar.T, self.Ybar.T)\r\n\r\n # Training function\r\n def training(self):\r\n # Normalize data and calc sim_matrix after add ratings \r\n self.normalize_Y()\r\n self.similarity()\r\n\r\n # Rating Prediction function\r\n def pred(self, u, i, normalized=1):\r\n # Predict the rating of user u for item i (normalized)\r\n\r\n # Find all users that rated i\r\n ids = np.where(self.Y_data[:, 1] == i)[0].astype(np.int32)\r\n users_rated_i = (self.Y_data[ids, 0]).astype(np.int32)\r\n\r\n # Find similarities between current user and others\r\n sim = self.S[u, users_rated_i]\r\n\r\n # Find k most similar users\r\n a = np.argsort(sim)[-self.k:]\r\n # And the degree of similarity\r\n nearest_s = sim[a]\r\n r = self.Ybar[i, users_rated_i[a]]\r\n if normalized:\r\n # Add a small number to avoid division by 0\r\n return (r*nearest_s)[0]/(np.abs(nearest_s).sum() + 1e-8)\r\n\r\n return (r*nearest_s)[0]/(np.abs(nearest_s).sum() + 1e-8) + self.utilityMatrix[u]\r\n\r\n # Rating Prediction function for uuCF and iiCF\r\n def predict(self, user, item, normalized = 1):\r\n if self.uuCF: \r\n return self.pred(user, item, normalized)\r\n return self.pred(item, user, normalized)\r\n\r\n # Recommend item\r\n def recommendItems(self, user):\r\n # Identify all items that should be recommended to user.\r\n # Based on : self.pred(u, i) > 0 -> Assume looking at items not yet rated by user\r\n ids = np.where(self.Y_data[:, 0] == user)[0]\r\n items_rated_by_user = self.Y_data[ids, 1].tolist()\r\n recommended_items = [] # List recommended items\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_user:\r\n rating = self.pred(user, i)\r\n if rating > 0:\r\n recommended_items.append([i, rating])\r\n # Sorting\r\n recommended_items.sort(reverse=True, key=lambda x: x[1])\r\n\r\n return recommended_items\r\n\r\n # Get all items which should be recommended for each user\r\n def get_recommendation(self, userId):\r\n recommended_items = self.recommendItems(userId)\r\n return recommended_items\r\n\r\n # Get RMSE\r\n def getRMSE(self, rate_test): \r\n n_tests = rate_test.shape[0]\r\n SE = 0 # squared error\r\n for index, row in rate_test.iterrows():\r\n u_id = row[0]\r\n i_id = row[1]\r\n point = row[2]\r\n pred = self.predict(u_id, i_id, normalized = 0)\r\n SE += (pred - point)**2 \r\n\r\n RMSE = np.sqrt(SE/n_tests)\r\n return RMSE ","repo_name":"Tiensp/recommender_sys","sub_path":"Collaborative.py","file_name":"Collaborative.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3418434792","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nimport pandas as pd\nimport numpy as np\nimport json\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport urllib.request as urllib2\nfrom .stock_module import stock\nfrom .stock_data import data_about_stock\nfrom ..fusioncharts import FusionCharts\nfrom ..fusioncharts import FusionTable\nfrom ..fusioncharts import TimeSeries\nfrom yahoo_fin.stock_info import get_live_price, get_data\n\n# import time\nimport yfinance as yf\n\n# def index(request):\n# return HttpResponse('Hello')\n \ndef symbol_company(request):\n \n symbol = data_about_stock().company()\n monetary = stock(symbol).MonetaryPolicy()\n\n return render(request, 
'base.html', {'company': symbol,\n 'monetary':monetary, })\n\n\ndef stock_info(request, Symbol):\n data = yf.Ticker(Symbol+'.BK')\n information = data.info\n stocks = stock(Symbol) # class จาก file stock_module เพื่อเข้าสู่ฟังก์ชั่นที่ทำการเขียนไว้\n monetary = stocks.MonetaryPolicy() # GDP Market\n under_over = stocks.under_over_stock(Symbol)\n balance = stocks.finance_balance(Symbol) #งบแสดงฐานการเงินแบบเต็มของล่าสุด\n income = stocks.income_statement(Symbol) # งบกำไรขาดทุนเบ็ดเสร็จ แสดงที่การเงิน\n stockholder = stocks.stockholder(Symbol) # รายชื่อผู้ถือหุ้น\n \n return render(request, 'stock_overview.html',{\n 'Symbol':Symbol,\n 'monetary':monetary,\n 'balance':balance,\n 'stockholder':stockholder,\n 'under':under_over,\n }\n )\n\ndef stock_financial(request, Symbol):\n stocks = stock(Symbol) # class จาก file stock_module เพื่อเข้าสู่ฟังก์ชั่นที่ทำการเขียนไว้\n balance = stocks.finance_balance(Symbol) #งบแสดงฐานการเงินแบบเต็มของล่าสุด\n income = stocks.income_statement(Symbol) # งบกำไรขาดทุนเบ็ดเสร็จ แสดงที่การเงิน\n stockholder = stocks.stockholder(Symbol) # รายชื่อผู้ถือหุ้น\ndef finance_chart(request):\n stock = yf.Ticker('JMART.BK')\n infor = stock.history(period=\"5y\")\n infor = infor.reset_index().round(4)\n infor['Date'] = pd.to_datetime(infor['Date']).dt.date.astype(str)\n infor = infor.rename(columns={\"Date\": \"t\", \"Close\": \"y\"})\n data = infor[['t','y']].to_dict('records')\n fusionTable = FusionTable(schema, data)\n timeSeries = TimeSeries(fusionTable)\n\n # Wrapper constructor parameters\n # charttype, chartID, width, height, renderAt, data format, TimeSeries object\n\n fcChart = FusionCharts(\"timeseries\", \"MyFirstChart\" , \"700\", \"450\", \"chart-container\", \"json\", timeSeries)\n\n # Returning complete JavaScript and HTML code, which is used to generate chart in the browsers.\n return render(request,'base.html', {'output':fcChart.render()})\n \ndef chart(request):\n return render(request,'base.html')\n\n","repo_name":"61070328/stock-project","sub_path":"Django_app/django_stock/stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23377008293","text":"# 7 8\n# 1 2\n# 1 5\n# 2 3\n# 2 6\n# 3 4\n# 4 7\n# 5 6\n# 6 4\n\ndef find_min_index():\n for i in range(1, v + 1):\n if indegree[i] == 0 and not visited[i]:\n return i\n\n\nv, e = map(int, input().split())\n\nindegree = [0] * (v + 1)\nvisited = [False] * (v + 1)\ngraph = [[] for _ in range(v + 1)]\nfor _ in range(e):\n a, b = map(int, input().split())\n graph[a].append(b)\n indegree[b] += 1\n\nprint(indegree)\n\nq = []\nstart = find_min_index()\nq.append(start)\nvisited[start] = True\n\nwhile q:\n node = q.pop(0)\n print(node, end=' ')\n for item in graph[node]:\n indegree[item] -= 1\n if indegree[item] == 0 and not visited[item]:\n q.append(item)\n visited[item] = True\n\n","repo_name":"punkryn/algorithm","sub_path":"Practice/GraphTheory/topologySort.py","file_name":"topologySort.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74378867412","text":"from datetime import datetime, timedelta\nfrom time import sleep\n\nimport parsedatetime\nfrom gcsa.conference import ConferenceSolutionCreateRequest, SolutionType\nfrom gcsa.event import Event\nfrom gcsa.google_calendar import GoogleCalendar, SendUpdatesMode\n\nfrom pytz import timezone\n\nfrom config import 
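peachorobo_config\n\n# create_event() below polls the API response for the generated Meet link. A\n# generic retry helper of the same shape (a hypothetical sketch, not part of\n# this module or of the gcsa API) might look like:\n#\n# def poll(getter, attempts=5, delay=5):\n#     for _ in range(attempts):\n#         try:\n#             return getter()\n#         except Exception:\n#             sleep(delay)\n#     return None\nfrom config import 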
peachorobo_config\n\n\nclass CalendarService:\n    def __init__(self):\n        self.calendar = GoogleCalendar(\n            \"primary\", credentials_path=\"credentials.json\", token_path=\"token.pickle\"\n        )\n\n    def create_event(self, start_dt: datetime) -> Event:\n        end_dt = start_dt + timedelta(hours=2)\n        attendees = peachorobo_config.calendar_emails\n        event = Event(\n            \"Mystery Dinner\",\n            start=start_dt,\n            end=end_dt,\n            attendees=attendees,\n            conference_solution=ConferenceSolutionCreateRequest(\n                solution_type=SolutionType.HANGOUTS_MEET,\n            ),\n        )\n        event_response = self.calendar.add_event(\n            event, send_updates=SendUpdatesMode.ALL\n        )\n        hangout_link = None\n        retries = 5\n        while hangout_link is None and retries > 0:\n            try:\n                hangout_link = event_response.conference_solution.entry_points[0].uri\n            except Exception:\n                # refresh the event we actually inspect, then back off and retry\n                event_response = self.get_event(event_response.id)\n                retries -= 1\n                sleep(5)\n        return event_response\n\n    def get_event(self, event_id: str) -> Event:\n        event_response = self.calendar.get_event(event_id)\n        return event_response\n\n    def delete_event(self, event_id: str) -> None:\n        try:\n            event = self.calendar.get_event(event_id)\n            self.calendar.delete_event(event)\n        except Exception:\n            pass\n\n\ndef main():\n    calendar_service = CalendarService()\n    cal_parser = parsedatetime.Calendar()\n    start_dt, _ = cal_parser.parseDT(\n        datetimeString=\"tomorrow at 3pm\", tzinfo=timezone(\"US/Eastern\")\n    )\n    event = calendar_service.create_event(start_dt)\n    print(event)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"jzengg/peachorobo","sub_path":"peachorobo/calendar_service.py","file_name":"calendar_service.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73926934292","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport uno\nimport string\n\nlogfile=\"e:\\\\xulin\\\\TEMP\\\\py_desc_compare.txt\"\ndiffoutput=\"e:\\\\xulin\\\\TEMP\\\\py_desc_diff.txt\"\n#logfile=\"/home/cylinc/tmp/py_desc_compare.txt\"\n\ndef GetOooDesktop():\n\tlocal=uno.getComponentContext()\n\tresolver=local.ServiceManager.createInstanceWithContext(\"com.sun.star.bridge.UnoUrlResolver\",local)\n\tcontext=resolver.resolve(\"uno:socket,host=localhost,port=8100;urp;StarOffice.ComponentContext\")\n\tDesktop=context.ServiceManager.createInstanceWithContext(\"com.sun.star.frame.Desktop\",context)\n\tdoc=Desktop.getCurrentComponent()\n\treturn doc\n\n#res_cn=\"电阻\".encode('utf-8')\n#smd_cn=\"贴片\".encode('utf-8')\n#dip_cn=\"插件\".encode('utf-8')\n#cap_cn=\"电容\".encode('utf-8')\n\nres_cn=b'\\xe7\\x94\\xb5\\xe9\\x98\\xbb'\nsmd_cn=b'\\xe8\\xb4\\xb4\\xe7\\x89\\x87'\ndip_cn=b'\\xe6\\x8f\\x92\\xe4\\xbb\\xb6'\ncap_cn=b'\\xe7\\x94\\xb5\\xe5\\xae\\xb9'\n\nResName=[\"RES\",\"RESISTOR\",res_cn]\nResMountType1=[\"SMD\",\"SURFACE MOUNT\",smd_cn]\nResMountType2=[\"DIP\",dip_cn]\n\nResDecal=[\"0402\",\"0603\",\"0805\",\"1206\",\"1210\"]\nResPrecision=[\"5%\",\"1%\"]\nResValueUnit=[\"K\",\"R\",\"M\"]\nResPowerDiss=[\"W\"]\n\n# get the digits around the keyword\ndef getValue(sourcestr,keyword):\n\t#print(\"getValue(): keyword: \" + keyword)\n\tif keyword==\"\" or sourcestr==\"\":\n\t\treturn \"\"\t\n\tposition=sourcestr.find(keyword)\n\tif position==-1:\n\t\treturn \"\"\n\tif position==0:\n\t\tposition=sourcestr.find(keyword,1)\n\twhile not position==-1:\n\t\tstartpos=position-1\n\t\t# try to find the position of '1' in the string \"1.5pF\" or \"1/16W\"\n\t\twhile sourcestr[startpos].isdigit() or sourcestr[startpos]==\".\" or sourcestr[startpos]==\"/\" 
:\n\t\t\tstartpos=startpos-1\n\t\t\tif startpos==-1:\n\t\t\t\tbreak\n\t\t# if there is no digit before the keyword, then try to find the next position of keyword\n\t\tif startpos==position-1:\n\t\t\tposition=sourcestr.find(keyword,position+1)\n\t\t\tcontinue\n\n\t\tlength=len(sourcestr)\n\t\tendpos=position+len(keyword)\n\t\t# if the keyword is the last charater in the string\n\t\tif endpos==length:\n\t\t\t#print(\"getvalue(): return from pos1\")\n\t\t\treturn sourcestr[startpos+1:]\n\n\t\t# try to find the digits after the keyword like \"1K5\"\n\t\twhile sourcestr[endpos].isdigit():\n\t\t\tendpos=endpos+1\n\t\t\tif endpos==length:\n\t\t\t\tbreak\n\t\t# debug_print\n\t\t'''\n\t\t#print(\"getvalue(): return from pos2\")\n\t\t#print(\"getvalue():start: {0};end: {1}\".format(startpos+1,endpos))\n\t\t#print(\"getvalue(): \" + sourcestr[startpos+1:endpos]) \n\t\t'''\n\t\treturn sourcestr[startpos+1:endpos]\n\treturn \"\"\n\ndef isResiterStr(description):\n\tfor string in ResName:\n\t\tposition=description.find(string)\n\t\tif position>-1:\n\t\t\ttempstr1=description[position+len(string)]\n\t\t\ttempstr2=description[position-1]\n\t\t\t#print(\"tempstr1: {0};tempstr2: {1}\".format(tempstr1,tempstr2))\n\t\t\tif position==0:\n\t\t\t\tif tempstr1>=\"A\" and tempstr1<=\"Z\":\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn 1\n\t\t\telif (tempstr1>=\"A\" and tempstr1<=\"Z\") or (tempstr2>=\"A\" and tempstr2<=\"Z\"):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn 1\n\treturn 0\n\ndef CreateStdResDesc(description):\n\tresult=\"\"\n\tif description==\"\":\n\t\treturn result\n\tif not isResiterStr(description):\n\t\treturn \"\"\n\tresult=result+\"Resistor\"\n\tfor mounttype in ResMountType1:\n\t\tif description.find(mounttype)>-1:\n\t\t\tresult=result+ \" SMD\"\n\t\t\tbreak\n\tfor mounttype in ResMountType2:\n\t\tif description.find(mounttype)>-1:\n\t\t\tresult=result+ \" DIP\"\n\t\t\tbreak\n\tfor decal in ResDecal:\n\t\tif description.find(decal)>-1:\n\t\t\tresult=result+ \" \" + decal\n\t\t\tbreak\n\tfor powerdiss in ResPowerDiss:\n\t\tvalue=getValue(description,powerdiss)\n\t\tif value==None:\n\t\t\t#print(\"[W]aring: Error in getValue()\")\n\t\t\tcontinue\n\t\tif not value==\"\":\n\t\t\tresult=result+ \" \" + value\n\t\t\tbreak\n\tfor unit in ResValueUnit:\n\t\tvalue=getValue(description,unit)\n\t\tif value==None:\n\t\t\t#print(\"[W]aring: Error in getValue()\")\n\t\t\tcontinue\n\t\tif not value==\"\":\n\t\t\t#print(\"value: \" +str(type(value)))\n\t\t\tresult=result+ \" \" + value\n\t\t\tbreak\n\tfor precision in ResPrecision:\n\t\tif description.find(precision)>-1:\n\t\t\tresult=result+ \" \" + precision\n\t\t\tbreak\n\treturn result\n\nCapName=[\"CAP\",\"CAPACITOR\",cap_cn]\nCapMountType1=[\"SMD\",\"SURFACE MOUNT\",smd_cn]\nCapMountType2=[\"DIP\",dip_cn]\nCapDecal=[\"0402\",\"0603\",\"0805\",\"1206\",\"1210\"]\nCapPrecision=[\"20%\",\"10%\",\"5%\",\"+20/-80%\"]\nCapValueUnit=[\"PF\",\"NF\",\"UF\",\"P\",\"N\",\"U\"]\nCapVoltage=[\"V\"]\nCapMeterial=[\"NPO\",\"X7R\",\"X5R\",\"Y5V\"]\n\ndef isCapacitor(description):\n\tfor string in CapName:\n\t\tif description.find(string)>-1:\n\t\t\treturn 1\n\treturn 0\n\ndef CreateStdCapDesc(description):\n\tresult=\"\"\n\tif description==\"\":\n\t\treturn result\n\tif not isCapacitor(description):\n\t\treturn \"\"\n\tresult=result+\"Capacitor\"\n\tfor mounttype in CapMountType1:\n\t\tif description.find(mounttype)>-1:\n\t\t\tresult=result+ \" SMD\"\n\t\t\tbreak\n\tfor mounttype in CapMountType2:\n\t\tif description.find(mounttype)>-1:\n\t\t\tresult=result+ \" 
DIP\"\n\t\t\tbreak\n\tfor decal in CapDecal:\n\t\tif description.find(decal)>-1:\n\t\t\tresult=result+ \" \" + decal\n\t\t\tbreak\n\tfor unit in CapValueUnit:\n\t\tvalue=getValue(description,unit)\n\t\tif value==None:\n\t\t\t#print(\"[W]aring: Error in getValue()\")\n\t\t\tcontinue\n\t\tif not value==\"\":\n\t\t\tresult=result+ \" \" + value\n\t\t\tbreak\n\tfor voltage in CapVoltage:\n\t\tvalue=getValue(description,voltage)\n\t\tif not value==\"\":\n\t\t\tresult=result+ \" \" + value\n\t\t\tbreak\n\tfor precision in CapPrecision:\n\t\tif description.find(precision)>-1:\n\t\t\tresult=result+ \" \" + precision\n\t\t\tbreak\n\tfor material in CapMeterial:\n\t\tif description.find(material)>-1:\n\t\t\tresult=result+ \" \" + material\n\t\t\tbreak\n\t#print(\"Capstd: result: \"+result)\n\treturn result\n\ndef isstrEqual(str1,str2):\n\tdivider=\" \"\n\tif str1==str2:\n\t\treturn 1\n\tif str1==\"\" or str2==\"\":\n\t\treturn -1\n\tlist1=str1.split(divider)\n\tlist2=str2.split(divider)\n\tnewlist1=[]\n\tfor member in list1:\n\t\tif not member=='':\n\t\t\tnewlist1.append(member)\n\tnewlist2=[]\n\tfor member in list2:\n\t\tif not member=='':\n\t\t\tnewlist2.append(member)\n\tnewlist1.sort()\n\tnewlist2.sort()\n\t#print(\"newlist1: \"+str(newlist1)+\"\\n\")\n\t#print(\"newlist2: \"+str(newlist2)+\"\\n\")\n\tf.write(\"newlist1: \"+str(newlist1)+\"\\n\")\n\tf.write(\"newlist2: \"+str(newlist2)+\"\\n\")\n\tif newlist1==newlist2:\n\t\treturn 1\n\telse:\n\t\tfdiff.write(\"\\n\")\n\t\tfdiff.write(\"newlist1: \"+str(newlist1)+\"\\n\")\n\t\tfdiff.write(\"newlist2: \"+str(newlist2)+\"\\n\")\n\t\treturn 0\n\n\nf=open(logfile,'w')\nfdiff=open(diffoutput,'w')\n\ndef Startprocess():\n\t'''\n\tusage:\n\t\tthe goal of this function is to compare the two descriptions of one PN (part number) material. In the case of:\n\t\t\t1) resistor: unify the descriptions into a standard description\n\t\t\t2) capacitance: unify the descriptions into a standard description\n\t\t\t3) others: trim the two description strings, then compare them\n\n\t1. set the sheet name to be \"sheet1\", you are encouraged to create a new file to do this\n\t2. the first row of data is 1\n\t3. the 1st col should be pn, the 2nd col should be description1,\n\t   the 3rd should be description2\n\t4. 
currently, the last row is set to be 600\n\t'''\n\n\tdoc=GetOooDesktop()\n\tsheet=doc.getSheets().getByName(\"sheet1\")\n\t#inputstr1=\"Resistor 0402 Surface Mount 1% 2R0 1/16W \"\n\t#inputstr2=\"SMD CAP\tCapacitor SM 0402 NPO Ceramic 5% 50V 10P\"\n\t#inputstr2=inputstr2.upper()\n\t#print(inputstr2)\n\t#print(CreateStdCapDesc(inputstr2))\n\tstartrow=0\n\tendrow=600\n\titem_col=0\n\tdesc_col1=1\n\tdesc_col2=2\n\toutputcol=3\n\t'''\n\tResName[2]=ResName[2].encode('utf-8')\n\tResMountType1[2]=ResMountType1[2].encode('utf-8')\n\tResMountType2[1]=ResMountType2[1].encode('utf-8')\n\tCapName[2]=CapName[2].encode('utf-8')\n\tCapMountType1[2]=CapMountType1[2].encode('utf-8')\n\tCapMountType2[1]=CapMountType2[1].encode('utf-8')\n\t'''\n\n\tfor i in range(startrow,endrow):\n\t\tstr1=sheet.getCellByPosition(desc_col1,i).getString()\n\t\tstr1=str1.encode('utf-8')\n\t\tstr1=str1.upper()\n\t\tf.write(\"str1: \"+str1+\"\\n\")\n\n\t\tstr2=sheet.getCellByPosition(desc_col2,i).getString()\n\t\tstr2=str2.encode('utf-8')\n\t\tstr2=str2.upper()\n\t\tf.write(\"str2: \"+str2+\"\\n\")\n\n\t\tif isResiterStr(str1) and isResiterStr(str2):\n\t\t\tresult1=CreateStdResDesc(str1)\n\t\t\tresult2=CreateStdResDesc(str2)\n\t\t\tf.write(\"result1: \"+result1+\"\\n\")\n\t\t\tf.write(\"result2: \"+result2+\"\\n\")\n\t\t\tif result1==result2:\n\t\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"same\")\n\t\t\telse:\n\t\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"different\")\n\t\t\t\tfdiff.write(\"\\n\")\n\t\t\t\tfdiff.write(sheet.getCellByPosition(item_col,i).getString())\n\t\t\t\tfdiff.write(\"\\n\")\n\t\t\t\tfdiff.write(\"result1: \"+result1+\"\\n\")\n\t\t\t\tfdiff.write(\"result2: \"+result2+\"\\n\")\n\t\t\tcontinue\n\t\tif isCapacitor(str1) and isCapacitor(str2):\n\t\t\tresult1=CreateStdCapDesc(str1)\n\t\t\tresult2=CreateStdCapDesc(str2)\n\t\t\tf.write(\"result1: \"+result1+\"\\n\")\n\t\t\tf.write(\"result2: \"+result2+\"\\n\")\n\t\t\tif result1==result2:\n\t\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"same\")\n\t\t\telse:\n\t\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"different\")\n\t\t\t\tfdiff.write(\"\\n\")\n\t\t\t\tfdiff.write(sheet.getCellByPosition(item_col,i).getString())\n\t\t\t\tfdiff.write(\"\\n\")\n\t\t\t\tfdiff.write(\"result1: \"+result1+\"\\n\")\n\t\t\t\tfdiff.write(\"result2: \"+result2+\"\\n\")\n\t\t\tcontinue\n\t\tif isstrEqual(str1,str2)==1:\n\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"same\")\n\t\telse:\n\t\t\tsheet.getCellByPosition(outputcol,i).setString(\"different\")\n\t\t\tfdiff.write(sheet.getCellByPosition(item_col,i).getString())\n\t\t\t'''\n\t\t\tfdiff.write(\"\\n\")\n\t\t\tfdiff.write(\"str1: \"+str1+\"\\n\")\n\t\t\tfdiff.write(\"str2: \"+str2+\"\\n\")\n\t\t\t'''\n\tf.close()\n\tfdiff.close()\n\ndef Test():\n\tinputstr1=\"SMD IC MCU STM8S103 TSSOP20 \".encode('utf-8')\n\tinputstr2=\"SMD IC MCU STM8S103 TSSOP20\".encode('utf-8')\n\tinputstr1=inputstr1.upper()\n\tinputstr2=inputstr2.upper()\n\t#print(inputstr1)\n\t#print(CreateStdResDesc(inputstr1))\n\t#print(isstrEqual(inputstr1,inputstr2))\n\nTest()\n","repo_name":"xuyuanwei/pcscode","sub_path":"libreoffice/desc_compare.py","file_name":"desc_compare.py","file_ext":"py","file_size_in_byte":9417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35041703710","text":"from zoo.common.utils import get_file_list\nfrom zoo.orca.data import SparkXShards\nfrom zoo.orca.data.utils import get_size\nfrom zoo.util.utils import convert_row_to_numpy\nimport 
numpy as np\n\n\ndef find_latest_checkpoint(model_dir, model_type=\"bigdl\"):\n import os\n import re\n import datetime\n ckpt_path = None\n latest_version = None\n optim_prefix = None\n optim_regex = None\n if model_type == \"bigdl\":\n optim_regex = \".*\\.([0-9]+)$\"\n elif model_type == \"pytorch\":\n optim_regex = \"TorchModel[0-9a-z]*\\.([0-9]+)$\"\n elif model_type == \"tf\":\n optim_regex = \"TFParkTraining\\.([0-9]+)$\"\n else:\n ValueError(\"Only bigdl, pytorch and tf are supported for now.\")\n\n file_list = get_file_list(model_dir, recursive=True)\n optim_dict = {}\n pattern_re = re.compile('(.*)(\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}-\\d{2})(.*)optimMethod-'\n + optim_regex)\n for file_path in file_list:\n matched = pattern_re.match(file_path)\n if matched is not None:\n try:\n # check if dir name is date time\n timestamp = matched.group(2)\n datetime.datetime.strptime(timestamp, '%Y-%m-%d_%H-%M-%S')\n if timestamp in optim_dict:\n optim_dict[timestamp].append((int(matched.group(4)),\n os.path.dirname(file_path),\n os.path.basename(file_path).split('.')[0]))\n else:\n optim_dict[timestamp] = [(int(matched.group(4)),\n os.path.dirname(file_path),\n os.path.basename(file_path).split('.')[0])]\n except:\n continue\n if optim_dict:\n latest_timestamp = max(optim_dict)\n latest_version, ckpt_path, optim_prefix = max(optim_dict[latest_timestamp],\n key=lambda version_path: version_path[0])\n\n return ckpt_path, optim_prefix, latest_version\n\n\ndef convert_predict_rdd_to_xshard(data, prediction_rdd):\n import numpy as np\n from zoo.orca.data import SparkXShards\n\n def group_index(iter):\n for data in iter:\n size = get_size(data[\"x\"])\n for i in range(size):\n yield size\n\n def transform_predict(predictions):\n # list of np array\n if isinstance(predictions[0], list):\n predictions = np.array(predictions).T.tolist()\n result = [np.array(predict) for predict in predictions]\n return result\n # np array\n else:\n return np.array(predictions)\n\n def group(iter):\n this_index = 0\n buffer = []\n this_count = None\n for (count, pred) in iter:\n if this_index == 0:\n this_count = count\n if this_index < this_count:\n buffer.append(pred)\n this_index += 1\n if this_index == this_count:\n yield transform_predict(buffer)\n buffer.clear()\n this_index = 0\n\n def add_pred(shard_pred):\n shard, pred = shard_pred\n shard[\"prediction\"] = pred\n return shard\n\n indexed_rdd = data.rdd.mapPartitions(group_index)\n grouped_pred = indexed_rdd.zip(prediction_rdd).mapPartitions(group)\n result_rdd = data.rdd.zip(grouped_pred).map(add_pred)\n return SparkXShards(result_rdd)\n\n\ndef update_predict_xshards(xshard, pred_xshards):\n def updates(d1_d2):\n d1, d2 = d1_d2\n d1.update(d2)\n return d1\n\n result = SparkXShards(xshard.rdd.zip(pred_xshards.rdd).map(updates))\n return result\n\n\ndef convert_predict_xshards_to_dataframe(df, pred_shards):\n def flatten(data):\n data = data[\"prediction\"]\n is_list = isinstance(data, list)\n is_tuple = isinstance(data, tuple)\n if is_list or is_tuple:\n length = data[0].shape[0]\n ls_data = data\n else:\n length = data.shape[0]\n ls_data = [data]\n\n for i in range(length):\n row = [elem[i] for elem in ls_data]\n if is_list:\n yield row\n elif is_tuple:\n yield tuple(row)\n else:\n yield row[0]\n\n pred_rdd = pred_shards.rdd.flatMap(flatten)\n result = convert_predict_rdd_to_dataframe(df, pred_rdd)\n return result\n\n\ndef convert_predict_rdd_to_dataframe(df, prediction_rdd):\n from pyspark.sql import Row\n from pyspark.sql.types import StructType, StructField, 
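FloatType, ArrayType\n    from pyspark.ml.linalg import VectorUDT, Vectors\n\n    # combine() maps each prediction onto a Spark SQL type: a list of arrays\n    # becomes ArrayType(VectorUDT()), a scalar becomes FloatType, a 1-D array a\n    # dense vector (VectorUDT), and an n-D array an n-times nested ArrayType.\n    from pyspark.sql.types import StructType, StructField, 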
FloatType, ArrayType\n from pyspark.ml.linalg import VectorUDT, Vectors\n\n def combine(pair):\n # list of np array\n if isinstance(pair[1], list):\n row = Row(*([pair[0][col] for col in pair[0].__fields__] +\n [[Vectors.dense(elem) for elem in pair[1]]]))\n return row, ArrayType(VectorUDT())\n # scalar\n elif len(pair[1].shape) == 0:\n row = Row(*([pair[0][col] for col in pair[0].__fields__] + [float(pair[1].item(0))]))\n return row, FloatType()\n # np ndarray\n else:\n dim = len(pair[1].shape)\n if dim == 1:\n # np 1-D array\n row = Row(*([pair[0][col] for col in pair[0].__fields__] +\n [Vectors.dense(pair[1])]))\n return row, VectorUDT()\n else:\n # multi-dimensional array\n structType = FloatType()\n for _ in range(dim):\n structType = ArrayType(structType)\n row = Row(*([pair[0][col] for col in pair[0].__fields__] + [pair[1].tolist()]))\n return row, structType\n\n combined_rdd = df.rdd.zip(prediction_rdd).map(combine)\n type = combined_rdd.map(lambda data: data[1]).first()\n result_rdd = combined_rdd.map(lambda data: data[0])\n schema = StructType(df.schema.fields + [StructField('prediction', type)])\n result_df = result_rdd.toDF(schema)\n return result_df\n\n\ndef arrays2dict(iter, feature_cols, label_cols, shard_size=None):\n def init_result_lists():\n feature_lists = [[] for col in feature_cols]\n if label_cols is not None:\n label_lists = [[] for col in label_cols]\n else:\n label_lists = None\n return feature_lists, label_lists\n\n def add_row(data, results):\n if not isinstance(data, list):\n arrays = [data]\n else:\n arrays = data\n\n for i, arr in enumerate(arrays):\n results[i].append(arr)\n\n def merge_rows(results):\n result_arrs = [np.stack(l) for l in results]\n if len(result_arrs) == 1:\n result_arrs = result_arrs[0]\n else:\n result_arrs = tuple(result_arrs)\n return result_arrs\n\n def generate_output(feature_lists, label_lists):\n feature_arrs = merge_rows(feature_lists)\n if label_cols is not None:\n label_arrs = merge_rows(label_lists)\n return {\"x\": feature_arrs, \"y\": label_arrs}\n else:\n return {\"x\": feature_arrs}\n\n feature_lists, label_lists = init_result_lists()\n counter = 0\n\n for row in iter:\n counter += 1\n add_row(row[0], feature_lists)\n if label_cols is not None:\n add_row(row[1], label_lists)\n\n if shard_size and counter % shard_size == 0:\n yield generate_output(feature_lists, label_lists)\n feature_lists, label_lists = init_result_lists()\n\n if feature_lists[0]:\n yield generate_output(feature_lists, label_lists)\n\n\ndef transform_to_shard_dict(data, feature_cols, label_cols=None):\n def to_shard_dict(df):\n result = dict()\n result[\"x\"] = [df[feature_col].to_numpy() for feature_col in feature_cols]\n if label_cols:\n result[\"y\"] = df[label_cols[0]].to_numpy()\n return result\n data = data.transform_shard(to_shard_dict)\n return data\n\n\ndef process_xshards_of_pandas_dataframe(data, feature_cols, label_cols=None, validation_data=None,\n mode=None):\n data = transform_to_shard_dict(data, feature_cols, label_cols)\n if mode == \"fit\":\n if validation_data:\n assert validation_data._get_class_name() == 'pandas.core.frame.DataFrame',\\\n \"train data and validation data should be both XShards of Pandas DataFrame\"\n validation_data = transform_to_shard_dict(validation_data, feature_cols, label_cols)\n return data, validation_data\n else:\n return data\n\n\ndef _dataframe_to_xshards(data, feature_cols, label_cols=None, accept_str_col=False):\n from zoo.orca import OrcaContext\n schema = data.schema\n shard_size = 
OrcaContext._shard_size\n    numpy_rdd = data.rdd.map(lambda row: convert_row_to_numpy(row,\n                                                   schema,\n                                                   feature_cols,\n                                                   label_cols,\n                                                   accept_str_col))\n    shard_rdd = numpy_rdd.mapPartitions(lambda x: arrays2dict(x,\n                                                   feature_cols,\n                                                   label_cols,\n                                                   shard_size))\n    return SparkXShards(shard_rdd)\n\n\ndef dataframe_to_xshards(data, validation_data, feature_cols, label_cols, mode=\"fit\",\n                         num_workers=None, accept_str_col=False):\n    from pyspark.sql import DataFrame\n    valid_mode = {\"fit\", \"evaluate\", \"predict\"}\n    assert mode in valid_mode, f\"invalid mode {mode} \" \\\n                               f\"mode should be one of {valid_mode}\"\n    assert validation_data is None or isinstance(validation_data, DataFrame), \\\n        \"validation data must be a spark DataFrame when data is a DataFrame\"\n    assert feature_cols is not None, \\\n        \"feature_col must be provided if data is a spark dataframe\"\n\n    if mode != \"predict\":\n        assert label_cols is not None, \\\n            \"label_cols must be provided if data is a spark dataframe\"\n        # avoid empty partition for worker\n        if data.rdd.getNumPartitions() < num_workers:\n            data = data.repartition(num_workers)\n        if validation_data is not None:\n            num_data_part = data.rdd.getNumPartitions()\n            validation_data = validation_data.repartition(num_data_part)\n\n    data = _dataframe_to_xshards(data, feature_cols, label_cols, accept_str_col)\n    if validation_data is not None:\n        validation_data = _dataframe_to_xshards(validation_data, feature_cols, label_cols,\n                                                accept_str_col)\n\n    return data, validation_data\n\n\ndef maybe_dataframe_to_xshards(data, validation_data, feature_cols, label_cols, mode=\"fit\",\n                               num_workers=None, accept_str_col=False):\n    from pyspark.sql import DataFrame\n    if isinstance(data, DataFrame):\n        data, validation_data = dataframe_to_xshards(data, validation_data,\n                                                     feature_cols=feature_cols,\n                                                     label_cols=label_cols,\n                                                     mode=mode,\n                                                     num_workers=num_workers,\n                                                     accept_str_col=accept_str_col)\n    return data, validation_data\n\n\ndef bigdl_metric_results_to_dict(results):\n    return dict([(r.method, r.result) for r in results])\n","repo_name":"intel-analytics/analytics-zoo","sub_path":"pyzoo/zoo/orca/learn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11770,"program_lang":"python","lang":"en","doc_type":"code","stars":2592,"dataset":"github-code","pt":"67"} +{"seq_id":"3197857478","text":"def transposta(matriz):\n    nlin = len(matriz)\n    ncol = len(matriz[0])\n\n    # build the transpose: ncol rows by nlin columns\n    ret = [[matriz[j][i] for j in range(nlin)] for i in range(ncol)]\n    return ret\n\nres = transposta([\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n])\nfor line in res:\n    print(line)","repo_name":"rubskaiserman/ufrj","sub_path":"comp_I/aula-9/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19378979511","text":"''' LISTA 02 - EXERCICIO 12\r\n    Nome: Guilherme Augusto Sbizero Correa\r\n    Data: Setembro /2020\r\n    Enunciado: Escreva um programa que calcule o tempo de uma viagem de carro. \r\n    Pergunte a distância a percorrer e a velocidade média esperada para a viagem.\r\n'''\r\n\r\ndistancia = float(input(\"Digite a distancia da viagem em km: \"))\r\nvelocidade = float(input(\"Digite a velocidade media da viagem: \"))\r\ntempo = (distancia/velocidade)\r\nprint(\"O tempo da viagem foi de: \",tempo,\" horas\")","repo_name":"Guilherme-Sbizero/IPE-UNIP-2020","sub_path":"I.P.E. 
Pratico/Aula 03/Exercicio12.py","file_name":"Exercicio12.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35382053269","text":"from datetime import datetime\nimport json\nimport mock\nimport unittest\n\nimport requests\n\nimport urbanairship as ua\nfrom tests import TEST_KEY, TEST_SECRET, TEST_TOKEN\n\n\nclass TestSMS(unittest.TestCase):\n def test_sms_channel_reg(self):\n sender = \"12345\"\n msisdn = \"15035556789\"\n channel_id = None\n\n with mock.patch.object(ua.Airship, \"_request\") as mock_request:\n response = requests.Response()\n response._content = json.dumps({\"ok\": True, \"status\": \"pending\"}).encode(\n \"utf-8\"\n )\n response.status_code = 202\n mock_request.return_value = response\n\n airship = ua.Airship(TEST_KEY, TEST_SECRET)\n sms_obj = ua.Sms(airship, sender=sender, msisdn=msisdn)\n\n sms_obj.register()\n\n self.assertEqual(channel_id, sms_obj.channel_id)\n\n def test_sms_channel_reg_with_optin(self):\n sender = \"12345\"\n msisdn = \"15035556789\"\n channel_id = \"59968b83-4e21-4e4a-85ce-25bb59a93993\"\n opt_in_date = \"2018-02-13T11:58:59\"\n\n with mock.patch.object(ua.Airship, \"_request\") as mock_request:\n response = requests.Response()\n response._content = json.dumps(\n {\"ok\": True, \"channel_id\": channel_id}\n ).encode(\"utf-8\")\n response.status_code = 201\n mock_request.return_value = response\n\n airship = ua.Airship(TEST_KEY, TEST_SECRET)\n sms_obj = ua.Sms(airship, sender=sender, msisdn=msisdn)\n\n sms_obj.register(opted_in=opt_in_date)\n\n self.assertEqual(channel_id, sms_obj.channel_id)\n\n def test_sms_opt_out(self):\n sender = \"12345\"\n msisdn = \"15035556789\"\n\n with mock.patch.object(ua.Airship, \"_request\") as mock_request:\n response = requests.Response()\n response._content = json.dumps({\"ok\": True}).encode(\"utf-8\")\n response.status_code = 202\n mock_request.return_value = response\n\n airship = ua.Airship(TEST_KEY, TEST_SECRET)\n sms_obj = ua.Sms(airship, sender=sender, msisdn=msisdn)\n\n r = sms_obj.opt_out()\n\n self.assertTrue(r.ok)\n\n def test_sms_uninstall(self):\n sender = \"12345\"\n msisdn = \"15035556789\"\n\n with mock.patch.object(ua.Airship, \"_request\") as mock_request:\n response = requests.Response()\n response._content = json.dumps({\"ok\": True}).encode(\"utf-8\")\n response.status_code = 202\n mock_request.return_value = response\n\n airship = ua.Airship(TEST_KEY, TEST_SECRET)\n sms_obj = ua.Sms(airship, sender=sender, msisdn=msisdn)\n\n r = sms_obj.uninstall()\n\n self.assertTrue(r.ok)\n\n def test_sms_registration_payload_property(self):\n sms = ua.Sms(\n airship=ua.Airship(TEST_KEY, TEST_SECRET),\n sender=\"12345\",\n msisdn=\"15035556789\",\n opted_in=\"2018-02-13T11:58:59\",\n locale_country=\"us\",\n locale_language=\"en\",\n timezone=\"America/Los_Angeles\",\n )\n\n self.assertEqual(\n sms._registration_payload,\n {\n \"sender\": \"12345\",\n \"msisdn\": \"15035556789\",\n \"locale_language\": \"en\",\n \"locale_country\": \"us\",\n \"timezone\": \"America/Los_Angeles\",\n \"opted_in\": \"2018-02-13T11:58:59\",\n },\n )\n\n def test_sms_lookup(self):\n sender = \"12345\"\n msisdn = \"15035556789\"\n\n with mock.patch.object(ua.Airship, \"_request\") as mock_request:\n response = requests.Response()\n response._content = json.dumps(\n {\n \"ok\": True,\n \"channel\": {\n \"channel_id\": \"84e36d69-873b-4ffe-81cd-e74c9f002057\",\n \"device_type\": \"sms\",\n \"installed\": True,\n \"push_address\": 
None,\n \"named_user_id\": None,\n \"alias\": None,\n \"tags\": [],\n \"tag_groups\": {\n \"ua_channel_type\": [\"sms\"],\n \"ua_sender_id\": [\"12345\"],\n \"ua_opt_in\": [\"true\"],\n },\n \"created\": \"2018-04-27T22:06:21\",\n \"opt_in\": True,\n \"last_registration\": \"2018-05-14T19:51:38\",\n },\n }\n ).encode(\"utf-8\")\n response.status_code = 200\n mock_request.return_value = response\n\n airship = ua.Airship(TEST_KEY, TEST_SECRET)\n sms_obj = ua.Sms(airship, sender=sender, msisdn=msisdn)\n\n r = sms_obj.lookup()\n\n self.assertTrue(r.ok)\n\n\nclass TestSmsKeywordInteraction(unittest.TestCase):\n def setUp(self):\n self.airship = ua.Airship(TEST_KEY, TEST_SECRET)\n self.sender_ids = [\"12345\", \"09876\"]\n self.msisdn = \"15035556789\"\n self.keyword = \"from_a_motel_six\"\n self.timestamp = datetime(2014, 10, 8, 12, 0, 0)\n self.interaction = ua.KeywordInteraction(\n airship=self.airship,\n keyword=self.keyword,\n msisdn=self.msisdn,\n sender_ids=self.sender_ids,\n timestamp=self.timestamp,\n )\n\n def test_payload(self):\n self.assertEqual(\n self.interaction.payload,\n {\n \"keyword\": self.keyword,\n \"sender_ids\": self.sender_ids,\n \"timestamp\": \"2014-10-08T12:00:00\",\n },\n )\n\n def test_url(self):\n self.assertEqual(\n self.interaction.url,\n \"https://go.urbanairship.com/api/sms/15035556789/keywords\",\n )\n\n\nclass TestSmsCustomResponse(unittest.TestCase):\n def setUp(self) -> None:\n self.maxDiff = 2000\n airship = ua.Airship(key=TEST_KEY, token=TEST_TOKEN)\n self.mo_id = \"886f53d4-3e0f-46d7-930e-c2792dac6e0a\"\n self.custom_response = ua.SmsCustomResponse(\n airship=airship,\n mobile_originated_id=self.mo_id,\n )\n\n def test_mms_payload(self):\n self.custom_response.mms = ua.mms(\n fallback_text=\"mms alert\",\n url=\"http://www.airship.com\",\n content_type=\"image/gif\",\n content_length=12345,\n shorten_links=True,\n )\n\n self.assertEqual(\n self.custom_response._payload,\n {\n \"mobile_originated_id\": self.mo_id,\n \"mms\": {\n \"fallback_text\": \"mms alert\",\n \"slides\": [\n {\n \"media\": {\n \"content_type\": \"image/gif\",\n \"url\": \"http://www.airship.com\",\n \"content_length\": 12345,\n }\n }\n ],\n \"shorten_links\": True,\n },\n },\n )\n\n def test_sms_payload(self):\n self.custom_response.sms = ua.sms(alert=\"sms alert\", shorten_links=False)\n\n self.assertEqual(\n self.custom_response._payload,\n {\n \"sms\": {\"alert\": \"sms alert\", \"shorten_links\": False},\n \"mobile_originated_id\": self.mo_id,\n },\n )\n\n def test_neither_payload_raises(self):\n with self.assertRaises(ValueError, msg=\"One of mms or sms must be set.\"):\n self.custom_response._payload\n\n def test_both_payloads_raises(self):\n self.custom_response.sms = ua.sms(alert=\"test_alert\")\n self.custom_response.mms = ua.mms(\n content_length=12345,\n content_type=\"image/png\",\n fallback_text=\"test mms\",\n url=\"url\",\n )\n\n with self.assertRaises(ValueError, msg=\"Cannot use both mms and sms.\"):\n self.custom_response._payload\n","repo_name":"urbanairship/python-library","sub_path":"tests/devices/test_sms.py","file_name":"test_sms.py","file_ext":"py","file_size_in_byte":8120,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"44640874495","text":"def uvod():\n\n print('\\n Zbog vas pozicije, mozete da obavljate sledece funkcije:\\n ')\n print(' 1. Dodavanje novih hotela')\n print(' 2. Dodavanje recepcionera u hotele')\n print(' 3. Azuriranje hotela')\n print(' 4. Pretraga recepcionera')\n print(' 5. 
Brisanje hotela') \n print(' 6. Brisanje recepcionera')\n print(' 7. Izadjite iz aplikacije')\n print(' 8. Odjavite se')\n\ndef administrator():\n\n uvod()\n while True:\n x = input('\\n Unesite broj od 1 do 8: ')\n if x == '1': #DODAVANJEhotela\n print('\\n *** Izabrali ste da zelite da dodate novi hotel! ***\\n')\n import dodavanjeHotela\n dodavanjeHotela.dodavanjeHotela()\n break\n elif x == '2': #DODAVANJErecepcionera\n print('\\n *** Izabrali ste da zelite da dodate novog recepcionera u hotel! ***\\n')\n import dodavanjeRecepcionera\n dodavanjeRecepcionera.dodavanje()\n break \n elif x == '3': #AZURIRANJEhotela\n print('\\n *** Izabrali ste da zelite da azurirate hotel! ***\\n')\n import azuriranjeHotela\n azuriranjeHotela.azuriranje()\n break\n elif x == '4': #PRETRAGArecepcionera\n print('\\n *** Izabrali ste da hocete da pretrazite recepcionera! ***\\n')\n import pretragaRecepcionera \n pretragaRecepcionera.pretragaRec()\n break\n elif x == '5': #BRISANJEhotela\n print('\\n *** Izabrali ste da zelite da izbrisete hotel! ***\\n')\n import brisanjeHotela \n brisanjeHotela.brisanjeJednogHotela()\n break\n elif x == '6': #BRISANJErecepcionera\n print('\\n *** Izabrali ste da zelite da izbrisete recepcionera! ***\\n')\n import brisanjeRecepcionera \n brisanjeRecepcionera.brisanjeRec()\n break\n elif x == '7':\n print('\\n*** Uspesno ste izasli iz aplikacije! ***\\n')\n exit()\n break\n elif x == '8':\n print('\\n*** Uspesno ste se odjavili sa aplikacije! ***\\n')\n import main\n main.main()\n break\n else:\n print(' Uneli ste pogresan broj, molim vas ponovo unesite broj. ')\n\n","repo_name":"ppetar3333/Pyhon-Console-App","sub_path":"administrator.py","file_name":"administrator.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71051681493","text":"import us\nfrom datetime import date, datetime\n\n# allowed, required, or forbidden values and formats\nUS_STATES = [state.abbr for state in us.states.STATES]\nSTATES_LESS_NY = [str(state.abbr) for state in us.states.STATES if state.abbr != 'NY']\nDATE_FORMAT = '%b %d, %Y'\ntoday = date.today()\nZIPCODE_LENGTHS = [5, 9]\nTODAY_LESS_21_YEARS = date(year=today.year - 21, month = today.month, day=today.day)\nFORBIDDEN_STATES = ['NJ', 'CT', 'PA', 'MA', 'OR', 'ID', 'IL']\nMAX_ZIPCODE_SUM = 20\n\nVALIDATION_CONSTANTS = ['US_STATES',\n\t\t\t\t\t\t'STATES_LESS_NY', \n\t\t\t\t\t\t'DATE_FORMAT', \n\t\t\t\t\t\t'ZIPCODE_LENGTHS', \n\t\t\t\t\t\t'TODAY_LESS_21_YEARS', \n\t\t\t\t\t\t'FORBIDDEN_STATES', \n\t\t\t\t\t\t'MAX_ZIPCODE_SUM']\n\n# validation error messages\nLENGTH_CHOICE_ERROR = 'length must be valid, choose from {}'\nLENGTH_RANGE_ERROR = 'length must be valide, between {} and {}'\nSUM_MAX_ERROR = 'sum of values must be no more than {}'\nFORBIDDEN_VALUE_ERROR = 'the value is not allowed, do not choose from {}'\nAT_MOST_DATE_RANGE_ERROR = 'value must be at most {}'\nLOWER_THAN_DATE_RANGE_ERROR = 'value must be lower than {}'\nHIGHER_THAN_DATE_RANGE_ERROR = 'value must be higher than {}'\nAT_LEAST_DATE_RANGE_ERROR = 'value must be at least {}'\nCOERCE_DATE_ERROR = 'value does not represent a valid date, the date format must follow {}'.format(DATE_FORMAT)\nCOERCE_INT_ERROR = 'value does not represent an integer'\nINVALID_EMAIL_ERROR = 'email address is not valid'\nNY_EMAIL_ERROR = 'NY email address can not belong to a .net domain'\nMISSING_ENDING_ERROR = 'the value must end with {}'\nUNALLOWED_ENDING_ERROR = 'the value can not end with 
{}'\nLENGTH_CHOICE_ERROR_ZIPCODE = LENGTH_CHOICE_ERROR.format(ZIPCODE_LENGTHS)\n\nVALIDATION_ERRORS = ['LENGTH_CHOICE_ERROR',\n\t\t\t\t\t'LENGTH_RANGE_ERROR',\n\t\t\t\t\t'SUM_MAX_ERROR',\n\t\t\t\t\t'FORBIDDEN_VALUE_ERROR',\n\t\t\t\t\t'AT_MOST_DATE_RANGE_ERROR',\n\t\t\t\t\t'LOWER_THAN_DATE_RANGE_ERROR',\n\t\t\t\t\t'HIGHER_THAN_DATE_RANGE_ERROR',\n\t\t\t\t\t'AT_LEAST_DATE_RANGE_ERROR',\n\t\t\t\t\t'COERCE_DATE_ERROR',\n\t\t\t\t\t'COERCE_INT_ERROR',\n\t\t\t\t\t'INVALID_EMAIL_ERROR',\n\t\t\t\t\t'NY_EMAIL_ERROR',\n\t\t\t\t\t'MISSING_ENDING_ERROR',\n\t\t\t\t\t'UNALLOWED_ENDING_ERROR',\n\t\t\t\t\t'LENGTH_CHOICE_ERROR_ZIPCODE']\n","repo_name":"malkaR/sample-flask-site","sub_path":"api/config/validation_constants.py","file_name":"validation_constants.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40025369869","text":"import urllib\nfrom zope.component import getMultiAdapter\nfrom zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile\nfrom plone.app.content.browser.foldercontents \\\n import FolderContentsView as BaseFolderContentsView\nfrom plone.app.content.browser.foldercontents \\\n import FolderContentsTable as BaseFolderContentsTable\nfrom plone.app.content.browser.tableview import Table as BaseTable\nfrom plone.memoize import instance\nfrom Products.ATContentTypes.interface import IATTopic\nfrom Products.CMFCore.utils import getToolByName\n\n\nclass Table(BaseTable):\n \"\"\"\n \"\"\"\n render = ViewPageTemplateFile(\"table.pt\")\n\n\nclass FolderContentsView(BaseFolderContentsView):\n \"\"\"\n \"\"\"\n\n def contents_table(self):\n table = FolderContentsTable(self.context, self.request)\n return table.render()\n\n\nclass FolderContentsTable(BaseFolderContentsTable):\n \"\"\"\n The foldercontents table renders the table and its actions.\n \"\"\"\n\n def __init__(self, context, request, contentFilter={}):\n self.context = context\n self.request = request\n self.contentFilter = contentFilter\n url = self.context.absolute_url()\n view_url = url + '/@@folder_contents'\n self.table = Table(request, url, view_url, self.items,\n show_sort_column=self.show_sort_column,\n buttons=self.buttons)\n\n @property\n @instance.memoize\n def items(self):\n \"\"\"\n We override state_class and state_title\n when returning values for multi-state content.\n \"\"\"\n context = self.context\n request = self.request\n self.context_state = getMultiAdapter((context, request),\n name='plone_context_state')\n self.tools = getMultiAdapter((context, request), name='plone_tools')\n self.workflows = self.tools.workflow().getWorkflowsFor(self.context)\n plone_utils = getToolByName(self.context, 'plone_utils')\n plone_view = getMultiAdapter(\n (self.context, self.request), name=u'plone')\n portal_properties = getToolByName(self.context, 'portal_properties')\n site_properties = portal_properties.site_properties\n use_view_action = site_properties.getProperty(\n 'typesUseViewActionInListings', ())\n browser_default = self.context.browserDefault()\n\n if IATTopic.providedBy(self.context):\n contentsMethod = self.context.queryCatalog\n else:\n contentsMethod = self.context.getFolderContents\n\n results = []\n for i, obj in enumerate(contentsMethod(self.contentFilter)):\n if (i + 1) % 2 == 0:\n table_row_class = \"draggable even\"\n else:\n table_row_class = \"draggable odd\"\n\n url = obj.getURL()\n path = obj.getPath or \"/\".join(obj.getPhysicalPath())\n icon = plone_view.getIcon(obj)\n\n type_class = 
'contenttype-' + plone_utils.normalizeString(\n obj.portal_type)\n\n review_state = obj.review_state\n state_class = 'state-' + plone_utils.normalizeString(review_state)\n\n relative_url = obj.getURL(relative=True)\n obj_type = obj.portal_type\n\n modified = plone_view.toLocalizedTime(\n obj.ModificationDate, long_format=1)\n\n if obj_type in use_view_action:\n view_url = url + '/view'\n elif obj.is_folderish:\n view_url = url + \"/folder_contents\"\n else:\n view_url = url\n\n is_browser_default = len(browser_default[1]) == 1 and (\n obj.id == browser_default[1][0])\n\n # XXX: This does not get the proper workflow chain when we\n # are dealing with an item that is under placeful\n # workflow control.\n state_list = []\n wf_chain = self.tools.workflow().getChainForPortalType(obj_type)\n for w in wf_chain:\n wf_obj = self.tools.workflow()[w]\n state_var = wf_obj.state_var\n state_id = getattr(obj, state_var, None)\n wf_states = wf_obj.states\n # XXX: This is a bit of a hack, if there is a placeful\n # worklflow in use then this might still be wrong.\n # Since the state id could be the same in different\n # workflows. The title it ends up getting might\n # not be the correct one.\n if state_id is None or state_id not in wf_states.objectIds():\n continue\n stitle = wf_states[state_id].title\n state_list.append('%s'\n % (w, state_id, stitle))\n # XXX: If the state didn't exist in the workflow chain,\n # fall back to the `review_state` on the brain. This\n # Will happen when a placeful workflow is in place\n # and the workflow chain cannot be determined.\n if not state_list and isinstance(review_state, basestring):\n state_list = [review_state]\n state_string = ', '.join(state_list)\n\n results.append(dict(\n url=url,\n id =obj.getId,\n quoted_id=urllib.quote_plus(obj.getId),\n path=path,\n title_or_id=obj.pretty_title_or_id(),\n description=obj.Description,\n obj_type=obj_type,\n size=obj.getObjSize,\n modified=modified,\n icon=icon.html_tag(),\n type_class=type_class,\n wf_state=review_state,\n state_title=state_string,\n state_class=state_class,\n is_browser_default=is_browser_default,\n folderish=obj.is_folderish,\n relative_url=relative_url,\n view_url=view_url,\n table_row_class=table_row_class,\n is_expired=self.context.isExpired(obj),\n ))\n return results\n","repo_name":"sixfeetup/sixfeetup.workflow.chained","sub_path":"sixfeetup/workflow/chained/browser/foldercontents.py","file_name":"foldercontents.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"4000924701","text":"from PIL import Image\n\n# 指定输入图像文件夹和输出图像文件夹的路径\ninput_folder = \"D:/B/data/DomainAdaptation2D/data/3DRA/val/image\"\noutput_folder = \"D:/B/data/DomainAdaptation2D/data/3DRA/val/image\"\n\n# 为新的大小创建一个矩形框\nnew_width = 320\nnew_height = 384\n\n# 遍历输入文件夹中的图像文件\nimport os\nfor filename in os.listdir(input_folder):\n if filename.endswith(\".jpg\") or filename.endswith(\".png\"):\n # 打开图像文件\n img = Image.open(os.path.join(input_folder, filename))\n\n # 获取图像的宽度和高度\n width, height = img.size\n\n # 计算剪裁框的左上角坐标\n left = (width - new_width) / 2\n top = (height - new_height) / 2\n right = (width + new_width) / 2\n bottom = (height + new_height) / 2\n\n # 执行中心剪裁\n img_cropped = img.crop((left, top, right, bottom))\n\n # 保存剪裁后的图像到输出文件夹\n img_cropped.save(os.path.join(output_folder, 
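filename))\n\n        # the crop box computed above, with illustrative numbers: a 512x512 input\n        # gives left = (512 - 320) / 2 = 96, top = (512 - 384) / 2 = 64,\n        # right = 416 and bottom = 448, i.e. a centered 320x384 box\n        img_cropped.save(os.path.join(output_folder, 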
filename))\n\nprint(\"中心剪裁完成\")\n","repo_name":"Fengming-Lin/ggbond","sub_path":"code/tools/crop/crop_2D_SMILE.py","file_name":"crop_2D_SMILE.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33047205755","text":"# This file is part of GroomRL by S. Carrazza and F. A. Dreyer\nimport cffi\nffibuilder = cffi.FFI()\n\nwith open('groomrl/cgroomrl.h') as f:\n ffibuilder.embedding_api(f.read())\n\nffibuilder.set_source('cgroomrl', r'''\n #include \"groomrl/cgroomrl.h\"\n''', source_extension='.cc')\n\nwith open('wrapper.py') as f:\n ffibuilder.embedding_init_code(f.read())\n\nffibuilder.emit_c_code('cgroomrl.cc')\n","repo_name":"JetsGame/libGroomRL","sub_path":"src/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"73413549653","text":"\"\"\"\n date: 2021/4/20 4:42 下午\n written by: neonleexiang\n https://github.com/NeonLeexiang/EDSR/blob/master/EDSR_model/edsr_model_pytorch.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom common import ResBlock,Upsampler,MeanShift\n\nclass model(nn.Module):\n \"\"\"\n 作者认为增加网络的宽度更加高效。\n n_resblocks:表示残差块的数量\n n_feats=256\n res_scale:残差缩放,作者在发现网络太宽模型出现了数值不稳定的现象,所以在每一个残差块的最后加一个残差缩放层来稳定训练\n \"\"\"\n def __init__(self,scale=2, n_channels=3, n_resblocks=32, n_feats=256, res_scale=0.1, rgb_range=1):\n super(model, self).__init__()\n\n self.n_channels = n_channels\n self.n_resblocks = n_resblocks\n self.n_feats = n_feats\n self.scale = scale\n self.res_scale = res_scale\n self.rgb_range = rgb_range\n\n self.kernel_size = (3, 3)\n self.padding = (1, 1)\n self.act = nn.ReLU(True)\n\n self.sub_mean = MeanShift(self.rgb_range)\n self.add_mean = MeanShift(self.rgb_range, sign=1)\n\n net_head = [nn.Conv2d(self.n_channels, self.n_feats, kernel_size=self.kernel_size, padding=self.padding)]\n net_body = [\n ResBlock(\n n_feats=self.n_feats, kernel_size=self.kernel_size, padding=self.padding,\n act=self.act, res_scale=self.res_scale\n ) for _ in range(self.n_resblocks)\n ]\n net_body.append(nn.Conv2d(in_channels=self.n_feats, out_channels=self.n_feats,kernel_size=self.kernel_size, padding=self.padding))\n net_tail = [\n Upsampler(self.scale, self.n_feats, act=False),\n nn.Conv2d(in_channels=self.n_feats, out_channels=self.n_channels,\n kernel_size=self.kernel_size, padding=self.padding)\n ]\n\n self.net_head = nn.Sequential(*net_head)\n self.net_body = nn.Sequential(*net_body)\n self.net_tail = nn.Sequential(*net_tail)\n\n def forward(self, x):\n x = self.sub_mean(x)\n x = self.net_head(x)\n\n res = self.net_body(x)\n res = torch.add(x, res)\n\n x = self.net_tail(res)\n x = self.add_mean(x)\n\n return x\nif __name__=='__main__':\n from torchinfo import summary\n model=model(scale=2,n_feats=64,n_resblocks=3)\n x=torch.rand(1,3,32,32)\n summary(model,x.shape)","repo_name":"laity-sir/sr","sub_path":"models/edsr.py","file_name":"edsr.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35798580872","text":"import jieba\r\nimport os\r\nimport re\r\nimport numpy as np\r\n\r\n\r\ndef getCorpus(text):\r\n r1 = u'[a-zA-Z0-9’\"#$%&\\'()()*+-./::;<=>@,★、…【】《》“”‘’[\\\\]^_`{|}~]+'\r\n text = re.sub(r1, \"\", text)\r\n p1 =[\"\\t\", \"\\n\", \"\\u3000\", \"\\u0020\", \"\\u00A0\", \" \"]\r\n for i in p1:\r\n text = text.replace(i, \"\")\r\n p2 = [\"?\", 
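\"？\", \"!\", \"！\", \"，\"]\r\n    # calWordEntropy below computes, for n > 1, the conditional n-gram entropy\r\n    # H(w_n | w_1..w_{n-1}) = -sum_w c_n(w)/N_n * log2([c_n(w)/N_n] / [c_{n-1}(prefix)/N_{n-1}]),\r\n    # where c_k are the k-gram counts and N_k their totals; for n = 1 it reduces\r\n    # to the plain unigram entropy -sum_w c(w)/N * log2(c(w)/N).\r\n    p2 = [\"?\", 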
\"?\", \"!\", \"!\", \",\"]\r\n for i in p2:\r\n text = text.replace(i, \"。\")\r\n corpus = text.split(\"。\")\r\n return corpus\r\n\r\n\r\ndef getWord(corpus, n=1):\r\n word_dict = {}\r\n for line in corpus:\r\n words = list(jieba.cut(line))\r\n for i in range(len(words) + 1 - n):\r\n key = tuple(words[i:i + n])\r\n word_dict[key] = word_dict.get(key, 0) + 1\r\n return word_dict\r\n\r\n\r\ndef calWordEntropy(corpus, n=1):\r\n if n > 1:\r\n word_dict_n = getWord(corpus, n)\r\n word_dict_n1 = getWord(corpus, n - 1)\r\n all_sum_n = np.sum(list(word_dict_n.values()))\r\n all_sum_n1 = np.sum(list(word_dict_n1.values()))\r\n entropy = -np.sum([k * np.log2(k / word_dict_n1[j[:n - 1]] * all_sum_n1 / all_sum_n) for j, k in word_dict_n.items()]) / all_sum_n\r\n else:\r\n word_dict = getWord(corpus)\r\n all_sum = np.sum(list(word_dict.values()))\r\n entropy = -np.sum([i * np.log2(i / all_sum) for i in word_dict.values()]) / all_sum\r\n return entropy\r\n\r\n\r\nif __name__ == '__main__':\r\n path = \"E:\\桌面文件\\课程学习类\\研究生课程\\研一下\\DL-NLP\\第一次大作业\\jyxstxtqj_downcc.com\"\r\n filedir = os.listdir(path)\r\n for text_file in filedir:\r\n text_position = path + '//' + text_file\r\n print(text_file)\r\n with open(text_position, 'r', encoding='ANSI') as f:\r\n data = f.read()\r\n f.close()\r\n corpus_file = getCorpus(data)\r\n\r\n cf_dict_word = getWord(corpus_file)\r\n word_num = np.sum(list(cf_dict_word.values()))\r\n print(str(word_num))\r\n\r\n for i in range(1, 4):\r\n entropy = calWordEntropy(corpus_file, i)\r\n print(f\"{entropy:.4f}\")\r\n","repo_name":"zyzgo/DL-NLP-Homework1","sub_path":"entropy_word.py","file_name":"entropy_word.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18693992672","text":"from login import email, password\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver import Keys\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nimport unicodedata\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\nclass Hotel:\n def __init__(self, email, password):\n self.options = webdriver.ChromeOptions()\n self.options.add_argument(\"disable-popup-blocking\")\n self.options.add_experimental_option(\"detach\", True)\n self.browser = webdriver.Chrome(options=self.options)\n\n self.username = email\n self.password = password\n\n self.hotel_names = []\n self.prices = []\n self.extra_prices = []\n self.ratio = []\n self.ratio_count = []\n\n\n\n def sign_in_facebook(self):\n #booking.com'a gir\n self.browser.get('https://www.booking.com')\n time.sleep(15)\n\n #sayfa yüklendikten sonra tam ekran yap\n self.browser.maximize_window()\n \n #açılır pop-up kapat\n self.browser.find_element(By.CSS_SELECTOR, \"button.a83ed08757.c21c56c305.f38b6daa18.d691166b09.ab98298258.deab83296e.f4552b6561\").click()\n time.sleep(1)\n\n #giris yap butonuna bas\n #self.browser.find_element(By.XPATH, \"/html/body/div[4]/div/header/nav[1]/div[2]/div/a/span\").click()\n self.browser.find_element(By.XPATH, \"/html/body/div[3]/div/header/nav[1]/div[2]/div/a\").click()\n #self.browser.find_element(By.CSS_SELECTOR, \"span[aria-label='28 Ekim 2023']\").click()\n time.sleep(3)\n \n #facebook seceneği ile giriş yap\n self.browser.find_element(By.XPATH, 
\"/html/body/div[1]/div/div/div/div[2]/div[1]/div/div/div/div/div/div/form/div[4]/div[2]/a[1]\").click()\n time.sleep(3)\n\n #açılan facebook giriş ekranına geçiş yap\n self.browser.switch_to.window(self.browser.window_handles[1])\n\n #kullanıcı adı ve şifre gir\n self.browser.find_element(By.ID, 'email').send_keys(self.username)\n time.sleep(1)\n self.browser.find_element(By.ID, 'pass').send_keys(self.password)\n time.sleep(1)\n\n #giriş butonuna bas - bekle - ana ekrana geri dön\n self.browser.find_element(By.XPATH, \"/html/body/div[1]/div[2]/div[1]/div/div/div[2]/div[1]/form/div/div[3]/button\").click()\n time.sleep(15)\n \n self.browser.switch_to.window(self.browser.window_handles[0])\n \n def search(self, search_key: str, search_type: str = \"default\"):\n #arama metnini ver\n #1- self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[1]/div/div/div[1]/div/div/input\").clear()\n #2- self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[1]/div/div/div[1]/div/div/input\").send_keys((Keys.CONTROL , \"A\"))\n self.browser.find_element(By.CSS_SELECTOR, \".eb46370fe1\").send_keys((Keys.CONTROL , \"A\"))\n time.sleep(3)\n self.browser.find_element(By.CSS_SELECTOR, \".eb46370fe1\").send_keys(Keys.DELETE)\n #self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[1]/div/div/div[1]/div/div/input\").send_keys(Keys.DELETE)\n time.sleep(5)\n self.browser.find_element(By.CSS_SELECTOR, \".eb46370fe1\").send_keys(search_key)\n #1- self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[1]/div/div/div[1]/div/div/input\").send_keys(search_key)\n #2- self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[1]/div/div/div[1]/div/div/input\").send_keys((Keys.CONTROL , \"A\"), \"Alaska\")\n time.sleep(3)\n\n #boş alana tıkla\n self.browser.find_element(By.XPATH, \"//body\").click()\n\n #tarih bas\n #self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[2]/div/div\").click()\n self.browser.find_element(By.CSS_SELECTOR, \".f73e6603bf\").click()\n time.sleep(3)\n\n #tarih sec\n self.browser.find_element(By.CSS_SELECTOR, \"span[aria-label='28 Ekim 2023']\").click()\n time.sleep(3)\n \n #arama butonuna tıkla\n #self.browser.find_element(By.XPATH, \"/html/body/div[5]/div[2]/div/form/div[1]/div[4]/button\").click()\n self.browser.find_element(By.CSS_SELECTOR, \"button.a83ed08757.c21c56c305.a4c1805887.f671049264.d2529514af.c082d89982.cceeb8986b\").click()\n\n #get_source ve parse_source fonksiyonlarını ilk sayfa için çalıştır\n src = self.get_source()\n self.parse_source(src)\n\n if search_type == \"default\":\n default_value = 3\n counter = 0\n while counter < default_value:\n next_button = self.browser.find_element(By.XPATH, \"/html/body/div[7]/div/div[6]/div[1]/div[1]/div[4]/div[2]/div[2]/div/div/div[4]/div[2]/nav/div/div[3]/button\")\n if next_button.is_enabled() == True:\n next_button.click()\n time.sleep(6)\n try:\n src = self.get_source()\n self.parse_source(src)\n except NoSuchElementException:\n print(\"OLMADI---------\")\n else:\n break\n counter += 1\n\n\n elif search_type == \"total\":\n while True:\n next_button = self.browser.find_element(By.XPATH, \"/html/body/div[7]/div/div[6]/div[1]/div[1]/div[4]/div[2]/div[2]/div/div/div[4]/div[2]/nav/div/div[3]/button\")\n if next_button.is_enabled() == True:\n next_button.click()\n time.sleep(6)\n try:\n src = self.get_source()\n self.parse_source(src)\n except NoSuchElementException:\n 
print(\"OLMADI---------\")\n else:\n break\n\n\n\n def get_source(self):\n #beautifulsoup ile kaynak kodunu işleme\n result = self.browser.page_source\n soup = BeautifulSoup(result, 'html.parser')\n page = list(soup.findAll('div', {\"class\": \"c82435a4b8 a178069f51 a6ae3c2b40 a18aeea94d d794b7a0f7 f53e278e95 c6710787a4\"}))\n return page\n\n def parse_source(self, page):\n for i in range(0, len(page)):\n try:\n self.hotel_names.append(page[i].find(\"div\", {\"class\": \"f6431b446c a15b38c233\"}).text)\n except:\n self.hotel_names.append(\"None\")\n\n try:\n self.prices.append(unicodedata.normalize(\"NFKD\", page[i].find(\"span\", {\"class\": \"f6431b446c fbfd7c1165 e84eb96b1f\"}).text))\n except:\n self.prices.append(\"None\")\n\n try:\n self.extra_prices.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"data-testid\": \"taxes-and-charges\"}).text))\n except:\n self.extra_prices.append(\"None\")\n\n try:\n self.ratio.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"class\": \"a3b8729ab1 d86cee9b25\"}).text))\n except:\n self.ratio.append(\"None\")\n\n try:\n self.ratio_count.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"class\": \"abf093bdfe f45d8e4c32 d935416c47\"}).text))\n except:\n self.ratio_count.append(\"None\")\n\n \n #self.hotel_names.append(page[i].find(\"div\", {\"class\": \"f6431b446c a15b38c233\"}).text) \n #self.prices.append(unicodedata.normalize(\"NFKD\", page[i].find(\"span\", {\"class\": \"f6431b446c fbfd7c1165 e84eb96b1f\"}).text))\n #self.extra_prices.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"data-testid\": \"taxes-and-charges\"}).text))\n #self.ratio.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"class\": \"a3b8729ab1 d86cee9b25\"}).text))\n #self.ratio_count.append(unicodedata.normalize(\"NFKD\", page[i].find(\"div\", {\"class\": \"abf093bdfe f45d8e4c32 d935416c47\"}).text))\n\n def export_to_excel(self):\n df = pd.DataFrame({\"Otel ismi\":self.hotel_names, \"Ücret\":self.prices, \"Ek Ücretler\":self.extra_prices, \"Puan\":self.ratio, \"Değerlendirme Sayısı\": self.ratio_count})\n df.to_excel('otel_deneme_roma4.xlsx')\n print(\"BİTTİ\")\notel = Hotel(email, password)\notel.sign_in_facebook()\notel.export_to_excel()\n\n# print(otel.hotel_names)\n# print(otel.extra_prices)\n","repo_name":"eminydn/Webscraping-booking.com-with-GUI-PyQt5","sub_path":"hotel_application.py","file_name":"hotel_application.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22210817905","text":"'''\nManual tests for downloaders. 
Used for debugging.\n'''\n#from finance_quote_python.boerse_frankfurt import FwbDownloader\nfrom finance_quote_python.morningstar import MorningstarDownloader\nfrom pricedb import SecuritySymbol\n\n\n# def test_fwb():\n#     ''' fwb download '''\n#     dl = FwbDownloader()\n#     symbol = SecuritySymbol(\"FWB\", \"VGOV\")\n#     actual = dl.download(symbol, \"EUR\")\n\n#     assert actual is not None\n\ndef test_xfra_morningstar():\n    ''' Test downloading Boerse Frankfurt prices through Morningstar '''\n    dl = MorningstarDownloader()\n    symbol = SecuritySymbol(\"FWB\", \"VGOV\")\n    actual = dl.download(symbol, \"EUR\")\n\n    assert actual is not None\n    assert actual > 0\n\ndef test_fwb_vmid():\n    ''' try to download the price for FTSE 250 ETF '''\n    dl = MorningstarDownloader()\n    symbol = SecuritySymbol(\"FWB\", \"VMID\")\n    actual = dl.download(symbol, \"EUR\")\n\n    assert actual is not None\n    assert actual > 0\n\ndef test_bats():\n    ''' BATS exchange '''\n    dl = MorningstarDownloader()\n    symbol = SecuritySymbol(\"BATS\", \"EMHY\")\n    actual = dl.download(symbol, \"USD\")\n\n    assert actual is not None\n    assert actual > 0\n","repo_name":"alensiljak/finance-quote-python","sub_path":"tests/test_downloader.py","file_name":"test_downloader.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"2353065743","text":"#-*- coding:utf-8 _*- \n\"\"\" \n--------------------------------------------------------------------\n@function: export the TAG_TOPIC field from hbase to HDFS\n@time: 2017-12-05\nauthor:baoquan3 \n@version: \n@modify: \n--------------------------------------------------------------------\n\"\"\"\nimport sys\nimport os\nfrom ctypes import *\nsys.path.append(\"./util\")\nsys.path.append(\"../util\")\nfrom Parser import Parser\nimport re\nimport time\nimport hashlib\ndefaultencoding = 'utf-8'\nif sys.getdefaultencoding() != defaultencoding:\n    reload(sys)\n    sys.setdefaultencoding(defaultencoding)\n\nclass TagTopic(Parser):\n    \"\"\"\n    extract the tagTopic of a weibo post\n    \"\"\"\n    def __init__(self):\n        Parser.__init__(self)\n        # choose the directory depending on whether we run against the local file tree or HDFS\n        self.so = cdll.LoadLibrary(r\"../util/libtopicid.so\") if os.path.exists(r\"../util/libtopicid.so\") else cdll.LoadLibrary(r\"./util/libtopicid.so\")\n\n    def processOneWeiboModify(self, keyList, fieldMap):\n        mid = None\n\n        if \"ID\" in fieldMap and fieldMap[\"ID\"] != \"\":\n            mid = fieldMap[\"ID\"]\n        else:\n            sys.stderr.write(\"reporter:counter:weibo,noMidNum,1\\n\")\n            return None\n\n        line = fieldMap[\"CONTENT\"] if \"CONTENT\" in fieldMap else \"\"\n        filter = fieldMap[\"FILTER\"] if \"FILTER\" in fieldMap else 0\n        filterTag = None\n\n        if filter != \"\" and (int(filter) & 0x4) != 0:\n            filterTag = \"repost\"\n            text = fieldMap[\"TEXT\"] if \"TEXT\" in fieldMap else \"\"\n            if text:\n                line += \"//@\" + text\n        else:\n            filterTag = \"original\"\n            longText = fieldMap[\"LONGTEXT\"] if \"LONGTEXT\" in fieldMap else \"\"\n            if longText:\n                line = longText\n\n        status = self.hasTopic(line)\n        tagTopic = \"null\"\n        if status:\n            tagTopic = self.getTagTopic(line)\n\n        timeStamp = fieldMap[\"TIME\"] if \"TIME\" in fieldMap else \"null\"\n        if timeStamp == \"null\":\n            try:\n                tmp_time = time.localtime((int(mid) >> 22) + 515483463)\n                timeStamp = time.strftime(\"%Y-%m-%d\",tmp_time)\n            except BaseException:\n                sys.stderr.write(\"reporter:counter:weibo,id2time,1\\n\")\n\n        rootMid = fieldMap[\"ROOTMID\"] if \"ROOTMID\" in fieldMap else \"null\"\n        rootUid = fieldMap[\"ROOTUID\"] if \"ROOTUID\" in fieldMap else \"null\"\n        uid = fieldMap[\"UID\"] if \"UID\" in fieldMap else \"null\"\n        userInfo = fieldMap[\"INNER_USER_INFO\"] if \"INNER_USER_INFO\" in fieldMap else \"null\"\n\n        tagTopicArr = tagTopic.split(\"#\")\n        for elem in tagTopicArr:\n            elemArr = elem.split(\"\\t\")\n            for topic in elemArr:\n                outTopic = self.topicTransform(topic)\n                topicMd5 = hashlib.md5(outTopic.encode('utf-8')).hexdigest()\n                outline = \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\\n\".format(mid, filter, filterTag, timeStamp, rootMid, rootUid, topic, topicMd5, uid, userInfo)\n                sys.stdout.write(outline)\n\n\n    def hasTopic(self, line):\n        \"\"\"\n        check whether the line contains a topic\n        :param line:\n        :return:\n        \"\"\"\n        status = True\n        prefix = line.find(\"#\")\n        if prefix == -1:\n            status = False\n        else:\n            suffix = line.find(\"#\", prefix + 1)\n            if suffix == -1:\n                status = False\n        return status\n\n    def getTagTopic(self, line):\n        \"\"\"\n        extract the tag topics\n        :param line:\n        :return:\n        \"\"\"\n        def innerTopic(elem):\n            \"\"\"\n            extract the topics within a single repost segment\n            :return:\n            \"\"\"\n            outLine = \"\"\n            elemArr = re.findall(\"#([^#]+)#\", elem)\n            if elemArr:\n                for topic in elemArr:\n                    if not topic.strip():\n                        topic = topic.replace(\"　\", \" \")\n                    outTopic = self.topicTransform(topic)\n                    outLine += topic + \"\\t\"\n            return outLine.strip()\n\n        tagTopic = \"\"\n        lineArr = line.split(\"//@\")\n        allNoneStatus = True # check whether the tagTopic field is entirely empty\n        for elem in lineArr:\n            topic = innerTopic(elem)\n            tagTopic += str(topic) + \"#\"\n            if allNoneStatus and topic:\n                allNoneStatus = False\n        if tagTopic.endswith(\"#\"):\n            end = len(tagTopic) - 1\n            tagTopic = tagTopic[0: end]\n\n        # if every segment is empty, treat the post as having no topic\n        if allNoneStatus:\n            tagTopic = \"\"\n        return tagTopic\n\n    def topicTransform(self,topic):\n        \"\"\"\n        normalize the topic: full/half width, upper/lower case, traditional/simplified Chinese\n        :param topic:\n        :return:\n        \"\"\"\n        length = len(topic)\n        outTopic =(c_char * (length * 2))()\n        self.so.topic_normalize(topic, outTopic)\n        return outTopic.value\n\nif __name__ == \"__main__\":\n    tt = TagTopic()\n    for line in sys.stdin:\n    # for line in open(\"D://data//weibo.dat\", \"r\"):\n        tt.processOneLine(line.strip())\n    tt.flush()\n\n","repo_name":"HaxByCzy/python","sub_path":"weibo/pycharmspace/temp/formatWeibByMid/WeiboFormat.py","file_name":"WeiboFormat.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"35770499890","text":"# Imports\nimport pygame\nimport random\nimport textwrap\nplaying = True\ntest = False\nread = False\n\n\n# The Game\ndef play_game(is_test, read_instructions):\n\n    # Spaceship class definition\n    class Spaceship(pygame.sprite.Sprite):\n        def __init__(self):\n            pygame.sprite.Sprite.__init__(self)\n            self.image = pygame.Surface([68, 71])\n            self.image.set_colorkey(black)\n            self.image.blit(pygame.image.load(\"galaxy_explorer.png\"), (0, 0)) # Image adapted from Galaga\n\n            self.rect = self.image.get_rect()\n            self.rect.x = 216\n            self.rect.y = 514\n\n        def move_spaceship(self, x_distance, y_distance): # Moves the spaceship. 
If it goes out of bounds, reset the\n if self.rect.x in range(5, 428): # corresponding coordinates.\n self.rect.x += x_distance\n if self.rect.y in range(215, 579):\n self.rect.y += y_distance\n if self.rect.x < 5:\n self.rect.x = 5\n elif self.rect.x > 427:\n self.rect.x = 427\n if self.rect.y < 215:\n self.rect.y = 215\n elif self.rect.y > 578:\n self.rect.y = 578\n\n # Alien class definition\n class Alien(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n\n self.type = random.randint(1, 6)\n self.image = pygame.Surface([66, 51])\n self.image.set_colorkey(black)\n\n # Assigns an image, speed, and point value based off of the type of the instance.\n # Images adapted from Space Invaders.\n if self.type <= 3:\n self.image.blit(pygame.image.load(\"blue_orange_alien.png\"), (0, 0))\n self.Speed = 5\n self.Score = 5\n elif self.type <= 5:\n self.image.blit(pygame.image.load(\"red_green_alien.png\"), (0, 0))\n self.Speed = 10\n self.Score = 15\n else:\n self.image.blit(pygame.image.load(\"yellow_purple_alien.png\"), (0, 0))\n self.Speed = 15\n self.Score = 25\n\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def move_alien(self): # Moves the alien. If it goes out of bounds, 'kill' it.\n self.rect.y += self.Speed\n if self.rect.y >= 634:\n self.kill()\n\n # Boss class definition\n class AlienBoss(pygame.sprite.Sprite):\n def __init__(self, x, health):\n pygame.sprite.Sprite.__init__(self)\n self.Direction = 'right'\n self.Health = health\n self.Score = int(health / 2)\n self.Alive = False\n\n self.image = pygame.Surface([297, 228])\n self.image.set_colorkey(white)\n self.image.blit(pygame.image.load(\"alien_invader.png\"), (0, 0)) # Image adapted from Space Invaders.\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = 5\n\n def move_boss(self): # Moves the boss. Keeps it in bounds.\n if self.Direction == 'right':\n self.rect.x += 2\n elif self.Direction == 'left':\n self.rect.x -= 2\n if self.rect.x >= 198:\n self.rect.x = 198\n self.Direction = 'left'\n elif self.rect.x <= 5:\n self.rect.x = 5\n self.Direction = 'right'\n\n def fire_weapon(self, group): # Fires a projectile back at the player.\n alien_laser = AlienLaser(self.rect.x + 149, 233, random.randint(5, 10))\n group.add(alien_laser)\n\n # Laser projectile definition\n class Laser(pygame.sprite.Sprite):\n def __init__(self, x, y, speed):\n pygame.sprite.Sprite.__init__(self)\n\n self.Speed = speed\n\n self.image = pygame.Surface([5, 10])\n self.image.set_colorkey(black)\n self.image.blit(pygame.image.load('laser_weapon.png'), (0, 0))\n\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def move_laser(self): # Moves the laser. 
If it goes out of bounds, 'kill' it.\n self.rect.y -= self.Speed\n if self.rect.y <= 5:\n self.kill()\n\n # Alien projectile definition\n class AlienLaser(pygame.sprite.Sprite):\n def __init__(self, x, y, speed):\n pygame.sprite.Sprite.__init__(self)\n\n self.Speed = speed\n\n self.image = pygame.Surface([20, 20])\n self.image.set_colorkey(black)\n self.image.blit(pygame.image.load('alien_weapon.png'), (0, 0))\n\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def move_alien_laser(self):\n self.rect.y += self.Speed\n if self.rect.y >= screen_height - 5:\n self.kill()\n\n # Powerup class definition\n class Powerup(pygame.sprite.Sprite):\n def __init__(self, x, y, speed, type_of_powerup):\n pygame.sprite.Sprite.__init__(self)\n\n self.Speed = speed\n self.Type = type_of_powerup\n\n self.image = pygame.Surface([50, 50])\n\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def move_powerup(self): # Moves the powerup.\n self.rect.y += self.Speed\n\n # Health powerup subclass definition\n class HealthPowerup(Powerup):\n def __init__(self, x, y, speed, type_of_powerup):\n Powerup.__init__(self, x, y, speed, type_of_powerup)\n\n self.image.set_colorkey(black)\n self.image.blit(pygame.image.load('health_powerup.jpg'), (0, 0))\n\n pygame.init() # Pygame starts running.\n\n # Draws all the screens, loads a background image\n screen_width = 500\n screen_height = 690\n start_screen = pygame.display.set_mode([screen_width, screen_height])\n main_screen = pygame.display.set_mode([screen_width, screen_height])\n end_screen = pygame.display.set_mode([screen_width, screen_height])\n pygame.display.set_caption(\"Invaders From Planet X\")\n background_image = pygame.image.load('black_background.png').convert()\n background_image_2 = pygame.image.load('space_background.jpg').convert() # CC0 Public Domain\n\n clock = pygame.time.Clock()\n f_p_s = 30\n\n # Define basic colors for future reference\n black = (0, 0, 0)\n blue = (0, 0, 200)\n brown = (123, 66, 15)\n gray = (170, 170, 170)\n orange = (250, 88, 0)\n green = (22, 116, 50)\n purple = (123, 0, 149)\n red = (200, 0, 0)\n white = (255, 255, 255)\n yellow = (245, 200, 0)\n\n random_color_list = [white]\n default_font = pygame.font.Font(None, 36)\n\n # Define additional Functions and Procedures here\n game_start = False\n game_end = True\n play_again = False\n instructions = textwrap.wrap(\"Move your spaceship with WASD or the arrow keys and press space to shoot. Kill aliens \\\nto earn points. You start with three lives and a limited ammo supply. Collect powerups for various boosts. Your ammo \\\nwill regenerate over time. 
Click the screen to begin.\", 40)\n new_high_score = False\n\n # Creates all the groups of sprites so they can interact\n fleet = pygame.sprite.Group()\n spaceship = Spaceship()\n fleet.add(spaceship)\n\n projectiles = pygame.sprite.Group()\n alien_projectiles = pygame.sprite.Group()\n herd = pygame.sprite.Group()\n powerup_storage = pygame.sprite.Group()\n bosses = pygame.sprite.Group()\n boss_alive = False\n\n # Additional variables for future reference\n x_movement = 0\n y_movement = 0\n total_score = 0\n lives = 3\n game_difficulty = ''\n ammunition = 50\n regen_modifier = 0\n\n spawn_rate_values = {\n 'Easy': [5000, 0.25, 25],\n 'Medium': [3750, 0.5, 50],\n 'Hard': [2500, 0.75, 75],\n 'Impossible': [1250, 1, 100],\n '': [None, None]\n }\n max_spawn_rate = 250\n spawn_rate_modifier = 0\n shooting = False\n\n movement_keys = [pygame.K_LEFT, pygame.K_a, pygame.K_RIGHT, pygame.K_d, pygame.K_UP, pygame.K_w, pygame.K_DOWN,\n pygame.K_s]\n\n # Functions to make life easier.\n def create_a_font(size): # Makes a font.\n temporary_font = pygame.font.Font(None, size)\n return temporary_font\n\n def draw_text(message, screen, x_coord, y_coord, color, font, bckg_color): # Used to 'draw' text on a screen.\n message_display = font.render(message, 1, color, bckg_color)\n screen.blit(message_display, [x_coord, y_coord])\n\n def random_color(): # Random color generator.\n red_value = random.randint(0, 255)\n green_value = random.randint(0, 255)\n while red_value == green_value:\n green_value = random.randint(0, 255)\n blue_value = random.randint(0, 255)\n while green_value == blue_value:\n blue_value = random.randint(0, 255)\n return red_value, blue_value, green_value\n\n # Interprets the player's inputs or keys pressed.\n def check_input(u_i, lives_left, m_keys, shoot, x_dist, y_dist, g_e, i_t): # Creates a list of values, runs them\n if u_i.type == pygame.QUIT: # through two sub-algorithms to check\n lives_left = 0 # the inputs, and returns the updated\n g_e = False # list.\n i_t = True\n value_list = [lives_left, x_dist, y_dist, shoot, g_e, i_t]\n key_press(u_i, value_list, m_keys)\n key_release(u_i, value_list, m_keys)\n return value_list\n\n def key_press(u_i, values, m_keys): # Changes the coordinates of the spaceship if a specific key is pressed.\n if u_i.type == pygame.KEYDOWN:\n if u_i.key in [m_keys[0], m_keys[1]]:\n values[1] -= 10\n elif u_i.key in [m_keys[2], m_keys[3]]:\n values[1] += 10\n elif u_i.key in [m_keys[4], m_keys[5]]:\n values[2] -= 10\n elif u_i.key in [m_keys[6], m_keys[7]]:\n values[2] += 10\n\n if u_i.key == pygame.K_SPACE:\n values[3] = True\n\n def key_release(u_i, values, m_keys): # Resets the coordinates of the spaceship when the key is released.\n if u_i.type == pygame.KEYUP:\n if u_i.key in [m_keys[0], m_keys[1]]:\n values[1] += 10\n elif u_i.key in [m_keys[2], m_keys[3]]:\n values[1] -= 10\n elif u_i.key in [m_keys[4], m_keys[5]]:\n values[2] += 10\n elif u_i.key in [m_keys[6], m_keys[7]]:\n values[2] -= 10\n\n elif u_i.key == pygame.K_SPACE:\n values[3] = False\n\n # Formats the high scores\n def high_score():\n high_scores = repr(open('HighScores.txt', 'r').read())[1:]\n\n high_scores_list = []\n for i in range(4):\n high_scores_list.append(high_scores[0:high_scores.index('n') - 1])\n high_scores = high_scores[high_scores.index('n') + 1:]\n\n high_scores_dict = {\n 'Easy': 0,\n 'Medium': 0,\n 'Hard': 0,\n 'Impossible': 0\n }\n\n for each_record in high_scores_list:\n temp_list = each_record.split()\n high_scores_dict[temp_list[0]] = temp_list[2]\n\n return 
high_scores_dict\n\n # --------- Main Program Loop(s) ---------\n while not game_start: # Controls instructions/starting screen.\n timer = pygame.time.get_ticks()\n temp_timer = 5000\n\n for event in pygame.event.get(): # If you click the 'x' in the upper right, actually stop the game.\n if event.type == pygame.QUIT:\n game_start = True\n lives = -1\n game_end = False\n is_test = True\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: # Left click\n mouse_position = list(event.pos)\n if read_instructions: # After reading instructions (or if skipped)\n if mouse_position[0] < 250 and mouse_position[1] in range(218, 410):\n # Makes sure the player actually clicks on the game difficulty they want\n m_p = mouse_position[1]\n if m_p in range(218, 259):\n game_difficulty = \"Easy\"\n regen_modifier = 10000\n elif m_p in range(260, 311):\n game_difficulty = \"Medium\"\n regen_modifier = 15000\n elif m_p in range(312, 363):\n game_difficulty = \"Hard\"\n regen_modifier = 20000\n elif m_p in range(364, 410):\n game_difficulty = \"Impossible\"\n regen_modifier = 25000\n game_start = True\n base_spawn_rate = spawn_rate_values[game_difficulty][0]\n else:\n read_instructions = True\n\n start_screen.blit(background_image, [0, 0])\n if read_instructions: # Instruction skip (if decided to play again)\n draw_text(\"Please select a difficulty:\", start_screen, 30, 82, green, default_font, None)\n draw_text(\"Easy\", start_screen, 30, 218, white, create_a_font(32), None)\n draw_text(\"Medium\", start_screen, 30, 270, yellow, create_a_font(32), None)\n draw_text(\"Hard\", start_screen, 30, 322, orange, create_a_font(32), None)\n draw_text(\"Impossible\", start_screen, 30, 374, red, default_font, None)\n else:\n if timer < temp_timer: # Only update the color every frame for a certain amount of time.\n draw_text(\"Welcome!\", start_screen, 30, 20, random_color(), create_a_font(42), None)\n else:\n draw_text(\"Welcome!\", start_screen, 30, 20, red, create_a_font(42), None)\n temp_counter = 82\n for each_line in instructions:\n draw_text(each_line, start_screen, 30, temp_counter, white, create_a_font(28), None)\n temp_counter += 40\n draw_text(\"Good luck!\", start_screen, 30, 400, purple, default_font, None)\n pygame.display.flip()\n clock.tick(f_p_s)\n\n # Control variables needed for main game.\n game_delay = pygame.time.get_ticks()\n next_laser = game_delay\n next_alien = game_delay + 2000\n next_health = game_delay + 35000\n ammo_regen = game_delay + 45000 - regen_modifier\n next_boss = game_delay + 60000 - regen_modifier\n next_alien_laser = game_delay\n\n while lives > 0: # Controls actual game screen.\n timer = pygame.time.get_ticks()\n\n for event in pygame.event.get(): # Analyzes user input.\n input_result = check_input(event, lives, movement_keys, shooting, x_movement, y_movement, game_end, is_test)\n lives = input_result[0]\n x_movement = input_result[1]\n y_movement = input_result[2]\n shooting = input_result[3]\n game_end = input_result[4]\n is_test = input_result[5]\n\n # Update sprites here - makes changes based on user input\n if timer >= ammo_regen: # Ammunition regeneration.\n ammunition += 50\n ammo_regen += 45000 - regen_modifier\n if ammunition > 150: # Max ammo.\n ammunition = 150\n\n if timer > next_alien: # Alien spawn rate.\n next_alien += random.randint(max_spawn_rate, base_spawn_rate - spawn_rate_modifier)\n x_coordinate = random.randint(5, 429)\n alien = Alien(x_coordinate, 5)\n herd.add(alien)\n\n if timer > next_health: # Health powerup spawn rate.\n next_health += 
45000 - regen_modifier\n x_coordinate = random.randint(5, 445)\n health_powerup = HealthPowerup(x_coordinate, 5, 5, 'health')\n powerup_storage.add(health_powerup)\n\n if shooting: # Controls rate of fire (spaceship).\n if ammunition > 0:\n if timer > next_laser:\n laser = Laser(spaceship.rect.x + 31, spaceship.rect.y, 15)\n projectiles.add(laser)\n ammunition -= 1\n next_laser += 250\n else:\n shooting = False\n else:\n next_laser = timer\n\n if not boss_alive: # Boss spawn rate.\n if timer > next_boss:\n alien_boss = AlienBoss(random.randint(5, 198), 5000 * spawn_rate_values[game_difficulty][1])\n bosses.add(alien_boss)\n boss_alive = True\n next_alien_laser = timer + 2000\n\n for each_boss in bosses: # Controls boss rate of fire.\n if timer > next_alien_laser:\n each_boss.fire_weapon(alien_projectiles)\n next_alien_laser += 2000\n\n # Moves all of the sprites\n for each_alien in herd:\n each_alien.move_alien()\n for each_laser in projectiles:\n each_laser.move_laser()\n for each_powerup in powerup_storage:\n each_powerup.move_powerup()\n for each_boss in bosses:\n each_boss.move_boss()\n for each_alien_laser in alien_projectiles:\n each_alien_laser.move_alien_laser()\n\n # Controls collisions between sprites - lives counter and score counter\n dead_aliens = pygame.sprite.groupcollide(herd, projectiles, False, True)\n for each_alien in dead_aliens:\n total_score += each_alien.Score\n each_alien.kill()\n crashed_ships = pygame.sprite.groupcollide(fleet, herd, False, True)\n for _ in crashed_ships:\n lives -= 1\n crashed_ships = pygame.sprite.groupcollide(fleet, alien_projectiles, False, True)\n for _ in crashed_ships:\n lives -= 1\n powerups_activated = pygame.sprite.groupcollide(powerup_storage, fleet, True, False)\n for each_powerup in powerups_activated:\n if each_powerup.Type == 'health': # Add a life if acquired health power-up.\n lives += 1\n bosses_damaged = pygame.sprite.groupcollide(bosses, projectiles, False, True)\n for each_boss in bosses_damaged:\n each_boss.Health -= spawn_rate_values[game_difficulty][2]\n if each_boss.Health <= 0: # If the player killed the boss, update these variables.\n each_boss.Alive = False\n each_boss.kill()\n total_score += each_boss.Score\n next_boss += 60000\n boss_alive = False\n if lives > 5: # Max lives.\n lives = 5\n\n # Draws everything on the screen so the user can play the game\n main_screen.blit(background_image_2, [0, 0])\n if lives > 0: # Only 'draw' these things if necessary.\n spaceship.move_spaceship(x_movement, y_movement)\n fleet.draw(main_screen)\n projectiles.draw(main_screen)\n herd.draw(main_screen)\n powerup_storage.draw(main_screen)\n bosses.draw(main_screen)\n alien_projectiles.draw(main_screen)\n draw_text(str(int(total_score)), main_screen, 5, 5, gray, default_font, black)\n draw_text(\"Lives: \" + str(lives), main_screen, 5, 659, blue, default_font, black)\n draw_text(\"Ammo: \" + str(ammunition), main_screen, 350, 659, brown, default_font, black)\n pygame.display.flip()\n clock.tick(f_p_s)\n if base_spawn_rate - spawn_rate_modifier > max_spawn_rate: # Increases the spawn rate of aliens each frame.\n spawn_rate_modifier += 1\n\n # Exports the score of the game just played to a high scores file\n if not is_test: # Only export a high score if the game is legitimate.\n old_high_scores = high_score()\n if int(old_high_scores[game_difficulty]) < total_score: # If the current current score is greater, go ahead and\n old_high_scores[game_difficulty] = str(total_score) # update the existing high score.\n new_high_score = True\n 
final_export = ''\n for item in list(old_high_scores.items()):\n temp_string = item[0] + ' - ' + item[1]\n final_export = final_export + temp_string + '\\n'\n file = open('HighScores.txt', 'w')\n file.write(final_export)\n file.close()\n\n color_change = pygame.time.get_ticks()\n\n while game_end: # Controls game over/end screen.\n timer = pygame.time.get_ticks()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_end = False\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n m_p = list(event.pos)\n if m_p[0] in range(150, 300) and m_p[1] in range(500, 550):\n play_again = True\n game_end = False\n\n if timer > color_change:\n random_color_list.append(random_color())\n random_color_list.remove(random_color_list[0])\n color_change += 950\n\n end_screen.blit(background_image, [0, 0])\n draw_text(\"GAME OVER\", end_screen, 100, 100, random_color_list[-1], create_a_font(72), None)\n draw_text(\"Play Again\", end_screen, 155, 500, purple, create_a_font(48), None)\n draw_text(\"Score: \" + str(total_score), end_screen, 25, 375, gray, default_font, None)\n draw_text(\"Difficulty: \" + game_difficulty, end_screen, 25, 425, green, default_font, None)\n if new_high_score:\n draw_text(\"New high score!\", end_screen, 160, 160, yellow, default_font, None)\n pygame.display.flip()\n clock.tick(f_p_s)\n\n pygame.quit() # Pygame stops running.\n\n return play_again\n\n\nwhile playing:\n playing = play_game(test, read)\n read = True\n","repo_name":"cheinks/symmetrical-sniffle","sub_path":"SpaceGame/Space Arcade Game.py","file_name":"Space Arcade Game.py","file_ext":"py","file_size_in_byte":22141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71334017173","text":"'''\r\nhttps://practice.geeksforgeeks.org/problems/rank-the-permutations/0\r\n'''\r\n\r\n# TC : 0(n)\r\n\r\nmod = 1000003\r\nmaxx = 256\r\n\r\ndef fact(n):\r\n return n if (n < 2) else (n * fact(n-1))\r\n \r\ndef modifyCountArr(countArr, start):\r\n for i in range(start, maxx):\r\n countArr[i] -= 1\r\n return countArr\r\n\r\ndef getRank(s):\r\n mul = fact(len(s))\r\n countArr = [0] * maxx\r\n rank = 1\r\n \r\n for char in s:\r\n if countArr[ord(char)]:\r\n return 0\r\n countArr[ord(char)] = 1\r\n \r\n for i in range(1, maxx):\r\n countArr[i] += countArr[i-1]\r\n \r\n for i in range(len(s)):\r\n mul //= (len(s) - i)\r\n rank = (rank + (countArr[ord(s[i]) - 1] * mul) % mod) % mod\r\n countArr = modifyCountArr(countArr, ord(s[i]))\r\n \r\n return rank\r\n\r\ndef main():\r\n t = int(input())\r\n while t:\r\n s = str(input())\r\n print(getRank(s))\r\n t -= 1\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"arpanspeaks/Fun-with-strings","sub_path":"lexicographicRank.py","file_name":"lexicographicRank.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37822547703","text":"#!/usr/bin/env python\n\nimport os\nfrom numpy.lib.function_base import corrcoef\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\nimport scipy.cluster.hierarchy as sch\nimport nibabel as nib \nfrom nilearn.input_data import NiftiLabelsMasker\nimport pickle \nimport matplotlib.pyplot as plt \nimport time\nimport pandas as pd \n\nimport glob \nimport scipy \n\nfrom matplotlib.pylab import plot, boxplot, show, savefig, xlim, figure, \\\n ylim, legend, setp, axes\n\n\n\n\ndef get_plots(k): \n\n\n\n 
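The getRank function in the permutation-rank record above walks the string once, counting for each position how many smaller unused characters remain, weighting by the factorial of the remaining length, and reducing mod 1000003. A plain, unreduced version makes the arithmetic easy to check by hand (it assumes all characters are distinct, just as getRank does by returning 0 on repeats):

```python
from math import factorial

def rank_of(s):
    # rank = 1 + sum over positions of
    #        (# of smaller unused characters to the right) * (remaining length)!
    # Same idea as getRank above, minus the cumulative count array and the
    # mod-1000003 reduction.
    rank = 1
    for i, ch in enumerate(s):
        smaller = sum(1 for other in s[i + 1:] if other < ch)
        rank += smaller * factorial(len(s) - i - 1)
    return rank

assert rank_of("bac") == 3      # sorted permutations: abc, acb, bac, ...
assert rank_of("string") == 598
```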
plot_dir=\"/data/NIMH_scratch/kleinrl/analyses/nullDist_pca10_FEF/plots\"\n base_dir=\"/data/NIMH_scratch/kleinrl/analyses/nullDist_pca10_FEF\"\n LUT=\"/data/kleinrl/Wholebrain2.0/ANAT/ANAT_working_recon-all/HCPMMP1_LUT_ordered_RS.txt\"\n\n lut = pd.read_csv(LUT, delimiter=' ')\n\n print('load lut')\n\n #k = 'L_V4'\n\n k_id = int(lut[lut['Lookup'] == k]['#'].values[0])\n\n print(\"{} {}\".format(k, k_id))\n\n\n # keys = {'L_FEF':10, 'L_LIPv':48, 'L_VIP':9, 'L_V4t':156, 'R_V4':186,\n # 'R_V2':184, 'R_V3':185, 'L_V1':1, 'L_MST':2, 'L_MT':23,\n # 'L_TF':135, 'L_TE1a':132, 'L_TE1p':133, 'L_TE2a':134, 'L_TE2p':136,\n # 'L_FST':157 }\n\n\n\n\n #columns = \"/data/kleinrl/Wholebrain2.0/ANAT/ANAT_working_recon-all/ANAT_mri_make_surf/LAYNII_2/columns/columns_ev_30000_borders.nii\"\n columns = \"/data/kleinrl/Wholebrain2.0/ANAT/ANAT_working_recon-all/ANAT_mri_make_surf/LAYNII_2/columns/columns_ev_30000_borders.nii\"\n parc = \"/data/kleinrl/Wholebrain2.0/ANAT/ANAT_working_recon-all/ANAT_mri_make_surf/multiAtlasTT/hcp-mmp-b/hcp-mmp-b_rmap.scaled2x.nii.gz\"\n layers = \"/data/kleinrl/Wholebrain2.0/ANAT/ANAT_working_recon-all/ANAT_mri_make_surf/LAYNII_2/layers/grow_leaky_loituma/equi_volume_layers_n10.nii\"\n nulls_3d = glob.glob(base_dir+\"/fsl_feat_*NULL*/mean/inv_pe1.fwhm3.nii.gz\") #fwhm7.L2D.columns_ev_30000_borders.downscaled2x_NN.nii.gz\")\n\n # nulls_3d_data = []\n # for null in nulls_3d: \n # img_null = nib.load(null)\n # data_null = img_null.get_fdata()\n # nulls_3d_data.append(data_null)\n\n feat_3d = base_dir+\"/fsl_feat_1010.L_FEF_pca10/mean/inv_pe1.fwhm3.nii.gz\" #L2D.columns_ev_30000_borders.downscaled2x_NN.nii.gz\")\n nulls_npy = \"\"\n feat_npy = \"\"\n img_parc = nib.load(parc)\n img_col = nib.load(columns)\n img_lay = nib.load(layers)\n\n data_parc = img_parc.get_fdata()\n data_columns = img_col.get_fdata()\n data_layers = img_lay.get_fdata()\n\n #for k in keys.keys(): \n\n # k = 'L_FEF'\n #ind_roi = np.where(data_parc == keys[k] )\n ind_roi = np.where(data_parc == k_id )\n\n unq_roi_col = np.unique(data_columns[ind_roi]) \n unq_roi_col = [ uc for uc in unq_roi_col if uc != 0 ]\n\n unq_layers = np.flip(np.unique(data_layers))[:-1]\n\n print(\"unique columns: {}\".format(len(unq_roi_col)))\n\n\n for uc in unq_roi_col: \n\n # uc = unq_roi_col[0]\n #ind_col = np.where(data_columns == uc)\n ind_col = data_columns == uc\n\n uc = int(uc) \n\n\n fig = plt.figure()\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes\n\n\n boxplots = [] \n null_i = 0 \n data_null_mean = np.zeros(shape=data_columns.shape)\n\n\n null_layer_data = [ [] for lc in range(0, int(np.max(unq_layers))+1 ) ]\n # null_layer_data = group_layer_data \n for null in nulls_3d: \n # null = nulls_3d[0]\n img_null = nib.load(null)\n data_null = img_null.get_fdata()\n\n\n for lc in unq_layers: \n ind_lay = data_layers == lc \n ind_both = np.where( ind_lay & ind_col )\n\n null_layer_data[int(lc)].append( data_null[ ind_both ] )\n\n\n # mean_null_layer_data = [ [] for lc in range(0, int(np.max(unq_layers))+1 ) ]\n # for g in range(1, len(null_layer_data)): \n # mean_null_layer_data[g] = np.mean(null_layer_data[g], axis=0)\n # null_layer_data[g] = np.concatenate(null_layer_data[g]).flatten().tolist()\n\n\n\n img_feat = nib.load(feat_3d)\n data_feat = img_feat.get_fdata()\n\n emp_layer_data = [ [] for lc in range(0, int(np.max(unq_layers))+1 ) ]\n\n\n for lc in unq_layers: \n ind_lay = data_layers == lc \n ind_both = np.where( ind_lay & ind_col )\n\n emp_layer_data[int(lc)].append( data_feat[ ind_both ] )\n\n\n for g in range(1, 
len(emp_layer_data)): \n        emp_layer_data[g] = np.concatenate(emp_layer_data[g]).flatten().tolist()\n\n\n        for g in range(1, len(null_layer_data)): \n            if null_layer_data[g] == []: \n                null_layer_data[g] = [0]\n        for g in range(1, len(emp_layer_data)): \n            if emp_layer_data[g] == []: \n                emp_layer_data[g] = [0] \n\n        colors = ['blue','red']\n\n        fig = figure()\n        ax = axes()\n\n        # one boxplot pair (null vs. VASO) per layer, spaced three positions apart\n        for lc in range(1, 11):\n            pos = 3 * (lc - 1) + 1\n            bp = ax.boxplot([null_layer_data[lc], emp_layer_data[lc]], positions = [pos, pos + 1], widths = 0.6)\n            for patch, color in zip(bp['boxes'], colors):\n                patch.set(color=color)\n\n        # set axes limits and labels\n        #xlim(0,10)\n        #ylim(-5,5)\n        ax.set_xticks([0, 29.5])\n        ax.set_xticklabels(['CSF', 'WM'])\n\n        # draw temporary red and blue lines and use them to create a legend\n        hB, = plot([1,1],'b-')\n        hR, = plot([1,1],'r-')\n        legend((hB, hR),('NULL', 'VASO'))\n        hB.set_visible(False)\n        hR.set_visible(False)\n\n        tit = \"{}-{}\".format(k, uc) \n        plt.title(tit)\n\n        #ax.set_xticks([1,10])\n        #ax.set_xticklabels(['CSF','WM'])\n\n        plt.ylabel('beta weights')\n\n        plt.savefig(plot_dir+\"/{}.box3.png\".format(tit))\n        plt.close()\n\n        print(\"plot saved -- {} {}\".format(k, uc))\n\n\n\nif __name__ == \"__main__\":\n\n\n    parser = argparse.ArgumentParser(description='generate layer profile')\n    parser.add_argument('--k', type=str)\n\n    args = parser.parse_args()\n\n    k = args.k \n\n    get_plots(k)\n\n\n'''\nrois=(L_FEF L_LIPv L_VIP L_V4t R_V4 R_V2 R_V3 L_V1 L_MST L_MT\n L_TF L_TE1a L_TE1p L_TE2a L_TE2p L_FST)\n\nfor r in ${rois[@]}; do \n    echo \"python analysis_nullDist_FEF.py --k $r \" >> swarm.plots\ndone 
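Beyond the visual boxplot comparison, a significance test per layer would quantify how far the empirical VASO betas sit from the null distribution. A sketch using a two-sided Wilcoxon rank-sum test (an assumed addition for illustration, not part of the original analysis; `scipy.stats.wilcoxon` itself would require paired samples of equal length, which these are not):

```python
import numpy as np
from scipy import stats

def layer_ranksum(null_samples, emp_samples):
    # Pool the per-run null betas for one layer and compare them against
    # the empirical betas with a Wilcoxon rank-sum test.
    pooled_null = np.concatenate([np.ravel(s) for s in null_samples])
    statistic, p_value = stats.ranksums(pooled_null, np.ravel(emp_samples))
    return statistic, p_value

# e.g. per layer lc: layer_ranksum(null_layer_data[lc], emp_layer_data[lc])
```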
\n\n'''\n","repo_name":"rlk41/laminar_fmri2","sub_path":"analyses/wb2/analysis_nulldist_FEF_boxplot2.py","file_name":"analysis_nulldist_FEF_boxplot2.py","file_ext":"py","file_size_in_byte":8305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28451434986","text":"from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom pytorch_lightning import Callback, Trainer\n\nfrom anomalib.models.components import AnomalyModule\n\n\nclass SaveToCSVCallback(Callback):\n \"\"\"Callback that saves the inference results of a model.\n\n The callback generates a csv file that saves the predicted label, the true label and the image name.\n \"\"\"\n\n def __init__(self):\n \"\"\"Callback to save metrics to CSV.\"\"\"\n\n def on_test_epoch_end(self, _trainer: Trainer, pl_module: AnomalyModule) -> None:\n \"\"\"Save Results at the end of testing.\n\n Args:\n _trainer (Trainer): Pytorch lightning trainer object (unused)\n pl_module (LightningModule): Lightning modules derived from BaseAnomalyLightning object.\n \"\"\"\n results = pl_module.results\n data_frame = pd.DataFrame(\n {\n \"name\": results.filenames,\n \"true_label\": results.true_labels,\n \"pred_label\": results.pred_labels.astype(int),\n \"wrong_prediction\": np.logical_xor(results.true_labels, results.pred_labels).astype(int),\n }\n )\n data_frame.to_csv(Path(pl_module.hparams.project.path) / \"results.csv\")\n","repo_name":"siesen/anomalib","sub_path":"anomalib/utils/callbacks/save_to_csv.py","file_name":"save_to_csv.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"35165379153","text":"#!/usr/bin/python\n\nimport features\nimport operator\nimport pdb\nimport sys\nimport simplejson as json\nimport os\nfrom nltk.corpus import wordnet as nlwn\nimport sqlite3\nfrom make_pmi_db import load_concepts_mem_db\nfrom make_pmi_db import load_words_mem_db\nimport featuremap\nimport math\nfrom collections import defaultdict\n\ndef load_concept_model(concept):\n try:\n model = features.PMINounModel(concept, base_dir)\n return model\n except IOError:\n return None\n\ndef prune_concepts(current_synset, current_model=None):\n hyponym_list = []\n for hyponym in current_synset.hyponyms() + current_synset.instance_hyponyms():\n\n hypo_model = load_concept_model(hyponym.name())\n hyponym_list.append((hyponym, hypo_model))\n\n try:\n os.stat(hypo_model.lemma_to_filename(hyponym.name(), new_dir))\n hypo_model.load_from_pmi_file(new_dir, hyponym.name())\n except OSError:\n prune_concepts(hyponym, hypo_model)\n\n if current_model:\n\n union_max_list = []\n\n for hyponym, hypo_model in hyponym_list:\n if hypo_model:\n union_max_list.append(hypo_model)\n hypo_model.save_to_file(new_dir)\n\n for hypo_model in union_max_list:\n current_model.union_max(hypo_model)\n\ndef main():\n global base_dir\n base_dir = sys.argv[1]\n\n global new_dir\n new_dir = sys.argv[2]\n\n prune_concepts(nlwn.synset('entity.n.01'))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mgabilo/concept-discovery","sub_path":"build_clustered_cv.py","file_name":"build_clustered_cv.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"31924332508","text":"import torch\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice\n\nimport optuna\nfrom optuna.trial import TrialState\n\ndef 
objective(trial):\n from src.models.models import MLP\n\n NUM_FEATURES = 32 * 32 * 3\n NUM_CLASSES = 10\n DIMS = [NUM_FEATURES, 512, NUM_CLASSES]\n\n act_name = trial.suggest_categorical(\"act\", [\"relu\", \"gelu\", \"tanh\"])\n model = MLP(DIMS, act_name).to(device)\n\n from src.common.common import LOSS_NAME_MAP\n from src.models.losses import ClassificationLoss\n\n criterion = ClassificationLoss(LOSS_NAME_MAP['ce']())\n\n\n from torch.utils.data import DataLoader\n from src.data.datasets import get_cifar10\n\n train_dataset, _, test_dataset = get_cifar10('data/')\n\n BATCH_SIZE = 128\n\n train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True, num_workers=4)\n test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, pin_memory=True, num_workers=4)\n\n loaders = {\n 'train': train_loader,\n 'test': test_loader\n }\n\n from src.common.common import OPTIMIZER_NAME_MAP, SCHEDULER_NAME_MAP\n GRAD_ACCUM_STEPS = 1\n EPOCHS = 5\n T_max = (len(train_loader) // GRAD_ACCUM_STEPS) * EPOCHS\n\n lr = trial.suggest_float(\"lr\", 1e-5, 1e-1, log=True)\n momentum = trial.suggest_float(\"momentum\", 1e-1, 1)\n weight_decay = trial.suggest_float(\"weight_decay\", 1e-5, 1e-1, log=True)\n\n optim_params = {'lr': lr, 'weight_decay': weight_decay, 'momentum': momentum}\n # scheduler_params = {'T_max': T_max, 'eta_min': 1e-6}\n\n # optim, lr_scheduler = prepare_optim_and_scheduler(model, 'adamw', 'cosine', optim_params, scheduler_params, whether_exclude=False)\n\n optim = OPTIMIZER_NAME_MAP['sgd'](filter(lambda p: p.requires_grad, model.parameters()), **optim_params)\n lr_scheduler = None #SCHEDULER_NAME_MAP['cosine'](optim, **scheduler_params)\n\n from src.trainer.trainer_classification import TrainerClassification\n\n params_trainer = {\n 'model': model,\n 'criterion': criterion,\n 'loaders': loaders,\n 'optim': optim,\n 'lr_scheduler': lr_scheduler,\n }\n\n trainer = TrainerClassification(**params_trainer)\n\n\n from src.common.utils import AttrDict\n\n EXP_NAME = 'optuna_lr_wd__mlp_sgd_'\n\n config = {\n 'epoch_start_at': 0,\n 'epoch_end_at': EPOCHS,\n 'grad_accum_steps': GRAD_ACCUM_STEPS,\n 'save_multi': T_max // 10,\n 'log_multi': 100,\n 'whether_clip': False,\n 'clip_value': 2.0,\n 'base_path': 'reports',\n 'exp_name': EXP_NAME,\n 'logger_name': 'tensorboard',\n 'logger_config': {'api_token': \"07a2cd842a6d792d578f8e6c0978efeb8dcf7638\", 'project': 'early_exit', 'hyperparameters': {}},\n 'random_seed': 42,\n 'trial': trial,\n 'device': device\n\n }\n config = AttrDict(config)\n\n acc = trainer.run_exp(config)\n return acc\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(direction=\"maximize\")\n study.optimize(objective, n_trials=10, show_progress_bar=True)\n\n pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])\n complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])\n\n print(\"Study statistics: \")\n print(\" Number of finished trials: \", len(study.trials))\n print(\" Number of pruned trials: \", len(pruned_trials))\n print(\" Number of complete trials: \", len(complete_trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, 
value))\n","repo_name":"BartekKrzepkowski/NNHyperparameterSearchPipelines","sub_path":"optuna/objective.py","file_name":"objective.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40849117741","text":"from scipy import misc\n\n\nclass RootsFinder:\n \"\"\"\n Class for finding roots of the given function\n \"\"\"\n\n def __init__(self, f):\n self.f = f\n self.recursions_counter = 0\n\n\n def newton_method(self, x_0):\n self.recursions_counter = 0\n return self._newton_method(x_0)\n\n\n def naive_global_newton(self, start, end, steps_num):\n roots = []\n step = (end - start) / steps_num\n x_0 = start\n\n while x_0 < end:\n new_root = self.newton_method(x_0)\n\n if new_root is not None:\n new_root_is_new = True\n\n for root in roots:\n if abs(root - new_root) < 0.01:\n new_root_is_new = False\n break\n\n if start <= new_root < end and new_root_is_new:\n roots.append(new_root)\n\n x_0 += step\n \n return roots\n\n\n def _newton_method(self, x_i):\n x_j = x_i - self.f(x_i) / misc.derivative(self.f, x_i, dx = 1e-6, n = 1)\n\n if self.recursions_counter > 100:\n return None\n\n if abs(x_i - x_j) < 0.00001 or abs(self.f(x_j)) < 0.00001:\n return x_j\n else:\n self.recursions_counter += 1\n return self._newton_method(x_j)","repo_name":"paldynaagata/Recognition-of-the-Wigner-caustic-of-ovals","sub_path":"images_generator/roots_finder.py","file_name":"roots_finder.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32994094475","text":"class Solution:\n def maxResult(self, nums: List[int], k: int) -> int:\n queue = []\n queue.append(len(nums)-1)\n ans = [0] * len(nums)\n ans[len(nums)-1] = nums[len(nums)-1]\n \n for i in range(len(nums)-2,-1,-1):\n if queue[0]-i > k:\n queue.pop(0)\n ans[i] = nums[i]+ans[queue[0]]\n while queue and ans[queue[-1]] 0:\n recommender.train(data)\n \n def predict(self, data):\n # tally up the scores for each (user, restaurant) pair multiplied by the\n # weights\n scores = defaultdict(lambda: defaultdict(int))\n for name, model in self.recommenders.iteritems():\n # if we are ignoring a recommender's results, there's no reason to\n # run it\n if self.weights[name] == 0:\n continue\n\n for row in model.predict(data).collect():\n partial_score = self.weights[name] * row['score']\n scores[row['userID']][row['restaurantID']] += partial_score\n\n # for each user find the top self.recommendations_per_user restaurants\n recommendations = {}\n for user, reviews in scores.iteritems():\n recommendations[user] = heapq.nlargest(\n self.recommendations_per_user, reviews.iteritems(),\n key=lambda r: r[1])\n\n # put the recommendations into an appropriate format\n top_recommendations = [(user, restaurant, float(score))\n for user, restaurants in recommendations.items()\n for restaurant, score in restaurants]\n return SQLContext(self.spark).createDataFrame(top_recommendations,\n self.config.get_schema())\n\n def learn_hyperparameters(self, data): \n evaluator = Evaluator(self.spark, self, self.config)\n recommenders = self.recommenders.keys()\n best_evaluation = 0\n maximum_weight = self.config.get('System', 'maximum_weight')\n # for each combination of weights that we are considering\n for weights in self.generate_weights(maximum_weight):\n # apply the weights\n for i, recommender in enumerate(recommenders):\n self.weights[recommender] = weights[i]\n # keep track of the best 
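The RootsFinder record above restarts Newton's method from a grid of seeds across [start, end) and keeps only converged roots that are not within 0.01 of one already found. A quick usage sketch on a cubic with known roots (this assumes the record is importable as roots_finder, matching its sub_path; note that scipy.misc.derivative, which the class relies on, has been removed from recent SciPy releases, so an older SciPy is assumed):

```python
from roots_finder import RootsFinder  # assumes the module above is on the path

# f(x) = x(x - 1)(x + 2) has roots at -2, 0 and 1.
finder = RootsFinder(lambda x: x * (x - 1) * (x + 2))
roots = finder.naive_global_newton(start=-5, end=5, steps_num=100)
print(sorted(roots))  # expected to approximate [-2.0, 0.0, 1.0]
```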
weights\n evaluation = evaluator.right_total_evaluation(data)\n if evaluation > best_evaluation:\n best_evaluation = evaluation\n best_weights = weights\n # write the new combination to the config file\n self.config.set_weights(best_weights)\n\n def generate_weights(self, maximum_weight):\n '''Takes a maximum weight and generates all possible combinations of\n weights from the range [0, maximum_weight], removing redundant\n options.'''\n num_recommenders = len(self.recommenders)\n weights = set(product(range(maximum_weight + 1),\n repeat=num_recommenders))\n # remove unnecessary duplication\n # (e.g., remove (2, 2, 2) if we already have (1, 1, 1))\n weights.discard(tuple([0] * num_recommenders))\n for k in range(2, maximum_weight + 1):\n weights -= set([tuple(map(lambda c: k * c, t)) for t in weights])\n return weights\n\nclass ALS(Recommender):\n ''' Alternating least squares model base class. '''\n __metaclass__ = ABCMeta\n \n def train(self, bookings, parameters=None, load=False):\n ''' Load or train a new model. If the model doesn't exist, save it. '''\n recommender_name = type(self).__name__\n if load:\n self.model = self.load_model(recommender_name)\n else:\n if parameters == None:\n self.model = self.create_model(bookings, self.get_parameters(recommender_name))\n else:\n self.model = self.create_model(bookings, parameters)\n model_location = self.get_model_location(recommender_name)\n if os.path.isdir(model_location):\n shutil.rmtree(model_location)\n self.model.save(self.spark, model_location)\n\n @abstractmethod\n def create_model(self, bookings, parameters):\n '''Takes a DataFrame of bookings and a dictionary of parameters\n Returns a trained model.'''\n raise NotImplementedError(\"Each subclass should extend this\")\n\n def load_model(self, recommender_name, parameters=None):\n ''' Takes recommender's name, loads an existing model and returns it.\n If the model does not exist it returns None'''\n model_location = self.get_model_location(recommender_name)\n\n if os.path.isdir(model_location):\n self.model = MatrixFactorizationModel.load(self.spark, model_location) \n return self.model \n else:\n return None\n\n def predict(self, data):\n return SQLContext(self.spark).createDataFrame(\n self.model.predictAll(data), self.config.get_schema())\n\n def get_parameters(self, recommender_name):\n return dict((name, self.config.get(recommender_name, name, t))\n for name, t in [('rank', int),\n ('iterations', int),\n ('lambda', float)])\n\n def get_model_location(self,recommender_name):\n return \"models/{}/\".format(recommender_name) + '-'.join(\n map(str, self.get_parameters(recommender_name).values()))\n \n # float range function\n def frange(self,x, y, jump):\n while x < y:\n yield x\n x += jump\n\n def learn_hyperparameters(self, bookings):\n ''' Learn the best possible hyper parameters by comparing Mean Squared error. 
'''\n evaluator = Evaluator(self.spark,self)\n bookings = Data(self.spark).get_bookings_with_score(bookings)\n data, test_ratings = bookings.randomSplit([0.8, 0.2])\n testdata = test_ratings.rdd.map(lambda r: (r[0], r[1]))\n best_mse = float('inf')\n parameter_names = ['rank', 'iterations', 'lambda']\n types = [int, int, float]\n range_values = [self.frange(self.config.get('DEFAULT', 'min_' + parameter, t),\n self.config.get('DEFAULT', 'max_' + parameter, t),\n self.config.get('DEFAULT', parameter + '_step', t))\n for parameter, t in zip(parameter_names, types)]\n for parameters in map(lambda v: dict(zip(parameter_names, v)),\n product(*range_values)):\n self.train(data, parameters)\n predictions = self.predict(testdata)\n mse = evaluator.calculate_mse(test_ratings, predictions)\n if mse < best_mse:\n best_parameters = parameters\n best_mse = mse\n if best_mse < float('inf'):\n self.config.set_hyperparameters(type(self).__name__, best_parameters)\n\nclass ExplicitALS(ALS):\n '''Generates recommendations based on review data.'''\n\n def train(self, bookings, parameters=None, load=False):\n if not isinstance(bookings, DataFrame):\n raise TypeError('Recommender requires a DataFrame')\n\n super(ExplicitALS, self).train(\n Data(self.spark).get_bookings_with_score(bookings), parameters, load=load)\n\n def create_model(self, bookings, parameters):\n return SparkALS.train(bookings, parameters['rank'],\n parameters['iterations'], parameters['lambda'], nonnegative=True)\n\nclass ImplicitALS(ALS):\n '''Generates recommendations based on how many times a diner visited a\n restaurant.'''\n\n def train(self, bookings, parameters=None, load=False):\n if not isinstance(bookings, DataFrame):\n raise TypeError('Recommender requires a DataFrame')\n\n # calculate how many times a diner visited each restaurant\n data = defaultdict(Counter)\n for booking in bookings.collect():\n data[booking['Diner Id']][booking['Restaurant Id']] += 1\n # transform that data into an RDD and train the model\n data = [(diner, restaurant, score) for diner, counter in data.items()\n for restaurant, score in counter.iteritems()]\n \n super(ImplicitALS, self).train(self.spark.parallelize(data), parameters, load=load)\n\n def create_model(self, bookings, parameters):\n return SparkALS.trainImplicit(bookings, parameters['rank'],\n parameters['iterations'],\n alpha=parameters['lambda'],\n nonnegative=True)\n\nclass CuisineType(Recommender):\n '''Generates recommendations based on a diner's prefered cuisine types.'''\n \n def train(self, bookings, load=False):\n minimum_like_score = self.config.get('CuisineType', 'minimum_score')\n\n # stores a set of cuisines for each restaurant\n self.restaurant_cuisine = defaultdict(set)\n filename = os.path.join(self.config.get('DEFAULT', 'data_dir', str),\n self.config.get('CuisineType', 'filename', str))\n for entry in Data(self.spark).read(filename).collect():\n restaurant = entry['RestaurantId']\n self.restaurant_cuisine[restaurant].add(entry['CuisineTypeId'])\n\n # what cuisine types does this diner like?\n self.liked_cuisine = defaultdict(set)\n for booking in bookings.collect():\n diner = booking['Diner Id']\n restaurant = booking['Restaurant Id']\n score = booking['Review Score']\n if score >= minimum_like_score:\n self.liked_cuisine[diner] |= self.restaurant_cuisine[restaurant]\n\n def predict(self, diners_restaurants):\n # score is the number of cuisines that the diner and the restaurant\n # have in common\n diners_restaurants = diners_restaurants.collect()\n recommendations = [(diner, 
restaurant,\n len(self.restaurant_cuisine[restaurant] &\n self.liked_cuisine[diner]))\n for diner, restaurant in diners_restaurants]\n return SQLContext(self.spark).createDataFrame(\n recommendations, self.config.get_schema())\n\nclass PricePoint(Recommender):\n '''For each diner, generates recommendations based on the average price\n point of all the visited restaurants.'''\n \n def train(self, bookings, load=False):\n # record the price point of each restaurant\n self.restaurant_price_point = {}\n filename = os.path.join(self.config.get('DEFAULT', 'data_dir', str),\n self.config.get('DEFAULT', 'restaurant_file', str))\n for entry in Data(self.spark).read(filename).collect():\n price_point = entry['PricePoint']\n if price_point is not None:\n self.restaurant_price_point[entry['RestaurantId']] = price_point\n\n # record the price points of all restaurants visited by each diner\n diner_price_points = defaultdict(list)\n for booking in bookings.collect():\n diner = booking['Diner Id']\n restaurant = booking['Restaurant Id']\n score = booking['Review Score']\n if restaurant in self.restaurant_price_point:\n diner_price_points[diner].append(\n self.restaurant_price_point[restaurant])\n\n # stores the average price point of each diner's visited restaurants\n self.diner_average_price_point = dict(\n (diner, sum(diner_price_points[diner]) /\n len(diner_price_points[diner])) for diner in diner_price_points)\n\n # calculate averages if possible, otherwise resort to default price\n # point value in the config file\n default_price_point = self.config.get('PricePoint', 'default_price_point')\n self.restaurant_default_price_point = (\n sum(self.restaurant_price_point.values()) /\n len(self.restaurant_price_point.values())\n if self.restaurant_price_point else default_price_point)\n self.diner_default_price_point = (\n sum(self.diner_average_price_point.values()) /\n len(self.diner_average_price_point.values())\n if self.diner_average_price_point else default_price_point)\n\n def predict(self, diners_restaurants):\n diners_restaurants = diners_restaurants.collect()\n recommendations = []\n for diner, restaurant in diners_restaurants:\n restaurant_price_point = self.restaurant_price_point.get(\n restaurant, self.restaurant_default_price_point)\n diner_price_point = self.diner_average_price_point.get(\n diner, self.diner_default_price_point)\n score = (self.config.get('PricePoint', 'maximum_price_point') -\n abs(restaurant_price_point - diner_price_point))\n recommendations.append((diner, restaurant, score))\n\n return SQLContext(self.spark).createDataFrame(\n recommendations, self.config.get_schema())\n","repo_name":"domantasjurkus/resdiary","sub_path":"src/recommenders.py","file_name":"recommenders.py","file_ext":"py","file_size_in_byte":15350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5507200958","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def reorderList(self, head: Optional[ListNode]) -> None:\r\n \"\"\"\r\n Do not return anything, modify head in-place instead.\r\n \"\"\"\r\n \r\n # O(2n) solution with O(n) memory\r\n # make a list and use two pointers to reorder the list\r\n # while loop with r - l > 1 since both left and right pointer will move one each time\r\n # so they get 2 spaces closer each time\r\n # final if else statement to take into account odd or even number of elements in list\r\n # make 
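generate_weights in the Ensemble recommender above enumerates every weight vector over [0, maximum_weight] per recommender, then drops scalar multiples so that, for example, (2, 2) never survives alongside (1, 1). A standalone check of that pruning (pure Python, no Spark needed):

```python
from itertools import product

def generate_weights(maximum_weight, num_recommenders):
    # Same pruning as the Ensemble method above: drop the all-zero vector and
    # every integer multiple of a combination that is already present.
    weights = set(product(range(maximum_weight + 1), repeat=num_recommenders))
    weights.discard((0,) * num_recommenders)
    for k in range(2, maximum_weight + 1):
        weights -= {tuple(k * c for c in t) for t in weights}
    return weights

print(sorted(generate_weights(2, 2)))
# -> [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]  ((0, 2), (2, 0), (2, 2) pruned)
```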
sure that the last element's next value is None\r\n\r\n curr = head\r\n\r\n nodes = []\r\n\r\n while curr:\r\n nodes.append(curr)\r\n curr = curr.next\r\n \r\n l, r = 0, len(nodes) - 1\r\n\r\n while r - l > 1:\r\n nodes[l].next = nodes[r]\r\n nodes[r].next = nodes[l + 1]\r\n l += 1\r\n r -= 1\r\n\r\n if r > l:\r\n nodes[l].next = nodes[r]\r\n nodes[r].next = None\r\n\r\n else:\r\n nodes[l].next = None\r\n \r\n# Neetcode solution with O(n) memory:\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def reorderList(self, head: Optional[ListNode]) -> None:\r\n \"\"\"\r\n Do not return anything, modify head in-place instead.\r\n \"\"\"\r\n \r\n # O(2n) solution with O(1) memory\r\n # use a slow and fast pointer to find middle point of list\r\n # node after the middle point will represent the latter half of list that needs to be reversed\r\n # reverse second half of list (while keeping track of last value)\r\n # set end of first half's next value to none\r\n\r\n slow, fast = head, head.next\r\n\r\n while fast and fast.next:\r\n # have to do fast and fast.next since we are moving it 2 at a time\r\n slow = slow.next\r\n fast = fast.next.next\r\n\r\n second = slow.next\r\n # this is the first node in the second half of the list\r\n slow.next = prev = None\r\n # setting next of last value in first half of list to None so that it is no longer connected\r\n # also setting previous value to None so the first node in second half will have no value after it\r\n while second:\r\n temp = second.next\r\n second.next = prev\r\n prev = second\r\n second = temp\r\n # temp is the next value that we need since it'll be changed\r\n # second.next will be reversed to connect to the previous value\r\n # the new previous value is the current second\r\n # and now second will be the next value in the list (aka temp)\r\n\r\n first, second = head, prev\r\n # final previous value will be the new first node in second half (originally the last node of whole list)\r\n\r\n while second:\r\n # only need to do while second instead of first as well since second half's length will\r\n # be = to first half or first half - 1\r\n first_temp, second_temp = first.next, second.next\r\n # have to keep track of the next node for first and second half since they both will get changed\r\n first.next = second\r\n second.next = first_temp\r\n # if odd size list, second.next at end will be the last value of first half\r\n # if even size list, second.next will be None since the last value in first half will have .next of None\r\n first = first_temp\r\n second = second_temp","repo_name":"alexjooho/dsa","sub_path":"linked-list/143_reorder_list.py","file_name":"143_reorder_list.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11012778301","text":"\"\"\"Integration test for Java fuzz pipeline.\"\"\"\nimport pathlib\n\nfrom datasets.github.testing.access_token import ACCESS_TOKEN\nfrom datasets.github.testing.requires_access_token import requires_access_token\nfrom labm8.py import dockerutil\nfrom labm8.py import test\n\nFLAGS = test.FLAGS\n\n\n@test.XFail(reason=\"Failure during pre-processingq. 
Fix me.\")\n@requires_access_token\ndef test_end_to_end_pipeline(tempdir: pathlib.Path):\n scrape_java_files_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/scrape_java_files_image\"\n )\n mask_contentfiles_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/mask_contentfiles_image\"\n )\n export_java_corpus_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/export_java_corpus_image\"\n )\n preprocess_java_corpus_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/preprocess_java_corpus_image\"\n )\n re_preprocess_java_methods_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/re_preprocess_java_methods_image\"\n )\n encode_java_corpus_image = dockerutil.BazelPy3Image(\n \"experimental/deeplearning/deepsmith/java_fuzz/encode_java_corpus_image\"\n )\n\n # Step 1: Scrape a single repo from GitHub.\n with scrape_java_files_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\n \"n\": 1,\n \"db\": \"sqlite:////workdir/java.db\",\n \"github_access_token\": ACCESS_TOKEN,\n },\n volumes={tempdir: \"/workdir\",},\n timeout=600,\n )\n\n # Check that contentfiles database is created.\n assert (tempdir / \"java.db\").is_file()\n\n # Step 2: Mask a subset of the contentfiles database.\n with mask_contentfiles_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\"db\": \"sqlite:////workdir/java.db\", \"max_repo_count\": 1,},\n volumes={tempdir: \"/workdir\"},\n timeout=300,\n )\n\n # Check that contentfiles database is still there.\n assert (tempdir / \"java.db\").is_file()\n\n # Step 3: Export Java corpus.\n with export_java_corpus_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\n \"input\": \"sqlite:////workdir/java.db\",\n \"output\": \"sqlite:////workdir/export.db\",\n },\n volumes={tempdir: \"/workdir\"},\n timeout=600,\n )\n\n # Check that corpus is exported.\n assert (tempdir / \"java.db\").is_file()\n assert (tempdir / \"export.db\").is_file()\n\n # Step 4: Preprocess Java corpus.\n with preprocess_java_corpus_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\n \"input\": \"sqlite:////workdir/export.db\",\n \"output\": \"sqlite:////workdir/preprocessed.db\",\n },\n volumes={tempdir: \"/workdir\"},\n timeout=600,\n )\n\n # Check that corpus is exported.\n assert (tempdir / \"export.db\").is_file()\n assert (tempdir / \"preprocessed.db\").is_file()\n\n # Step 5: Re-Preprocess Java methods.\n with re_preprocess_java_methods_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\n \"input\": \"sqlite:////workdir/exported.db\",\n \"input_pp\": \"sqlite:////workdir/preprocessed.db\",\n \"outdir\": \"/workdir/re_preprocessed\",\n },\n volumes={tempdir: \"/workdir\"},\n timeout=600,\n )\n\n # Check that corpus is exported.\n assert (tempdir / \"preprocessed.db\").is_file()\n assert (tempdir / \"re_preprocessed\").is_dir()\n\n # Step 6: Encode Java methods.\n with encode_java_corpus_image.RunContext() as ctx:\n ctx.CheckCall(\n [],\n {\n \"input\": \"sqlite:////workdir/preprocessed.db\",\n \"output\": \"sqlite:////workdir/encoded.db\",\n },\n volumes={tempdir: \"/workdir\"},\n timeout=600,\n )\n\n # Check that corpus is encoded.\n assert (tempdir / \"preprocessed.db\").is_file()\n assert (tempdir / \"encoded.db\").is_file()\n\n\nif __name__ == \"__main__\":\n 
test.Main()\n","repo_name":"ChrisCummins/phd","sub_path":"experimental/deeplearning/deepsmith/java_fuzz/tests/java_fuzz_integration_test.py","file_name":"java_fuzz_integration_test.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"71603115732","text":"#!/usr/bin/python3\n#coding: utf8\n\nclass OrbitalObject:\n\tdef __init__(self, orbitting, name):\n\t\tself.name = name\n\t\tself.parent = None\n\t\tself.children = []\n\t\tOrbitalObject.objects[name] = self\n\t\tif orbitting is not None:\n\t\t\tself.set_orbitting(orbitting)\n\n\tdef set_orbitting(self, orbitting):\n\t\tparent = OrbitalObject.objects.get(orbitting)\n\t\tif not parent:\n\t\t\tparent = OrbitalObject(None, orbitting)\n\t\tparent.children.append(self)\n\t\tself.parent = parent\n\n\tdef get_nb_of_orbits(self, until=None):\n\t\treturn len(self.get_full_parent_list(until))\n\n\tdef get_full_parent_list(self, until=None):\n\t\tparent = self.parent\n\t\tparent_list = []\n\t\tif until is not None:\n\t\t\tuntil = OrbitalObject.objects.get(until)\n\t\twhile parent is not until and parent is not None:\n\t\t\tparent_list.append(parent)\n\t\t\tparent = parent.parent\n\t\treturn parent_list\n\nOrbitalObject.objects = dict()\n\nif __name__ == '__main__':\n\twith open('input') as f:\n\t\tfor line in f:\n\t\t\torbitting, name = line[:-1].split(\")\")\n\t\t\tobj = OrbitalObject.objects.get(name)\n\t\t\tif obj:\n\t\t\t\tobj.set_orbitting(orbitting)\n\t\t\telse:\n\t\t\t\tOrbitalObject(orbitting, name)\n\t\tYOU = OrbitalObject.objects.get('YOU')\n\t\tSAN = OrbitalObject.objects.get('SAN')\n\t\tYOU_parents = YOU.get_full_parent_list()\n\t\tSAN_parents = SAN.get_full_parent_list()\n\t\tclosest_parent_name = None\n\t\tfor parent in YOU_parents:\n\t\t\tif parent in SAN_parents:\n\t\t\t\tclosest_parent_name = parent.name\n\t\t\t\tbreak\n\t\tprint('From YOU to',closest_parent_name,':',YOU.get_nb_of_orbits(closest_parent_name))\n\t\tprint('From SAN to',closest_parent_name,':',SAN.get_nb_of_orbits(closest_parent_name))\n\t\tprint(\"Lowest required orbital transfers :\",YOU.get_nb_of_orbits(closest_parent_name)+SAN.get_nb_of_orbits(closest_parent_name))","repo_name":"jirouette/adventofcode2019","sub_path":"day6/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26805633731","text":"from django.db.models import Q\nfrom rest_framework.filters import (\n SearchFilter, OrderingFilter\n )\nfrom rest_framework.mixins import DestroyModelMixin, UpdateModelMixin\n\nfrom rest_framework.generics import (\n CreateAPIView,\n DestroyAPIView,\n ListAPIView,\n RetrieveAPIView,\n RetrieveUpdateAPIView,\n UpdateAPIView,\n )\n\nfrom rest_framework.permissions import (\n AllowAny,\n IsAuthenticated,\n IsAdminUser,\n IsAuthenticatedOrReadOnly,\n )\n\nfrom posts.api.pagination import PostLimitOffsetPagination, PostPageNumberPagination\n\nfrom posts.api.permissions import IsOwnerOrReadOnly\n\nfrom comments.models import Comment\nfrom .serializers import (\n CommentDetailSerializer,\n CommentListSerializer,\n create_comment_serializer,\n )\n\n\nclass CommentCreateAPIView(CreateAPIView):\n queryset = Comment.objects.all()\n # serializer_class = CommentCreateSerializer\n permission_classes = [IsAuthenticated]\n\n def get_serializer_class(self):\n model_type = self.request.GET.get('type')\n slug = self.request.GET.get('slug')\n parent_id = 
self.request.GET.get('parent_id', None)\n return create_comment_serializer(\n model_type=model_type,\n slug=slug,\n parent_id=parent_id,\n user=self.request.user,\n )\n\n\n\n# class CommentEditAPIView(RetrieveAPIView):\n# queryset = Comment.objects.all() # This '.all()' was changed by model manager to be only parent comments\n# serializer_class = CommentDetailSerializer\n# lookup_field = 'pk'\n# # lookup_url_kwarg = 'abc' # using 'slug' in the url vs. abc.\n# # def put(self, request, *args, **kwargs):\n# # return self.update(request, *args, **kwargs)\n\nclass CommentDetailAPIView(DestroyModelMixin, UpdateModelMixin, RetrieveAPIView):\n queryset = Comment.objects.filter(id__gte=0) # id__gte=0 will filter to be \"true all\"\n serializer_class = CommentDetailSerializer\n permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\n\n\nclass CommentListAPIView(ListAPIView):\n serializer_class = CommentListSerializer\n #DRF built in search\n filter_backends = [SearchFilter, OrderingFilter]\n search_fields = ['content', 'user__first_name']\n\n pagination_class = PostPageNumberPagination\n\n def get_queryset(self, *args, **kwargs):\n queryset_list = Comment.objects.filter(id__gte=0) #filter(user=self.request.user)\n query = self.request.GET.get(\"q\")\n if query:\n queryset_list = queryset_list.filter(\n Q(content__icontains=query)|\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query)\n ).distinct()\n return queryset_list\n","repo_name":"simonfromla/blog-api","sub_path":"src/comments/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6991491850","text":"#Questão 1\n'''def maior(n1,n2):\n if n1>n2:\n print(f\"{n1} é o maior\")\n elif n140:\n div = (n1*n2)/(n1//n2)\n print(\"Sim a multiplicação dos dois numeros é maior que 40 e o valor desejado é \", div)\n else:\n print(\"A multiplicação dos dois números não é maior que 40, pois o resultado é \",n1*n2)\nn1 = float(input(\"Digite o primeiro numero: \"))\nn2 = float(input(\"Digite o segundo numero: \"))\n\nprint(f\"A soma dos valores é {n1+n2:.0f}\")\nprint(f\"A multiplicão dos valores é {n1*n2:.0f}\")\nprint(f\"A divisão dos valores é {n1//n2:.0f}\")\nmaior(n1,n2)\nprint(\"O resultado da soma dos dois numeros é \", impopar(n1,n2))\nmaior40(n1,n2)'''\n\n#Questão 2\n'''frase = input(\"Digite uma frase: \").lower()\na = 0\ne = 0\ni = 0\no = 0\nu = 0\nconsoante = 0\n\nfor cont in frase:\n if cont in \"aáâã\":\n a = a+1\n elif cont in \"eéê\":\n e = e+1\n elif cont in \"iíî\":\n i = i+1\n elif cont in \"oóôõ\":\n o = o+1\n elif cont in \"uúû\":\n u = u+1\n else:\n consoante = consoante+1\n\nprint(f\"Sua frase tem {a} letra(s) 'a' {e} letra(s) 'e' {i} letra(s) 'i' {o} letra(s) 'o' e {u} letra(s) 'u', além de {consoante} consoantes\")\n\nprint(\"A frase digitada sem as vogais fica:\")\nfor cont in frase:\n if cont not in ('aeéêiíîoóôõuúûaáâã'):\n print(cont, end=\"\")\n ''' \n#Questão 3\n'''import random\nimport os\nimport time\n\nsenha = input(\"Cadastre uma senha para entrar no jogo: \")\nprint(\"Senha cadastrada com sucesso!!\")\ntime.sleep(0.3)\nos.system('cls')\n\nresp = \"\"\nwhile resp != senha:\n resp = input(\"Digite a senha: \")\n if resp != senha:\n print(\"Senha invalida tente 
novamente!\")\n\nprint(\"Seja bem vindo(a) ao jogo da adivinhação!\") \n\nmaquina = str(random.randint(1,10))\nresp = input(\"Digite um número: \")\nwhile resp != maquina:\n os.system('cls')\n time.sleep(0.3)\n print(\"Resposta errada\")\n if resp > maquina:\n resp = input(\"O numero digitado é maior, tente um número menor: \")\n os.system('cls')\n time.sleep(0.5)\n else:\n resp = input(\"O numero digitado é menor, tente um número maior: \")\n os.system('cls')\n time.sleep(1)\n\nprint(\"Parabéns você acertou o número!!!!\")\n\n'''\n#Questão 4\n'''\nimport datetime\nfrom time import strptime\nopc= 0\n\ndef bissexto(ano):\n \n if ano%4==0:\n return 1\n if ano%100==0:\n return 0\n if ano%400==0:\n return 1\n else:\n return 0\n\ndef mesExtenso(data):\n \n dia = int(data[0])\n mes = int(data[1])\n ano = int(data[2])\n\n if bissexto(ano) == 1:\n if mes == 2:\n if dia <= 0 or dia >29 or mes <1 or mes >12 or ano <1:\n return print(\"Data inválida\")\n else: \n meses = [\"Janeiro\",\"Fevereiro\", \"Março\", \"Abril\", \"Maio\", \"Junho\", \"Julho\", \"Agosto\", \"Setembro\", \"Outubro\", \"Novembro\", \"Dezembro\"] \n print(f'{data[0]} de {meses[mes-1]} de {data[2]}')\n \n else:\n if dia <= 0 or dia >31 or mes <1 or mes >12 or ano <1:\n return print(\"Data inválida\")\n else: \n meses = [\"Janeiro\",\"Fevereiro\", \"Março\", \"Abril\", \"Maio\", \"Junho\", \"Julho\", \"Agosto\", \"Setembro\", \"Outubro\", \"Novembro\", \"Dezembro\"] \n print(f'{data[0]} de {meses[mes-1]} de {data[2]}')\n else:\n if mes == 2:\n if dia <= 0 or dia >28 or mes <1 or mes >12 or ano <1:\n return print(\"Data inválida\")\n else: \n meses = [\"Janeiro\",\"Fevereiro\", \"Março\", \"Abril\", \"Maio\", \"Junho\", \"Julho\", \"Agosto\", \"Setembro\", \"Outubro\", \"Novembro\", \"Dezembro\"] \n print(f'{data[0]} de {meses[mes-1]} de {data[2]}')\n else:\n if dia <= 0 or dia >31 or mes <1 or mes >12 or ano <1:\n return print(\"Data inválida\")\n else: \n meses = [\"Janeiro\",\"Fevereiro\", \"Março\", \"Abril\", \"Maio\", \"Junho\", \"Julho\", \"Agosto\", \"Setembro\", \"Outubro\", \"Novembro\", \"Dezembro\"] \n print(f'{data[0]} de {meses[mes-1]} de {data[2]}')\n\nwhile opc != 1:\n data = input(\"Digite uma data no formato 'DD/MM/AAAA: \" ).split(\"/\")\n try:\n mesExtenso(data)\n except:\n print(\"NULL\")\n resp = input(\"Deseja continuar? s/n: \")\n while resp not in ('SsNn'):\n resp = input(\"Por favor digite 's' ou 'n': \")\n if resp in \"nN\":\n opc = 1\nprint(\"Programa finalizado!!\")\n'''\n# Questão 5\n'''\ndef semvogais(frase):\n vogais = 0\n \n imprimir = []\n for cont in frase:\n if cont not in ('aeéêiíîoóôõuúûaáâã'):\n print(cont, end=\"\")\n imprimir.append(cont)\n else:\n vogais +=1\n return imprimir,print(f'\\nForam retiradas {vogais} vogais')\n \nfrase = input(\"Digite uma frase: \")\nprint('Frase impresa sem vogais:')\nsemvogais(frase)\nprint()\n\n'''\n# Questão 6\n\n'''\nperguntas = [input('Telefonou para a vítima? \\n')\n ,input('Esteve no local do crime? \\n')\n ,input('Mora perto da vítima? \\n')\n ,input('Devia para a vítima? \\n')\n ,input('Já trabalhou com a vítima? 
\\n')]\n\n\n\nsim = 0\n\nfor cont in range(5):\n if perguntas[cont] in ('sS'):\n sim += 1\n \nsim = perguntas.count('s')\nif sim == 2:\n print(\"Suspeito\")\nelif sim >2 and sim <5:\n print(\"Cúmplice\")\nelif sim == 5:\n print(\"Culpado\")\nelse:\n print(\"Inocente\")\n\n'''\n# Questão 7\n'''\nimpar = []\npar = []\nlista = [par,impar]\ncont = 0\nwhile cont < 7:\n cont +=1\n n1 = int(input(f\"Digite o {cont}º numero: \"))\n if n1%2 == 0:\n par.append(n1)\n else:\n impar.append(n1)\n\npar.sort()\nimpar.sort()\nprint(lista)\n'''\n# Questão 8\n\nimport datetime\nimport operator\nnome = input(\"Qual seu nome? \")\nanoNasc = int(input(\"Digite o ano de nascimento: \"))\ncarteira = input(\"Tem carteira registrada? se não digite 0: \")\ndata_atual = datetime.date.today()\nano_atual = int(data_atual.year)\n\nif carteira != '0':\n anoContrato = int(input(\"Digite o ano que foi contratado: \"))\n salario = float(input(\"Digite o seu salario: \"))\n temp_aposentar = 35 - (ano_atual - anoContrato)\n dic = {'nome': nome,'idade':ano_atual-anoNasc,'ano_aposentadoria': temp_aposentar+ano_atual}\n print(f'O Seu nome é {dic[\"nome\"]} atualmente tem {dic[\"idade\"]} anos, você ira se aposentar em {dic[\"ano_aposentadoria\"]} com {dic[\"idade\"]+35} anos.')\nelse:\n dic = {'nome': nome,'idade':ano_atual-anoNasc}\n print(f'O Seu nome é {dic[\"nome\"]} atualmente tem {dic[\"idade\"]} anos, você não possui carteira de trabalho.')\n","repo_name":"paulohenriquegama/Blue_Modulo1-LogicaDeProgramacao","sub_path":"Aula14/VerificaçãoDeAprendizagem.py","file_name":"VerificaçãoDeAprendizagem.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71805417814","text":"# Import necessary packages\nimport boto3\nimport os\n\n\n# Create AWS sesison\nsession = boto3.Session(\n aws_access_key_id=os.environ[\"ACCESS_KEY\"],\n aws_secret_access_key=os.environ[\"SECRET_KEY\"]\n )\n\n# Define path to bucket\ns3 = session.resource('s3')\nbucket = s3.Bucket('t206')\n\nkey = bucket.objects.filter(Prefix='cv').get_key('data.h5')\nkey.get_contents_to_filename('~/Desktop/works.h5')#\n","repo_name":"jdesilvio/T206-computer-vision","sub_path":"app/app/misc/getImageData.py","file_name":"getImageData.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12299792663","text":"# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\n\na = Analysis(['__init__.py', 'controllers\\\\FileFinder.py', 'controllers\\\\PixelsManipulator.py', 'controllers\\\\Workspace.py', 'views\\\\Editor.py', 'views\\\\FilePicker.py', 'views\\\\SetColorDialog.py'],\n pathex=['C:\\\\Users\\\\x\\\\Desktop\\\\PhotoEditor', 'C:\\\\Windows\\\\System32\\\\downlevel'],\n binaries=[('c://python36/python36.dll', '.'), ('c://python36/vcruntime140.dll', '.')],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n hooksconfig={},\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\n\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas, \n [],\n name='PhotoEditor',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=False,\n console=True,\n disable_windowed_traceback=False,\n target_arch=None,\n codesign_identity=None,\n entitlements_file=None , 
icon='icon.ico')\n","repo_name":"nixs-dev/PhotoEditor","sub_path":"PhotoEditor.spec","file_name":"PhotoEditor.spec","file_ext":"spec","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38076822641","text":"from typing import List\n\nclass Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n ans = [] # store the shortest paths here!\n if endWord not in wordList:\n return ans\n\n # use this dictionary to store the shortest path from beginWord to each word in wordList.\n word_to_path = {}\n for word in wordList:\n word_to_path[word] = []\n word_to_path[beginWord] = [[beginWord]]\n \n # use this set of store the words that I've seen in higher layers.\n seen = set()\n seen.add(beginWord)\n \n found = False # use this flag to check if I've found end word.\n queue = [beginWord] # use this array as a queue on all tree nodes on current layer.\n while not found and len(queue):\n temp = [] # use this array to store the tree nodes I woud traverse in next layer.\n new_seen = set() # use this set to store the tree nodes that I've seen in current layer.\n for currentWord in queue:\n for word in wordList:\n # if I've seen this word in higher layers, then this path cannot be the shortest\n # path.\n if word in seen:\n continue\n # only proceed if this word is different in only one char with previous word.\n if self.diifer_by_one(currentWord, word):\n new_seen.add(word) # I would add this word as have seen in this layer.\n # add the path to dictionary.\n for path in word_to_path[currentWord]:\n word_to_path[word].append(path + [word])\n if word == endWord:\n # in this case I've found the end word, no need to proceed to next layer.\n found = True\n else:\n temp.append(word) \n # delete in case future contradition, no need any way.\n word_to_path[currentWord] = []\n # before proceeding to next layer, set the seen set.\n for ele in new_seen:\n seen.add(ele)\n queue = temp \n\n return word_to_path[endWord]\n \n \n def diifer_by_one(self, s1: str, s2: str) -> bool:\n d = 0\n\n for i in range(len(s1)):\n if s1[i] != s2[i]:\n d += 1\n if d >= 2:\n return False\n\n return d","repo_name":"Rui-Wang-813/RUIX","sub_path":"code_problems_set1/0126_Word_Ladder_II/ruix.py","file_name":"ruix.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"499962402","text":"\"\"\"\n단계 : 5-2\n문제번호 : 8958\n\n문제\n\"OOXXOXXOOO\"와 같은 OX퀴즈의 결과가 있다. O는 문제를 맞은 것이고, X는 문제를 틀린 것이다. 문제를 맞은 경우 그 문제의 점수는\n그 문제까지 연속된 O의 개수가 된다. 예를 들어, 10번 문제의 점수는 3이 된다.\n\n\"OOXXOXXOOO\"의 점수는 1+2+0+0+1+0+0+1+2+3 = 10점이다.\n\nOX퀴즈의 결과가 주어졌을 때, 점수를 구하는 프로그램을 작성하시오.\n\n입력\n첫째 줄에 테스트 케이스의 개수가 주어진다. 각 테스트 케이스는 한 줄로 이루어져 있고, 길이가 0보다 크고 80보다 작은 문자열이 주어진다.\n문자열은 O와 X만으로 이루어져 있다.\n\n출력\n각 테스트 케이스마다 점수를 출력한다.\n\"\"\"\n\n\"\"\"\n'X'에 해당하는 index만 받아서 피보나치로 합 구해줌, 그리고 해당 인덱스를 포함하여 이전 값들은 없애줌\nex) OOXOO -> X 인덱스 구하고 피보나치 계산하고, OO 로 만들어줌. 'X' 존재 하지 않을시 'O'의 수만큼 피보나치.\n\"\"\"\ncase = int(input())\nlist_sum = []\n\n# 피보나치 구함. 
숫자 받아서 홀수/짝수 경우로 나누어서 구해서 반환.\n# ex) num=3 : 1 2 3 이므로 (1+3)*1 + 2\n# num=4 : 1 2 3 4 이므로 (1+4)*2\ndef fibonacci(num):\n if num == 1:\n return num\n if num%2 != 0:\n value = (1 + num) * int(num/2) + (1 + num)/2\n else:\n value = (1 + num) * num/2\n\n return value\n\n\nfor i in range(case):\n o_x = input()\n num_x = o_x.count('X')\n\n sum = 0\n for j in range(num_x + 1):\n index = o_x.find('X') # find는 없을시에 -1 반환 / index는 없을시에 error뜸.\n if index == -1:\n sum += fibonacci(o_x.count('O'))\n break\n sum += fibonacci(index)\n o_x = o_x[index+1:]\n list_sum.append(int(sum))\n\nfor i in range(case):\n print(list_sum[i])","repo_name":"intlabSeJun/coding_test","sub_path":"code/5_1차원배열/5_6(8958).py","file_name":"5_6(8958).py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28103206336","text":"from src.exception import CustomException\nfrom src.logger import logging\nimport numpy as np\nimport pandas as pd\nfrom src.utils import load_obj\nimport sys\n\n\nclass Predict_pipeline:\n def __init__(self):\n pass\n \n def predict(sefl,features):\n\n try:\n pre_processor=load_obj(fil_path='artifacts/preprocessor.pkl')\n model_obj=load_obj(fil_path='artifacts/model.pkl')\n preprocessed_data=pre_processor.transform(features)\n result=model_obj.predict(preprocessed_data)\n return result\n except Exception as e:\n raise CustomException(e,sys)\n\nclass CustomData:\n def __init__(self,\n gender:str,\n race_ethnicity:str,\n parental_level_of_education:str,\n lunch:str,\n test_preparation_course:str,\n reading_score:float,\n writing_score:float\n ):\n\n self.gender=gender\n self.race_ethnicity=race_ethnicity\n self.parental_level_of_education=parental_level_of_education\n self.lunch=lunch\n self.test_preparation_course=test_preparation_course\n self.reading_score=reading_score\n self.writing_score=writing_score\n\n def convert_input_to_dataframe(self):\n try:\n input_dict={\n \"gender\":[self.gender],\n \"race_ethnicity\":[self.race_ethnicity],\n \"parental_level_of_education\":[self.parental_level_of_education],\n \"lunch\":[self.lunch],\n \"test_preparation_course\":[self.test_preparation_course],\n \"reading_score\":[self.reading_score],\n \"writing_score\":[self.writing_score],\n }\n return pd.DataFrame(input_dict)\n \n except Exception as e:\n raise CustomException(e,sys)\n\n ","repo_name":"Raseena-KP/mlprojects","sub_path":"src/pipeline/predict_pipeline.py","file_name":"predict_pipeline.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5030481921","text":"from itertools import chain\nimport logging\nimport os\nimport pickle\nimport time\n\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom http_request_randomizer.requests.proxy.requestProxy import RequestProxy\n\nlogging.basicConfig(filename='bad_req.log',level=logging.DEBUG)\n\n\nURL = 'https://seekingalpha.com/market-news/m-a?page={pageno}'\nSTRAINER = SoupStrainer('ul', attrs={'class': 'mc-list',\n 'id': 'mc-list'})\n\n\ndef seeking_more_alpha(soup: BeautifulSoup):\n for tag in soup.find_all('li', attrs={'class': 'date-title'}):\n date = tag.text\n deal = tag.next_sibling\n while deal and deal.get('class') != ['date-title']:\n ticker = deal.find('div', attrs={'class': 'media-left'}).text\n link = deal.find('div', attrs={'class': 'title'}).find('a').get('href')\n bullets = deal.find('div', attrs={'class': 'bullets'})\n txt = ' '.join(bullet.text for 
bullet in bullets.find_all('li'))\n if not txt:\n # Fall back to old paragraph structure (2011/12)\n txt = ' '.join(bullet.text for bullet in bullets.find_all('p'))\n deal = deal.next_sibling\n yield date, link, ticker, txt\n\n\ndef read_one_pg(pageno):\n url = URL.format(pageno=pageno)\n req_proxy = RequestProxy()\n return req_proxy.generate_proxied_request(url)\n\n\ndef try_pages(limit=10, debug=False, sleep=0):\n pageno = 1\n while pageno <= limit:\n if sleep:\n time.sleep(sleep)\n response = read_one_pg(pageno)\n while not response:\n if sleep:\n time.sleep(sleep)\n response = read_one_pg(pageno)\n if not response.status_code == 200:\n if debug:\n logging.debug(response.url)\n pageno += 1\n continue\n else:\n soup = BeautifulSoup(response.text, 'html.parser',\n parse_only=STRAINER)\n if not soup:\n # May still get a 200 response from blank pages\n break\n else:\n pageno += 1\n yield from seeking_more_alpha(soup=soup)\n\n\nif __name__ == '__main__':\n mergers = tuple(try_pages(limit=300, debug=False))\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, 'mergers.pickle'), 'wb') as f:\n pickle.dump(mergers, f, pickle.HIGHEST_PROTOCOL)\n","repo_name":"bsolomon1124/seeking-more-alpha","sub_path":"src/scrape_sa.py","file_name":"scrape_sa.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23130426605","text":"import json\nimport numpy as np\nimport pandas as pd\nfrom netprop.models import PropagationResultModel\nfrom scipy import stats\nfrom pathlib import Path\n\n\ndef make_gct(df: pd.DataFrame, path: str):\n\n with open(path, 'w') as handler:\n handler.write(f\"# 1.3\\n\"\n f\"{len(df)}\\t{len(df.columns)}\\n\"\n f\"NAME\\tDescription\\tpropagation_diff\\n\")\n for i in range(len(df)):\n handler.write(f\"{df.iloc[i].names}\\tNA\\t{df.iloc[i].propagation_diff}\\n\")\n\n\ndef make_cls(sample_name: str, path:str):\n with open(path, 'w') as handler:\n handler.write(f\"1 1 1\\n\"\n f\"# {sample_name}\\n\"\n f\"{sample_name}\")\n\n\ndef make_gmx(gene_set_name: str, gene_set: list[str], path):\n with open(path, 'w') as handler:\n handler.write(f\"{gene_set_name}\\n\"\n f\"na\\n\")\n handler.write(\"\\n\".join(gene_set))\n\n\ndef make_gmt(gene_set_name: str, gene_set: list[str], path):\n with open(path, 'w') as handler:\n header = f\"{gene_set_name}\\tna\\t\"\n handler.write(header + \"\\t\".join(gene_set))","repo_name":"EtayLivne/netprop_analysis","sub_path":"prepare_for_gsea.py","file_name":"prepare_for_gsea.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19445535564","text":"class ATM:\n \n # initializes with balance and sets interest rate default\n def __init__ (self, balance = 0, int_rate = .001):\n self.balance= balance\n self.int_rate= int_rate\n self.trans = []\n\n # returns the account balance\n def check_balance(self):\n return f'{self.balance}'\n\n # deposits the given amount in the account\n def deposit(self, amount):\n self.balance += amount\n self.trans.append(f'${amount} deposited')\n return self.balance\n \n # returns true if the withdrawn amount won't put the account in the negative \n def __check_withdrawal(self, amount):\n if (self.balance - amount) >= 0:\n return True\n else:\n return False\n\n # withdraws the amount from the account and returns it\n def withdraw(self, amount):\n if self.__check_withdrawal(amount):\n self.balance -= 
amount\n self.trans.append(f'${amount} withdrawn')\n return self.balance\n else:\n return f\"Error: Not enough money to withdraw.\"\n\n # returns the amount of interest calculated on the account\n def calc_interest(self):\n return (self.balance * self.int_rate)\n\n # prints out a history of transactions\n def print_transactions(self):\n # for i in range(len(self.trans)):\n # print(self.trans[i])\n if not self.trans:\n return str('No transactions yet')\n else:\n return ', '.join(self.trans)\n\ncash = ATM(500)\nflag = True\nprint(\"Thank you for using VERYSECUREDEFINITELYNOTFRAUD ATM\\nWhat action would you like to do:\\n1 for Deposit\\n2 for Withdraw\\n3 for Check Balance\\n4 for Transaction History\\n5 for Finish\")\n\nwhile flag:\n user_input = int(input('>'))\n\n if user_input == 1:\n try:\n deposit_amount = int(input(\"How much would you like to deposit: $\"))\n cash.deposit(deposit_amount)\n except:\n print('Enter a valid amount.')\n elif user_input == 2:\n withdraw_amount = int(input(\"How much would you like to withdraw: $\"))\n try:\n if \"Error\" in cash.withdraw(withdraw_amount):\n print(str(\"Not enough funds for that withdrawal amount.\"))\n except:\n print(f'Account debited by ${withdraw_amount}.')\n elif user_input == 3:\n print(f'***Your balance on this account is ${cash.check_balance()}***')\n elif user_input ==4:\n print(cash.print_transactions())\n elif user_input ==5:\n flag = False\n ","repo_name":"PdxCodeGuild/class_redmage","sub_path":"code/jeffrey/Python/lab25_atm.py","file_name":"lab25_atm.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12577416798","text":"\"\"\"\nExtended Variational Density Propagation in PyTorch.\n\nThis script implements a Bayesian multi-layer perceptron with KL Loss\nusing extended variational density propagation (VDP) in PyTorch. 
The code is modified\nfrom exVDP_MNIST.py in https://github.com/dimahdera/Robust-Anomaly-Detection,\nwhich is an implementation of the paper \"PremiUm-CNN: Propagating Uncertainty Towards \nRobust Convolutional Neural Networks\" by Dera et al.\nThe original code was authored by Dimah Dera.\n\nThe script defines several custom PyTorch modules, including `Constant2RVLinearlayer`,\n`RV2RVLinearlayer`, `RVRelu`, and `RVSoftmax`, which are used to build the Bayesian MLP.\nIt also defines a `nll_gaussian` function for computing the negative log-likelihood of\na Gaussian distribution, as well as an `exVDPMLP` class that encapsulates the entire\nBayesian MLP.\n\nAuthor: Kyle Naddeo\nDate: 3/6/2023\n\"\"\"\n\n# Imports\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport timeit\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef nll_gaussian(y_test, y_pred_mean, y_pred_sd, num_labels):\n \"\"\"\n Compute the negative log-likelihood of a Gaussian distribution.\n\n Args:\n y_test (torch.Tensor): A tensor of shape (batch_size, num_labels).\n y_pred_mean (torch.Tensor): A tensor of shape (batch_size, num_labels).\n y_pred_sd (torch.Tensor): A tensor of shape (batch_size, num_labels, num_labels).\n num_labels (int): The number of output labels.\n\n Returns:\n torch.Tensor: A scalar tensor representing the negative log-likelihood of the predicted distribution.\n \"\"\"\n # Collect Stats\n batch_size = y_test.size(0)\n\n # Declare device\n device = y_pred_mean.device\n \n # Add small constant to diagonal for numerical stability\n NS = torch.diag_embed(torch.full((batch_size, num_labels), 1e-3)).to(device)\n y_pred_sd_ns = y_pred_sd + NS\n \n # Invert sigma\n y_pred_sd_inv = torch.linalg.inv(y_pred_sd_ns)\n\n # Calculate error\n mu_ = y_pred_mean - y_test\n\n # First term is error over sigma\n mu_sigma = torch.matmul(mu_.unsqueeze(dim=1), y_pred_sd_inv)\n ms1 = torch.mean(torch.squeeze(torch.matmul(mu_sigma, mu_.unsqueeze(dim=2))))\n\n # Second term is log determinant\n ms2 = torch.mean(torch.squeeze(torch.linalg.slogdet(y_pred_sd_ns)[1]))\n\n # Compute the mean\n ms = 0.5 * ms1 + 0.5 * ms2\n return ms\n\ndef compute_kl_loss(mu, sigma):\n \"\"\"\n Computes the Kullback-Leibler (KL) divergence between a Gaussian distribution\n with mean `mu` and covariance `sigma` and a standard normal distribution.\n\n Parameters:\n mu (torch.Tensor): the mean of the Gaussian distribution (batch_size, num_features)\n sigma (torch.Tensor): the covariance matrix of the Gaussian distribution (batch_size, num_features, num_features)\n\n Returns:\n torch.Tensor: the KL divergence between the Gaussian distribution and the standard normal distribution (batch_size,)\n\n References:\n - Derivation from https://mr-easy.github.io/2020-04-16-kl-divergence-between-2-gaussian-distributions/\n - Assumes the prior is a standard normal distribution\n\n Formula:\n The KL divergence between a Gaussian distribution q(z|x) with mean `mu` and covariance `sigma` and a\n standard normal distribution p(z) is given by:\n\n KL(q(z|x) || p(z)) = 0.5 * (mu^T mu + tr(sigma) - k - log(det(sigma)))\n\n where `tr(sigma)` is the trace of the covariance matrix, `mu^T mu` is the dot product of the mean vector with\n itself, `k` is the dimension of the Gaussian distribution (i.e., the number of features), and `det(sigma)`\n is the determinant of the covariance matrix.\n \"\"\"\n device = mu.device\n\n # calculate the KL divergence\n k = 
torch.tensor(mu.size(0)).view(-1, 1).to(device)\n trace_sigma = torch.diagonal(sigma, dim1=-2, dim2=-1).sum(-1).view(-1, 1)\n mu_sq = torch.bmm(mu.t().unsqueeze(1), mu.t().unsqueeze(2)).view(-1, 1)\n logdet_sigma = torch.slogdet(sigma)[1].view(-1, 1)\n kl_loss = 0.5 * (trace_sigma + mu_sq - k - logdet_sigma).sum()\n \n return kl_loss\n\nclass RVLinearlayer(nn.Module):\n \"\"\"\n Custom Bayesian Linear Input Layer that takes random variable input.\n\n Attributes:\n size_in (int): The input size of the layer.\n size_out (int): The output size of the layer.\n w_mu (nn.Parameter): The weight mean parameter.\n w_sigma (nn.Parameter): The weight sigma parameter.\n b_mu (nn.Parameter): The bias mean parameter.\n b_sigma (nn.Parameter): The bias sigma parameter.\n \"\"\"\n def __init__(self, size_in, size_out):\n super(RVLinearlayer, self).__init__()\n # collect stats\n self.size_in, self.size_out = size_in, size_out\n\n # initialize weight and bias mean and sigma parameters\n self.w_mu = nn.Parameter(torch.Tensor(size_in, size_out))\n self.w_sigma = nn.Parameter(torch.Tensor(size_out, size_in))\n self.b_mu = nn.Parameter(torch.Tensor(size_out, 1))\n self.b_sigma = nn.Parameter(torch.Tensor(size_out,))\n\n # initialize weights and biases using normal and uniform distributions\n nn.init.normal_(self.w_mu, mean=0.0, std=0.00005)\n nn.init.uniform_(self.w_sigma, a=-12.0, b=-2.0)\n nn.init.normal_(self.b_mu, mean=0.0, std=0.00005)\n nn.init.uniform_(self.b_sigma, a=-12.0, b=-2.0)\n\n def forward(self, mu_in, sigma_in):\n \n # Extract stats\n device = self.w_mu.device\n batch_size = mu_in.size(0)\n\n mu_out = torch.matmul(self.w_mu.transpose(1, 0), mu_in.view(batch_size, self.size_in, 1)) + self.b_mu\n\n # Perform a reparameterization trick\n W_Sigma = torch.log(1. + torch.exp(self.w_sigma))\n B_Sigma = torch.log(1. 
+ torch.exp(self.b_sigma))\n \n # Creat diagonal matrices\n W_Sigma = torch.diag_embed(W_Sigma)\n B_Sigma = torch.diag_embed(B_Sigma)\n\n # Calculate Sigma_out\n mu_in_t_W_Sigma_mu_in = torch.bmm(torch.matmul(W_Sigma, mu_in.transpose(2, 1).unsqueeze(-1)).squeeze(-1), mu_in).squeeze()\n\n if sigma_in is not None:\n tr_W_Sigma_and_sigma_in = torch.matmul(W_Sigma.view(self.size_out, -1), sigma_in.view(-1, batch_size)).view(batch_size, self.size_out)\n mu_w_t_sigma_in_mu_w = torch.matmul(torch.matmul(self.w_mu.t(), sigma_in), self.w_mu)\n Sigma_out = (torch.diag_embed(tr_W_Sigma_and_sigma_in) + mu_w_t_sigma_in_mu_w + torch.diag_embed(mu_in_t_W_Sigma_mu_in)) + B_Sigma\n \n else:\n Sigma_out = torch.diag_embed(mu_in_t_W_Sigma_mu_in) + B_Sigma\n \n # KL loss\n kl_loss = compute_kl_loss(self.w_mu, W_Sigma)\n \n return mu_out, Sigma_out , kl_loss\n\nclass RVNonLinearFunc(nn.Module):\n \"\"\"\n Custom Bayesian ReLU activation function for random variables.\n\n Attributes:\n None\n \"\"\"\n def __init__(self, func):\n super(RVNonLinearFunc, self).__init__()\n self.func = func\n\n def forward(self, mu_in, Sigma_in):\n \"\"\"\n Forward pass of the Bayesian ReLU activation function.\n\n Args:\n mu_in (torch.Tensor): A tensor of shape (batch_size, input_size),\n representing the mean input to the ReLU activation function.\n Sigma_in (torch.Tensor): A tensor of shape (batch_size, input_size, input_size),\n representing the covariance input to the ReLU activation function.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two tensors,\n including the mean of the output and the covariance of the output.\n \"\"\"\n # Collect stats\n batch_size = mu_in.size(0)\n \n # Mean\n mu_out = self.func(mu_in)\n\n # Compute the derivative of the ReLU activation function with respect to the input mean\n gradi = torch.autograd.grad(mu_out, mu_in, grad_outputs=torch.ones_like(mu_out), create_graph=True)[0].view(batch_size,-1)\n\n # add an extra dimension to gradi at position 2 and 1\n grad1 = gradi.unsqueeze(dim=2)\n grad2 = gradi.unsqueeze(dim=1)\n \n # compute the outer product of grad1 and grad2\n outer_product = torch.bmm(grad1, grad2)\n \n # element-wise multiply Sigma_in with the outer product\n # and return the result\n Sigma_out = torch.mul(Sigma_in, outer_product)\n\n return mu_out, Sigma_out\n\nclass RVSoftmax(nn.Module):\n \"\"\"\n Custom Bayesian Softmax activation function for random variables.\n\n Attributes:\n None\n \"\"\"\n def __init__(self):\n super(RVSoftmax, self).__init__() \n\n def softmax(self, x):\n # Apply softmax function along feature dimension\n return torch.softmax(x, dim=1) \n \n def forward(self, mu_in, Sigma_in):\n \"\"\"\n Forward pass of the Bayesian Softmax activation function.\n\n Args:\n mu_in (torch.Tensor): A tensor of shape (batch_size, input_size),\n representing the mean input to the Softmax activation function.\n Sigma_in (torch.Tensor): A tensor of shape (batch_size, input_size, input_size),\n representing the covariance input to the Softmax activation function.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two tensors,\n including the mean of the output and the covariance of the output.\n \"\"\"\n \n # Collect stats\n batch_size, feature_size = mu_in.size()[:2]\n \n # Mean\n mu_out = self.softmax(mu_in.view(batch_size, feature_size)) # shape: [batch_size, output_size]\n\n # Compute Jacobian\n jac = torch.diagonal(torch.autograd.functional.jacobian(self.softmax, mu_in.view(batch_size, -1), create_graph=True, strict=True), dim1=0, dim2=2).permute(2, 0, 
1)\n \n # Compute covariance\n Sigma_out = torch.bmm(jac, torch.bmm(Sigma_in, jac.transpose(1, 2)))\n\n return mu_out, Sigma_out\n\nclass exVDPMLP(nn.Module):\n \"\"\"\n A Bayesian Multi-Layer Perceptron with KL Loss.\n\n Attributes:\n input_dim (int): The number of input features.\n hidden_dim (int): The number of hidden units.\n output_dim (int): The number of output classes.\n \"\"\"\n def __init__(self, input_dim=784, hidden_dim=64, output_dim=10):\n super(exVDPMLP, self).__init__()\n self.linear_1 = RVLinearlayer(input_dim, hidden_dim)\n self.relu_1 = RVNonLinearFunc(func = torch.nn.functional.relu)\n self.linear_2 = RVLinearlayer(hidden_dim, output_dim)\n self.softmax = RVSoftmax()\n\n def forward(self, x):\n \"\"\"\n Forward pass of the Bayesian Multi-Layer Perceptron with KL Loss.\n\n Args:\n inputs (torch.Tensor): A tensor of shape (batch_size, input_dim),\n representing the input to the model.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple of three tensors,\n including the mean of the output, the covariance of the output,\n and the sum of the KL regularization loss terms.\n \"\"\"\n\n m, s, kl_1 = self.linear_1(x, None)\n m, s = self.relu_1(m, s)\n m, s, kl_2 = self.linear_2(m, s)\n outputs, Sigma = self.softmax(m, s)\n\n total_kl_loss = kl_1 + kl_2\n \n return outputs, Sigma, total_kl_loss\n","repo_name":"naddeok96/exVDP-exRL","sub_path":"model_classes/torch_exVDP_MLP.py","file_name":"torch_exVDP_MLP.py","file_ext":"py","file_size_in_byte":11498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30378408339","text":"from scipy import io\nfrom scipy import sparse\nimport numpy as np\nimport pickle as pkl\nfrom config import data_dir\nimport math\nimport os\nfrom copy import deepcopy\n\ndef load_tensordata(data, ent, pro):\n\treturn np.array([io.mmread(data_dir + data + \"/\" + str(rel_slice) + \".mtx\") for rel_slice in range(1, -1 + len(os.listdir(data_dir + data)))]), pkl.load(open(data_dir + data + \"/\" + ent + \".pkl\", \"rb\")), pkl.load(open(data_dir + data + \"/\"+ pro + \".pkl\", \"rb\"))\n\n\t\ndef create_fold(n_folds, n_fold, X, perm, rel, ent):\n\tK = X[rel].tolil().T\n\tM = deepcopy(K)\n\t#the n-th batch of the permutation\n\tr = math.floor(len(perm)/n_folds)\t\n\tif n_fold == n_folds:\n\t\tnperm = perm[r*n_fold:]\n\telse:\n\t\tnperm = perm[r*(n_fold -1):r*n_fold]\n\n\t\n\tfor index in perm:\n\t\tif index in nperm:\n\t\t\tK.data[ent][index] = 0\n\t\telse:\n\t\t\tM.data[ent][index] = 0\n\n\tK = K.tocsr().T\n\tM = M.tocsr().T\n\n\tprint(np.sum(M.tocsr().getcol(ent).T.todense()))\n\tprint(np.sum(K.tocsr().getcol(ent).T.todense()))\n\t\n\ttest = np.array([X[i] if i != rel else M for i in range(len(X)) ])\n\ttrain = np.array([X[i] if i != rel else K for i in range(len(X))])\n\treturn test, train, nperm\n\t\n\t\n","repo_name":"adibaba/tensor-factorization","sub_path":"project-name/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20373399881","text":"from fastapi import APIRouter\nfrom urllib.parse import quote\n\n\nrouter = APIRouter()\n\n\n@router.get(\"/encode_link\")\nasync def encode_link(link: str) -> str:\n encoded_link = quote(link, safe='/')\n return 
encoded_link\n","repo_name":"OlyaB29/BlogApplication","sub_path":"api/routers/link_router.py","file_name":"link_router.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21950560467","text":"\"\"\"users column\n\nRevision ID: 91d94756795b\nRevises: 51330fcfe549\nCreate Date: 2020-06-11 22:37:13.030073\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '91d94756795b'\ndown_revision = '51330fcfe549'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('container', sa.String(length=120), nullable=True))\n op.add_column('user', sa.Column('fullname', sa.String(length=120), nullable=True))\n op.add_column('user', sa.Column('group', sa.String(length=120), nullable=True))\n op.add_column('user', sa.Column('unixid', sa.String(length=120), nullable=True))\n op.add_column('user', sa.Column('unixuser', sa.String(length=120), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'unixuser')\n op.drop_column('user', 'unixid')\n op.drop_column('user', 'group')\n op.drop_column('user', 'fullname')\n op.drop_column('user', 'container')\n # ### end Alembic commands ###\n","repo_name":"maxmax/seriesapp","sub_path":"migrations/versions/91d94756795b_users_column.py","file_name":"91d94756795b_users_column.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23998191159","text":"#SVR with Sklearn\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndata = pd.read_csv('Position_Salaries.csv')\nx = data.iloc[:,1:2].values\ny = data.iloc[:,2:].values\n# data = pd.read_csv('studentscores.csv')\n# x = data.iloc[:,0:1].values\n# y = data.iloc[:,1:].values\n\n#use imputer if any missing value as SVM doesnot support.....\n#from sklearn.preprocessing import Imputer\n# imputer = preprocessing.Imputer(missing_values='NaN',strategy='mean',axis=0)\n# x[:,0:] = imputer.fit_transform(x[:,0:])\n\n#Feature scaling as SVR doesnot apply..\n\nfrom sklearn.preprocessing import StandardScaler\n\nsc_x = StandardScaler()\nsc_y = StandardScaler()\n\nx = sc_x.fit_transform(x)\ny = sc_y.fit_transform(y)\n\n\n\n#fitting svr\nfrom sklearn.svm import SVR\nregressor = SVR(kernel='rbf')\n#regressor = SVR(kernel='linear')\nregressor.fit(x,y)\ny_pred = regressor.predict(x)\n\nprint(y)\nprint(y_pred)\n\nplt.scatter(x, y, c='green', label=\"regression line\")\nplt.plot(x,y_pred,label=\"predicted line\")\nplt.xlabel(\"X parameters\")\nplt.ylabel(\"Y parameters\")\nplt.legend()\nplt.show()\n\n\n","repo_name":"yug95/MachineLearning","sub_path":"Regression/support vector regression/support_vector_sk_learn.py","file_name":"support_vector_sk_learn.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"67"} +{"seq_id":"7455334006","text":"import time\nfrom influxdb import InfluxDBClient\nimport requests\n\n\ndef write_to_influx():\n global metric, address, ether_balance, ethbtc, ethusd, address_txn_count, api_call, e, fields, tags, point, datapoints\n metric = 'wallet1'\n address = address\n Txn_hash = transaction['hash']\n timestamp = 
float(transaction['timeStamp'])\n blockNumber = transaction['blockNumber']\n _from = transaction['from']\n _to = transaction['to']\n isError = transaction['isError']\n value = transaction['value']\n txn_fee = int(transaction['gasUsed']) * int(transaction['gasPrice']) * 0.000000000000000001\n ether_balance = int(ether_balance)\n ethbtc = float(ether_price['ethbtc'])\n ethusd = float(ether_price['ethusd'])\n address_txn_count = address_txn_count\n try:\n api_call = 'https://api.etherscan.io/api?module=proxy&action=eth_blockNumber&apikey=' + api_key\n latest_block = requests.get(api_call).json()\n except Exception as e:\n print(\"error on med gas price request\", e)\n latest_block = latest_block['result']\n\n fields = {\n \"Txn_hash\": Txn_hash,\n \"blockNumber\": float(blockNumber),\n # \"blockReward\": float(blockReward),\n # \"uncleReward\": float(uncleReward),\n \"timestamp\": timestamp,\n \"address\": address,\n \"_from\": _from,\n \"_to\": _to,\n \"isError\": float(isError),\n \"value\": float(value),\n \"txn_fee\": float(txn_fee),\n \"address_txn_count\": address_txn_count,\n \"ether_balance\": float(ether_balance),\n \"ethbtc\": float(ethbtc),\n \"ethusd\": float(ethusd),\n \"latest_block\": latest_block,\n }\n tags = {\n \"address\": str(address)\n }\n point = {\"measurement\": metric, \"time\": time.time_ns(), \"fields\": fields, \"tags\": tags}\n datapoints.append(point)\n\n if len(datapoints) % batchsize == 0:\n print('Inserting %d datapoints...' % (len(datapoints)))\n\n datapoints = []\n\n\nwhile 1 == 1:\n\n try:\n client = InfluxDBClient(host='80.211.140.70', port='8086', username='ashen', password='1234', database='db1')\n except Exception as e:\n client = None\n print(e)\n\n # client.drop_database('db1')\n # client.create_database('db1')\n\n client.create_retention_policy('week_only', '10m', 2, database=\"db1\", default=True)\n wallets = ['0x78aC091fc36d97EC7fC60352827B4A79641475DC', '0x7D48ABeA39EED4D60DD77c1a470b8f6D464b810E']\n api_key = 'CBMQ6J698ZEVZ3UJDBRYTTYAGHAA1TZDUH'\n datapoints = []\n\n # general data----------------------------------------------------------------------------\n ether_price_respond = {}\n market_cap_respond = {}\n med_gas_price_respond = {}\n gas_confirmation_time = {}\n\n try:\n api_call = 'https://api.etherscan.io/api?module=stats&action=ethprice&apikey=' + api_key\n ether_price_respond = requests.get(api_call).json()\n except Exception as e:\n ether_price_respond = {}\n print(\"error on price request\", e)\n\n try:\n api_call = 'https://api.etherscan.io/api?module=stats&action=ethsupply&apikey=' + api_key\n market_cap_respond = requests.get(api_call).json()\n except Exception as e:\n market_cap_respond = {}\n print(\"error on market cap request\", e)\n\n try:\n api_call = 'https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=' + api_key\n med_gas_price_respond = requests.get(api_call).json()\n except Exception as e:\n med_gas_price_respond = {}\n print(\"error on med gas price request\", e)\n\n try:\n api_call = 'https://api.etherscan.io/api?module=gastracker&action=gasestimate&gasprice=2000000000&apikey=' + api_key\n gas_confirmation_time = requests.get(api_call).json()\n except Exception as e:\n gas_confirmation_time = {}\n print(\"error on med gas price request\", e)\n\n if ether_price_respond:\n ether_price = ether_price_respond[\"result\"]\n else:\n ether_price = ether_price_respond\n\n if market_cap_respond:\n market_cap = market_cap_respond[\"result\"]\n else:\n market_cap = market_cap_respond\n\n if 
med_gas_price_respond:\n med_gas_price = med_gas_price_respond['result']\n else:\n med_gas_price = med_gas_price_respond\n\n if gas_confirmation_time:\n gas_confirmation_time = gas_confirmation_time[\"result\"]\n else:\n gas_confirmation_time = gas_confirmation_time\n\n metric = 'general'\n\n ethbtc = float(ether_price['ethbtc'])\n ethusd = float(ether_price['ethusd'])\n market_cap = int(market_cap) / 1000000000000000000\n med_gas_price_avg = float(med_gas_price['ProposeGasPrice'])\n\n fields = {\n \"ethbtc\": float(ethbtc),\n \"ethusd\": float(ethusd),\n \"market_cap\": float(market_cap),\n \"med_gas_price_avg\": float(med_gas_price_avg),\n \"gas_confirmation_time\": float(gas_confirmation_time)\n }\n\n tags = {}\n\n point = {\"measurement\": metric, \"time\": time.time_ns(), \"fields\": fields, \"tags\": tags}\n\n datapoints.append(point)\n\n print('Inserting General Data')\n response = None\n try:\n response = client.write_points(datapoints, time_precision='n')\n except Exception as e:\n print(e)\n\n if not response:\n print('Problem inserting General Data, exiting...')\n exit(1)\n\n print(\"Wrote General Data, response: %s\" % response)\n\n datapoints = []\n\n # wallets transaction data----------------------------------------------------------------------------\n\n for address in wallets:\n ether_balance_respond = {}\n ether_transactions_respond = {}\n address_txn_count = {}\n\n try:\n api_call = 'https://api.etherscan.io/api?module=account&action=balance&address=' + address + '&tag=latest&apikey=' + api_key\n ether_balance_respond = requests.get(api_call).json()\n except Exception as e:\n ether_balance_respond = {}\n print(\"error on balance request\", e)\n\n try:\n api_call = 'https://api.etherscan.io/api?module=account&action=txlist&address=' + address + '&startblock=0&endblock=99999999&sort=desc&apikey=' + api_key\n ether_transactions_respond = requests.get(api_call).json()\n except Exception as e:\n ether_transactions_respond = {}\n print(\"error on transaction request\", e)\n\n try:\n api_call = 'https://api.etherscan.io/api?module=proxy&action=eth_getTransactionCount&address=' + address + '&tag=latest&apikey=' + api_key\n address_txn_count = requests.get(api_call).json()\n except Exception as e:\n address_txn_count = {}\n print(\"error on transaction count request\", e)\n\n if address_txn_count:\n address_txn_count = address_txn_count['result']\n else:\n address_txn_count = address_txn_count\n\n if ether_balance_respond:\n ether_balance = ether_balance_respond[\"result\"]\n else:\n ether_balance = ether_balance_respond\n\n if ether_transactions_respond:\n ether_transactions = ether_transactions_respond[\"result\"]\n else:\n ether_transactions = ether_transactions_respond\n\n address_txn_count = str(address_txn_count)\n address_txn_count = int(address_txn_count, 16)\n\n batchsize = len(ether_transactions)\n # threads = []\n for transaction in ether_transactions:\n metric = 'wallet1'\n address = address\n Txn_hash = transaction['hash']\n timestamp = float(transaction['timeStamp'])\n blockNumber = transaction['blockNumber']\n _from = transaction['from']\n _to = transaction['to']\n isError = transaction['isError']\n value = transaction['value']\n txn_fee = int(transaction['gasUsed']) * int(transaction['gasPrice']) * 0.000000000000000001\n ether_balance = int(ether_balance)\n ethbtc = float(ether_price['ethbtc'])\n ethusd = float(ether_price['ethusd'])\n address_txn_count = float(address_txn_count)\n total_transactions = batchsize\n\n fields = {\n \"Txn_hash\": Txn_hash,\n 
\"blockNumber\": float(blockNumber),\n # \"blockReward\": float(blockReward),\n # \"uncleReward\": float(uncleReward),\n \"timestamp\": timestamp,\n \"address\": address,\n \"_from\": _from,\n \"_to\": _to,\n \"isError\": float(isError),\n \"value\": float(value),\n \"txn_fee\": float(txn_fee),\n \"address_txn_count\": float(address_txn_count),\n \"ether_balance\": float(ether_balance),\n \"ethbtc\": float(ethbtc),\n \"ethusd\": float(ethusd),\n \"total_transactions\": float(total_transactions),\n # \"latest_block\": latest_block,\n }\n tags = {\n \"address\": str(address)\n }\n point = {\"measurement\": metric, \"time\": time.time_ns(), \"fields\": fields, \"tags\": tags}\n datapoints.append(point)\n\n if len(datapoints) % batchsize == 0:\n print('Inserting %d datapoints...' % (len(datapoints)))\n\n try:\n response = client.write_points(datapoints, time_precision='n')\n except Exception as e:\n print(e)\n\n if not response:\n print('Problem inserting points, exiting...')\n exit(1)\n\n print(\"Wrote %d points, up to %s, response: %s\" % (len(datapoints), timestamp, response))\n\n datapoints = []\n","repo_name":"madhawadias/etherscanapi-scraper","sub_path":"services/etherForAddress.py","file_name":"etherForAddress.py","file_ext":"py","file_size_in_byte":9730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38543392313","text":"import pandas\n\nfrom Student import Student\n\ndef create_list_students() -> list:\n students_object = []\n\n for row in range(n_rows_student): # change to 10 for check should be row\n experience_lst = []\n flag_year = True\n id_num = students_excel.iloc[row][0]\n full_name = students_excel.iloc[row][1] + \" \" + students_excel.iloc[row][2]\n year = students_excel.iloc[row][5]\n city = students_excel.iloc[row][6] # Hebrew\n\n if year == 'א' or year == 'ד':\n flag_year = False\n\n elif year == 'ב':\n experience_lst = experience_by_year.get(\"B\")\n\n elif year == 'ג':\n experience_lst = experience_by_year.get(\"C\")\n\n # else:\n # experience_lst = experience_by_year.get(\"D\")\n\n if flag_year:\n student = Student(id_num, full_name, city, year, experience_lst, cities_less_50_km_heb.get(city))\n students_object.append(student)\n\n return students_object\n\n\ndef write_comment(student): # This function is responsible for writing the column \"הערות\" in the output Excel\n if len(student.to_do) == 0:\n output_excel.at[row_of_output, 'הערות'] = \"הסטודנט שובץ בהכל\"\n\n else:\n size = num_of_exp_per_year.get(student.year)\n if size == len(student.to_do): # this check if there is not scheduling at all\n output_excel.at[row_of_output, 'תעודת זהות'] = student.id_num\n output_excel.at[row_of_output, 'שם מלא'] = student.name\n output_excel.at[row_of_output, 'עיר מגורים'] = student.city\n output_excel.at[row_of_output, 'שנה'] = student.year\n output_excel.at[row_of_output, 'הערות'] = \"בעיה - לא שובץ בכלום!\"\n else:\n comment_in_excel = \"לא שובץ ב: \"\n for experience in student.to_do:\n comment_in_excel += experience + \",\"\n\n comment_in_excel = comment_in_excel[:-1]\n output_excel.at[row_of_output, 'הערות'] = comment_in_excel\n\n\n# Experiences by years\nsecond_year = [\"ס.המבוגר - פנימית\", \"ס.המבוגר - כירורגית\"]\nthird_year = [\"טראומה מלרד - מיון\", \"סיעוד בקהילה\", \"ס.אישה - ס.אישה\", \"סיעוד בריאות הנפש\", \"ס.ילד - ילדים\"]\nfourth_year = [\"סטאז\"] # Maybe will be deleted\n\nnum_of_exp_per_year = {'ב': 2, 'ג': 5, 'ד': 1, }\n\nexperience_by_year = {\"B\": second_year, \"C\": third_year, \"D\": 
fourth_year}\n\n# -----------------------------------------------------------------\n\n# A dict that holds KEY = City and VALUE = list of cities less than 50 km from the city in the KEY position.\ncities_less_50_km_eng = {'Afula': ['Nahariyya', 'Afula', 'Zefat', 'Haifa', 'Pardesiya'],\n 'Ariel': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Ashdod': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod', 'Bnei Brak',\n 'Tel-Aviv', 'Hod Hasharon'], 'Ashkelon': ['Holon', 'Ashdod'],\n 'Arraba': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Baqa-Jatt': ['Kefar Sava', 'Petah Tikva', 'Raanana', 'Afula', 'Haifa', 'Ramat Gan',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Bat Yam': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'], 'Beersheba': [\"Be'er Sheva\"],\n \"Beit She'an\": ['Afula'],\n 'Beit Shemesh': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Tel-Aviv', 'Hod Hasharon'],\n 'Baqa al-Gharbiyye': ['Kefar Sava', 'Petah Tikva', 'Raanana', 'Afula', 'Haifa', 'Ramat Gan',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Beitar Illit': ['Jerusalem', 'Petah Tikva', 'Holon', 'Ramat Gan', 'Ashdod'],\n 'Bnei Brak': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Dimona': [\"Be'er Sheva\"],\n \"El'ad\": ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n \"Giv'atayim\": ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n \"Giv'at Shmuel\": ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Hadera': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Afula', 'Haifa', 'Ramat Gan',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Haifa': ['Nahariyya', 'Afula', 'Haifa'],\n 'Herzliya': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Hod HaSharon': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Holon': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Jerusalem': ['Jerusalem', 'Petah Tikva', 'Holon', 'Ramat Gan', 'Bnei Brak'],\n 'Karmiel': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Kafr Qasim': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Kfar Saba': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Kiryat Ata': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Kiryat Bialik': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Kiryat Gat': ['Jerusalem', 'Holon', 'Ashdod', \"Be'er Sheva\"],\n 'Kiryat Malakhi': ['Jerusalem', 'Petah Tikva', 'Holon', 'Ramat Gan', 'Ashdod', 
'Bnei Brak',\n 'Tel-Aviv', 'Hod Hasharon'],\n 'Kiryat Motzkin': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Kiryat Ono': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Kiryat Shmona': ['Zefat'], 'Kiryat Yam': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Lod': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n \"Ma'ale Adumim\": ['Jerusalem'], \"Ma'alot-Tarshiha\": ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Migdal HaEmek': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n \"Modi'in Illit\": ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n \"Modi'in-Maccabim-Re'ut\": ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana',\n 'Ramat Gan', 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv',\n 'Pardesiya',\n 'Hod Hasharon'], 'Maghar': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Nahariya': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Nazareth': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Nazareth Illit': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Nesher': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Ness Ziona': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Netanya': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Bnei Brak',\n 'Netanya',\n 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'], 'Netivot': ['Ashdod', \"Be'er Sheva\"],\n 'Ofakim': [\"Be'er Sheva\"],\n 'Or Akiva': ['Kefar Sava', 'Petah Tikva', 'Raanana', 'Afula', 'Haifa', 'Bnei Brak', 'Netanya',\n 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Or Yehuda': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Nof HaGalil': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Petah Tikva': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Qalansawe': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Afula', 'Ramat Gan',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n \"Ra'anana\": ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Rahat': ['Ashdod', \"Be'er Sheva\"],\n 'Ramat Gan': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Ramat HaSharon': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Ramla': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Rehovot': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod',\n 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Rishon LeZion': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Rosh HaAyin': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 
'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Safed': ['Nahariyya', 'Afula', 'Zefat'], 'Sakhnin': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Sderot': ['Ashdod', \"Be'er Sheva\"],\n \"Shefa-Amr (Shfar'am)\": ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Tamra': ['Nahariyya', 'Afula', 'Zefat', 'Haifa'],\n 'Tayibe': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Afula', 'Ramat Gan', 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Tel Aviv': ['Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak',\n 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Tiberias': ['Nahariyya', 'Afula', 'Zefat'],\n 'Tirat Carmel': ['Nahariyya', 'Afula', 'Haifa', 'Netanya'],\n 'Umm al-Fahm': ['Kefar Sava', 'Raanana', 'Afula', 'Haifa', 'Netanya', 'Pardesiya',\n 'Hod Hasharon'],\n 'Yavne': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan', 'Ashdod',\n 'Bnei Brak', 'Tel-Aviv', 'Hod Hasharon'],\n 'Yehud-Monosson': ['Jerusalem', 'Kefar Sava', 'Petah Tikva', 'Holon', 'Raanana', 'Ramat Gan',\n 'Ashdod', 'Bnei Brak', 'Netanya', 'Tel-Aviv', 'Pardesiya', 'Hod Hasharon'],\n 'Yokneam': ['Nahariyya', 'Afula', 'Haifa', 'Netanya', 'Pardesiya']}\n\ncities_less_50_km_heb = {'עפולה': ['נהריה', 'עפולה', 'צפת', 'חיפה', 'פרדסיה'],\n 'אריאל': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'אשדוד': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'תל אביב-יפו', 'הוד השרון'], 'אשקלון': ['חולון', 'אשדוד'],\n 'עראבה': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n \"באקה-ג'ת\": ['כפר סבא', 'פתח תקווה', 'רעננה', 'עפולה', 'חיפה', 'רמת גן', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'בת ים': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'באר שבע': ['באר שבע'], 'בית שאן': ['עפולה'],\n 'בית שמש': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רמת גן', 'אשדוד', 'בני ברק',\n 'תל אביב-יפו', 'הוד השרון'],\n 'באקה אל-גרבייה': ['כפר סבא', 'פתח תקווה', 'רעננה', 'עפולה', 'חיפה', 'רמת גן', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'ביתר עילית': ['ירושלים', 'פתח תקווה', 'חולון', 'רמת גן', 'אשדוד'],\n 'בני ברק': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'דימונה': ['באר שבע'],\n 'אלעד': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'גבעתיים': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'גבעת שמואל': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'חדרה': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'עפולה', 'חיפה', 'רמת גן', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'חיפה': ['נהריה', 'עפולה', 'חיפה'],\n 'הרצליה': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'כפר סבא': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'חולון': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'ירושלים': ['ירושלים', 'פתח תקווה', 'חולון', 
'רמת גן', 'בני ברק'],\n 'כרמיאל': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'כפר קאסם': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'קריית אתא': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'קריית ביאליק': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'קריית גת': ['ירושלים', 'חולון', 'אשדוד', 'באר שבע'],\n 'קריית מלאכי': ['ירושלים', 'פתח תקווה', 'חולון', 'רמת גן', 'אשדוד', 'בני ברק', 'תל אביב-יפו',\n 'הוד השרון'], 'קריית מוצקין': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'קריית אונו': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'קריית שמונה': ['צפת'], 'קריית ים': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'לוד': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'מעלה אדומים': ['ירושלים'],\n 'מעלות תרשיחא': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'מגדל העמק': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'מודיעין עילית': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'מודיעין- מכבים- רעות': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן',\n 'אשדוד', 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n \"מע'אר\": ['נהריה', 'עפולה', 'צפת', 'חיפה'], 'נצרת': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'נצרת עלית': ['נהריה', 'עפולה', 'צפת', 'חיפה'], 'נשר': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'נס ציונה': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'נתניה': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'נתיבות': ['אשדוד', 'באר שבע'],\n 'אופקים': ['באר שבע'],\n 'אור עקיבא': ['כפר סבא', 'פתח תקווה', 'רעננה', 'עפולה', 'חיפה', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'אור יהודה': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'נוף הגליל': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'פתח תקווה': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'קלנסווה': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'עפולה', 'רמת גן', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'רהט': ['אשדוד', 'באר שבע'],\n 'רמת גן': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'רמת השרון': ['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'רמלה': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'רחובות': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'ראשון לציון': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'ראש העין': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'סכנין': ['נהריה', 'עפולה', 'צפת', 'חיפה'], 'שדרות': ['אשדוד', 'באר שבע'],\n 'שפרעם': ['נהריה', 'עפולה', 'צפת', 'חיפה'], 'טמרה': ['נהריה', 'עפולה', 'צפת', 'חיפה'],\n 'טייבה': 
['כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'עפולה', 'רמת גן', 'בני ברק', 'נתניה',\n 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'], 'טבריה': ['נהריה', 'עפולה', 'צפת'],\n 'טירת כרמל': ['נהריה', 'עפולה', 'חיפה', 'נתניה'],\n 'אום אל-פחם': ['כפר סבא', 'רעננה', 'עפולה', 'חיפה', 'נתניה', 'פרדסיה', 'הוד השרון'],\n 'יבנה': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד', 'בני ברק',\n 'תל אביב-יפו', 'הוד השרון'],\n 'יהוד-מונסון': ['ירושלים', 'כפר סבא', 'פתח תקווה', 'חולון', 'רעננה', 'רמת גן', 'אשדוד',\n 'בני ברק', 'נתניה', 'תל אביב-יפו', 'פרדסיה', 'הוד השרון'],\n 'יקנעם עילית': ['נהריה', 'עפולה', 'חיפה', 'נתניה', 'פרדסיה']}\n\n# ------------------------------------------------------------------\n\ncities_with_hospitals_h_e = {\"ירושלים\": \"Jerusalem\", \"נהריה\": \"Nahariyya\", \"כפר סבא\": \"Kefar Sava\",\n \"פתח תקווה\": \"Petah Tikva\", \"חולון\": \"Holon\", \"רעננה\": \"Raanana\", \"עפולה\": \"Afula\",\n \"צפת\": \"Zefat\", \"חיפה\": \"Haifa\", \"רמת גן\": \"Ramat Gan\", \"אשדוד\": \"Ashdod\",\n \"בני ברק\": \"Bnei Brak\", \"נתניה\": \"Netanya\", \"תל אביב\": \"Tel-Aviv\", \"פרדסיה\": \"Pardesiya\",\n \"באר שבע\": \"Be'er Sheva\", \"הוד השרון\": \"Hod Hasharon\"}\n\ncities_with_hospitals_e_h = {'Jerusalem': 'ירושלים', 'Nahariyya': 'נהריה', 'Kefar Sava': 'כפר סבא',\n 'Petah Tikva': 'פתח תקווה',\n 'Holon': 'חולון', 'Raanana': 'רעננה', 'Afula': 'עפולה', 'Zefat': 'צפת', 'Haifa': 'חיפה',\n 'Ramat Gan': 'רמת גן',\n 'Ashdod': 'אשדוד', 'Bnei Brak': 'בני ברק', 'Netanya': 'נתניה', 'Tel-Aviv': 'תל אביב',\n 'Pardesiya': 'פרדסיה',\n \"Be'er Sheva\": 'באר שבע', 'Hod Hasharon': 'הוד השרון'}\n\n# Maybe not necessary\nhospitals = [\"שערי צדק\", \"הדסה עין כרם\", \"הדסה הר צופים\", \"המרכז הרפואי הרצוג\", \"בית חולים נהריה\", \"בית חולים מאיר\",\n \"לניאדו\", \"השרון\", \"בלינסון\", \"שיבא תל השומר\", \"וולפסון\", \"אסותא אשדוד\", \"אסותא רמת החייל\", \"דורות\",\n \"בית רבקה\", \"מעייני הישועה\", \"בית לוינשטין\", \"זיו\", \"העמק\", \"רמב'ם\", \"כרמל\", \"סורוקה\", \"פרדסיה לב השרון\",\n \"גהה\", \"שלוותא\", \"כפר שאול\", \"נפש באר שבע\"]\n\n# -----------------------------------------------------------------\n# One dict that holds all the cities in israel, KEY = city_english , VALUE = city_hebrew\n# And the second dict that holds all the cities in israel, KEY = city_hebrew , VALUE = city_english\n\ncities_in_israel_en_heb = {'Afula': 'עפולה', 'Arad': 'ערד', 'Ariel': 'אריאל', 'Ashdod': 'אשדוד', 'Ashkelon': 'אשקלון',\n 'Arraba': 'עראבה',\n 'Baqa-Jatt': \"באקה-ג'ת\", 'Bat Yam': 'בת ים', 'Beersheba': 'באר שבע',\n \"Beit She'an\": 'בית שאן',\n 'Beit Shemesh': 'בית שמש', 'Baqa al-Gharbiyye': 'באקה אל-גרבייה',\n 'Beitar Illit': 'ביתר עילית', 'Bnei Brak': 'בני ברק',\n 'Dimona': 'דימונה', 'Eilat': 'אילת', \"El'ad\": 'אלעד', \"Giv'atayim\": 'גבעתיים',\n \"Giv'at Shmuel\": 'גבעת שמואל',\n 'Hadera': 'חדרה', 'Haifa': 'חיפה', 'Herzliya': 'הרצליה', 'Hod HaSharon': 'הוד השרון',\n 'Holon': 'חולון',\n 'Jerusalem': 'ירושלים', 'Karmiel': 'כרמיאל', 'Kafr Qasim': 'כפר קאסם',\n 'Kfar Saba': 'כפר סבא',\n 'Kiryat Ata': 'קריית אתא', 'Kiryat Bialik': 'קריית ביאליק', 'Kiryat Gat': 'קריית גת',\n 'Kiryat Malakhi': 'קריית מלאכי',\n 'Kiryat Motzkin': 'קריית מוצקין', 'Kiryat Ono': 'קריית אונו', 'Kiryat Shmona': 'קריית שמונה',\n 'Kiryat Yam': 'קריית ים',\n 'Lod': 'לוד', \"Ma'ale Adumim\": 'מעלה אדומים', \"Ma'alot-Tarshiha\": 'מעלות תרשיחא',\n 'Migdal HaEmek': 'מגדל העמק',\n \"Modi'in Illit\": 'מודיעין עילית', \"Modi'in-Maccabim-Re'ut\": 'מודיעין- מכבים- רעות',\n 'Maghar': \"מע'אר\",\n 'Nahariya': 'נהריה', 'Nazareth': 'נצרת', 
'Nazareth Illit': 'נצרת עלית', 'Nesher': 'נשר',\n 'Ness Ziona': 'נס ציונה',\n 'Netanya': 'נתניה', 'Netivot': 'נתיבות', 'Ofakim': 'אופקים', 'Or Akiva': 'אור עקיבא',\n 'Or Yehuda': 'אור יהודה',\n 'Nof HaGalil': 'נוף הגליל', 'Petah Tikva': 'פתח תקווה', 'Qalansawe': 'קלנסווה',\n \"Ra'anana\": 'רעננה', 'Rahat': 'רהט',\n 'Ramat Gan': 'רמת גן', 'Ramat HaSharon': 'רמת השרון', 'Ramla': 'רמלה', 'Rehovot': 'רחובות',\n 'Rishon LeZion': 'ראשון לציון', 'Rosh HaAyin': 'ראש העין', 'Safed': 'צפת',\n 'Sakhnin': 'סכנין', 'Sderot': 'שדרות',\n \"Shefa-Amr (Shfar'am)\": 'שפרעם', 'Tamra': 'טמרה', 'Tayibe': 'טייבה',\n 'Tel Aviv': 'תל אביב-יפו', 'Tiberias': 'טבריה',\n 'Tira': 'טירה', 'Tirat Carmel': 'טירת כרמל', 'Umm al-Fahm': 'אום אל-פחם', 'Yavne': 'יבנה',\n 'Yehud-Monosson': 'יהוד-מונסון', 'Yokneam': 'יקנעם עילית'}\n\ncities_in_israel_heb_eng = {'עפולה': 'Afula', 'ערד': 'Arad', 'אריאל': 'Ariel', 'אשדוד': 'Ashdod', 'אשקלון': 'Ashkelon',\n 'עראבה': 'Arraba',\n \"באקה-ג'ת\": 'Baqa-Jatt', 'בת ים': 'Bat Yam', 'באר שבע': 'Beersheba',\n 'בית שאן': \"Beit She'an\",\n 'בית שמש': 'Beit Shemesh', 'באקה אל-גרבייה': 'Baqa al-Gharbiyye',\n 'ביתר עילית': 'Beitar Illit',\n 'בני ברק': 'Bnei Brak', 'דימונה': 'Dimona', 'אילת': 'Eilat', 'אלעד': \"El'ad\",\n 'גבעתיים': \"Giv'atayim\",\n 'גבעת שמואל': \"Giv'at Shmuel\", 'חדרה': 'Hadera', 'חיפה': 'Haifa', 'הרצליה': 'Herzliya',\n 'הוד השרון': 'Hod HaSharon', 'חולון': 'Holon', 'ירושלים': 'Jerusalem', 'כרמיאל': 'Karmiel',\n 'כפר קאסם': 'Kafr Qasim', 'כפר סבא': 'Kfar Saba', 'קריית אתא': 'Kiryat Ata',\n 'קריית ביאליק': 'Kiryat Bialik',\n 'קריית גת': 'Kiryat Gat', 'קריית מלאכי': 'Kiryat Malakhi', 'קריית מוצקין': 'Kiryat Motzkin',\n 'קריית אונו': 'Kiryat Ono', 'קריית שמונה': 'Kiryat Shmona', 'קריית ים': 'Kiryat Yam',\n 'לוד': 'Lod',\n 'מעלה אדומים': \"Ma'ale Adumim\", 'מעלות תרשיחא': \"Ma'alot-Tarshiha\",\n 'מגדל העמק': 'Migdal HaEmek',\n 'מודיעין עילית': \"Modi'in Illit\", 'מודיעין- מכבים- רעות': \"Modi'in-Maccabim-Re'ut\",\n \"מע'אר\": 'Maghar',\n 'נהריה': 'Nahariya', 'נצרת': 'Nazareth', 'נצרת עלית': 'Nazareth Illit', 'נשר': 'Nesher',\n 'נס ציונה': 'Ness Ziona',\n 'נתניה': 'Netanya', 'נתיבות': 'Netivot', 'אופקים': 'Ofakim', 'אור עקיבא': 'Or Akiva',\n 'אור יהודה': 'Or Yehuda',\n 'נוף הגליל': 'Nof HaGalil', 'פתח תקווה': 'Petah Tikva', 'קלנסווה': 'Qalansawe',\n 'רעננה': \"Ra'anana\",\n 'רהט': 'Rahat', 'רמת גן': 'Ramat Gan', 'רמת השרון': 'Ramat HaSharon', 'רמלה': 'Ramla',\n 'רחובות': 'Rehovot',\n 'ראשון לציון': 'Rishon LeZion', 'ראש העין': 'Rosh HaAyin', 'צפת': 'Safed',\n 'סכנין': 'Sakhnin', 'שדרות': 'Sderot',\n 'שפרעם': \"Shefa-Amr (Shfar'am)\", 'טמרה': 'Tamra', 'טייבה': 'Tayibe',\n 'תל אביב-יפו': 'Tel Aviv',\n 'טבריה': 'Tiberias', 'טירה': 'Tira', 'טירת כרמל': 'Tirat Carmel',\n 'אום אל-פחם': 'Umm al-Fahm', 'יבנה': 'Yavne',\n 'יהוד-מונסון': 'Yehud-Monosson', 'יקנעם עילית': 'Yokneam'}\n\n# -----------------------------------------------------------------\n\n# Read the excel of the Students to get the length of his rows and column\nstudents_excel = pandas.read_excel('students.xlsx')\nn_rows_student = len(students_excel.index)\nn_cols_student = len(students_excel.columns)\n\n# students is a list that contain elements of student object\nstudents = create_list_students()\n\n# -----------------------------------------------------------------\n\n# Create an Output Excel\n\ndata = {'תעודת זהות': [], 'שם מלא': [], 'שנה': [], 'עיר מגורים': [], 'התנסות א': [], 'בית חולים א': [], 'תאריכים א': [],\n 'איש קשר אחראי א': [], 'התנסות ב': [], 'בית חולים ב': [], 'תאריכים ב': [], 'איש קשר אחראי ב': [],\n 'התנסות 
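The scheduling script keeps the 50-km neighbor table twice, once with English city names and once in Hebrew, next to translation maps that already pair the two spellings. Assuming every key of `cities_less_50_km_eng` has an entry in `cities_in_israel_en_heb` and every hospital city one in `cities_with_hospitals_e_h` (spellings such as 'Beersheba' vs "Be'er Sheva" would need reconciling first), the Hebrew table could be derived instead of hand-copied; a sketch, not the original code:

```python
# Sketch: derive the Hebrew neighbor table from the English one and the
# translation dicts above, so the two copies cannot drift apart.
cities_less_50_km_heb = {
    cities_in_israel_en_heb[city]: [cities_with_hospitals_e_h[h] for h in hospital_cities]
    for city, hospital_cities in cities_less_50_km_eng.items()
}
```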
ג': [],\n 'בית חולים ג': [], 'תאריכים ג': [], 'איש קשר אחראי ג': [], 'התנסות ד': [], 'בית חולים ד': [], 'תאריכים ד': [],\n 'איש קשר אחראי ד': [], 'התנסות ה': [], 'בית חולים ה': [], 'תאריכים ה': [], 'איש קשר אחראי ה': [], 'הערות': []}\n\noutput_excel = pandas.DataFrame(data)\n\n# Read the excel of the Hospitals to get the length of his rows and column\nhospitals_excel = pandas.read_excel(\"Hospitals.xlsx\")\nn_rows_hospital = len(hospitals_excel.index)\nn_cols_hospital = len(hospitals_excel.columns)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nwriter = pandas.ExcelWriter('demo.xlsx', engine='xlsxwriter')\n\n# Convert the dataframe to an XlsxWriter Excel object.\noutput_excel.to_excel(writer, sheet_name='Sheet1', index=False)\n\nrow_of_output = 1 # when we finish with a student we add 1\n\nfor s in students:\n\n num_of_experience = 0 # start from zero until the length of the to do list in student object\n for row in range(n_rows_hospital):\n city_host = str(hospitals_excel.iloc[row][0]) # city of the experience\n student_num = hospitals_excel.iloc[row][4]\n experience_host = hospitals_excel.iloc[row][2] + \" - \" + hospitals_excel.iloc[row][3]\n hospital = hospitals_excel.iloc[row][1]\n dates = hospitals_excel.iloc[row][6]\n responsible_contact = hospitals_excel.iloc[row][8]\n\n if dates not in s.exp_dates:\n\n if city_host in s.list_of_potential_cities: # check if the city match to the dist of the student\n\n if experience_host in s.to_do: # check if the student needs to do this experience\n\n if student_num > 0:\n flag = False\n hospitals_excel.at[row, 'מספר סטודנטים'] = student_num - 1\n hospitals_excel.to_excel(\"Hospitals.xlsx\", index=False)\n output_excel.at[row_of_output, 'תעודת זהות'] = s.id_num\n output_excel.at[row_of_output, 'שם מלא'] = s.name\n output_excel.at[row_of_output, 'עיר מגורים'] = s.city\n output_excel.at[row_of_output, 'שנה'] = s.year\n\n if num_of_experience == 0:\n output_excel.at[row_of_output, 'התנסות א'] = experience_host\n output_excel.at[row_of_output, 'בית חולים א'] = hospital\n output_excel.at[row_of_output, 'תאריכים א'] = dates\n output_excel.at[row_of_output, 'איש קשר אחראי א'] = responsible_contact\n s.exp_dates.append(dates)\n flag = True\n\n elif num_of_experience == 1:\n output_excel.at[row_of_output, 'התנסות ב'] = experience_host\n output_excel.at[row_of_output, 'בית חולים ב'] = hospital\n output_excel.at[row_of_output, 'תאריכים ב'] = dates\n output_excel.at[row_of_output, 'איש קשר אחראי ב'] = responsible_contact\n s.exp_dates.append(dates)\n flag = True\n\n elif num_of_experience == 2:\n output_excel.at[row_of_output, 'התנסות ג'] = experience_host\n output_excel.at[row_of_output, 'בית חולים ג'] = hospital\n output_excel.at[row_of_output, 'תאריכים ג'] = dates\n output_excel.at[row_of_output, 'איש קשר אחראי ג'] = responsible_contact\n s.exp_dates.append(dates)\n flag = True\n\n elif num_of_experience == 3:\n output_excel.at[row_of_output, 'התנסות ד'] = experience_host\n output_excel.at[row_of_output, 'בית חולים ד'] = hospital\n output_excel.at[row_of_output, 'תאריכים ד'] = dates\n output_excel.at[row_of_output, 'איש קשר אחראי ד'] = responsible_contact\n s.exp_dates.append(dates)\n flag = True\n\n else:\n output_excel.at[row_of_output, 'התנסות ה'] = experience_host\n output_excel.at[row_of_output, 'בית חולים ה'] = hospital\n output_excel.at[row_of_output, 'תאריכים ה'] = dates\n output_excel.at[row_of_output, 'איש קשר אחראי ה'] = responsible_contact\n s.exp_dates.append(dates)\n flag = True\n\n if flag:\n num_of_experience += 1\n 
s.done.append(experience_host)\n temp = []\n for exp in s.to_do:\n if exp != experience_host:\n temp.append(exp)\n s.to_do = temp\n\n write_comment(s)\n row_of_output += 1\n\noutput_excel.to_excel(\"Demo.xlsx\", index=False)\n\nwriter = pandas.ExcelWriter('demo.xlsx')\noutput_excel.to_excel(writer, sheet_name='Sheet1', index=False, na_rep='')\n\n\n# Auto-adjust columns' width\n\nfor column in output_excel:\n column_width = max(output_excel[column].astype(str).map(len).max(), len(column))\n col_idx = output_excel.columns.get_loc(column)\n writer.sheets['Sheet1'].set_column(col_idx, col_idx, column_width)\n\n\n# Get the xlsxwriter workbook and worksheet objects.\nworkbook = writer.book\nworksheet = writer.sheets['Sheet1']\n\n\n# Set formats for colors . Light red fill with dark red text.\nformat_experience_a = workbook.add_format({'bg_color': '#B73A3A', 'font_color': '#000000'}) #FF0000\n\nformat_experience_b = workbook.add_format({'bg_color': '#23C26F', 'font_color': '#000000'}) # 00FF00\n\nformat_experience_c = workbook.add_format({'bg_color': '#438FFF', 'font_color': '#000000'}) ##0000FF\n\nformat_experience_d = workbook.add_format({'bg_color': 'D83B01', 'font_color': '#000000'}) ##00FFFF\n\nformat_experience_e = workbook.add_format({'bg_color': '#FF72FF', 'font_color': '#000000'})\n\nformat_comments = workbook.add_format({'bg_color': '#FFC7CE', 'font_color': '#9C0006'})\n\n\n# Set the conditional format range for comments.\nstart_row = 1\nstart_col = 24\nend_row = len(output_excel)\nend_cold = start_col\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_cold,\n {'type': 'cell',\n 'criteria': '==',\n 'value': '\"בעיה - לא שובץ בכלום!\"',\n 'format': format_comments})\n\n\n# Coloring experience A\nstart_row = 0\nstart_col = 4\nend_row = start_row\nend_col = 7\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_col,\n {'type': 'cell',\n 'criteria': '>',\n 'value': 0,\n 'format': format_experience_a})\n\n\n# Coloring experience B\n\nstart_row = 0\nstart_col = 8\nend_row = start_row\nend_col = 11\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_col,\n {'type': 'cell',\n 'criteria': '>',\n 'value': 0,\n 'format': format_experience_b})\n\n# Coloring experience C\n\nstart_row = 0\nstart_col = 12\nend_row = start_row\nend_col = 15\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_col,\n {'type': 'cell',\n 'criteria': '>',\n 'value': 0,\n 'format': format_experience_c})\n\n# Coloring experience D\n\nstart_row = 0\nstart_col = 16\nend_row = start_row\nend_col = 19\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_col,\n {'type': 'cell',\n 'criteria': '>',\n 'value': 0,\n 'format': format_experience_d})\n\n# Coloring experience E\n\nstart_row = 0\nstart_col = 20\nend_row = start_row\nend_col = 23\n\n# Apply a conditional format to the cell range.\nworksheet.conditional_format(start_row, start_col, end_row, end_col,\n {'type': 'cell',\n 'criteria': '>',\n 'value': 0,\n 'format': format_experience_e})\n\n\n# Close the Pandas Excel writer and output the Excel 
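The five "Coloring experience" blocks above differ only in their start column and format object: each experience occupies four columns, starting at column 4 (note that `format_experience_d` is also missing the leading '#' in its `bg_color`). One loop covers all five ranges; a sketch assuming the same `worksheet` and format objects:

```python
# Sketch: drive the five identical conditional_format calls from one loop.
experience_formats = [format_experience_a, format_experience_b,
                      format_experience_c, format_experience_d,
                      format_experience_e]
for i, fmt in enumerate(experience_formats):
    first_col = 4 + 4 * i              # A starts at col 4, B at 8, ...
    worksheet.conditional_format(0, first_col, 0, first_col + 3,
                                 {'type': 'cell', 'criteria': '>',
                                  'value': 0, 'format': fmt})
```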
file.\nwriter.save()\n","repo_name":"oz105/Scheduling_Algorithm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":46034,"program_lang":"python","lang":"he","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"22997201808","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom requests import get\nfrom json import loads\nfrom configparser import ConfigParser\nfrom emoji import emojize\n\n# Bot configuration\nconfig = ConfigParser()\nconfig.read_file(open('config.ini'))\n\n# Connecting to the Telegram API\n# Updater reads the settings from config.ini and dispatcher wires the commands to handlers\nup = Updater(token=config['DEFAULT']['token'])\ndispatcher = up.dispatcher\n\n\ndef start(bot, update):\n    \"\"\"\n    Shows a welcome message and help information about the available commands.\n    \"\"\"\n\n    # Initial message\n    msg = \"Greetings, hackers. \\n\"\n    msg += \"What would you like to do? \\n\"\n    msg += \"/ranking Lists the current hackaflag top 10 \\n\"\n    msg += \"/help In case you need help\"\n\n    # Send the message\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=msg)\n\n\ndef ranking(bot, update):\n    # GET request to read the data and decode the JSON\n    r = loads(get('https://ctf.hackaflag.com.br/premio_json.php').text)\n    msg = emojize(\"Ranking :black_flag:\", use_aliases=True)\n    # Build the reply listing the current top 10 teams.\n    msg += 'Hand over the Trophy #FireShell++'\n    msg += '\\n1° - Team: ' + str(r['1']['nome']) + ' | Score: ' + str(r['1']['score']) + emojize(' :1st_place_medal:', use_aliases=True) # gold medal emoji\n    msg += '\\n2° - Team: ' + str(r['2']['nome']) + ' | Score: ' + str(r['2']['score']) + emojize(' :2nd_place_medal:', use_aliases=True) # silver medal emoji\n    msg += '\\n3° - Team: ' + str(r['3']['nome']) + ' | Score: ' + str(r['3']['score']) + emojize(' :3rd_place_medal:', use_aliases=True) # bronze medal emoji\n    msg += '\\n4° - Team: ' + str(r['4']['nome']) + ' | Score: ' + str(r['4']['score'])\n    msg += '\\n5° - Team: ' + str(r['5']['nome']) + ' | Score: ' + str(r['5']['score'])\n    msg += '\\n6° - Team: ' + str(r['6']['nome']) + ' | Score: ' + str(r['6']['score'])\n    msg += '\\n7° - Team: ' + str(r['7']['nome']) + ' | Score: ' + str(r['7']['score'])\n    msg += '\\n8° - Team: ' + str(r['8']['nome']) + ' | Score: ' + str(r['8']['score'])\n    msg += '\\n9° - Team: ' + str(r['9']['nome']) + ' | Score: ' + str(r['9']['score'])\n    msg += '\\n10° - Team: ' + str(r['10']['nome']) + ' | Score: ' + str(r['10']['score'])\n\n    # Send the message\n    bot.send_message(chat_id=update.message.chat_id,text=msg)\n\n\ndef help(bot, update):\n    \"\"\"\n    Shows a description of the bot and its functions.\n    \"\"\"\n\n    # Help message\n    msg = \"Hello, I am a bot that will help you with some \\n\"\n    msg += \"information about hackaflag 2017. \\n\"\n    msg += \"Currently I have only one function, \\n\"\n    msg += \"/ranking, which lists the top 10 \\n\"\n    msg += \"teams of hackaflag 2017.\"\n\n    # Send the message\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=msg)\n\n\ndef unknown(bot, update):\n    \"\"\"\n    In case the user sends an unknown command.\n    \"\"\"\n    msg = \"Please enter a valid command!\"\n\n    # Send the message\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=msg)\n\n# Register the start, ranking, help and unknown handlers\ndispatcher.add_handler(CommandHandler('start', start))\ndispatcher.add_handler(CommandHandler('ranking', ranking))\ndispatcher.add_handler(CommandHandler('help', help))\ndispatcher.add_handler(MessageHandler((Filters.command), unknown))\n\nup.start_polling() # Start the program\nprint(\":)\")\n","repo_name":"k13w/teleHaF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
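The `ranking` handler in the bot above builds ten near-identical message lines by hand, which invites copy-paste slips. A loop over the hard-coded keys '1'–'10' produces the same message; a sketch against the same JSON shape (`r[str(i)]['nome']` / `['score']`):

```python
# Sketch: build the top-10 listing in a loop; medals only for places 1-3.
medals = {1: ':1st_place_medal:', 2: ':2nd_place_medal:', 3: ':3rd_place_medal:'}
for place in range(1, 11):
    team = r[str(place)]
    msg += f"\n{place}° - Team: {team['nome']} | Score: {team['score']}"
    if place in medals:
        msg += emojize(f" {medals[place]}", use_aliases=True)
```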
\\n\"\n msg += \"Atualmente conto com apenas uma função, \\n\"\n msg += \"/ranking, essa função listará o top 10, \\n\"\n msg += \"de times do hackaflag 2017.\"\n\t\n # Envia a mensagem\n bot.send_message(chat_id=update.message.chat_id,\n text=msg)\n\n\ndef unknown(bot, update):\n \"\"\"\n Caso o usuário envie um comando desconhecido.\n \"\"\"\n msg = \"Digite uma função valida!.\"\n\t\n\t# Envia a mensagem\n bot.send_message(chat_id=update.message.chat_id,\n text=msg)\n\n# Adicionando os handlers start, ranking, help e unknown\ndispatcher.add_handler(CommandHandler('start', start))\ndispatcher.add_handler(CommandHandler('ranking', ranking))\ndispatcher.add_handler(CommandHandler('help', help))\ndispatcher.add_handler(MessageHandler((Filters.command), unknown))\n\nup.start_polling() # Inicia o progama\nprint(\":)\")\n","repo_name":"k13w/teleHaF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39381453807","text":"# imprt json\n# create xchange rate class with required attributers\n# fetch the data\n# display the data\n# display the type of data\n# method to return the exchange rates\n# display exchange rates with specific currencies\n\n\nimport json\n\n\nclass ExchangeRates:\n def __init__(self):\n self.self = self\n# add function to call method within methods - add a method that fetches the json dictionary\n def fetch_exchange_rate(self, cur):\n with open(\"exchange_rates.json\") as jsonfile:\n # reading from the file we just created\n rates = json.load(jsonfile)\n print(f\"The exchange rate between EUR and {cur} is\", rates[\"rates\"][cur]) # since this is in the with open block, you know that this is coming from the new file\n\n def fetch_exchange_rates(self, curr):\n with open(\"exchange_rates.json\") as jsonfile:\n # reading from the file we just created\n rates = json.load(jsonfile)\n self.fetch_rates_list()\n x_rates = rates[\"rates\"] # rates is the whole dict, x_rates is the nested dict\n for cur in curr: # Iterates through the input \"curr\" which is the list of all the keys in the dict\n print(f\"The exchange rate between EUR and {cur} is\", x_rates[cur])\n\n def fetch_rates_list(self):\n with open(\"exchange_rates.json\") as jsonfile:\n # reading from the file we just created\n rates = json.load(jsonfile)\n x_rates_list = rates[\"rates\"].keys() # converts the keys of the dictionary \"rates\" into a list\n return list(x_rates_list)\n\n\nc = ExchangeRates()\n\n# c.fetch_exchange_rate(\"AUD\")\n\ncurr = (c.fetch_rates_list())\n# print(curr)\nc.fetch_exchange_rates(curr)\n","repo_name":"ibbocus/api_json","sub_path":"json_exchange_rates.py","file_name":"json_exchange_rates.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16584276641","text":"from JurassicBot import client, cogs\nimport json\n\n\ndef main():\n with open('config.json', \"r\") as f:\n config = json.load(f)\n\n for cog in cogs:\n client.load_extension(cog)\n\n client.run(config('token'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JurassicHost/jurassic-bot","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7849849333","text":"from sklearn import svm\nfrom sklearn.svm import SVC\nimport 
{"seq_id":"7849849333","text":"from sklearn import svm\nfrom sklearn.svm import SVC\nimport csv\n\n\nprev_close_list=[]\nclose_price_list=[]\nmomentum_list=[]\nvolatility_list=[]\nhigh_price_list=[]\nlow_price_list=[]\naverage_price_list=[]\ntotal_traded_quantity_list=[]\n\nindex_momentum_list=[]\nindex_volatility_list=[]\n\nday1_list=[]\nday2_list=[]\nday3_list=[]\nday4_list=[]\nday5_list=[]\n\n\nwith open('features_10day_csv.csv') as csvfile:\n    reader = csv.DictReader(csvfile)\n    for row in reader:\n        momentum_list.append(int(row['Momentum of Close Price']))\n        prev_close_list.append(float(row['Prev Close']))\n        close_price_list.append(float(row['Close Price']))\n        volatility_list.append(float(row['Volatility of Close Price']))\n        high_price_list.append(float(row['High Price']))\n        low_price_list.append(float(row['Low Price']))\n        average_price_list.append(float(row['Average Price']))\n        total_traded_quantity_list.append(int(row['Total Traded Quantity']))\n\n        index_momentum_list.append(int(row['Momentum Bases on Index']))\n        index_volatility_list.append(float(row['Volatility Bases on Index']))\n\n        day1_list.append(int(row['Day1']))\n        day2_list.append(int(row['Day2']))\n        day3_list.append(int(row['Day3']))\n        day4_list.append(int(row['Day4']))\n        day5_list.append(int(row['Day5']))\n\n# x=[list(a) for a in zip(prev_close_list, close_price_list,momentum_list,volatility_list,high_price_list,low_price_list,average_price_list,total_traded_quantity_list,index_momentum_list,index_volatility_list)]\n# y=[list(b) for b in zip(day1_list,day2_list)]\nx=[list(a) for a in zip(prev_close_list, close_price_list,volatility_list,high_price_list,low_price_list,average_price_list,total_traded_quantity_list,index_momentum_list,index_volatility_list)]\nX = x\nY = momentum_list\nclf = svm.SVC()\nclf.fit(X, Y)\nSVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',\n    max_iter=-1, probability=False, random_state=None, shrinking=True,\n    tol=0.001, verbose=False)\n\nprint(clf.predict([[2353.0, 2353.0, -0.0002571111961719646, 2378.75, 2345.05, 2364.53, 1435829, 0, 0.4926051550212077]]))\n","repo_name":"jaiprajapati3/Stock-Market-Prediction","sub_path":"SVM/svm2.py","file_name":"svm2.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
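The SVM script above fits on the full feature table and then predicts a single hand-picked row, which says nothing about how the classifier generalizes. Holding out a test split gives an honest estimate; a sketch reusing the `X` and `Y` built above:

```python
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# Hold out 20% of the rows so accuracy is measured on unseen data.
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)

clf = SVC(kernel='rbf', gamma='auto')
clf.fit(X_train, y_train)
print("held-out accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```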
{"seq_id":"28887201600","text":"try:\n    from PIL import Image\nexcept ImportError:\n    import Image\nimport pytesseract\nimport cv2\nimport argparse\n\n\ndef video_string_match(input_file_path, output_path, string_match, x, y, h, w, count, success):\n    vidcap = cv2.VideoCapture(input_file_path)\n    fps = vidcap.get(cv2.CAP_PROP_FPS)\n    print(\"The input video has %s fps\" % fps)\n    while success:\n        # Capture frame-by-frame\n        success, image = vidcap.read()\n        count += 1\n        if count % 5 == 0 and success:  # OCR every fifth frame\n            file_name = \"frame{}.png\".format(count)\n            crop_img = image[y:y+h, x:x+w]\n            img_rgb = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)\n            text_output = pytesseract.image_to_string(img_rgb)\n            print(\"Analyzing frame:{}\".format(count))\n            if (text_output.find(string_match) != -1):\n                print('Identified the frame{} with string match - \"{}\"'.format(count, string_match))\n                cv2.imwrite(output_path + \"/\" + file_name, image)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--coordinates', dest='coordinates', type=int, nargs='+',\n                    help='Please enter coordinates from where the text needs to be extracted on a video frame in the following format -> x y h w \\n (x,y) will be the starting point.\\n (h,w) is the height and width from the starting point')\n    parser.add_argument('--input-file-path', dest='input_file_path', help='Provide the input file path for the video file')\n    parser.add_argument('--output-file-path', dest='output_path', help='Provide the output path where the images will be stored')\n    parser.add_argument('--string-match', dest='string_match', help='Provide the string that needs to be matched with the cropped frame')\n    args = parser.parse_args()\n    count = 0\n    success = True\n    video_string_match(args.input_file_path, args.output_path, args.string_match, args.coordinates[0], args.coordinates[1], args.coordinates[2], args.coordinates[3], count, success)\n","repo_name":"abhiramsiripurapu/videoanalytics","sub_path":"extractFrame.py","file_name":"extractFrame.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71884337494","text":"# Python Crash Course 2e\n# Exercise 16.1\n\n# sitka_rainfall\n\nimport csv\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = \"data/sitka_weather_2018_simple.csv\"\n\nwith open(filename) as f:\n    reader = csv.reader(f)\n    header_row = next(reader)\n\n    dates = []\n    p_rates = []\n\n    for row in reader:\n        current_date = datetime.strptime(row[2], \"%Y-%m-%d\")\n        p_rate = float(row[3])\n        p_rates.append(p_rate)\n        dates.append(current_date)\n\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(dates, p_rates, c='blue')\n\nax.set_title(\"Daily Precipitation Rates - 2018\", fontsize=24)\nax.set_xlabel(\"\", fontsize = 16)\nfig.autofmt_xdate()\nax.set_ylabel(\"Precipitation Rate (%)\", fontsize=16)\nax.tick_params(axis='both', which='major', labelsize = 16)\n\nplt.show()\n\n# PRCP is located at index 3 (row[3])\n# PRCP values are float\n","repo_name":"js0288539/creek","sub_path":"David/Python Crash Course 2e - PROJECT 2/Chapter 16/exercise_16-1.py","file_name":"exercise_16-1.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"28256388459","text":"import sqlite3\ndb = sqlite3.connect(\"app.db\")\ncr = db.cursor()\n\ndef commit_and_close():\n    db.commit()\n    db.close()\n    print(\"Connection To Database Is Closed\")\n\n\nuid = 1\n\ninput_message = \"\"\"\nWhat Do You Want To Do?\n\"s\" => Show All Skills\n\"a\" => Add New Skill\n\"d\" => Delete A Skill\n\"u\" => Update Skill Progress\n\"q\" => Quit The App\nChoose Option: \n\"\"\"\nuser_input = input(input_message).strip().lower()\ncommands_list = [\"s\", \"a\", \"d\", \"u\", \"q\"]\n\ndef show_skills():\n    print('Show Skills')\n    commit_and_close()\n\ndef add_skills():\n    sk = input(\"Write Skill Name: \").strip().capitalize()\n    prog = input(\"Write Skill Progress: \").strip()\n    cr.execute(f\"insert into skills(name, progress, user_id) values('{sk}', '{prog}', '{uid}')\")\n    commit_and_close()\n\ndef delete_skills():\n    sk = input(\"Write Skill Name: \").strip().capitalize()\n    cr.execute(f\"delete from skills where name = '{sk}'\")  # the name must be quoted as a SQL string literal\n    commit_and_close()\n\ndef update_skills():\n    print('Update Skill')\n    commit_and_close()\n\nif user_input in commands_list:\n    print(f'Command Found {user_input}')\n    if user_input == \"s\":\n        show_skills()\n\n    elif user_input == \"a\":\n        add_skills()\n\n    elif user_input == \"d\":\n        delete_skills()\n\n    elif user_input == \"u\":\n        update_skills()\n    else:\n        print(\"App Is Closed\")\nelse:\n    print(f'Sorry This Command {user_input} Is Not Found')\n","repo_name":"eyubech/Python--","sub_path":"Dataa/Lite.py","file_name":"Lite.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
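The skills app above interpolates user input straight into SQL f-strings, which forces manual quoting and leaves the door open to injection (a skill name containing a single quote still breaks the statement). sqlite3 placeholders solve both problems; a sketch against the same `skills` table:

```python
# Sketch: parameterized queries - sqlite3 quotes and escapes the values.
def add_skill(cr, name, progress, user_id):
    cr.execute("insert into skills(name, progress, user_id) values(?, ?, ?)",
               (name, progress, user_id))

def delete_skill(cr, name):
    cr.execute("delete from skills where name = ?", (name,))

def show_skills(cr):
    for name, progress in cr.execute("select name, progress from skills"):
        print(f"{name}: {progress}")
```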
{"seq_id":"71190755414","text":"f = open(\"C:/users/extra/Desktop/매수종목2.txt\", encoding=\"utf-8\")\nlines = f.readlines()\n\ndata = {}\nfor line in lines:\n    line = line.strip()  # strip() removes the trailing \\n from each line\n    key, value = line.split()\n    data[key] = value\n\nprint(data)\nf.close()","repo_name":"magnificentLee/Study","sub_path":"초보자를 위한 파이썬 300제/12. 파일 입출력과 예외처리/291~300/295.py","file_name":"295.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"23907123758","text":"# Student: Deivison rodrigues jordao\r\n# Ballot box\r\n\r\n# Data input and variable definitions\r\n\r\nnumero_de_eleitores = int(input(\"enter the number of voters: \"))\r\n\r\ncandidato1 = 11\r\ncandidato2 = 22\r\ncandidato3 = 33\r\n\r\ncandidato1_contagem = 0\r\ncandidato2_contagem = 0\r\ncandidato3_contagem = 0\r\n\r\nprint()\r\nprint(\"The candidates are:\")\r\nprint(\"candidato1, with number 11\")\r\nprint(\"candidato2, with number 22\")\r\nprint(\"candidato3, with number 33\")\r\nprint()\r\n\r\n# Processing\r\n\r\nfor i in range(numero_de_eleitores):\r\n    \r\n    voto = int(input(\"enter the number of the candidate you want to vote for: \"))\r\n    \r\n    if(voto == candidato1):\r\n        candidato1_contagem = candidato1_contagem + 1\r\n        \r\n    elif(voto == candidato2):\r\n        candidato2_contagem = candidato2_contagem + 1\r\n        \r\n    elif(voto == candidato3):\r\n        candidato3_contagem = candidato3_contagem + 1\r\n\r\n# Output\r\nprint()\r\nprint(\"candidato1 received \",candidato1_contagem,\" votes\")\r\nprint(\"candidato2 received \",candidato2_contagem,\" votes\")\r\nprint(\"candidato3 received \",candidato3_contagem,\" votes\")\r\n    \r\n","repo_name":"deivisongithub/intro-a-programacao","sub_path":"lista 6/Questão 5.py","file_name":"Questão 5.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"44235308498","text":"from json import load\nimport os\nimport argparse\nimport random\nfrom copy import deepcopy\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch import nn\nimport sys\nimport torch\nimport numpy as np\nimport cvxopt\ntorch.manual_seed(0)\n\nfrom fedlab.core.client.serial_trainer import SubsetSerialTrainer\nfrom fedlab.utils.aggregator import Aggregators\nfrom fedlab.utils.serialization import SerializationTool\nfrom fedlab.utils.functional import evaluate\nfrom fedlab.utils.functional import get_best_gpu, load_dict\n\nsys.path.append(\"../\")\nfrom models.cnn import CNN_MNIST\n\ndef quadprog(Q, q, G, h, A, b):\n    \"\"\"\n    Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html\n    Output: Numpy array of the solution\n    \"\"\"\n    Q = cvxopt.matrix(Q.tolist())\n    q = cvxopt.matrix(q.tolist(), tc='d')\n    G = cvxopt.matrix(G.tolist())\n    h = cvxopt.matrix(h.tolist())\n    A = cvxopt.matrix(A.tolist())\n    b = cvxopt.matrix(b.tolist(), tc='d')\n    sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)\n    return np.array(sol['x'])\n\ndef optim_lambdas(gradients, lambda0):\n    epsilon = 0.5\n    n = len(gradients)\n    J_t = [grad.numpy() for grad in gradients]\n    J_t = np.array(J_t)\n    # target function\n    Q = 2 * np.dot(J_t, J_t.T)\n    q = np.array([[0] for i in 
range(n)])\n    # equality constraint\n    A = np.ones(n).T\n    b = np.array([1])\n    # boundary\n    lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])\n    ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])\n    G = np.zeros((2 * n, n))\n    for i in range(n):\n        G[i][i] = -1\n        G[n + i][i] = 1\n    h = np.zeros((2 * n, 1))\n    for i in range(n):\n        h[i] = -lb[i]\n        h[n + i] = ub[i]\n    res = quadprog(Q, q, G, h, A, b)\n    return res\n\n# python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid\n# configuration\nparser = argparse.ArgumentParser(description=\"Standalone training example\")\nparser.add_argument(\"--total_client\", type=int, default=10)\nparser.add_argument(\"--com_round\", type=int, default=5)\n\nparser.add_argument(\"--sample_ratio\", type=float)\nparser.add_argument(\"--batch_size\", type=int)\nparser.add_argument(\"--lr\", type=float)\nparser.add_argument(\"--epochs\", type=int)\n\nargs = parser.parse_args()\n\n# get raw dataset\nroot = \"../datasets/mnist/\"\ntrainset = torchvision.datasets.MNIST(root=root,\n                                      train=True,\n                                      download=True,\n                                      transform=transforms.ToTensor())\n\ntestset = torchvision.datasets.MNIST(root=root,\n                                     train=False,\n                                     download=True,\n                                     transform=transforms.ToTensor())\n\ntest_loader = torch.utils.data.DataLoader(testset,\n                                          batch_size=len(testset),\n                                          drop_last=False,\n                                          shuffle=False)\n\n# setup\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\ngpu = get_best_gpu()\nmodel = CNN_MNIST().cuda(gpu)\n\n# FL settings\nnum_per_round = int(args.total_client * args.sample_ratio)\naggregator = Aggregators.fedavg_aggregate\ntotal_client_num = args.total_client  # total number of clients\ndata_indices = load_dict(\"./mnist_noniid.pkl\")\n\n\n# fedlab setup\nlocal_model = deepcopy(model)\n\ntrainer = SubsetSerialTrainer(model=local_model,\n                              dataset=trainset,\n                              data_slices=data_indices,\n                              aggregator=aggregator,\n                              args={\n                                  \"batch_size\": args.batch_size,\n                                  \"epochs\": args.epochs,\n                                  \"lr\": args.lr\n                              })\n\ndynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round\n\n# train procedure\nto_select = [i for i in range(total_client_num)]\nfor round in range(args.com_round):\n    model_parameters = SerializationTool.serialize_model(model)\n    selection = random.sample(to_select, num_per_round)\n    parameters = trainer.train(model_parameters=model_parameters,\n                               id_list=selection,\n                               aggregate=False)\n\n    # client updates as pseudo-gradients, normalized so the QP weighs directions rather than magnitudes\n    gradients = [model_parameters - model for model in parameters]\n    for i, grad in enumerate(gradients):\n        gradients[i] = grad / grad.norm()\n    print(len(gradients))\n    print(gradients[0].shape)\n    # solve the constrained QP for the aggregation weights (the FedMGDA+ lambda step)\n    lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]\n    dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)\n    dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)\n    serialized_parameters = model_parameters - dt * args.lr\n    SerializationTool.deserialize_model(model, serialized_parameters)\n\n    criterion = nn.CrossEntropyLoss()\n    loss, acc = evaluate(model, criterion, test_loader)\n    print(\"loss: {:.4f}, acc: {:.2f}\".format(loss, acc))\n","repo_name":"SMILELab-FL/FedLab-benchmarks","sub_path":"fedlab_benchmarks/fedmgda+/standalone.py","file_name":"standalone.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"67"}
{"seq_id":"72630771414","text":"from __future__ import division\nimport sys\nimport os\nimport time\nimport QueryExecution as QExec\nfrom bitmap import BitMap\nimport CFCosineSim\nimport TupleIntent as ti\nimport 
ParseConfigFile as parseConfig\nfrom ParseConfigFile import getConfig\nimport pickle\nimport argparse\nfrom pandas import DataFrame\n\ndef updateArrWithDictEntry(arr, evalOpsObjDict, epIndex, evalOpsObj):\n try:\n arr.append(float(evalOpsObjDict[epIndex])/float(evalOpsObj.numEpQueries[epIndex]))\n except:\n arr.append(\"\")\n return\n\ndef plotEvalMetricsOpWise(evalOpsObj):\n episodes = []\n numEpQueries = []\n meanReciprocalRank = []\n queryTypeP= []\n queryTypeR = []\n queryTypeF = []\n tablesP = []\n tablesR = []\n tablesF = []\n projColsP = []\n projColsR = []\n projColsF = []\n avgColsP = []\n avgColsR = []\n avgColsF = []\n minColsP = []\n minColsR = []\n minColsF = []\n maxColsP = []\n maxColsR = []\n maxColsF = []\n sumColsP = []\n sumColsR = []\n sumColsF = []\n countColsP = []\n countColsR = []\n countColsF = []\n selColsP = []\n selColsR = []\n selColsF = []\n condSelColsP = []\n condSelColsR = []\n condSelColsF = []\n groupByColsP = []\n groupByColsR = []\n groupByColsF = []\n orderByColsP = []\n orderByColsR = []\n orderByColsF = []\n havingColsP = []\n havingColsR = []\n havingColsF = []\n limitP = []\n limitR = []\n limitF = []\n joinPredsP = []\n joinPredsR = []\n joinPredsF = []\n for i in range(evalOpsObj.curEpisode + 1):\n episodes.append(i)\n updateArrWithDictEntry(meanReciprocalRank, evalOpsObj.meanReciprocalRank, i, evalOpsObj)\n updateArrWithDictEntry(queryTypeP, evalOpsObj.queryTypeP, i, evalOpsObj)\n updateArrWithDictEntry(queryTypeR, evalOpsObj.queryTypeR, i, evalOpsObj)\n updateArrWithDictEntry(queryTypeF, evalOpsObj.queryTypeF, i, evalOpsObj)\n updateArrWithDictEntry(tablesP, evalOpsObj.tablesP, i, evalOpsObj)\n updateArrWithDictEntry(tablesR, evalOpsObj.tablesR, i, evalOpsObj)\n updateArrWithDictEntry(tablesF, evalOpsObj.tablesF, i, evalOpsObj)\n updateArrWithDictEntry(projColsP, evalOpsObj.projColsP, i, evalOpsObj)\n updateArrWithDictEntry(projColsR, evalOpsObj.projColsR, i, evalOpsObj)\n updateArrWithDictEntry(projColsF, evalOpsObj.projColsF, i, evalOpsObj)\n updateArrWithDictEntry(avgColsP, evalOpsObj.avgColsP, i, evalOpsObj)\n updateArrWithDictEntry(avgColsR, evalOpsObj.avgColsR, i, evalOpsObj)\n updateArrWithDictEntry(avgColsF, evalOpsObj.avgColsF, i, evalOpsObj)\n updateArrWithDictEntry(minColsP, evalOpsObj.minColsP, i, evalOpsObj)\n updateArrWithDictEntry(minColsR, evalOpsObj.minColsR, i, evalOpsObj)\n updateArrWithDictEntry(minColsF, evalOpsObj.minColsF, i, evalOpsObj)\n updateArrWithDictEntry(maxColsP, evalOpsObj.maxColsP, i, evalOpsObj)\n updateArrWithDictEntry(maxColsR, evalOpsObj.maxColsR, i, evalOpsObj)\n updateArrWithDictEntry(maxColsF, evalOpsObj.maxColsF, i, evalOpsObj)\n updateArrWithDictEntry(sumColsP, evalOpsObj.sumColsP, i, evalOpsObj)\n updateArrWithDictEntry(sumColsR, evalOpsObj.sumColsR, i, evalOpsObj)\n updateArrWithDictEntry(sumColsF, evalOpsObj.sumColsF, i, evalOpsObj)\n updateArrWithDictEntry(countColsP, evalOpsObj.countColsP, i, evalOpsObj)\n updateArrWithDictEntry(countColsR, evalOpsObj.countColsR, i, evalOpsObj)\n updateArrWithDictEntry(countColsF, evalOpsObj.countColsF, i, evalOpsObj)\n updateArrWithDictEntry(selColsP, evalOpsObj.selColsP, i, evalOpsObj)\n updateArrWithDictEntry(selColsR, evalOpsObj.selColsR, i, evalOpsObj)\n updateArrWithDictEntry(selColsF, evalOpsObj.selColsF, i, evalOpsObj)\n updateArrWithDictEntry(condSelColsP, evalOpsObj.condSelColsP, i, evalOpsObj)\n updateArrWithDictEntry(condSelColsR, evalOpsObj.condSelColsR, i, evalOpsObj)\n updateArrWithDictEntry(condSelColsF, evalOpsObj.condSelColsF, i, evalOpsObj)\n 
updateArrWithDictEntry(groupByColsP, evalOpsObj.groupByColsP, i, evalOpsObj)\n updateArrWithDictEntry(groupByColsR, evalOpsObj.groupByColsR, i, evalOpsObj)\n updateArrWithDictEntry(groupByColsF, evalOpsObj.groupByColsF, i, evalOpsObj)\n updateArrWithDictEntry(orderByColsP, evalOpsObj.orderByColsP, i, evalOpsObj)\n updateArrWithDictEntry(orderByColsR, evalOpsObj.orderByColsR, i, evalOpsObj)\n updateArrWithDictEntry(orderByColsF, evalOpsObj.orderByColsF, i, evalOpsObj)\n updateArrWithDictEntry(havingColsP, evalOpsObj.havingColsP, i, evalOpsObj)\n updateArrWithDictEntry(havingColsR, evalOpsObj.havingColsR, i, evalOpsObj)\n updateArrWithDictEntry(havingColsF, evalOpsObj.havingColsF, i, evalOpsObj)\n updateArrWithDictEntry(limitP, evalOpsObj.limitP, i, evalOpsObj)\n updateArrWithDictEntry(limitR, evalOpsObj.limitR, i, evalOpsObj)\n updateArrWithDictEntry(limitF, evalOpsObj.limitF, i, evalOpsObj)\n updateArrWithDictEntry(joinPredsP, evalOpsObj.joinPredsP, i, evalOpsObj)\n updateArrWithDictEntry(joinPredsR, evalOpsObj.joinPredsR, i, evalOpsObj)\n updateArrWithDictEntry(joinPredsF, evalOpsObj.joinPredsF, i, evalOpsObj)\n df = DataFrame(\n {'episodes': episodes, 'meanReciprocalRank': meanReciprocalRank, 'queryTypeP': queryTypeP, 'queryTypeR': queryTypeR, 'queryTypeF': queryTypeF,\n 'tablesP': tablesP, 'tablesR': tablesR, 'tablesF': tablesF,\n 'projColsP': projColsP, 'projColsR': projColsR, 'projColsF': projColsF,\n 'avgColsP': avgColsP, 'avgColsR': avgColsR, 'avgColsF': avgColsF,\n 'minColsP': minColsP, 'minColsR': minColsR, 'minColsF': minColsF,\n 'maxColsP': maxColsP, 'maxColsR': maxColsR, 'maxColsF': maxColsF,\n 'sumColsP': sumColsP, 'sumColsR': sumColsR, 'sumColsF': sumColsF,\n 'countColsP': countColsP, 'countColsR': countColsR, 'countColsF': countColsF,\n 'selColsP': selColsP, 'selColsR': selColsR, 'selColsF': selColsF,\n 'condSelColsP': condSelColsP, 'condSelColsR': condSelColsR, 'condSelColsF': condSelColsF,\n 'groupByColsP': groupByColsP, 'groupByColsR': groupByColsR, 'groupByColsF': groupByColsF,\n 'orderByColsP': orderByColsP, 'orderByColsR': orderByColsR, 'orderByColsF': orderByColsF,\n 'havingColsP': havingColsP, 'havingColsR': havingColsR, 'havingColsF': havingColsF,\n 'limitP': limitP, 'limitR': limitR, 'limitF': limitF,\n 'joinPredsP': joinPredsP, 'joinPredsR': joinPredsR, 'joinPredsF': joinPredsF,})\n outputOpWiseQualityFileName = getConfig(evalOpsObj.configDict['OUTPUT_DIR']) + \"/OutputOpWiseQuality_\" + evalOpsObj.configDict[\n 'ALGORITHM']\n df.to_excel(outputOpWiseQualityFileName+\".xlsx\", sheet_name='sheet1', index=False)\n\nclass evalOps:\n def __init__(self, configFileName, logFile):\n self.configDict = parseConfig.parseConfigFile(configFileName)\n self.logFile = logFile\n self.curEpisode = 0\n self.numEpQueries = {}\n self.curQueryIndex = -1\n self.meanReciprocalRank = {}\n self.episode = {}\n self.queryTypeP = {}\n self.queryTypeR = {}\n self.queryTypeF = {}\n self.tablesP = {}\n self.tablesR = {}\n self.tablesF = {}\n self.projColsP = {}\n self.projColsR = {}\n self.projColsF = {}\n self.avgColsP = {}\n self.avgColsR = {}\n self.avgColsF = {}\n self.minColsP = {}\n self.minColsR = {}\n self.minColsF = {}\n self.maxColsP = {}\n self.maxColsR = {}\n self.maxColsF = {}\n self.sumColsP = {}\n self.sumColsR = {}\n self.sumColsF = {}\n self.countColsP = {}\n self.countColsR = {}\n self.countColsF = {}\n self.selColsP = {}\n self.selColsR = {}\n self.selColsF = {}\n self.condSelColsP = {}\n self.condSelColsR = {}\n self.condSelColsF = {}\n self.groupByColsP = {}\n 
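`evalOps.__init__` above declares roughly forty-five near-identical metric dicts, and `plotEvalMetricsOpWise` mirrors each with its own list. Driving both from a single list of operator names keeps them in sync automatically; a sketch (names abbreviated, `getattr`/`setattr` doing the fan-out):

```python
# Sketch: one source of truth for metric names; the P/R/F variants are derived.
OP_NAMES = ['queryType', 'tables', 'projCols', 'avgCols', 'minCols', 'maxCols',
            'sumCols', 'countCols', 'selCols', 'condSelCols', 'groupByCols',
            'orderByCols', 'havingCols', 'limit', 'joinPreds']
METRIC_NAMES = ['meanReciprocalRank'] + [op + s for op in OP_NAMES for s in 'PRF']

# In evalOps.__init__:   for m in METRIC_NAMES: setattr(self, m, {})
# In the plotting loop:
#     cols = {m: [] for m in METRIC_NAMES}
#     for i in range(evalOpsObj.curEpisode + 1):
#         for m in METRIC_NAMES:
#             updateArrWithDictEntry(cols[m], getattr(evalOpsObj, m), i, evalOpsObj)
#     df = DataFrame(dict(cols, episodes=list(range(evalOpsObj.curEpisode + 1))))
```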
self.groupByColsR = {}\n self.groupByColsF = {}\n self.orderByColsP = {}\n self.orderByColsR = {}\n self.orderByColsF = {}\n self.havingColsP = {}\n self.havingColsR = {}\n self.havingColsF = {}\n self.limitP = {}\n self.limitR = {}\n self.limitF = {}\n self.joinPredsP = {}\n self.joinPredsR = {}\n self.joinPredsF = {}\n\nclass nextActualOps:\n def __init__(self):\n self.queryType = None\n self.tables = None\n self.projCols = None\n self.avgCols = None\n self.minCols = None\n self.maxCols = None\n self.sumCols = None\n self.countCols = None\n self.selCols = None\n self.groupByCols = None\n self.orderByCols = None\n self.havingCols = None\n self.limit = None\n self.joinPreds = None\n\ndef parseLineAddOp(line, actualOrPredObj):\n if line.startswith(\"Query Type\"):\n actualOrPredObj.queryType = line.strip().split(\": \")[1]\n elif line.startswith(\"Limit\"):\n actualOrPredObj.limit = line.strip().split(\": \")[1]\n elif line.startswith(\"Tables\"):\n actualOrPredObj.tables = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"Projected\"):\n actualOrPredObj.projCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"AVG\"):\n actualOrPredObj.avgCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"MIN\"):\n actualOrPredObj.minCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"MAX\"):\n actualOrPredObj.maxCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"SUM\"):\n actualOrPredObj.sumCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"COUNT\"):\n actualOrPredObj.countCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"SEL\"):\n actualOrPredObj.selCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"GROUP\"):\n actualOrPredObj.groupByCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"ORDER\"):\n actualOrPredObj.orderByCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"HAVING\"):\n actualOrPredObj.havingCols = eval(line.strip().split(\": \")[1])\n elif line.startswith(\"JOIN\"):\n actualOrPredObj.joinPreds = eval(line.strip().split(\": \")[1])\n return\n\ndef updateMetricDict(metricDict, key, val):\n if key not in metricDict:\n metricDict[key] = val\n else:\n metricDict[key] = float(metricDict[key]+val)\n return\n\ndef computeOpF1(predOpList, actualOpList):\n if (actualOpList is None and predOpList is not None) or\\\n (predOpList is None and actualOpList is not None):\n return (0.0, 0.0, 0.0)\n elif predOpList is not None and actualOpList is not None:\n TP = len(set(predOpList).intersection(set(actualOpList)))\n FP = len(set(predOpList) - set(actualOpList))\n FN = len(set(actualOpList) - set(predOpList))\n P = float(TP)/float(TP+FP)\n R = float(TP)/float(TP+FN)\n if P == 0.0 or R == 0.0:\n F = 0.0\n else:\n F = 2*P*R / float(P+R)\n return (P, R, F)\n else:\n return (1.0, 1.0, 1.0)\n\ndef updateOpMetrics(P, R, F, evalOpsP, evalOpsR, evalOpsF, evalOpsObj):\n if P is not None and R is not None and F is not None:\n updateMetricDict(evalOpsP, evalOpsObj.curEpisode, P)\n updateMetricDict(evalOpsR, evalOpsObj.curEpisode, R)\n updateMetricDict(evalOpsF, evalOpsObj.curEpisode, F)\n return\n\ndef computeRelevantCols(accTables, predOrActualCols):\n relCols = []\n for col in predOrActualCols:\n tableName = col.split(\".\")[0]\n if tableName in accTables:\n relCols.append(col)\n if len(relCols) == 0:\n return None\n return relCols\n\ndef compUpdateOpMetrics(predOpList, actualOpList, evalOpsP, evalOpsR, evalOpsF, evalOpsObj):\n (P,R,F) = computeOpF1(predOpList, 
actualOpList)\n updateOpMetrics(P, R, F, evalOpsP, evalOpsR, evalOpsF, evalOpsObj)\n return\n\ndef compUpdateCondSelMetrics(predOpsObj, nextActualOpsObj, evalOpsObj):\n try:\n if evalOpsObj.tablesF[evalOpsObj.curEpisode] == 1.0 and evalOpsObj.curEpisode in evalOpsObj.selColsP \\\n and evalOpsObj.curEpisode in evalOpsObj.selColsR and evalOpsObj.curEpisode in evalOpsObj.selColsF:\n updateOpMetrics(evalOpsObj.selColsP[evalOpsObj.curEpisode], evalOpsObj.selColsR[evalOpsObj.curEpisode], evalOpsObj.selColsF[evalOpsObj.curEpisode], evalOpsObj.condSelColsP, evalOpsObj.condSelColsR, evalOpsObj.condSelColsF, evalOpsObj)\n elif evalOpsObj.tablesF[evalOpsObj.curEpisode] > 0.0: # partial overlap of tables\n accTables = list(set(predOpsObj.tables).intersection(set(nextActualOpsObj.tables)))\n relPredCols = computeRelevantCols(accTables, predOpsObj.selCols)\n relActualCols = computeRelevantCols(accTables, nextActualOpsObj.selCols)\n compUpdateOpMetrics(relPredCols, relActualCols, evalOpsObj.condSelColsP, evalOpsObj.condSelColsR, evalOpsObj.condSelColsF, evalOpsObj)\n else:\n updateOpMetrics(1.0, 1.0, 1.0, evalOpsObj.condSelColsP, evalOpsObj.condSelColsR, evalOpsObj.condSelColsF, evalOpsObj)\n except:\n updateOpMetrics(1.0, 1.0, 1.0, evalOpsObj.condSelColsP, evalOpsObj.condSelColsR, evalOpsObj.condSelColsF,\n evalOpsObj)\n pass\n return\n\ndef computeF1(evalOpsObj, predOpsObj, nextActualOpsObj):\n if predOpsObj.queryType == nextActualOpsObj.queryType:\n updateOpMetrics(1.0, 1.0, 1.0, evalOpsObj.queryTypeP, evalOpsObj.queryTypeR, evalOpsObj.queryTypeF, evalOpsObj)\n elif predOpsObj.queryType != nextActualOpsObj.queryType:\n updateOpMetrics(0.0, 0.0, 0.0, evalOpsObj.queryTypeP, evalOpsObj.queryTypeR, evalOpsObj.queryTypeF, evalOpsObj)\n if predOpsObj.limit == nextActualOpsObj.limit:\n updateOpMetrics(1.0, 1.0, 1.0, evalOpsObj.limitP, evalOpsObj.limitR, evalOpsObj.limitF, evalOpsObj)\n elif predOpsObj.limit != nextActualOpsObj.limit:\n updateOpMetrics(0.0, 0.0, 0.0, evalOpsObj.limitP, evalOpsObj.limitR, evalOpsObj.limitF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.tables, nextActualOpsObj.tables, evalOpsObj.tablesP,\n evalOpsObj.tablesR, evalOpsObj.tablesF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.projCols, nextActualOpsObj.projCols, evalOpsObj.projColsP,\n evalOpsObj.projColsR, evalOpsObj.projColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.avgCols, nextActualOpsObj.avgCols, evalOpsObj.avgColsP,\n evalOpsObj.avgColsR, evalOpsObj.avgColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.minCols, nextActualOpsObj.minCols, evalOpsObj.minColsP,\n evalOpsObj.minColsR, evalOpsObj.minColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.maxCols, nextActualOpsObj.maxCols, evalOpsObj.maxColsP,\n evalOpsObj.maxColsR, evalOpsObj.maxColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.sumCols, nextActualOpsObj.sumCols, evalOpsObj.sumColsP,\n evalOpsObj.sumColsR, evalOpsObj.sumColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.countCols, nextActualOpsObj.countCols, evalOpsObj.countColsP,\n evalOpsObj.countColsR, evalOpsObj.countColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.selCols, nextActualOpsObj.selCols, evalOpsObj.selColsP,\n evalOpsObj.selColsR, evalOpsObj.selColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.groupByCols, nextActualOpsObj.groupByCols, evalOpsObj.groupByColsP,\n evalOpsObj.groupByColsR, evalOpsObj.groupByColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.orderByCols, nextActualOpsObj.orderByCols, evalOpsObj.orderByColsP,\n evalOpsObj.orderByColsR, evalOpsObj.orderByColsF, evalOpsObj)\n 
compUpdateOpMetrics(predOpsObj.havingCols, nextActualOpsObj.havingCols, evalOpsObj.havingColsP,\n evalOpsObj.havingColsR, evalOpsObj.havingColsF, evalOpsObj)\n compUpdateOpMetrics(predOpsObj.joinPreds, nextActualOpsObj.joinPreds, evalOpsObj.joinPredsP,\n evalOpsObj.joinPredsR, evalOpsObj.joinPredsF, evalOpsObj)\n compUpdateCondSelMetrics(predOpsObj, nextActualOpsObj, evalOpsObj)\n return\n\n \ndef createEvalMetricsOpWise(evalOpsObj):\n prevEpisode = -1\n rank = float(\"-inf\")\n nextActualOpsObj = None\n predOpsObj = None\n with open(evalOpsObj.logFile) as f:\n for line in f:\n if line.startswith(\"#Episodes\"):\n evalOpsObj.curEpisode = int(line.strip().split(\";\")[0].split(\":\")[1])\n numTokens = len(line.strip().split(\";\"))\n rank = int(line.strip().split(\";\")[numTokens-3].split(\":\")[1])\n if rank == -1: # this can happen when all predicted queries are equally bad\n rank = 0\n assert rank >= 0 and rank < int(evalOpsObj.configDict['TOP_K'])\n MRR = float(1.0) / float(rank+1)\n if evalOpsObj.curEpisode != prevEpisode:\n evalOpsObj.numEpQueries[evalOpsObj.curEpisode] = 1\n assert evalOpsObj.curEpisode not in evalOpsObj.meanReciprocalRank\n evalOpsObj.meanReciprocalRank[evalOpsObj.curEpisode] = MRR\n else:\n evalOpsObj.numEpQueries[evalOpsObj.curEpisode] += 1\n evalOpsObj.meanReciprocalRank[evalOpsObj.curEpisode] = (evalOpsObj.meanReciprocalRank[evalOpsObj.curEpisode] + MRR)\n elif line.startswith(\"Actual SQL\"):\n evalOpsObj.curQueryIndex = -1\n nextActualOpsObj = nextActualOps()\n elif line.startswith(\"Predicted SQL Ops\"):\n substrTokens = line.strip().split(\":\")[0].split(\" \")\n evalOpsObj.curQueryIndex = int(substrTokens[len(substrTokens)-1])\n if evalOpsObj.curQueryIndex == rank:\n predOpsObj = nextActualOps()\n elif evalOpsObj.curQueryIndex == -1:\n parseLineAddOp(line, nextActualOpsObj)\n elif evalOpsObj.curQueryIndex == rank:\n parseLineAddOp(line, predOpsObj)\n elif line.startswith(\"---\") and predOpsObj is not None and evalOpsObj is not None:\n computeF1(evalOpsObj, predOpsObj, nextActualOpsObj)\n prevEpisode = evalOpsObj.curEpisode\n return evalOpsObj\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-config\", help=\"config file to parse\", type=str, required=True)\n parser.add_argument(\"-log\", help=\"log filename to analyze\", type=str, required=True)\n #parser.add_argument(\"-lineNum\", help=\"line Number to analyze\", type=int, required=True)\n args = parser.parse_args()\n evalOpsObj = evalOps(args.config, args.log)\n evalOpsObj = createEvalMetricsOpWise(evalOpsObj)\n plotEvalMetricsOpWise(evalOpsObj)","repo_name":"vamsikrishna1902/IntentPredictionEval","sub_path":"analyzeLogsAllEpisodes.py","file_name":"analyzeLogsAllEpisodes.py","file_ext":"py","file_size_in_byte":18984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72696752214","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, abort, json\nfrom flask_restful import Api\nfrom flask_socketio import SocketIO, Namespace\nimport model\nimport rest\nimport util\n\nEVENT_API=\"/api/event/\"\nNAME_API=\"/api/name/\"\nCOMMUNITY_API=\"/api/community/\"\nTAG_API=\"/api/tag/\"\nCOMPETITOR_API=\"/api/competitor/\"\nNOTIFICATION_API=\"/api/notifications/\"\n\napp = Flask(__name__)\napi = Api(app)\nsocketio = SocketIO(app, json=json)\n\neventModel = model.EventModel()\ntagModel = model.TagModel()\n\nnotifications = rest.Notifications('/api/notifications', 
socketio)\n\nsocketio.on_namespace(notifications)\napi.add_resource(rest.Event, EVENT_API + \"<id>\",\n\t\tresource_class_kwargs=rest.Event.makeArgs(\n                notifications, EVENT_API, eventModel))\napi.add_resource(rest.Events, EVENT_API,\n\t\tresource_class_kwargs=rest.Events.makeArgs(\n                notifications, EVENT_API, eventModel))\napi.add_resource(rest.Names, NAME_API,\n\t\tresource_class_kwargs=rest.Names.makeArgs(\n                notifications, NAME_API))\napi.add_resource(rest.Name, NAME_API + \"<id>\",\n                resource_class_kwargs=rest.Name.makeArgs(\n                notifications, NAME_API))\napi.add_resource(rest.Communities, COMMUNITY_API,\n\t\tresource_class_kwargs=rest.Communities.makeArgs(\n                notifications, COMMUNITY_API))\napi.add_resource(rest.Community, COMMUNITY_API + \"<id>\",\n\t\tresource_class_kwargs=rest.Community.makeArgs(\n                notifications, COMMUNITY_API))\napi.add_resource(rest.Tags, TAG_API,\n                resource_class_kwargs=rest.Tags.makeArgs(\n                notifications, TAG_API, tagModel))\napi.add_resource(rest.Tag, TAG_API + \"<id>\",\n                resource_class_kwargs=rest.Tags.makeArgs(\n                notifications, TAG_API, tagModel))\napi.add_resource(rest.Competitors, COMPETITOR_API,\n                resource_class_kwargs=rest.Competitors.makeArgs(\n                notifications, COMPETITOR_API))\napi.add_resource(rest.Competitor, COMPETITOR_API + \"<id>\",\n                resource_class_kwargs=rest.Competitor.makeArgs(\n                notifications, COMPETITOR_API))\n\n@app.route('/')\ndef root():\n\tif not app.debug:\n\t\tabort(404)\n\ttry:\n\t\tf = open('dist/index.html')\n\texcept IOError:\n\t\tabort(404)\n\t\treturn\n\treturn f.read()\n\n@app.route('/<path:path>')\ndef catch_all(path):\n\tif not app.debug:\n\t\tabort(404)\n\ttry:\n\t\tf = open(\"dist/\" + path)\n\texcept IOError:\n\t\tabort(404)\n\t\treturn\n\treturn f.read()\n\nif __name__ == \"__main__\":\n\tsocketio.run(app, debug=True)\n","repo_name":"jjojala/results","sub_path":"server/tupal-server.py","file_name":"tupal-server.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"72724272213","text":"from datetime import datetime\n\nclass Docente(object):\n    \n    db = None\n\n    def __init__(self, apellido, nombre, fecha_nac, localidad_id, domicilio, genero_id, tipo_doc_id, numero, tel):\n        self.apellido = apellido\n        self.nombre = nombre\n        self.fecha_nac = fecha_nac\n        self.localidad_id = localidad_id\n        self.domicilio = domicilio\n        self.genero_id = genero_id\n        self.tipo_doc_id = tipo_doc_id\n        self.numero = numero\n        self.tel = tel\n    \n    \n    # RETRIEVE ALL TEACHERS\n    @classmethod\n    def all(self):\n        sql = 'SELECT * FROM docente'\n        cursor = self.db.cursor()\n        cursor.execute(sql)\n\n        return cursor.fetchall()\n    \n    \n    # RETRIEVE A TEACHER BY ID\n    @classmethod\n    def get_docente(self, id):\n        sql = 'SELECT * FROM docente where id = %s'\n        cursor = self.db.cursor()\n        cursor.execute(sql, (id,))\n\n        return cursor.fetchone()\n    \n    \n    # INSERT A TEACHER\n    @classmethod\n    def insert(self, docente):\n        sql = \"\"\"\n        INSERT INTO docente (apellido, nombre, fecha_nac, localidad_id, domicilio, genero_id, tipo_doc_id, numero, tel)\n        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\n        \"\"\"\n\n        cursor = self.db.cursor()\n        cursor.execute(sql, (\n            docente.apellido, \n            docente.nombre, \n            docente.fecha_nac, \n            docente.localidad_id,\n            docente.domicilio, \n            docente.genero_id, \n            docente.tipo_doc_id, \n            docente.numero, \n            docente.tel))\n        self.db.commit()\n\n        return True\n    \n    # RETRIEVE ALL TEACHERS MATCHING A SEARCH TERM\n    @classmethod\n    def get_docentes(self, termino = None):\n        params = []\n        sql = \"\"\"\n        SELECT * FROM docente\n        \"\"\"\n        if termino != None:\n            termino = '%'+termino+'%'\n            sql = sql + \"\"\" WHERE (nombre LIKE %s OR apellido LIKE %s) \"\"\"\n            params.append(termino)\n            params.append(termino)\n\n        cursor = self.db.cursor()\n        cursor.execute(sql, params)\n\n        return cursor.fetchall()\n    \n    # RETRIEVE ALL TEACHERS MATCHING A SEARCH TERM, FILTERED BY ROLE AND PAGINATED\n    @classmethod\n    def get_docentes_paginados(self, limit, offset = 1, termino = None):\n        \n        sql = \"\"\"\n        SELECT * FROM docente\n        \"\"\"\n\n        if termino != None:\n            termino = '%'+termino+'%'\n            sql = sql + \"\"\" WHERE (nombre LIKE %s OR apellido LIKE %s)\"\"\"\n        \n        sql = sql + \"\"\"\n        LIMIT %s OFFSET %s\n        \"\"\"\n\n        if termino != None:\n            params = (termino,termino, limit, offset)\n        else:\n            params = (limit, offset)\n\n        cursor = self.db.cursor()\n        cursor.execute(sql, params)\n\n        return cursor.fetchall()\n    \n    # RETRIEVE A TEACHER BY ID WITH FULL DETAILS\n    @classmethod\n    def get_docente_show(self, id):\n        sql = \"\"\"\n        SELECT d.id, d.apellido, d.nombre, d.fecha_nac, d.domicilio, d.numero,\n        d.tel, g.nombre AS genero\n        FROM docente d\n        INNER JOIN genero g ON (g.id = d.genero_id)\n        WHERE d.id = %s \n        \"\"\"\n        cursor = self.db.cursor()\n        cursor.execute(sql, (id,))\n        return cursor.fetchone()\n\n\n    # DELETE A TEACHER\n    @classmethod\n    def eliminar(self, id_docente):\n        cursor = self.db.cursor()\n        \n        sql = \"\"\"\n        DELETE FROM docente\n        WHERE id = %s\n        \"\"\"\n\n        ok = cursor.execute(sql, (id_docente,))\n        self.db.commit()\n\n        return ok\n\n    # ACTIVATE / DEACTIVATE A TEACHER (toggles the soft-delete flag)\n    @classmethod\n    def activar(self, id_docente):\n        cursor = self.db.cursor()\n        \n        sql = \"\"\"\n        UPDATE docente \n        SET borrado_logico = not borrado_logico\n        WHERE id = %s\n        \"\"\"\n\n        ok = cursor.execute(sql, (id_docente,))\n        self.db.commit()\n\n        return ok\n    \n    # EDIT A TEACHER\n    @classmethod\n    def editar(self, id_docente, apellido, nombre, fecha_nac, localidad_id,domicilio, genero_id,tipo_doc_id, numero, tel):\n        \n        sql = \"\"\"\n        UPDATE docente \n        SET apellido = %s, nombre = %s, fecha_nac = %s, localidad_id = %s, domicilio = %s, genero_id = %s, tipo_doc_id = %s, numero = %s, tel = %s\n        WHERE id = %s\n        \"\"\"\n\n        cursor = self.db.cursor()\n        ok = cursor.execute(sql, ( apellido, nombre, fecha_nac, localidad_id, domicilio, genero_id, tipo_doc_id, numero, tel,id_docente))\n        self.db.commit()\n\n        return ok\n    \n    # CHECK WHETHER A TIPO_DOC+NUMERO COMBINATION ALREADY EXISTS\n    @classmethod\n    def existe_doc(self, tipo_doc_id, numero):\n        sql = \"\"\"\n        SELECT id\n        FROM docente\n        WHERE tipo_doc_id = %s\n        AND numero = %s\n        \"\"\"\n        cursor = self.db.cursor()\n        cursor.execute(sql, (tipo_doc_id, numero))\n        return cursor.fetchone()","repo_name":"bdgarat/ProySoft_OrquestaEscuelaBerisso_Flask","sub_path":"flaskps/models/Docente.py","file_name":"Docente.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"23564242905","text":"import logging\n\n# this logger is used only to save the position of the uav\n\n\ndef create_logger():\n    logger = logging.getLogger(\"POSITION ESTIMATION\")\n    fileHandler = logging.FileHandler(\"Logging/position_log.csv\")\n    fileFormatter = logging.Formatter('%(message)s;%(asctime)s,%(msecs)03d', '%H:%M:%S')\n    fileHandler.setFormatter(fileFormatter)\n    fileHandler.setLevel(logging.DEBUG)\n    logger.addHandler(fileHandler)\n    logger.setLevel(logging.DEBUG)\n    return logger\n\n\nlogger = 
create_logger()","repo_name":"Jamie-Wubben/vision_drone","sub_path":"Logging/PositionLogger.py","file_name":"PositionLogger.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"20516230810","text":"import cv2 as cv\nfrom deepface import DeepFace\n\n\n# Class that detects face positions and facial expressions in an image\nclass FaceDetection():\n    def __init__(self):\n        # The argument to cascade_setter is a haarcascade frontal-face detection model that you provide yourself.\n        self.cascade_frontalface = self.cascade_setter(\"model/haarcascade_frontalface_alt.xml\")\n        self.faces = None\n        self.dominant_emotion = 'neutral'\n        print(\"FaceDetection Initialization Finished\")\n\n\n    # Set up the Haar cascade classifier\n    def cascade_setter(self,cascade_name):\n        cascade = cv.CascadeClassifier(cascade_name)\n        if cascade.empty():\n            print(\"Error: cascadeFile not found\\n\")\n            exit(0)\n        return cascade\n    \n\n    # Search for face positions\n    def search_face(self, cap_data, output_data):\n        self.faces = self.cascade_frontalface.detectMultiScale(cap_data.smallImg, 1.1, 2, cv.CASCADE_SCALE_IMAGE, (20,20))\n        for i in range(len(self.faces)):\n            center, radius = self.detected_face_setter(cap_data.scale, i)\n            cap_data.input_add_rect(center, radius, (0, 255, 0, 0))\n            cap_data.input_add_text(self.dominant_emotion, center, 20, (255, 0, 0, 0))\n            output_data.update_x(center[0], cap_data.input_frame.shape[0])\n\n\n    # Convert the coordinates of faces detected on the downscaled image back to the original image\n    def detected_face_setter(self, scale, i):\n        x = int((self.faces[i][0]+self.faces[i][2]*0.5)*scale)\n        y = int((self.faces[i][1]+self.faces[i][3]*0.5)*scale)\n        radius = int((self.faces[i][2]+self.faces[i][3])*0.25*scale)\n        center = (x,y)\n        return center, radius\n    \n\n    # Detect the facial expression of the faces visible in the image\n    def emotion_analyze(self, cap_data, output_data):\n        try:\n            result_emotion = DeepFace.analyze(cap_data.smallImg_colored, actions=['emotion'])\n            self.dominant_emotion = result_emotion['dominant_emotion']\n            output_data.update_how(self.dominant_emotion)\n        except Exception as e:\n            pass\n","repo_name":"Muna-akki/NovelGameMaker","sub_path":"src/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"73491982612","text":"# ACGAN references\n# https://github.com/eriklindernoren/PyTorch-GAN\n# https://github.com/znxlwm/pytorch-generative-model-collections\n# https://github.com/znxlwm/pytorch-generative-model-collections\n\nimport argparse\nimport os\nimport copy\nimport torch\nfrom MeRGAN import MeRGAN\nfrom TestGenerator import TestGenerator\n#from apex import amp\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', type=str, default='MNIST', choices=['MNIST', 'SVHN'], help='dataset to train and generate')\n    parser.add_argument('--class_num', type=int, default=10, help='the number of classes you want to train')\n    parser.add_argument('--epoch', type=int, default=20, help='the number of epochs to run')\n    parser.add_argument('--batch_size', type=int, default=64, help='the size of batch')\n    parser.add_argument('--num_generated', type=int, default=4096, help='the number of images to generate in each class')\n    parser.add_argument('--result_dir', type=str, default='result')\n    parser.add_argument('--network_dir', type=str, default='network')\n    parser.add_argument('--method', type=str, default='joint_retraining', choices=['jrt', 'joint_retraining', 'ra', 'replay_alignment'])\n    parser.add_argument('--work', type=str, default='train', choices=['train', 'test'], help='train or test')\n    
parser.add_argument('--task', type=str, default='to_9', choices=['to_9', 'to_4'], help='the number of classes to classify')\n\n return check_args(parser.parse_args())\n\n\ndef check_args(args):\n if args.method == 'jrt':\n args.method = 'joint_retraining'\n elif args.method == 'ra':\n args.method = 'replay_alignment'\n\n if not os.path.exists(args.result_dir):\n os.makedirs(args.result_dir)\n\n if not os.path.exists(args.result_dir + '/' + args.dataset):\n os.makedirs(args.result_dir + '/' + args.dataset)\n\n if not os.path.exists(args.result_dir + '/' + args.dataset + '/' + args.method):\n os.makedirs(args.result_dir + '/' + args.dataset + '/' + args.method)\n\n for i in range(args.class_num):\n if not os.path.exists(args.result_dir + '/' + args.dataset + '/' + args.method + '/to_' + str(i)):\n os.makedirs(args.result_dir + '/' + args.dataset + '/' + args.method + '/to_' + str(i))\n\n if not os.path.exists(args.network_dir):\n os.makedirs(args.network_dir)\n\n if not os.path.exists(args.network_dir + '/' + args.dataset):\n os.makedirs(args.network_dir + '/' + args.dataset)\n\n if not os.path.exists(args.network_dir + '/' + args.dataset + '/' + args.method):\n os.makedirs(args.network_dir + '/' + args.dataset + '/' + args.method)\n\n return args\n\n\ndef main():\n args = parse_args()\n if args is None:\n exit()\n\n if args.work == 'train':\n mergan = MeRGAN(args)\n #dummy_opt = torch.optim.Adam(mergan.parameters(), lr=1e-3)\n #mergan, optimizer = amp.initialize(mergan, dummy_opt)\n if args.method == 'joint_retraining':\n for i in range(10):\n if i == 0:\n mergan.init_ACGAN(mergan.data_list[i], i)\n else:\n G = mergan.ACGAN.G\n D = mergan.ACGAN.D\n mergan.init_ACGAN(generated_data, i, G, D)\n generated_data = mergan.generate_trainset()\n if i < 9:\n generated_data.concat_datasets(mergan.data_list[i + 1])\n if i == 4:\n torch.save(mergan.ACGAN.G.state_dict(), './network/' + args.dataset + '/' + args.method + '/generator_' + args.method + '_to_4.pt')\n torch.save(mergan.ACGAN.G.state_dict(), './network/' + args.dataset + '/' + args.method + '/generator_' + args.method + '_to_9.pt')\n else:\n for i in range(10):\n if i == 0:\n mergan.init_ACGAN(mergan.data_list[i], i)\n else:\n mergan.init_ACGAN(mergan.data_list[i], i, G_past, D_past)\n G_past = copy.deepcopy(mergan.ACGAN.G)\n D_past = copy.deepcopy(mergan.ACGAN.D)\n if i == 4:\n torch.save(mergan.ACGAN.G.state_dict(), './network/' + args.dataset + '/' + args.method + '/generator_' + args.method + '_to_4.pt')\n torch.save(mergan.ACGAN.G.state_dict(), './network/' + args.dataset + '/' + args.method + '/generator_' + args.method + '_to_9.pt')\n\n elif args.work == 'test':\n test_generator = TestGenerator(args)\n test_generator.test()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dk-hong/MeRGAN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39153949752","text":"#https://leetcode.com/problems/product-of-array-except-self/\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n res = [1] * (len(nums))\n\n prefix = 1\n for i in range(len(nums)): #multiplying forwards\n res[i] = prefix\n prefix *= nums[i]\n postfix = 1\n\n for i in range(len(nums)-1, -1, -1): #multiplying backwards\n res[i] *= postfix\n postfix *= nums[i]\n\n \n return res\n\n#revisited\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n res = [1 for x in range(len(nums))]\n 
\n before = 1\n for i in range(len(nums)):\n res[i] *= before\n before *= nums[i]\n \n after = 1\n for i in range(len(nums)-1, -1, -1):\n res[i] *= after\n after *= nums[i]\n \n return res","repo_name":"pallas0/InterviewAlgos","sub_path":"product_array_except_self.py","file_name":"product_array_except_self.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22369506380","text":"absent = [2,5]\nno_book = [7]\nfor student in range(1, 11):\n if student in absent:\n continue\n elif student in no_book:\n print(\"{0}.. end\".format(student))\n break\n print(\"{0} zzz\".format(student))\n\n\nstudents = [1,2,3,4,5]\nprint(students)\n\nstudents = [i+100 for i in students]\n\nprint(students)\n\nstudents = [\"aaa\", \"bbb\", \"ccc\"]\n#students = [len(i) for i in students]\n#print(students)\n\nstudents = [i.upper() for i in students]\nprint(students)\n\nfrom random import * \ncnt = 0 \nfor i in range(1,51): \n time = randrange(5,51) \n if 5 <= time <= 15:\n print(\"[0] {0} zzzzz {1}\".format(i, time))\n cnt += 1\n else: \n print(\"[ ] {0} zzzzz {1}\".format(i, time))\n\nprint(\"total : {0}\".format(cnt))","repo_name":"sofiaaaaaa/python-rpa","sub_path":"python-basic/loop/for_2.py","file_name":"for_2.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73533260693","text":"# coding=utf-8\nfrom operator import itemgetter\nimport time\nimport logging\nfrom blueman.bluez.Device import Device\nfrom blueman.bluez.Manager import DBusNoSuchAdapterError\nfrom blueman.gui.Notification import Notification\nfrom blueman.Sdp import ServiceUUID\nfrom blueman.plugins.AppletPlugin import AppletPlugin\n\n\nREGISTRY_VERSION = 0\n\n\nclass AdapterNotFound(Exception):\n pass\n\n\nclass DeviceNotFound(Exception):\n pass\n\n\nclass RecentConns(AppletPlugin):\n __depends__ = [\"Menu\"]\n __icon__ = \"document-open-recent\"\n __description__ = _(\"Provides a menu item that contains last used connections for quick access\")\n __author__ = \"Walmis\"\n\n __gsettings__ = {\n \"schema\": \"org.blueman.plugins.recentconns\",\n \"path\": None\n }\n __options__ = {\n \"max-items\": {\"type\": int,\n \"default\": 6,\n # the maximum number of items RecentConns menu will display\n \"name\": _(\"Maximum items\"),\n \"desc\": _(\"The maximum number of items recent connections menu will display.\"),\n \"range\": (6, 20)},\n \"recent-connections\": {\"type\": list, \"default\": \"[]\"}\n }\n\n _items = None\n\n def on_load(self):\n self.Adapters = {}\n self.__menuitems = []\n\n self.item = self.parent.Plugins.Menu.add(self, 52, text=_(\"Recent _Connections\") + \"…\",\n icon_name=\"document-open-recent\", submenu_function=self.get_menu)\n self.parent.Plugins.Menu.add(self, 53)\n\n self.deferred = False\n\n def store_state(self):\n to_store = []\n for item in self.items:\n x = item.copy()\n x[\"time\"] = str(x[\"time\"])\n x[\"uuid\"] = str(x[\"uuid\"])\n x[\"device\"] = ''\n x[\"mitem\"] = ''\n to_store.append(x)\n\n self.set_option(\"recent-connections\", to_store)\n\n def change_sensitivity(self, sensitive):\n if 'PowerManager' in self.parent.Plugins.get_loaded():\n power = self.parent.Plugins.PowerManager.get_bluetooth_status()\n else:\n power = True\n\n sensitive = sensitive and \\\n self.parent.Manager and \\\n power and \\\n self.items is not None and \\\n (len(self.items) > 0)\n\n self.item.set_sensitive(sensitive)\n\n def 
on_power_state_changed(self, manager, state):\n self.change_sensitivity(state)\n if state and self.deferred:\n self.deferred = False\n self.on_manager_state_changed(state)\n\n def on_unload(self):\n self.parent.Plugins.Menu.unregister(self)\n\n def initialize(self):\n logging.info(\"rebuilding menu\")\n\n self.__menuitems = []\n self.parent.Plugins.Menu.on_menu_changed()\n\n self.items.sort(key=itemgetter(\"time\"), reverse=True)\n\n self._items = self.items[0:self.get_option(\"max-items\")]\n self.items.reverse()\n\n if len(self.items) == 0:\n self.change_sensitivity(False)\n else:\n self.change_sensitivity(True)\n\n count = 0\n for item in self.items:\n if count < self.get_option(\"max-items\"):\n self.add_item(item)\n count += 1\n\n def on_manager_state_changed(self, state):\n if state:\n if 'PowerManager' in self.parent.Plugins.get_loaded():\n if not self.parent.Plugins.PowerManager.get_bluetooth_status():\n self.deferred = True\n self.item.set_sensitive(False)\n return\n\n self.item.set_sensitive(True)\n adapters = self.parent.Manager.get_adapters()\n\n self.Adapters = {}\n for adapter in adapters:\n self.Adapters[str(adapter.get_object_path())] = str(adapter[\"Address\"])\n\n for i in reversed(self.items):\n try:\n i[\"device\"] = self.get_device_path(i)\n except (AdapterNotFound, DeviceNotFound):\n pass\n\n self.initialize()\n else:\n self.item.set_sensitive(False)\n return\n\n self.change_sensitivity(state)\n\n def on_device_removed(self, path):\n for item in reversed(self.items):\n if item['device'] == path:\n self.items.remove(item)\n self.initialize()\n\n def on_adapter_added(self, path):\n a = self.parent.Manager.get_adapter(path)\n self.Adapters[path] = a[\"Address\"]\n self.initialize()\n\n def on_adapter_removed(self, path):\n if str(path) in self.Adapters:\n del self.Adapters[str(path)]\n else:\n logging.warning(\"Adapter not found in list\")\n\n self.initialize()\n\n def notify(self, object_path, uuid):\n device = Device(object_path)\n logging.info(\"%s %s\" % (device, uuid))\n item = {}\n try:\n adapter = self.parent.Manager.get_adapter(device['Adapter'])\n except DBusNoSuchAdapterError:\n logging.warning(\"adapter not found\")\n return\n\n item[\"adapter\"] = adapter[\"Address\"]\n item[\"address\"] = device['Address']\n item[\"alias\"] = device['Alias']\n item[\"icon\"] = device['Icon']\n item[\"name\"] = ServiceUUID(uuid).name\n item[\"uuid\"] = uuid\n item[\"time\"] = time.time()\n item[\"device\"] = object_path\n item[\"mitem\"] = None # menu item object\n\n for i in self.items:\n if i[\"adapter\"] == item[\"adapter\"] and \\\n i[\"address\"] == item[\"address\"] and \\\n i[\"uuid\"] == item[\"uuid\"]:\n i[\"time\"] = item[\"time\"]\n\n i[\"device\"] = item[\"device\"]\n self.initialize()\n return\n\n self.items.append(item)\n self.initialize()\n\n self.store_state()\n\n def on_item_activated(self, item):\n logging.info(\"Connect %s %s\" % (item[\"address\"], item[\"uuid\"]))\n\n item[\"mitem\"][\"sensitive\"] = False\n self.parent.Plugins.Menu.on_menu_changed()\n\n def reply(*args):\n Notification(_(\"Connected\"), _(\"Connected to %s\") % item[\"mitem\"][\"text\"],\n icon_name=item[\"icon\"]).show()\n item[\"mitem\"][\"sensitive\"] = True\n self.parent.Plugins.Menu.on_menu_changed()\n\n def err(reason):\n Notification(_(\"Failed to connect\"), str(reason).split(\": \")[-1],\n icon_name=\"dialog-error\").show()\n item[\"mitem\"][\"sensitive\"] = True\n self.parent.Plugins.Menu.on_menu_changed()\n\n self.parent.Plugins.DBusService.connect_service(item[\"device\"], 
item[\"uuid\"], reply, err)\n\n def add_item(self, item):\n if not item[\"mitem\"]:\n mitem = {\"icon_name\": item[\"icon\"], \"callback\": lambda itm=item: self.on_item_activated(itm)}\n item[\"mitem\"] = mitem\n else:\n mitem = item[\"mitem\"]\n mitem['sensitive'] = True\n mitem['tooltip'] = None\n\n item[\"mitem\"][\"text\"] = _(\"%(service)s on %(device)s\") % {\"service\": item[\"name\"], \"device\": item[\"alias\"]}\n item[\"mitem\"][\"markup\"] = True\n\n if item[\"adapter\"] not in self.Adapters.values():\n item[\"device\"] = None\n elif not item[\"device\"] and item[\"adapter\"] in self.Adapters.values():\n try:\n item[\"device\"] = self.get_device_path(item)\n\n except (AdapterNotFound, DeviceNotFound):\n self.items.remove(item)\n self.initialize()\n\n if not item[\"device\"]:\n mitem[\"sensitive\"] = False\n mitem[\"tooltip\"] = _(\"Adapter for this connection is not available\")\n\n self.__menuitems.append(mitem)\n self.parent.Plugins.Menu.on_menu_changed()\n\n def get_menu(self):\n return self.__menuitems\n\n def get_device_path(self, item):\n try:\n adapter = self.parent.Manager.get_adapter(item[\"adapter\"])\n except DBusNoSuchAdapterError:\n raise AdapterNotFound\n\n device = self.parent.Manager.find_device(item[\"address\"], adapter.get_object_path())\n if device is None:\n raise DeviceNotFound\n else:\n return device.get_object_path()\n\n @property\n def items(self):\n if self._items is not None:\n return self._items\n\n items = self.get_option(\"recent-connections\")\n\n if not items:\n self._items = []\n return self._items\n\n for i in reversed(items):\n if \"name\" not in i or \"uuid\" not in i:\n items.remove(i)\n try:\n i[\"device\"] = self.get_device_path(i)\n except AdapterNotFound:\n i[\"device\"] = None\n except DeviceNotFound:\n items.remove(i)\n\n i[\"time\"] = float(i[\"time\"])\n\n self._items = items\n\n return self._items\n","repo_name":"Caesar-github/blueman","sub_path":"blueman/plugins/applet/RecentConns.py","file_name":"RecentConns.py","file_ext":"py","file_size_in_byte":8955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"37637540116","text":"from collections import deque\n\n\nN, K = map(int, input().split())\narr = deque(range(1, N + 1))\nanswer = []\nwhile arr:\n arr.rotate(1 - K)\n answer.append(str(arr.popleft()))\nprint(f'<{\", \".join(answer)}>')\n","repo_name":"ITCenAlgo/CenAlgo","sub_path":"09주차/BOJ_11866_요세푸스문제0/BOJ_11866_요세푸스문제0_이재만.py","file_name":"BOJ_11866_요세푸스문제0_이재만.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"35732901899","text":"from unicodedata import name\nfrom django.urls import path,re_path,include\nfrom.import views\n\nfrom django.views.generic import RedirectView\n\n\nurlpatterns= [\n \n path('',views.results,name='results'),\n path('results',views.results,name='results'),\n path('update',views.update, name='update'),\n #path('user_results',views.user_results, name='user_results'),\n path('update#Search/', RedirectView.as_view(url = '/results#Search/')),\n path('update#Analysis/', RedirectView.as_view(url = '/results#Analysis/')),\n path('index',views.index,name='index'),\n path('login',RedirectView.as_view(url='accounts/login')),\n re_path('accounts/',include('accounts.urls')),\n \n \n #path('api', 
views.ChartData.as_view()),\n]","repo_name":"gunnusravani/Student_Academic_Analysis_System","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30206162330","text":"import random\n\n\nclass Command:\n def __init__(self, day, route_id, cargo_name, expiry, amount, dock):\n self.day = day\n self.route_id = route_id\n self.cargo_name = cargo_name\n self.expiry = expiry\n self.amount = amount\n self.dock = dock\n\n def get_string_line(self):\n return '{day} {route_id} {cargo_name} {expiry} {amount}'.format(\n day=self.day,\n route_id=self.route_id,\n cargo_name=self.cargo_name,\n expiry=self.expiry,\n amount=self.amount\n )\n\n\nclass Dock:\n def __init__(self, name, cargo=None):\n self.name = name\n self.cargo_items = cargo or []\n self.commands = []\n\n def add_to_ship(self, ship, cargo, current_time):\n command = Command(current_time, ship.route_id, cargo.name, cargo.expiry, cargo.amount, self.name)\n self.commands.append(command)\n ship.add_cargo(cargo)\n\n def handle_ships(self, ships, current_time):\n ship_cargo_items = []\n for ship in ships:\n ship_cargo_items += ship.unload_cargo()\n\n money = 0\n for ship_cargo in ship_cargo_items:\n if ship_cargo.target == self.name:\n ship_cargo.completed = True\n money += ship_cargo.amount * (30 if ship_cargo.expiry > current_time else 10)\n\n self.cargo_items += ship_cargo_items\n\n for cargo_item in self.cargo_items:\n cargo_item.visited.add(self.name)\n\n cargo_items_to_further_ship = [cargo_item for cargo_item in self.cargo_items if not cargo_item.completed]\n cargo_ids_to_remove = set()\n ships_to_upload = set()\n\n for ship in ships:\n for cargo_item in cargo_items_to_further_ship:\n if ship.space_left() == 0:\n break\n if cargo_item.can_go_to(ship.return_other_destination(self.name)):\n if cargo_item.amount > ship.space_left():\n new_cargo_item = cargo_item.divide(cargo_item.amount - ship.space_left())\n self.add_to_ship(ship, new_cargo_item, current_time)\n else:\n self.add_to_ship(ship, cargo_item, current_time)\n cargo_ids_to_remove.add(id(cargo_item))\n if ship.space_left() > 0:\n ships_to_upload.add(ship)\n\n self.cargo_items = [cargo_item for cargo_item in self.cargo_items if id(cargo_item) not in cargo_ids_to_remove]\n\n for ship in ships_to_upload:\n for cargo_item in self.cargo_items:\n if ship.space_left() == 0:\n break\n if random.choice([True, False]):\n if cargo_item.amount > ship.space_left():\n new_cargo_item = cargo_item.divide(cargo_item.amount - ship.space_left())\n self.add_to_ship(ship, new_cargo_item, current_time)\n else:\n self.add_to_ship(ship, cargo_item, current_time)\n cargo_ids_to_remove.add(id(cargo_item))\n\n self.cargo_items = [cargo_item for cargo_item in self.cargo_items if id(cargo_item) not in cargo_ids_to_remove]\n\n return money\n","repo_name":"matyasfodor/shipping","sub_path":"dock.py","file_name":"dock.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11338803536","text":"import pandas as pd\nimport sqlite3\n\ndef from_xslx_to_csv(filename, sheet, csvfilename = 'csvfile.csv'):\n data_xls = pd.read_excel(filename, sheet, dtype=str, index_col=None)\n data_xls.to_csv(csvfilename, encoding='utf-8', index=False)\n return csvfilename\n\ndef get_sheet_names(filename):\n xls = pd.ExcelFile(filename)\n return xls.sheet_names\n\ndef 
from_csv_to_dataframe(filename):\n    data = pd.read_csv(filename) \n    return pd.DataFrame(data)\n\ndef connect_dataframe_to_database(dfs, conn):\n    names = get_sheet_names('data.xlsx')\n    i = 0\n    for df in dfs:\n        df.to_sql(name=(names[i].lower()), con = conn)\n        i+=1\n\nif __name__==\"__main__\":\n    dataframes = []\n    conn = sqlite3.connect('data.db')\n\n    # populate the database only if it has no tables yet\n    # (sqlite3.connect never returns None, so checking the connection object is not enough)\n    tables = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table'\").fetchall()\n    if not tables:\n        sheet_names = get_sheet_names('data.xlsx')\n\n        for sheet in sheet_names:\n            csv = from_xslx_to_csv('data.xlsx', sheet,sheet+'.csv')\n            dataframes.append(from_csv_to_dataframe(csv))\n\n        connect_dataframe_to_database(dataframes,conn)\n    sqlQuery = \"SELECT * FROM admissions\"\n    cursor = conn.execute(sqlQuery)\n    for row in cursor:\n        print(row)\n    conn.commit()\n    conn.close()\n\n\n\n    # #Create Database - One Time\n    # conn = sqlite3.connect('data.db')\n    # dataframes = []\n    # sheet_names = get_sheet_names('data.xlsx')\n\n    # for sheet in sheet_names:\n    #     csv = from_xslx_to_csv('data.xlsx', sheet,sheet+'.csv')\n    #     dataframes.append(from_csv_to_dataframe(csv))\n\n    # connect_dataframe_to_database(dataframes,conn)\n    # conn.commit()\n    # conn.close()\n\n    ","repo_name":"HeartBot-Comp0016-Team20/HeartBot","sub_path":"archive/data_extraction/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"31653511375","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nVERSION = '0.0.1'\nDESCRIPTION = 'Topsis implementation'\nsetup(\n    name=\"Topsis-Hemang-101916107\",\n    version=VERSION,\n    author=\"Hemang\",\n    author_email=\"\",\n    description=DESCRIPTION,\n    packages=find_packages(),\n    install_requires=['topsispy', 'numpy', 'pandas'],\n    keywords=['python', 'topsis', 'impacts'],\n    classifiers=[\n        \"Development Status :: 1 - Planning\",\n        \"Intended Audience :: Developers\",\n        \"Programming Language :: Python :: 3\",\n        \"Operating System :: Unix\",\n        \"Operating System :: MacOS :: MacOS X\",\n        \"Operating System :: Microsoft :: Windows\",\n    ]\n)\n","repo_name":"hemang1717/Topsis_Hemang_101916107","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"40960276616","text":"import hidtools.hid\nfrom hidtools.util import BusType\nimport os\nimport select\nimport struct\nimport time\nimport uuid\n\nfrom hidtools.hut import U8, U16, U32\nfrom typing import Any, Callable, Dict, Final, List, Optional, Tuple, Type, Union\n\nfrom pathlib import Path\n\nimport logging\n\nlogger = logging.getLogger(\"hidtools.hid.uhid\")\n\n\nclass UHIDIncompleteException(Exception):\n    \"\"\"\n    An exception raised when a UHIDDevice does not have sufficient\n    information to create a kernel device.\n    \"\"\"\n\n    pass\n\n\nclass UHIDDevice(object):\n    \"\"\"\n    A uhid device. uhid is a kernel interface to create virtual HID devices\n    based on a report descriptor.\n\n    This class also acts as a context manager for any :class:`UHIDDevice`\n    objects. See :meth:`dispatch` for details.\n\n    .. attribute:: uniq\n\n        A uniq string assigned to this device. 
This string is autogenerated\n and can be used to reliably identify the device.\n\n \"\"\"\n\n __UHID_LEGACY_CREATE: Final = 0\n _UHID_DESTROY: Final = 1\n _UHID_START: Final = 2\n _UHID_STOP: Final = 3\n _UHID_OPEN: Final = 4\n _UHID_CLOSE: Final = 5\n _UHID_OUTPUT: Final = 6\n __UHID_LEGACY_OUTPUT_EV: Final = 7\n __UHID_LEGACY_INPUT: Final = 8\n _UHID_GET_REPORT: Final = 9\n _UHID_GET_REPORT_REPLY: Final = 10\n _UHID_CREATE2: Final = 11\n _UHID_INPUT2: Final = 12\n _UHID_SET_REPORT: Final = 13\n _UHID_SET_REPORT_REPLY: Final = 14\n\n UHID_FEATURE_REPORT: Final = 0\n UHID_OUTPUT_REPORT: Final = 1\n UHID_INPUT_REPORT: Final = 2\n\n _polling_functions: Dict[int, Callable[[], None]] = {}\n _poll = select.poll()\n _devices: List[\"UHIDDevice\"] = []\n\n @classmethod\n def dispatch(cls: Type[\"UHIDDevice\"], timeout: Optional[float] = None) -> bool:\n \"\"\"\n Process any events available on any internally registered file\n descriptor and deal with the events.\n\n The caller must call this function regularly to make sure things\n like udev events are processed correctly. There's no indicator of\n when to call :meth:`dispatch` yet, call it whenever you're idle.\n\n :returns: True if data was processed, False otherwise\n \"\"\"\n had_data = False\n devices = cls._poll.poll(timeout)\n while devices:\n for fd, mask in devices:\n if mask & select.POLLIN:\n fun = cls._polling_functions[fd]\n fun()\n devices = cls._poll.poll(timeout)\n had_data = True\n return had_data\n\n @classmethod\n def _append_fd_to_poll(\n cls: Type[\"UHIDDevice\"],\n fd: int,\n read_function: Callable[[], None],\n mask=select.POLLIN,\n ) -> None:\n cls._poll.register(fd, mask)\n cls._polling_functions[fd] = read_function\n\n @classmethod\n def _remove_fd_from_poll(cls: Type[\"UHIDDevice\"], fd: int) -> None:\n cls._poll.unregister(fd)\n\n def __init__(self: \"UHIDDevice\") -> None:\n self._name: Optional[str] = None\n self._phys: Optional[str] = \"\"\n self._rdesc: Optional[List[int]] = None\n self.parsed_rdesc: Optional[hidtools.hid.ReportDescriptor] = None\n self._info: Optional[Tuple[int, int, int]] = None\n self._bustype: Optional[BusType] = None\n self._fd: int = os.open(\"/dev/uhid\", os.O_RDWR)\n self._start = self.start\n self._stop = self.stop\n self._open = self.open\n self._close = self.close\n self._output_report = self.output_report\n self._ready: bool = False\n self._is_destroyed: bool = False\n self._sys_path: Optional[Path] = None\n self.uniq = f\"uhid_{str(uuid.uuid4())}\"\n self.hid_id: int = 0\n self._append_fd_to_poll(self._fd, self._process_one_event)\n UHIDDevice._devices.append(self)\n\n def __enter__(self: \"UHIDDevice\") -> \"UHIDDevice\":\n return self\n\n def __exit__(self: \"UHIDDevice\", *exc_details) -> None:\n if not self._is_destroyed:\n self.destroy()\n\n @property\n def fd(self: \"UHIDDevice\") -> int:\n \"\"\"\n The fd to the ``/dev/uhid`` device node\n \"\"\"\n return self._fd\n\n @property\n def rdesc(self: \"UHIDDevice\") -> Optional[List[int]]:\n \"\"\"\n The device's report descriptor\n \"\"\"\n return self._rdesc\n\n @rdesc.setter\n def rdesc(\n self: \"UHIDDevice\", rdesc: Union[hidtools.hid.ReportDescriptor, str, bytes]\n ):\n if isinstance(rdesc, hidtools.hid.ReportDescriptor):\n self.parsed_rdesc = rdesc\n else:\n if isinstance(rdesc, str):\n rdesc = f\"XXX {rdesc}\"\n self.parsed_rdesc = hidtools.hid.ReportDescriptor.from_string(rdesc)\n else:\n self.parsed_rdesc = hidtools.hid.ReportDescriptor.from_bytes(rdesc)\n if self.parsed_rdesc is not None: # should always be true\n 
self._rdesc = self.parsed_rdesc.bytes\n\n    @property\n    def phys(self: \"UHIDDevice\") -> Optional[str]:\n        \"\"\"\n        The device's phys string\n        \"\"\"\n        return self._phys\n\n    @phys.setter\n    def phys(self: \"UHIDDevice\", phys: str) -> None:\n        self._phys = phys\n\n    @property\n    def name(self: \"UHIDDevice\") -> Optional[str]:\n        \"\"\"\n        The device's HID name\n        \"\"\"\n        return self._name\n\n    @name.setter\n    def name(self: \"UHIDDevice\", name: str) -> None:\n        self._name = name\n\n    @property\n    def info(self: \"UHIDDevice\") -> Optional[Tuple[int, int, int]]:\n        \"\"\"\n        The device's bus, vendor ID and product ID as a tuple\n        \"\"\"\n        return self._info\n\n    @info.setter\n    def info(self: \"UHIDDevice\", info: Tuple[int, int, int]) -> None:\n        self._info = info\n        # In case bus type is passed as 'int', wrap it in BusType.\n        self._bustype = info[0] if isinstance(info[0], BusType) else BusType(info[0])\n\n    @property\n    def bus(self: \"UHIDDevice\") -> Optional[BusType]:\n        \"\"\"\n        The device's bus type :class:`hidtools.util.BusType`\n        \"\"\"\n        return self._bustype\n\n    @property\n    def vid(self: \"UHIDDevice\") -> Optional[int]:\n        \"\"\"\n        The device's 16-bit vendor ID\n        \"\"\"\n        if self._info is None:\n            return None\n        return self._info[1]\n\n    @property\n    def pid(self: \"UHIDDevice\") -> Optional[int]:\n        \"\"\"\n        The device's 16-bit product ID\n        \"\"\"\n        if self._info is None:\n            return None\n        return self._info[2]\n\n    def _call_set_report(self: \"UHIDDevice\", req: int, err: int) -> None:\n        buf = struct.pack(\"< L L H\", UHIDDevice._UHID_SET_REPORT_REPLY, req, err)\n        os.write(self._fd, buf)\n\n    def _call_get_report(self: \"UHIDDevice\", req: U8, data: List[U8], err: int) -> None:\n        bdata = bytes(data)\n        buf = struct.pack(\n            \"< L L H H 4096s\",\n            UHIDDevice._UHID_GET_REPORT_REPLY,\n            req,\n            err,\n            len(bdata),\n            bdata,\n        )\n        os.write(self._fd, buf)\n\n    def call_input_event(self: \"UHIDDevice\", _data: List[int]) -> None:\n        \"\"\"\n        Send an input event from this device.\n\n        :param list data: a list of 8-bit integers representing the HID\n            report for this input event\n        \"\"\"\n        data: bytes = bytes(_data)\n        buf = struct.pack(\"< L H 4096s\", UHIDDevice._UHID_INPUT2, len(data), data)\n        logger.debug(f\"inject {buf[:len(data)]!r}\")\n        os.write(self._fd, buf)\n\n    @property\n    def sys_path(self: \"UHIDDevice\") -> Optional[Path]:\n        \"\"\"\n        The device's /sys path\n        \"\"\"\n        return self._sys_path\n\n    def walk_sysfs(\n        self: \"UHIDDevice\", kind: str, glob: Optional[str] = None\n    ) -> Tuple[Path, ...]:\n        kinds: Final = {\n            \"evdev\": \"input/input*/event*\",\n            \"hidraw\": \"hidraw/hidraw*\",\n        }\n        if glob is None and kind in kinds:\n            glob = kinds[kind]\n        if self._sys_path is None or glob is None:\n            return tuple()\n\n        return tuple(self._sys_path.glob(glob))\n\n    @property\n    def device_nodes(self) -> List[str]:\n        \"\"\"\n        A list of evdev nodes associated with this HID device. Populating\n        this list requires the kernel to process the uhid device, and sometimes\n        the kernel needs to talk to the uhid process.\n        Ensure that :meth:`dispatch` is called and that you wait for some\n        reasonable time after creating the device.\n        \"\"\"\n        return [f\"/dev/input/{e.name}\" for e in self.walk_sysfs(\"evdev\")]\n\n    @property\n    def hidraw_nodes(self) -> List[str]:\n        \"\"\"\n        A list of hidraw nodes associated with this HID device. 
Populating\n this list requires the kernel to process the uhid device, and sometimes\n the kernel needs to talk to the uhid process.\n Ensure that :meth:`dispatch` is called and that you wait for some\n reasonable time after creating the device.\n \"\"\"\n return [f\"/dev/{h.name}\" for h in self.walk_sysfs(\"hidraw\")]\n\n def create_kernel_device(self: \"UHIDDevice\") -> None:\n \"\"\"\n Create a kernel device from this device. Note that the device is not\n immediately ready to go after creation, you must wait for\n :meth:`start` and ideally for :meth:`open` to be called.\n\n :raises: :class:`UHIDIncompleteException` if the device does not\n have a name, report descriptor or the info bits set.\n \"\"\"\n if (\n self._name is None\n or self._rdesc is None\n or self._info is None\n or self._phys is None\n ):\n raise UHIDIncompleteException(\"missing uhid initialization\")\n\n buf = struct.pack(\n \"< L 128s 64s 64s H H L L L L 4096s\",\n UHIDDevice._UHID_CREATE2,\n bytes(self._name, \"utf-8\"), # name\n bytes(self._phys, \"utf-8\"), # phys\n bytes(self.uniq, \"utf-8\"), # uniq\n len(self._rdesc), # rd_size\n self.bus, # bus\n self.vid, # vendor\n self.pid, # product\n 0, # version\n 0, # country\n bytes(self._rdesc),\n ) # rd_data[HID_MAX_DESCRIPTOR_SIZE]\n\n logger.debug(\"creating kernel device\")\n n = os.write(self._fd, buf)\n assert n == len(buf)\n\n # the kernel creates the device in a worker struct\n # when we are here, we might still not have the device created\n # and thus need to wait for incoming events. In practice, this\n # works at the first attempt\n found: Optional[Path] = None\n iterations = 10\n glob = f\"{self.bus:04X}:{self.vid:04X}:{self.pid:04X}.*/uevent\"\n while found is None and iterations > 0:\n iterations -= 1\n uhid_path = Path(\"/sys/devices/virtual/misc/uhid\")\n for p in uhid_path.glob(glob):\n try:\n with open(p) as f:\n for line in f.readlines():\n if not line.startswith(\"HID_UNIQ=\"):\n continue\n if line[9:].strip() == self.uniq:\n found = p\n break\n except FileNotFoundError:\n pass\n except OSError:\n pass\n\n time.sleep(0.001)\n if found is not None:\n self._sys_path = found.parent\n self.hid_id = int(self._sys_path.name[15:], 16)\n self._ready = True\n\n def destroy(self: \"UHIDDevice\") -> None:\n \"\"\"\n Destroy the device. 
The kernel will trigger the appropriate\n messages in response before removing the device.\n\n This function is called automatically on __exit__()\n \"\"\"\n\n if self._ready:\n buf = struct.pack(\"< L\", UHIDDevice._UHID_DESTROY)\n os.write(self._fd, buf)\n self._ready = False\n # equivalent to dispatch() but just for our device.\n # this ensures that the callbacks are called correctly\n poll = select.poll()\n poll.register(self._fd, select.POLLIN)\n while poll.poll(1):\n fun = self._polling_functions[self._fd]\n fun()\n\n UHIDDevice._devices.remove(self)\n self._remove_fd_from_poll(self._fd)\n os.close(self._fd)\n self._is_destroyed = True\n\n def start(self: \"UHIDDevice\", flags: int) -> None:\n \"\"\"\n Called when the uhid device is ready to accept IO.\n\n This message is sent by the kernel, to receive this message you must\n call :meth:`dispatch`\n \"\"\"\n logger.debug(\"start\")\n\n def stop(self: \"UHIDDevice\") -> None:\n \"\"\"\n Called when the uhid device no longer accepts IO.\n\n This message is sent by the kernel, to receive this message you must\n call :meth:`dispatch`\n \"\"\"\n logger.debug(\"stop\")\n\n def open(self: \"UHIDDevice\") -> None:\n \"\"\"\n Called when a userspace client opens the created kernel device.\n\n This message is sent by the kernel, to receive this message you must\n call :meth:`dispatch`\n \"\"\"\n logger.debug(\"open {}\".format(self.sys_path))\n\n def close(self: \"UHIDDevice\") -> None:\n \"\"\"\n Called when a userspace client closes the created kernel device.\n\n Sending events on a closed device will not result in anyone reading\n it.\n\n This message is sent by the kernel, to receive this message you must\n call :meth:`dispatch`\n \"\"\"\n logger.debug(\"close\")\n\n def set_report(\n self: \"UHIDDevice\", req: int, rnum: int, rtype: int, data: List[int]\n ) -> int:\n \"\"\"\n Callback invoked when a process calls SetReport on this UHID device.\n\n Return ``0`` on success or an errno on failure.\n\n The default method always returns ``EIO`` for a failure. 
Override\n this in your device if you want SetReport to succeed.\n\n :param req: the request identifier\n :param rnum: ???\n :param rtype: one of :attr:`UHID_FEATURE_REPORT`, :attr:`UHID_INPUT_REPORT`, or :attr:`UHID_OUTPUT_REPORT`\n :param list data: a byte string with the data\n \"\"\"\n return 5 # EIO\n\n def _set_report(\n self: \"UHIDDevice\", req: int, rnum: int, rtype: int, size: int, data: List[int]\n ) -> None:\n logger.debug(\n \"set report {} {} {} {} {} \".format(\n req, rnum, rtype, size, [f\"{d:02x}\" for d in data[:size]]\n )\n )\n error = self.set_report(req, rnum, rtype, [int(x) for x in data[:size]])\n if self._ready:\n self._call_set_report(req, error)\n\n def get_report(\n self: \"UHIDDevice\", req: int, rnum: int, rtype: int\n ) -> Tuple[int, List[U8]]:\n \"\"\"\n Callback invoked when a process calls SetReport on this UHID device.\n\n Return ``(0, [data bytes])`` on success or ``(errno, [])`` on\n failure.\n\n The default method always returns ``(EIO, [])`` for a failure.\n Override this in your device if you want GetReport to succeed.\n\n :param req: the request identifier\n :param rnum: ???\n :param rtype: one of :attr:`UHID_FEATURE_REPORT`, :attr:`UHID_INPUT_REPORT`, or :attr:`UHID_OUTPUT_REPORT`\n \"\"\"\n return (5, []) # EIO\n\n def _get_report(self: \"UHIDDevice\", req: int, rnum: int, rtype: int) -> None:\n logger.debug(\"get report {} {} {}\".format(req, rnum, rtype))\n error, data = self.get_report(req, rnum, rtype)\n if self._ready:\n self._call_get_report(req, data, error)\n\n def output_report(\n self: \"UHIDDevice\", data: List[int], size: int, rtype: int\n ) -> None:\n \"\"\"\n Callback invoked when a process sends raw data to the device.\n\n :param data: the data sent by the kernel\n :param size: size of the data\n :param rtype: one of :attr:`UHID_FEATURE_REPORT`, :attr:`UHID_INPUT_REPORT`, or :attr:`UHID_OUTPUT_REPORT`\n \"\"\"\n logger.debug(\n \"output {} {} {}\".format(rtype, size, [f\"{d:02x}\" for d in data[:size]])\n )\n\n def _process_one_event(self: \"UHIDDevice\") -> None:\n buf = os.read(self._fd, 4380)\n assert len(buf) == 4380\n evtype = struct.unpack_from(\"< L\", buf)[0]\n if evtype == UHIDDevice._UHID_START:\n ev, flags = struct.unpack_from(\"< L Q\", buf)\n self.start(flags)\n elif evtype == UHIDDevice._UHID_OPEN:\n self._open()\n elif evtype == UHIDDevice._UHID_STOP:\n self._stop()\n elif evtype == UHIDDevice._UHID_CLOSE:\n self._close()\n elif evtype == UHIDDevice._UHID_SET_REPORT:\n ev, req, rnum, rtype, size, data = struct.unpack_from(\n \"< L L B B H 4096s\", buf\n )\n self._set_report(req, rnum, rtype, size, data)\n elif evtype == UHIDDevice._UHID_GET_REPORT:\n ev, req, rnum, rtype = struct.unpack_from(\"< L L B B\", buf)\n self._get_report(req, rnum, rtype)\n elif evtype == UHIDDevice._UHID_OUTPUT:\n ev, data, size, rtype = struct.unpack_from(\"< L 4096s H B\", buf)\n self._output_report(data, size, rtype)\n\n def create_report(\n self: \"UHIDDevice\",\n data: Any,\n global_data=None,\n reportID: Optional[int] = None,\n application: Optional[Union[str, U32]] = None,\n ) -> List[U8]:\n \"\"\"\n Convert the data object to an array of ints representing the report.\n Each property of the given data object is matched against the field\n usage name (think ``hasattr``) and filled in accordingly.::\n\n mouse = MouseData()\n mouse.b1 = int(l)\n mouse.b2 = int(r)\n mouse.b3 = int(m)\n mouse.x = x\n mouse.y = y\n\n data_bytes = uhid_device.create_report(mouse)\n\n The :class:`UHIDDevice` will create the report according to the\n device's 
report descriptor.\n        \"\"\"\n        if self.parsed_rdesc is None:\n            return []\n        return self.parsed_rdesc.create_report(data, global_data, reportID, application)\n","repo_name":"bentiss/hid-tools","sub_path":"hidtools/uhid.py","file_name":"uhid.py","file_ext":"py","file_size_in_byte":18280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"5329203804","text":"import cv2\n\ndef click_event(event, x, y, flags, param):\n    if event == cv2.EVENT_LBUTTONDOWN:\n        cv2.circle(img,(x,y),3,(0,255,255),-1)\n\n        points.append((x,y))\n        if len(points)>=2:\n            cv2.line(img, points[-1], points[-2],(0,0,255),4)\n        cv2.imshow('image',img) \n    elif event == cv2.EVENT_RBUTTONDOWN:\n        # NumPy images are indexed [row, col], i.e. [y, x]\n        blue=img[y,x,0]\n        green=img[y,x,1]\n        red=img[y,x,2]\n        strcol=str(blue)+' '+str(green)+' '+str(red)\n        font=cv2.FONT_HERSHEY_COMPLEX\n        cv2.putText(img,strcol,(x,y),font,1,(255,200,170),2)\n        cv2.imshow('image',img)\n\nimg=cv2.imread('lenaimg.jpg')\n\ncv2.imshow('image',img)\n\npoints=[]\ncv2.setMouseCallback('image',click_event)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"adi673/opencv","sub_path":"opencv1/cv10.py","file_name":"cv10.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"20387659712","text":"from aiogram import html, Router\nfrom aiogram.enums import ChatType\nfrom aiogram.types import InlineQuery, InlineQueryResultArticle, InputTextMessageContent\nfrom fluent.runtime import FluentLocalization\n\nrouter = Router()\n\n\n@router.inline_query()\nasync def inline_mode_handler(query: InlineQuery, l10n: FluentLocalization):\n    result = InlineQueryResultArticle(\n        id=\".\",\n        title=l10n.format_value(\"inline-mode-title\", args={\"id\": query.from_user.id}),\n        description=l10n.format_value(\"inline-mode-description\"),\n        input_message_content=InputTextMessageContent(\n            message_text=l10n.format_value(\"inline-mode-text\", args={\"id\": html.code(query.from_user.id)})\n        )\n    )\n    # Do not forget about is_personal parameter! 
Otherwise, all people will see the same ID\n switch_pm_text = l10n.format_value(\"inline-mode-tryme\") if query.chat_type != ChatType.SENDER else None\n await query.answer(\n results=[result], cache_time=3600, is_personal=True,\n switch_pm_parameter=\"1\", switch_pm_text=switch_pm_text\n )\n","repo_name":"MasterGroosha/my-id-bot","sub_path":"bot/handlers/inline_mode.py","file_name":"inline_mode.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"67"} +{"seq_id":"30895816533","text":"class Tictactoe:\n def __init__(self):\n self.board = [' ' for x in range(9)]\n self.current_move = 0\n\n winning_combos = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6]\n ]\n\n def show_board(self):\n print(f'''\n {self.board[0]} | {self.board[1]} | {self.board[2]} \n-----------\n {self.board[3]} | {self.board[4]} | {self.board[5]} \n-----------\n {self.board[6]} | {self.board[7]} | {self.board[8]} \n''')\n\n def minimax(self, is_max, player):\n if self.won():\n return -1 if is_max else 1\n\n if self.draw():\n return 0\n\n other_player = 'X' if player == 'O' else 'O'\n best_score = float('-inf') if is_max else float('inf')\n for i in range(9):\n if self.valid_move(i):\n self.board[i] = player if is_max else other_player\n score = self.minimax(not is_max, player)\n self.board[i] = ' '\n best_score = max(best_score, score) if is_max else min(\n best_score, score)\n return best_score\n \n def best_move(self):\n best_score = float('-inf')\n move = 0\n for i in range(9):\n if self.valid_move(i):\n self.board[i] = self.current_player()\n score = self.minimax(False, self.current_player())\n self.board[i] = ' '\n\n if score > best_score:\n best_score = score\n move = i\n\n return move\n\n def move_ai(self):\n self.board[self.best_move()] = self.current_player()\n self.current_move += 1\n self.show_board()\n\n def move(self, player):\n if player == 'player':\n try:\n place = int(input('Where do you want to move?')) - 1\n except ValueError:\n print('Must be a number 1-9')\n return\n if place <= 8 and place >= 0 and self.valid_move(place):\n self.board[place] = self.current_player()\n self.current_move += 1\n self.show_board()\n else:\n self.move_ai()\n\n def valid_move(self, place):\n if self.board[place] == ' ':\n return True\n return False\n\n def valid_input(self, user_input):\n try:\n user_input = int(user_input) - 1\n return user_input <= 9 and user_input >= 1\n except ValueError:\n return False\n\n def current_player(self):\n return 'X' if self.current_move % 2 == 0 else 'O'\n\n def won(self):\n for combo in self.winning_combos:\n if self.board[combo[0]] == self.board[combo[1]] and self.board[combo[0]] == self.board[combo[2]] and (self.board[combo[0]] == 'O' or self.board[combo[0]] == 'X'):\n return True\n return False\n\n def winner(self):\n return 'O' if self.current_player() == 'X' else 'X'\n\n def clear(self):\n self.__init__()\n\n def draw(self):\n return ' ' not in self.board and not self.won()\n\n def play(self):\n user_input = input('''Play - 1\nExit - 2\n''')\n while user_input != '2':\n player_x = input('player or ai? ')\n player_o = input('player or ai? 
')\n self.show_board()\n while True:\n player = self.current_player()\n if player == 'X':\n self.move(player_x)\n else:\n self.move(player_o)\n if self.won():\n print(f'{self.winner()} won!')\n self.clear()\n break\n elif self.draw():\n print('There is a draw')\n self.clear()\n break\n user_input = input('''Play - 1\nExit - 2\n''')\n\n\nidk = Tictactoe()\nidk.play()\n","repo_name":"OneTrick475/TicTacToe-with-AI","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"47679006218","text":"import sys\nsys.stdin = open(\"trungthuong.inp\",\"r\")\nsys.stdout = open(\"trungthuong.out\",\"w\")\nn = int(input())\nc = [int(x) for x in input().split()]\ndem = 0\nfor i in range (n - 1):\n x = y = c[i]\n for j in range (i + 1, n):\n if x > c[j]: x = c[j]\n if y < c[j]: y = c[j]\n if (x== min(c[i], c[j])) and (y == max(c[i], c[j])):\n dem += 1\nprint(dem)\n","repo_name":"hoclentop/hoclentop.github.io","sub_path":"code/b22163.py","file_name":"b22163.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15277735573","text":"# -*- coding: utf-8 -*-\n# \n# author: william tiria wamburu | tiriawamburu@gmail.com\n#\n\nimport numpy as np\n\ndef calculate_success_rate(crop, region):\n \"\"\"\n Calculate weights of all minerals, pH and rainfall\n Normalize by dividing (L+N), where L is the highest index, N is the number of inputs\n Calculate Banzhaf power index for all weights\n Calculate overall index by a version of Weighted Majority Algorithm\n Return percentage\n \"\"\"\n crop_minerals = crop.mineral_requirements.all()\n weights = []\n weights.append(region.ph_high-crop.ph_high)\n weights.append(region.ph_low-crop.ph_low)\n for m in crop_minerals:\n if region.mineral_composition.filter(mineral=m.mineral).exists():\n composition = region.mineral_composition.filter(mineral=m.mineral).first()\n weights.append(composition.percentage - m.percentage)\n else:\n weights.append(m.percentage*-1)\n\n min_weight = min(weights)\n if min_weight <= 0:\n diff = 1 - min_weight\n else:\n diff = 0\n\n # Normalize (use lists rather than lazy map objects so the values can be reused below)\n weights = list(map(lambda x: x+diff, weights))\n\n max_weight = max(weights)\n\n # Calculate Banzhaf power index on normalized weights\n weights = list(map(lambda x: x/(max_weight+len(weights)), weights))\n\n # Calculate average percentage\n average = np.mean(weights) * 100\n average = int(average)\n\n return average\n","repo_name":"TiriaWamburu/agriculture","sub_path":"crops/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34196680563","text":"#clear variables\n#from IPython import get_ipython\n#get_ipython().magic('reset -sf')\n\n#import nltk\n#from nltk.corpus import words\n#from nltk.tag import pos_tag\nfrom collections import Counter\n\nimport math\n\nimport copy\n\n\n\n# #letter frequency\n# letterScore = [['a',8.5],['b',2.1],['c',4.5],['d',3.4],['e',11.1],['f',1.8],['g',2.5],['h',3.0],['i',7.5],['j',.2],['k',1.1],['l',5.5],['m',3.0],['n',6.7],['o',7.2],['p',3.2],['q',.2],['r',7.6],['s',5.7],['t',7.0],['u',3.6],['v',1.0],['w',1.3],['x',.3],['y',1.8],['z',.3]]\n# #maxScore = \n# discount = .5\n\n#------------------------------------------------------------------------------\n#N letter words\nnumCharacters = 5\nspaceReduction = 
.25\nprintNum = 5\nroundNum = 6\nwordSource = 'wordleList.txt'\n#wordSource = 'wordleLegalGuess.txt'\n\ngoodSetMade = []\ngreySetMade = []\nyellowSetMade = [[] for _ in range(numCharacters)]\ngreenSetMade = [[] for _ in range(numCharacters)]\n\n\n#------------------------------------------------------------------------------\n\n# #importing english words\n# wordList = words.words()\n# charNList = []\n\n# #slimming word list down to N letter words\n# for i in range(len(wordList)):\n# if len(wordList[i]) == numCharacters:\n# charNList.append(wordList[i])\n\n# #tagging N letter words with grammer type\n# taggedList = pos_tag(charNList)\n\n# #making list of N letter words with no proper nouns\n# charNListNoPN = []\n# for i in range(len(charNList)):\n# if taggedList[i][1] != 'NNP':\n# charNListNoPN.append(taggedList[i][0])\n\n#------------------------------------------------------------------------------\n\n#this is the complete possible wordle list (big time cheating)\nwith open(wordSource,'r') as f:\n wordleList = f.readlines()\ncharNListNoPN = [s.replace(\"\\n\", \"\") for s in wordleList]\n#------------------------------------------------------------------------------\n#functions\n\ndef greyLet(greySetMade,greyGuess):\n \n for i in range(len(greyGuess[0])):\n if greyGuess[0][i] != ' ':\n if greyGuess[0][i] not in greySetMade:\n greySetMade.append(greyGuess[0][i])\n \n return greySetMade\n\ndef yellowLet(goodSetMade,yellowSetMade,yellowGuess):\n \n for i in range(len(yellowGuess[0])):\n if yellowGuess[0][i] != ' ':\n if yellowGuess[0][i] not in goodSetMade:\n goodSetMade.append(yellowGuess[0][i])\n\n for j in range(len(yellowGuess[0])):\n if yellowGuess[0][j] != ' ':\n if (yellowGuess[0][j] not in yellowSetMade[j]):\n yellowSetMade[j].append(yellowGuess[0][j])\n\n return goodSetMade,yellowSetMade\n\ndef greenLet(goodSetMade,greenSetMade,greenGuess):\n\n for i in range(len(greenGuess[0])):\n if greenGuess[0][i] != ' ':\n if greenGuess[0][i] not in goodSetMade:\n goodSetMade.append(greenGuess[0][i])\n\n for j in range(len(greenGuess[0])):\n if greenGuess[0][j] != ' ':\n if (greenGuess[0][j] not in greenSetMade[j]):\n greenSetMade[j] = (greenGuess[0][j])\n \n return goodSetMade,greenSetMade\n\ndef guessListMaker(numCharacters,charNListNoPN,goodSetMade,greySetMade,yellowSetMade,greenSetMade):\n greyListMade = []\n goodListMade = []\n yellowListMade = []\n greenListMade = []\n \n #weed out greySetMade\n for i in range(len(charNListNoPN)):\n status = 1\n for j in range(numCharacters):\n if status == 0:\n break\n if charNListNoPN[i][j] in greySetMade:\n #counting freq. of that letter in greenSetMade\n countsGreen = 0\n countsYellow = 0\n for k in range(numCharacters): \n if bool(greenSetMade[k]) == True:\n if greenSetMade[k] == charNListNoPN[i][j]:\n countsGreen = countsGreen + 1\n if charNListNoPN[i][j] in yellowSetMade[k]:\n countsYellow = countsYellow + 1\n #counting freq. of that letter in word\n countsWord = Counter(charNListNoPN[i])[charNListNoPN[i][j]]\n #only put on greyListMade if freq. in word is greater than freq. 
in greenListMade\n if countsWord > countsGreen and countsYellow == 0: \n status = 0\n if status == 1:\n if charNListNoPN[i] not in greyListMade:\n greyListMade.append(charNListNoPN[i])\n\n #pare down to yellowListMade\n for i in range(len(greyListMade)):\n status = 1\n for j in range(numCharacters):\n if greyListMade[i][j] in yellowSetMade[j]:\n status = 0\n if status == 0:\n break\n if status == 1:\n if greyListMade[i] not in yellowListMade:\n yellowListMade.append(greyListMade[i])\n \n #pare down to goodListMade\n for i in range(len(yellowListMade)):\n status = 1\n for j in range(len(goodSetMade)):\n if goodSetMade[j] not in yellowListMade[i]:\n status = 0\n if status == 0:\n break\n if status == 1:\n if yellowListMade[i] not in goodListMade:\n goodListMade.append(yellowListMade[i])\n \n #pare down to greenListMade\n for i in range(len(goodListMade)):\n status = 1\n for j in range(numCharacters):\n if (bool(greenSetMade[j]) == True) and (goodListMade[i][j] != greenSetMade[j]):\n status = 0\n if status == 0:\n break\n if status == 1:\n if goodListMade[i] not in greenListMade:\n greenListMade.append(goodListMade[i])\n \n return greyListMade,goodListMade,yellowListMade,greenListMade\n\n#assigning average bits of uncertainty to each word against greenWordList\ndef wordUnc(numCharacters,listUnc,listUnc2,greenSetUnc,yellowSetUnc,greySetUnc,goodSetUnc):\n wordUncSumList = [0]*len(listUnc)\n wordUncAvgList = [0]*len(listUnc)\n \n for i in range(len(listUnc)):\n \n wordUncSum = 0\n \n for j in range(len(listUnc2)):\n greySetTemp = copy.deepcopy(greySetUnc)\n yellowSetTemp = copy.deepcopy(yellowSetUnc)\n greenSetTemp = copy.deepcopy(greenSetUnc)\n goodSetTemp = copy.deepcopy(goodSetUnc)\n \n if listUnc[i] != listUnc2[j]:\n \n #assigning greenSetTemp, yellowSetTemp, greySetTemp, and goodSetTemp for that word\n wordUnc = 0\n for k in range(numCharacters):\n if listUnc[i][k] == listUnc2[j][k]:\n #green set + good set\n if listUnc[i][k] not in goodSetTemp:\n goodSetTemp.append(listUnc[i][k])\n \n if listUnc[i][k] not in greenSetTemp[k]:\n greenSetTemp[k] = listUnc[i][k]\n else:\n\n if listUnc[i][k] in listUnc2[j]:\n \n #yellow set + good set\n if listUnc[i][k] not in goodSetTemp:\n goodSetTemp.append(listUnc[i][k])\n \n if listUnc[i][k] not in yellowSetTemp[k]:\n yellowSetTemp[k].append(listUnc[i][k])\n else:\n #grey set\n if listUnc[i][k] not in greySetTemp:\n greySetTemp.append(listUnc[i][k])\n \n greyListTemp,goodListTemp,yellowListTemp,greenListTemp = guessListMaker(numCharacters,listUnc,goodSetTemp,greySetTemp,yellowSetTemp,greenSetTemp)\n \n # print('Control Word: ' + str(listUnc[i]))\n # print('Target Word: ' + str(listUnc[j]))\n # print('greyListTemp ' + str(len(greyListTemp)))\n # print('goodListTemp ' + str(len(goodListTemp)))\n # print('yellowListTemp ' + str(len(yellowListTemp)))\n # print('greenListTemp ' + str(len(greenListTemp)))\n \n wordUnc = len(greenListTemp)\n wordUncSum = wordUncSum + wordUnc\n \n wordUncSumList[i] = wordUncSum\n print('word ' + str(i) + '/' + str(len(wordUncSumList)-1))\n \n #creating sorted list of avg. 
uncertainties w/ word\n wordUncAvgListValues = [x / len(wordUncSumList) for x in wordUncSumList]\n for l in range(len(wordUncAvgListValues)):\n wordUncAvgList[l] = [wordUncAvgListValues[l],listUnc[l]]\n wordUncAvgListSorted = sorted(wordUncAvgList, reverse=False)\n \n return wordUncAvgListSorted\n#----------------------------------------------------------------------------- \n#main \n\nwordSpaceList = [0]*(roundNum+1)\nwordSpaceList[0] = len(charNListNoPN)\n\nfor round in range(roundNum):\n \n greyPrompt = (input('enter your grey letters this round\\n')).lower()\n greyGuess = [greyPrompt]\n yellowPrompt = (input('enter your yellow letters this round\\n')).lower()\n yellowGuess = [yellowPrompt]\n greenPrompt = (input('enter green letters this round\\n')).lower()\n greenGuess = [greenPrompt]\n\n greySetMade = greyLet(greySetMade,greyGuess)\n goodSetMade,yellowSetMade = yellowLet(goodSetMade,yellowSetMade,yellowGuess)\n goodSetMade,greenSetMade = greenLet(goodSetMade,greenSetMade,greenGuess)\n greyListMade,goodListMade,yellowListMade,greenListMade = guessListMaker(numCharacters,charNListNoPN,goodSetMade,greySetMade,yellowSetMade,greenSetMade)\n \n wordSpaceList[round+1] = len(greenListMade)\n \n wordUncAvgListSorted = wordUnc(numCharacters,greenListMade,greenListMade,greenSetMade,yellowSetMade,greySetMade,goodSetMade)\n \n print('--------------------------------------')\n \n print('space size history: ' + str(wordSpaceList))\n \n if (min(wordUncAvgListSorted)[0] >= spaceReduction*wordSpaceList[round+1]) and (wordSpaceList[round+1] > 2):\n wordUncAvgListSorted = wordUnc(numCharacters,yellowListMade,greenListMade,greenSetMade,yellowSetMade,greySetMade,goodSetMade)\n print('estimated new space size: ' + str(min(wordUncAvgListSorted)[0]))\n print('yellowList used')\n else:\n print('estimated new space size: ' + str(min(wordUncAvgListSorted)[0]))\n print('greenList used')\n \n print('\\n')\n \n\n if len(wordUncAvgListSorted) <= 5:\n printNum = len(wordUncAvgListSorted)\n for i in range(printNum):\n print(wordUncAvgListSorted[i])\n print('round finished')\n print('--------------------------------------')\n input(\"Press Enter to continue...\")\n ","repo_name":"mnsilvern/Word-Solver","sub_path":"Word Solver Variants/wordSolver_answer/wordSolver.py","file_name":"wordSolver.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6130492790","text":"import nltk\nfrom nltk.corpus import LazyCorpusLoader, BracketParseCorpusReader\nfrom nltk.grammar import Production, ProbabilisticProduction, PCFG\nfrom nltk import Tree, Nonterminal\nimport matplotlib.pyplot as plt\n\n\ndef simplify_functional_tag(tag):\n if tag == \"-NONE-\":\n return tag\n if '-' in tag:\n tag = tag.split('-')[0]\n return tag\n\ntreebank = LazyCorpusLoader('treebank/combined', BracketParseCorpusReader, r'wsj_.*\\.mrg')\n\n\ndef get_tag(tree):\n if isinstance(tree, Tree):\n return Nonterminal(simplify_functional_tag(tree.label()))\n else:\n return tree\n\ndef tree_to_production(tree):\n return Production(get_tag(tree), [get_tag(child) for child in tree])\n\ndef tree_to_productions(tree):\n yield tree_to_production(tree)\n for child in tree:\n if isinstance(child, Tree):\n for prod in tree_to_productions(child):\n yield prod\n\n\ndef plot_freq(pcount):\n dict_vals = {}\n vals = pcount.values()\n for x in vals:\n dict_vals[x] = dict_vals.get(x,0) + 1\n\n s = sorted(dict_vals, key=dict_vals.get)\n print(dict_vals)\n x = s[-10:]\n y = []\n 
for x0 in x:\n y.append(dict_vals[x0])\n\n plt.bar(x,y)\n plt.show()\n\n\ndef pcfg_learn(treebank, n):\n trees = treebank.parsed_sents()[:n]\n pcount = {}\n lcount = {}\n for s in trees:\n curr = tree_to_productions(s)\n for prod in curr:\n if not (\"-NONE-\" in str(prod.lhs()) or \"-NONE-\" in str(prod.rhs())):\n lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1\n pcount[prod] = pcount.get(prod, 0) + 1\n\n prods = [\n ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()])\n for p in pcount\n ]\n\n incount = 0\n for p in prods:\n if len(p.rhs()) and not(isinstance(p.rhs()[0],Nonterminal)):\n incount += 1\n\n print(prods)\n print(\"number of internal nodes: \" + str(incount))\n print(\"number of productions: \" + str(len(pcount)))\n plot_freq(pcount)\n return PCFG(Nonterminal(\"S\"), prods)\n\nprint(\"------ 200 trees:\")\nres = pcfg_learn(treebank,200)\nprint(\"------ 400 trees:\")\nres = pcfg_learn(treebank,400)\n\n\n\n\n#--------------------------------------2.3-------------------------------------------------------\n\n\ndef pcfg_cnf_learn(treebank, n):\n trees = treebank.parsed_sents()[:n]\n pcount = {}\n lcount = {}\n for s in trees:\n nltk.treetransforms.chomsky_normal_form(s, factor='right', horzMarkov=1, vertMarkov=1, childChar='|',\n parentChar='^')\n curr = tree_to_productions(s)\n for prod in curr:\n if not (\"-NONE-\" in str(prod.lhs()) or \"-NONE-\" in str(prod.rhs())):\n lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1\n pcount[prod] = pcount.get(prod, 0) + 1\n prods = [\n ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()])\n for p in pcount\n ]\n\n print(prods)\n\n print(\"number of productions: \" + str(len(pcount)))\n plot_freq(pcount)\n return PCFG(Nonterminal(\"S\"), prods)\n\n# print(\"------ 200 trees:\")\n# #\n# g = pcfg_cnf_learn(treebank,500)\n# print(\"is cnf? 
\" + str(g.is_chomsky_normal_form()))\n# #---- 3.1\n#\n#\n# vp=nltk.parse.viterbi.ViterbiParser(g)\n# t = vp.parse([\"Pierre\", \"Vinken\", \"will\", \"join\", \"the\", \"board\"])\n# for st in t:\n# print(st)\n","repo_name":"mosheShechner/NLP_HW3","sub_path":"obsolete/Q2.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25832058165","text":"import threading\nimport socket\nfrom Output import sound\n\n\nclass SOCK:\n\n def __init__(self,main):\n self.main = main\n self.pc = PC(main)\n self.start_sck()\n self.addrs = {\"192.168.0.173\":self.pc}\n self.thread = None\n\n\n\n def start_sck(self):\n self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object\n self.host = \"0.0.0.0\" # Get local machine name\n self.port = 10010 # Reserve a port for your service.\n self.sck.bind((self.host, self.port))\n self.sck.listen(1)\n self.thread = threading.Thread(target=self.accept_conns)\n self.thread.daemon = True\n self.thread.start()\n\n\n def accept_conns(self):\n while 1:\n try:\n con, addr = self.sck.accept()\n obj = self.addrs[addr[0]]\n obj.establish_conn(con,addr)\n except KeyError:\n print(\"Unknown IP\")\n except:\n print(\"Error accepting connection\")\n\n\nclass PC:\n\n def __init__(self,main):\n self.main = main\n self.addr = None\n self.con = None\n self.active = False\n self.recver = threading.Thread(target=self.recv)\n self.recver.daemon = True\n self.recver.start()\n self.SOUND = sound.SOUND()\n\n def establish_conn(self,con,addr):\n self.addr = addr\n self.con = con\n self.active = True\n print(\"PC connected\")\n self.SOUND.TTS(\"connected\")\n\n def recv(self):\n while 1:\n if self.active:\n try:\n data = self.con.recv(1024)\n data = str(data,\"utf-8\")\n self.handle_data(data)\n except ConnectionResetError:\n print(\"PC disconnected\")\n self.SOUND.TTS(\"disconnected\")\n self.active = False\n\n def handle_data(self,data):\n try:\n print(data)\n app, cont = data.split(\":::\")\n if app == \"SPOTIFY\":\n self.main.d_print(cont,5)\n except:\n print(\"RECV ERROR\")\n\n\n","repo_name":"TheRealCubeAD/BANDO","sub_path":"SmartHome/sock.py","file_name":"sock.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24454560417","text":"import discord\nimport os\nfrom decouple import config\nfrom discord.ext import commands\nfrom keep_alive import keep_alive\n\nintents = discord.Intents.default()\nintents.members = True\n\nDISCORD_TOKEN = config(\"DISCORD_TOKEN\")\n\nbot = commands.Bot(command_prefix=\"-\", description=\"Bot de lazer force uwu\", intents=intents)\n\nfor file_name in os.listdir(\"./cogs\"):\n if file_name.endswith(\".py\"):\n bot.load_extension(f\"cogs.{file_name[:-3]}\")\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(\"El comando no existe o lo escribiste mal :eyes:\")\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Game(\"-help\"))\n for guild in bot.guilds:\n print(f\"{bot.user} has connected to {guild.name}!\")\n\nkeep_alive()\nbot.run(DISCORD_TOKEN)\n","repo_name":"NicolasMafla/Lazer-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33486544627","text":"import 
json\nimport os\n\nimport requests\nfrom wagtail import hooks\nfrom django.utils.translation import gettext as _\n\nfrom editable_admin.wagtail_hooks import ListedBulkAction\nfrom .models import ResourceType, Resource\nfrom django.conf import settings\n\n\n@hooks.register(\"register_bulk_action\")\nclass UpdateResourcesBulkAction(ListedBulkAction):\n display_name = _(\"Update Resource\")\n aria_label = _(\"Update all Resources\")\n action_type = \"update_resources\"\n template_name = \"visualizer/admin/confirm_update_action.html\"\n models = [ResourceType]\n\n def __init__(self, *args, **kwargs):\n from .admin import ResourceTypeAdmin\n super().__init__(*args, **kwargs)\n self.next_url = ResourceTypeAdmin().url_helper.get_action_url('index')\n\n @classmethod\n def _load_from_server(cls, resource_type):\n next_page = os.path.join(settings.FHIR_ENDPOINT, resource_type)\n\n while next_page:\n response = requests.get(next_page)\n\n if response.status_code == 200:\n json_data = response.json()\n for entry in json_data['entry']:\n resource = entry[\"resource\"]\n default_values = {\n \"resource_type\": resource['resourceType'],\n \"name\": resource['id']\n }\n Resource.objects.update_or_create(\n **default_values,\n defaults={\n **default_values,\n \"data\": json.dumps(resource)\n }\n )\n try:\n next_page = next(x['url'] for x in json_data[\"link\"] if x['relation'] == 'next')\n except StopIteration:\n next_page = None\n\n else: raise Exception()\n\n\n\n @classmethod\n def execute_action(cls, objects, **kwargs):\n objects[0]._meta.model.objects.all().delete()\n for obj in objects:\n cls._load_from_server(obj.name)\n\n return len(objects), 0 # return the count of updated objects\n","repo_name":"Capa00/django_fhir_apps","sub_path":"visualizer/wagtail_hooks.py","file_name":"wagtail_hooks.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74033073812","text":"from turtle import *\r\nfrom random import *\r\n#a function to move the turtle to a random position\r\ndef aleatoria():\r\n penup()\r\n setpos(randint(-400,400),randint(-400,400))\r\n pendown()\r\n\r\n#a function to draw a star of a specific size\r\ndef estrela(tamanho,cor):\r\n color(cor)\r\n pendown()\r\n begin_fill()\r\n for lado in range(5):\r\n left(144)\r\n forward(tamanho)\r\n end_fill()\r\n penup()\r\n#a function to draw a small galaxy of stars\r\ndef galaxia(nestrelas):\r\n cores=[\"#054385\",\"#0275A6\",\"827E01\"]\r\n aleatoria()\r\n#draws several small colored stars\r\n for estrelas in range(nestrelas-1):\r\n estrela(randint(7,15),\"white\")\r\n pendown()\r\n left(randint(-90,90))\r\n forward(randint(30,70))\r\n#now we draw the last star\r\n estrela(randint(7,15),\"white\")\r\n\r\nspeed(11)\r\n\r\n#this draws a dark blue background\r\nbgcolor(\"MidnightBlue\")\r\n\r\n#draws 30 random white stars\r\nfor estrelas in range(30):\r\n aleatoria()\r\n estrela(randint(5,25),\"white\")\r\n\r\n#we draw 3 small galaxies of 40 stars\r\nfor galaxias in range(3):\r\n galaxia(40)\r\n\r\n#draws 2 constellations, each with a random number of stars\r\nfor constelacoes in range(2):\r\n 
estrela(randint(4,7),\"white\")\r\nhideturtle()\r\ndone()\r\nprint(\"Done\")\r\n","repo_name":"haraldyy/teaching_turtles","sub_path":"desenhando_constelacoes.py","file_name":"desenhando_constelacoes.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1045553652","text":"import ast\n\n\nclass Transformer(ast.NodeTransformer):\n ALLOWED_NAMES = {'Decimal', 'None', 'False', 'True', 'null'}\n ALLOWED_NODE_TYPES = {'Expression', 'Tuple', 'Call', 'Name', 'Load', 'Str', 'Num', 'List', 'Dict'}\n\n def visit_Name(self, node):\n if node.id not in self.ALLOWED_NAMES:\n raise RuntimeError(\"Name access to %s is not allowed\" % node.id)\n\n # traverse to child nodes\n return self.generic_visit(node)\n\n def generic_visit(self, node):\n nodetype = type(node).__name__\n if nodetype not in self.ALLOWED_NODE_TYPES:\n raise RuntimeError(\"Invalid expression: %s not allowed\" % nodetype)\n\n return ast.NodeTransformer.generic_visit(self, node)\n","repo_name":"ilyaonishenko/checkers","sub_path":"server/src/Transformer.py","file_name":"Transformer.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"228377314","text":"#Here mutable parameters are used, that is, ones that change: by means of a required parameter, the second parameter is updated\ndef lista(arg, result=None):\n if result is None:\n result = []\n result.append(arg)\n print(result)\n\n#Exercise: I took this exercise and passed the function the list with the remaining days of the week\n\ndiasRestantes = ['sábado','viernes','jueves','miércoles','martes','lunes']\n\nfor dia in ['domingo'] + diasRestantes:\n lista(dia)\n\n\n","repo_name":"MariaFernandaMolanoml/python","sub_path":"pythonEjercicios/hojaDef6.py","file_name":"hojaDef6.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28594887300","text":"__author__ = 'z00500762'\n\nimport argparse\nimport os\nimport json\nimport sys\nimport logging\n\n\nclass SplitJson:\n compile_commands_success = list()\n compile_commands_fail = list()\n\n def __init__(self, input_json):\n self.input = input_json\n\n @staticmethod\n def validate(execution):\n if \"arguments\" not in execution:\n return False\n if \"directory\" not in execution:\n return False\n if \"exec_result\" not in execution:\n return False\n return True\n\n def get_compile_commands(self):\n compile_commands = list()\n try:\n with open(self.input, \"r\", encoding='utf-8', errors='ignore') as json_file:\n compile_commands = json.load(json_file)\n if len(compile_commands) == 0:\n logging.info(\"compile commands json file is empty: %s\", self.input)\n except IOError as exception:\n logging.error(\"open compile commands json file failed: %s\", exception)\n except json.decoder.JSONDecodeError as exception:\n logging.error(\"json decode file failed: %s\", exception)\n\n return compile_commands\n\n def split_commands(self):\n compile_commands = self.get_compile_commands()\n for item in compile_commands:\n if not self.validate(item):\n logging.info(\"discard invalid commands: %s\", str(item))\n continue\n if not item.get(\"rebuild\"):\n self.compile_commands_success.append(item)\n else:\n self.compile_commands_fail.append(item)\n self.write_json()\n\n def write_json(self):\n compile_commands_success_file = os.path.splitext(self.input)[0] + 
\".json\"\n compile_commands_fail_file = os.path.splitext(self.input)[0] + \".fail.json\"\n with open(compile_commands_success_file, 'w+') as fw:\n json.dump(self.compile_commands_success, fw, sort_keys=False, indent=4)\n\n with open(compile_commands_fail_file, 'w+') as fw:\n json.dump(self.compile_commands_fail, fw, sort_keys=False, indent=4)\n\n\ndef main(input_json):\n if not os.path.isabs(input_json):\n input_json = os.path.join(os.getcwd(), input_json)\n if not os.path.exists(input_json):\n logging.error(\"compile_command_file not exists : %s\", input_json)\n return -1\n\n sj = SplitJson(input_json)\n sj.split_commands()\n\n\nif __name__ == \"__main__\":\n cmd_parser = argparse.ArgumentParser(description=\"split compile commands json\")\n cmd_parser.add_argument(\n '-i', '--input', dest='input_json', metavar='store', action='store',\n help='json to split'\n )\n args = cmd_parser.parse_args()\n sys.exit(main(args.input_json))\n","repo_name":"openeuler-mirror/A-FOT","sub_path":"split_json.py","file_name":"split_json.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72417683095","text":"import re\nimport six\n\nfrom django.db.models.sql import compiler\nfrom django.utils.six.moves import zip_longest\nfrom django.db.utils import DatabaseError\n\n\nclass Counter(object):\n \"\"\"\n Class counting calls.\n \n Example:\n \n > c1 = Counter()\n > c1()\n 1\n > c1()\n 2\n \n > c2 = Counter(lambda c: \"-%d-\" % (c,))\n > c2()\n '-1-'\n > c2()\n '-2-'\n \n \"\"\"\n\n def __init__(self, out_function=None):\n \"\"\"\n \n Args:\n out_function - method(counter)\n \"\"\"\n self.counter = 0\n self.out_function = out_function\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Return:\n method(counter, *args, **kwargs)\n OR\n counter - int\n \"\"\"\n self.counter += 1\n if self.out_function:\n return self.out_function(self.counter, *args, **kwargs)\n else:\n return self.counter\n\n\nre_param = re.compile('%s')\nre_param_table_name = re.compile('(\"[^\"]+\")\\.(\"[^\"]+\")')\nre_none = re.compile('([^\"\\']\\s*)(None)(\\s*[^\"\\'])')\nre_offset = re.compile('OFFSET\\s+(\\d+)')\nre_limit = re.compile('LIMIT\\s+(\\d+)')\n\n\n\nclass SQLCompiler(compiler.SQLCompiler):\n\n def resolve_columns(self, row, fields=()):\n values = [self.query.convert_values(v, f, connection=self.connection)\n for v, f in zip(row, fields)]\n return tuple(values)\n\n def _as_sql(self, with_limits=True, with_col_aliases=False):\n \"\"\"\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n\n If 'with_limits' is False, any limit/offset information is not included\n in the query.\n \"\"\"\n if with_limits and self.query.low_mark == self.query.high_mark:\n return '', ()\n\n self.pre_sql_setup()\n # After executing the query, we must get rid of any joins the query\n # setup created. 
So, take note of alias counts before the query ran.\n # However we do not want to get rid of stuff done in pre_sql_setup(),\n # as the pre_sql_setup will modify query state in a way that forbids\n # another run of it.\n self.refcounts_before = self.query.alias_refcount.copy()\n out_cols, s_params = self.get_columns(with_col_aliases)\n ordering, o_params, ordering_group_by = self.get_ordering()\n\n distinct_fields = self.get_distinct()\n\n # This must come after 'select', 'ordering' and 'distinct' -- see\n # docstring of get_from_clause() for details.\n from_, f_params = self.get_from_clause()\n\n qn = self.quote_name_unless_alias\n\n where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)\n having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)\n having_group_by = self.query.having.get_cols()\n params = []\n for val in six.itervalues(self.query.extra_select):\n params.extend(val[1])\n\n result = ['SELECT']\n\n if self.query.distinct:\n result.append(self.connection.ops.distinct_sql(distinct_fields))\n params.extend(o_params)\n result.append(', '.join(out_cols + self.ordering_aliases))\n params.extend(s_params)\n params.extend(self.ordering_params)\n\n result.append('FROM')\n result.extend(from_)\n params.extend(f_params)\n\n if where:\n result.append('WHERE %s' % where)\n params.extend(w_params)\n\n grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)\n if grouping:\n raise DatabaseError('Grupping is not supported on this database backend.')\n\n if having:\n raise DatabaseError('Having is not supported on this database backend.')\n\n grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)\n# if grouping:\n# if distinct_fields:\n# raise NotImplementedError(\n# \"annotate() + distinct(fields) not implemented.\")\n# if not ordering:\n# ordering = self.connection.ops.force_no_ordering()\n# result.append('GROUP BY %s' % ', '.join(grouping))\n# params.extend(gb_params)\n#\n# if having:\n# result.append('HAVING %s' % having)\n# params.extend(h_params)\n\n if where and ordering:\n result.append('ORDER BY %s' % ', '.join(ordering))\n\n if with_limits:\n if self.query.high_mark is not None:\n result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))\n if self.query.low_mark:\n raise DatabaseError('OFFSET is not supported on this database backend.')\n# if self.query.high_mark is None:\n# val = self.connection.ops.no_limit_value()\n# if val:\n# result.append('LIMIT %d' % val)\n# result.append('OFFSET %d' % self.query.low_mark)\n\n if self.query.select_for_update and self.connection.features.has_select_for_update:\n # If we've been asked for a NOWAIT query but the backend does not support it,\n # raise a DatabaseError otherwise we could get an unexpected deadlock.\n nowait = self.query.select_for_update_nowait\n if nowait and not self.connection.features.has_select_for_update_nowait:\n raise DatabaseError('NOWAIT is not supported on this database backend.')\n result.append(self.connection.ops.for_update_sql(nowait=nowait))\n\n # Finally do cleanup - get rid of the joins we created above.\n self.query.reset_refcounts(self.refcounts_before)\n\n return ' '.join(result), tuple(params)\n\n def as_sql(self):\n #query, params = super(SQLCompiler, self).as_sql()\n query, params = self._as_sql()\n\n if params:\n # correct parameters \"%s\" => :d, where x = 1,2,3,...\n param_counter = Counter(lambda c, m: \":d%s \" % (c,))\n query = re_param.sub(param_counter, query)\n params = dict([('d%s' % (k), v) for k, v 
in enumerate(params, 1)])\n\n # remove the name of the table from parameters\n query = re_param_table_name.sub(lambda m: m.groups()[1], query)\n\n return query, params\n\n def placeholder(self, field, val):\n if field is None:\n # A field value of None means the value is raw.\n return val\n else:\n # Return the common case for the placeholder\n if isinstance(val, six.string_types):\n return \"'%s'\" % val\n else:\n return \"%s\" % str(val)\n\n\nclass SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):\n\n def __init__(self, *args, **kwargs):\n self.return_id = False\n super(SQLInsertCompiler, self).__init__(*args, **kwargs)\n\n placeholder = SQLCompiler.placeholder\n\n def as_sql(self):\n\n opts = self.query.get_meta()\n has_fields = bool(self.query.fields)\n fields = self.query.fields if has_fields else [opts.pk]\n\n insert_template = 'INSERT INTO %s ' % opts.db_table\n insert_template += '(%s)' % ', '.join((f.column for f in fields))\n\n if has_fields:\n values = (\n (\n f.get_db_prep_save(\n getattr(obj, f.attname)\n if self.query.raw else f.pre_save(obj, True),\n connection=self.connection)\n for f in fields\n )\n for obj in self.query.objs\n )\n else:\n # one-element tuple; tuple(...) over the returned string would split it into characters\n values = ((self.connection.ops.pk_default_value(),) for obj in self.query.objs)\n fields = (None,)\n\n can_bulk = (not any(hasattr(field, \"get_placeholder\") for field in fields) and\n not self.return_id and self.connection.features.has_bulk_insert)\n\n # ###\n # [ [:d or , :d or , ...], ...]\n # ###\n placeholders = (\n (self.placeholder(field, value)\n for field, value in zip(fields, val))\n for val in values\n )\n\n return_inserts = (\n \" \".join(\n (insert_template,) +\n (\"VALUES (%s)\" % \", \".join(vals),) +\n (';',))\n for vals in placeholders\n )\n\n if can_bulk:\n return_list = ('BEGIN BATCH',)\n return_list += tuple(return_inserts)\n return_list += ('APPLY BATCH',)\n return ' '.join(return_list)\n else:\n return ' '.join(return_inserts)\n\n def execute_sql(self, return_id=False):\n assert not (return_id and len(self.query.objs) != 1)\n self.return_id = return_id\n cursor = self.connection.cursor()\n sql = self.as_sql()\n cursor.execute(sql)\n if not (return_id and cursor):\n return\n if self.connection.features.can_return_id_from_insert:\n return self.connection.ops.fetch_returned_insert_id(cursor)\n return self.connection.ops.last_insert_id(cursor,\n self.query.get_meta().db_table, self.query.get_meta().pk.column)\n\n\nclass SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):\n pass\n\n\nclass SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):\n as_sql = SQLCompiler.as_sql\n\n\nclass SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):\n pass\n\n\nclass SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):\n as_sql = SQLCompiler.as_sql\n\n\nclass SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):\n as_sql = SQLCompiler.as_sql\n\n","repo_name":"fizista/django-cassandra","sub_path":"django_cassandra/db/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":9663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"13237760692","text":"import sys\nimport requests\n\ndef api_call(hash):\n url = \"http://endpoint.com/api/hash\"\n p = {\"q\" : hash}\n r = requests.get(url,params=p)\n print(r.json()[\"hash\"])\n\ndef main(argv):\n api_call(argv[0])\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])","repo_name":"murata0531/Competitive-programming","sub_path":"opt/example/hash_api_call.py","file_name":"hash_api_call.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44727836815","text":"from cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom passlib.apps import custom_app_context as pwd_context\nimport csv\nimport io\nimport urllib\n\nfrom tempfile import gettempdir\n\nfrom helpers import *\n\n# configure application\napp = Flask(__name__)\n\n# ensure responses aren't cached\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = gettempdir()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n@app.route(\"/\")\n@login_required\ndef index():\n id = session.get('user_id')\n\n stock_purchases = db.execute(\"SELECT * FROM purchases WHERE id = :id\", id=id)\n\n #https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table\n #http://stackoverflow.com/questions/10974937/how-to-set-dynamically-the-width-of-a-html-table-column-according-to-its-text-co\n return render_template(\"index.html\", stock_purchases=stock_purchases)\n\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n if request.method == \"POST\":\n #http://stackoverflow.com/questions/32640090/python-flask-keeping-track-of-user-sessions-how-to-get-session-cookie-id\n id = session.get('user_id')\n\n url_start = 'http://download.finance.yahoo.com/d/quotes.csv?s='\n url_middle = request.form[\"symbol\"]\n url_end = '&f=nsl1d1t1c1ohgv&e=.csv'\n full_url = url_start + url_middle + url_end\n\n # http://stackoverflow.com/questions/21351882/reading-data-from-a-csv-file-online-in-python-3\n response = urllib.request.urlopen(full_url)\n\n datareader = csv.reader(io.TextIOWrapper(response))\n quote_list = list(datareader)\n\n num_shares = request.form[\"num_shares\"]\n\n name = quote_list[0][0]\n symbol = quote_list[0][1]\n price = float(quote_list[0][2])\n\n #http://stackoverflow.com/questions/12078571/jinja-templates-format-a-float-as-comma-separated-currency\n total_cost = round((float(price) * 100.0) * float(num_shares) / 100.0,2)\n\n username = db.execute(\"SELECT username FROM users WHERE id = :id\", id=id)\n username = username[0]\n username = username.get('username')\n\n db.execute(\"INSERT INTO purchases (id, symbol, name, shares, price, total) VALUES(:id, :symbol, :name, :shares, :price, :total)\",\n id=id, symbol=symbol, name=name, price=price, shares=num_shares, total=total_cost)\n\n return render_template(\"bought.html\", username=username, id=id, name=name, symbol=symbol, price=price, num_shares=num_shares, total_cost=total_cost)\n else:\n return render_template(\"buy.html\")\n\n@app.route(\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions.\"\"\"\n return apology(\"TODO\")\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user 
in.\"\"\"\n\n # forget any user_id\n session.clear()\n\n # if user reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\")\n\n # ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\")\n\n # query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n\n # ensure username exists and password is correct\n if len(rows) != 1 or not pwd_context.verify(request.form.get(\"password\"), rows[0][\"hash\"]):\n return apology(\"invalid username and/or password\")\n\n # remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # redirect user to home page\n return redirect(url_for(\"index\"))\n\n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out.\"\"\"\n\n # forget any user_id\n session.clear()\n\n # redirect user to login form\n return redirect(url_for(\"login\"))\n\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n if request.method == \"POST\":\n\n url_start = 'http://download.finance.yahoo.com/d/quotes.csv?s='\n url_middle = request.form[\"symbol\"]\n url_end = '&f=nsl1d1t1c1ohgv&e=.csv'\n full_url = url_start + url_middle + url_end\n\n\n # http://stackoverflow.com/questions/21351882/reading-data-from-a-csv-file-online-in-python-3\n response = urllib.request.urlopen(full_url)\n\n datareader = csv.reader(io.TextIOWrapper(response))\n quote_list = list(datareader)\n\n name = quote_list[0][0]\n symbol = quote_list[0][1]\n price = quote_list[0][2]\n\n return render_template(\"quote_display.html\", name=name, symbol=symbol, price=price)\n else:\n return render_template(\"quote.html\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n my_hash = pwd_context.hash(request.form[\"hash\"])\n\n db.execute(\"INSERT INTO users (username, hash) VALUES(:username, :hash)\",\n username=request.form[\"username\"], hash=my_hash)\n return render_template(\"login.html\")\n else:\n return render_template(\"register.html\")\n\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n if request.method == \"POST\":\n order_num = request.form[\"order_num\"]\n #https://www.w3schools.com/tags/tryit.asp?filename=tryhtml5_input_type_hidden\n #https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table\n #http://stackoverflow.com/questions/10974937/how-to-set-dynamically-the-width-of-a-html-table-column-according-to-its-text-co\n return redirect(url_for(\"sellselected\", order_num=order_num))\n else:\n id = session.get('user_id')\n\n stock_purchases = db.execute(\"SELECT * FROM purchases WHERE id = :id\", id=id)\n\n #https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table\n #http://stackoverflow.com/questions/10974937/how-to-set-dynamically-the-width-of-a-html-table-column-according-to-its-text-co\n return render_template(\"sell.html\", stock_purchases=stock_purchases)\n\n\n@app.route(\"/sellselected/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sellselected(order_num):\n stock_to_sell = db.execute(\"SELECT * FROM purchases WHERE order_num = :order_num\", order_num=order_num)\n if request.method == \"POST\":\n # NOT DONE, WORKING ON GET 
FIRST\n #order_num = request.form[\"order_num\"]\n #minus_shares = request.form[\"sell_num\"]\n #command = db.execute(\"UPDATE purchases SET shares=7 WHERE order_num=order_num\")\n\n #https://www.w3schools.com/tags/tryit.asp?filename=tryhtml5_input_type_hidden\n #https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table\n #http://stackoverflow.com/questions/10974937/how-to-set-dynamically-the-width-of-a-html-table-column-according-to-its-text-co\n return redirect(url_for(\"index\"))\n else:\n #NO FORMATTING YET, JUST ATTEMPTING TO PASS STOCK_TO_SELL TO TEMPLATE\n #https://developer.mozilla.org/en-US/docs/Web/HTML/Element/table\n #http://stackoverflow.com/questions/10974937/how-to-set-dynamically-the-width-of-a-html-table-column-according-to-its-text-co\n return render_template(\"sellselected.html\", stock_to_sell=stock_to_sell, order_num=order_num)\n","repo_name":"jamesdylangoldstein/finance","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9709648221","text":"from __future__ import absolute_import, print_function, division, unicode_literals\nfrom builtins import super, str\n\nimport os\nimport time\nimport shutil\nimport shlex\nimport logging\nimport psutil\nfrom datetime import datetime\nfrom absl import flags\nfrom typing import List, Deque\nfrom contextlib import contextmanager\nfrom collections import deque\n\nfrom .config import SalusConfig\nfrom ..utils.compatiblity import pathlib, subprocess as sp\nfrom ..utils import ServerError, Popen, execute, kill_tree, kill_hard, remove_prefix, try_with_default\nfrom ..utils import prompt\n\n\nPath = pathlib.Path\nFLAGS = flags.FLAGS\nlogger = logging.getLogger(__name__)\nflags.DEFINE_string('server_endpoint', 'zrpc://tcp://127.0.0.1:5501', 'Salus server endpoint to listen on')\nflags.DEFINE_boolean('no_server', False, \"Don't start Salus server, just print out the command and wait for the user\")\nflags.DEFINE_string('server_args', '', 'Extra arguments to Salus server')\nflags.DEFINE_string('server_save_output', '', 'Capture and save server output to the given path')\n\n\nclass SalusServer(object):\n\n def __init__(self, cfg):\n # type: (SalusConfig) -> None\n super().__init__()\n\n self.config = cfg\n self.env = os.environ.copy()\n self.env.update(cfg.env)\n if 'CUDA_VISIBLE_DEVICES' not in self.env:\n self.env['CUDA_VISIBLE_DEVICES'] = '1'\n if 'TF_CPP_MIN_LOG_LEVEL' not in self.env:\n self.env['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n if cfg.save_outerr:\n FLAGS.server_save_output = 'server'\n\n self.endpoint = FLAGS.server_endpoint # type: str\n\n # normalize build_dir before doing any path finding\n self.config.build_dir = cfg.build_dir.resolve(strict=True)\n self._build_cmd()\n\n self.proc = None # type: Popen\n\n def _find_executable(self):\n # type: () -> str\n \"\"\"Find the absolute path to server executable, according to 'config.build_type'\"\"\"\n candidates = [\n self.config.build_dir / self.config.build_type / 'src' / 'executor',\n self.config.build_dir / self.config.build_type / 'src' / 'salus-server',\n self.config.build_dir / self.config.build_type / 'bin' / 'executor',\n self.config.build_dir / self.config.build_type / 'bin' / 'salus-server',\n self.config.build_dir / self.config.build_type.lower() / 'src' / 'executor',\n self.config.build_dir / self.config.build_type.lower() / 'src' / 'salus-server',\n self.config.build_dir / self.config.build_type.lower() / 'bin' / 'executor',\n 
self.config.build_dir / self.config.build_type.lower() / 'bin' / 'salus-server',\n ]\n for path in candidates:\n if os.access(str(path), os.X_OK):\n return str(path)\n raise ServerError(f'Cannot find server executable, examined candidates are: {candidates}')\n\n def _find_logconf(self):\n # type: () -> str\n \"\"\"Find the absolute path to the logconf file specified in 'config.logconf'.\n\n First try to use 'config.logconf_dir' is specified.\n Second try walk up and find project dir\n \"\"\"\n logconf_dir = self.config.logconf_dir\n if logconf_dir is None:\n for p in self.config.build_dir.parents:\n if not (p / 'README.md').exists():\n continue\n logconf_dir = p / 'scripts' / 'logconf'\n if logconf_dir is None:\n raise ServerError('Cannot find logconf dir')\n\n if not logconf_dir.is_dir():\n raise ServerError(f'Logconf dir does not exist: {logconf_dir}')\n\n logconf = (logconf_dir / self.config.logconf).with_suffix('.config')\n if not logconf.exists():\n raise ServerError(f\"Requested logconf `{self.config.logconf}'does not exist in logconf_dir: {logconf_dir}\")\n return str(logconf)\n\n def _build_cmd(self):\n # type: () -> List[str]\n \"\"\"Build commandline using 'config' information\"\"\"\n self.args = []\n\n if self.config.use_nvprof:\n self.args += [\n 'nvprof',\n '--export-profile', str(self.config.output_dir / 'profile.prof'),\n ]\n\n self.args += [\n self._find_executable(),\n '--listen', remove_prefix(self.endpoint, 'zrpc://'),\n '--logconf', self._find_logconf(),\n '--sched', self.config.scheduler,\n ]\n self.args += self.config.extra_args\n\n # handle extra args from cmd\n self.args += shlex.split(FLAGS.server_args)\n\n if self.config.use_gperf:\n self.env['SALUS_PROFILE'] = '/tmp/gperf.out'\n self.args += ['--gperf']\n else:\n self.env['SALUS_PROFILE'] = ''\n\n if self.config.disable_adc:\n self.args.append('--disable-adc')\n if self.config.disable_wc:\n self.args.append('--disable-wc')\n\n return self.args\n\n @contextmanager\n def run(self):\n # type: () -> None\n \"\"\"Run server\"\"\"\n outputfiles = [Path(p) for p in ['/tmp/server.output',\n '/tmp/perf.output',\n '/tmp/alloc.output',\n '/tmp/gperf.out',\n 'verbose.log']]\n stdout, stderr = None, None\n if self.config.hide_output:\n stdout, stderr = sp.DEVNULL, sp.DEVNULL\n # remove any existing output\n for f in outputfiles:\n if f.exists():\n f.unlink()\n\n # assert output_dir exists\n assert(self.config.output_dir.is_dir())\n\n if FLAGS.server_save_output:\n captured_stdout_path = self.config.output_dir / Path(FLAGS.server_save_output).with_suffix('.stdout')\n captured_stderr_path = self.config.output_dir / Path(FLAGS.server_save_output).with_suffix('.stderr')\n stdout, stderr = captured_stdout_path.open('w'), captured_stderr_path.open('w')\n\n # noinspection PyBroadException\n try:\n if FLAGS.no_server:\n print('Start server with the following command:')\n prefixs = ['SALUS', 'TF', 'CUDA']\n special_envs = ' '.join(f'{k}=\"{v}\"' for k, v in self.env.items() if any(k.startswith(p) for p in prefixs))\n print('env ' + special_envs + ' ' + ' '.join(self.args))\n prompt.pause()\n else:\n logger.info(f'Starting server with cmd: {self.args}')\n # start\n self.proc = execute(self.args, env=self.env, stdin=sp.DEVNULL, stdout=stdout, stderr=stderr)\n\n # wait for a while for the server to be ready\n # FUTURE: make the server write a pid file when it's ready\n time.sleep(5)\n\n logger.info(f'Started server with pid: {self.proc.pid}')\n\n # make self the current server\n with self.as_current():\n yield\n except Exception as ex:\n 
logger.error(f'Got exception while running the server: {ex!s}')\n finally:\n self.kill()\n\n if FLAGS.server_save_output:\n stdout.close()\n stderr.close()\n\n # move back server log files\n for f in outputfiles:\n if f.exists():\n if f.stat().st_size == 0:\n f.unlink()\n else:\n target = self.config.output_dir/f.name\n if target.exists():\n target.unlink()\n shutil.move(str(f), str(target))\n\n _current = deque() # type: Deque[SalusServer]\n\n @contextmanager\n def as_current(self):\n SalusServer._current.append(self)\n yield self\n SalusServer._current.pop()\n\n @classmethod\n def has_current(cls):\n # type: () -> bool\n return len(cls._current) > 0\n\n @classmethod\n def current_server(cls):\n # type: () -> SalusServer\n try:\n return cls._current[-1]\n except IndexError:\n raise ServerError('No current running server')\n\n def check(self):\n # type: () -> None\n \"\"\"Check that the server is healthy and running\"\"\"\n if FLAGS.no_server:\n return\n\n if self.proc is None:\n raise ServerError('Server is not yet started')\n if self.proc.poll() is not None:\n out, err = self.proc.communicate()\n msg = [f'Server died unexpectedly with return code: {self.proc.returncode}']\n if out is not None:\n msg.append(f'\\nStandard output:\\n{out}')\n if err is not None:\n msg.append(f'\\nStandard error:\\n{err}')\n raise ServerError('\\n'.join(msg))\n\n def kill(self):\n # type: () -> None\n \"\"\"Kill the server\"\"\"\n if FLAGS.no_server:\n return\n\n if self.proc is None or self.proc.poll() is not None:\n logger.warning('Server already died or is not yet started')\n self.proc = None\n return\n\n logger.info(f'Killing server with pid: {self.proc.pid}')\n _, alive = kill_tree(self.proc, timeout=self.config.kill_timeout)\n if alive:\n prompt.confirm('Server did not respond in time, do you want to kill hard?')\n logger.info(f'Force killing server with pid: {self.proc.pid}')\n kill_hard(alive)\n\n self.proc = None\n\n @classmethod\n def wait_workloads(cls, workloads, timeout=None, callback=None):\n \"\"\"Wait workloads, raise if server died\"\"\"\n if callback is None:\n def done(proc):\n logger.info(f'Workload {proc.workload.canonical_name} exited with {proc.returncode}')\n\n callback = done\n\n gone = []\n alive = [w.proc for w in workloads]\n enter = datetime.now()\n while alive:\n if SalusServer.has_current():\n SalusServer.current_server().check()\n\n g, alive = psutil.wait_procs(alive, timeout=.25, callback=callback)\n gone += g\n\n if timeout is not None and (datetime.now() - enter).total_seconds() >= timeout:\n break\n\n return [p.workload for p in gone], [p.workload for p in alive]\n","repo_name":"SymbioticLab/Salus","sub_path":"benchmarks/driver/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10380,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"67"} +{"seq_id":"31840385675","text":"\"\"\"This is the test for trigrams.py.\"\"\"\nimport pytest\nimport string\n\n# Punctuation that should be removed.\nPARAMS_PUNCTUATION = string.punctuation + '\\n'\nPARAMS_TEST_DICT = {\n \"i wish\": [\"i\", \"i\"],\n \"wish i\": [\"may\", \"might\"],\n \"may i\": [\"wish\"],\n \"i may\": [\"i\"]\n}\nPARAMS_TEST_STRING = \"i wish i may i wish i might\".split()\nPARAMS_TEST_DICT_UNIQUE = {\n \"this very\": [\"day\"],\n \"very day\": [\"i\"],\n \"day i\": [\"left\"],\n \"i left\": [\"to\"],\n \"left to\": [\"play\"],\n \"to play\": [\"in\"],\n \"play in\": [\"the\"],\n \"in the\": [\"month\"],\n \"the month\": [\"of\"],\n \"month 
of\": [\"may\"],\n}\nPARAMS_TEST_STRING_UNIQUE = \"this very day i left to play in the month of may\".split()\nPARAMS_NUM_WORDS = [1, 2, 3, 4, 5, 6, 10, 100, 1000]\nPARAMS_TEST_TEXT = \"Sherlock_Holmes.txt\"\n\ndef test_open_file():\n \"\"\"Test open file.\"\"\"\n from trigrams import open_file\n assert type(open_file('Sherlock_Holmes_short.txt')) is str\n\n\ndef test_string_format_islowercase():\n \"\"\"Tests string is lowercase.\"\"\"\n from trigrams import format_text, open_file\n assert format_text(open_file('Sherlock_Holmes_short.txt')).islower()\n\n\n@pytest.mark.parametrize(\"n\", PARAMS_PUNCTUATION)\ndef test_string_format_punctuation(n):\n \"\"\"Tests string formatting.\"\"\"\n from trigrams import format_text, open_file\n assert n not in format_text(open_file('Sherlock_Holmes_short.txt'))\n\n\ndef test_make_tri_dict():\n \"\"\"Test tri_dict(), verify format of dictionary.\"\"\"\n from trigrams import make_tri_dict\n assert make_tri_dict(PARAMS_TEST_STRING) == PARAMS_TEST_DICT\n\n\ndef test_make_tri_dict_unique():\n \"\"\"Test tri_dict() in case of input with all unique values for keys.\"\"\"\n from trigrams import make_tri_dict\n assert make_tri_dict(PARAMS_TEST_STRING_UNIQUE) == PARAMS_TEST_DICT_UNIQUE\n\n\n@pytest.mark.parametrize(\"n\", PARAMS_NUM_WORDS)\ndef test_build_text(n):\n \"\"\"Test build_text to see if it is the correct number of words.\"\"\"\n from trigrams import build_text\n assert len(build_text(PARAMS_TEST_DICT, n).split()) == n\n","repo_name":"iamrobinhood12345/trigrams","sub_path":"src/test_trigrams.py","file_name":"test_trigrams.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4260211248","text":"from apscheduler.schedulers.blocking import BlockingScheduler\nimport tweepy\nimport re\nimport string\nimport pickle\n\n# Load pre-trained SVM with its corresponding Word Vectorizer\nwith open('Models\\\\vectorizer.pickle', 'rb') as f:\n vectorizer = pickle.load(f)\n\nwith open('Models\\\\model.pickle', 'rb') as f:\n model = pickle.load(f)\n\ndef process_text(latest_tweet):\n # pre-process the tweet\n latest_tweet = latest_tweet.lower() # everything lower case\n latest_tweet = re.sub(r'\\d+', '', latest_tweet) # remove numbers\n translator = str.maketrans('', '', string.punctuation)\n latest_tweet = latest_tweet.translate(translator) # remove punctuation\n latest_tweet = latest_tweet.replace('.', '') # remove all periods\n\n return latest_tweet\n\n\ndef tweet_update():\n search_words = \"#blacklivesmatter\"\n importantBostonAccts = ['BlmBoston', 'ACLU_Mass', 'UCBoston',\n 'ViolenceNBoston', 'MassBailFund',\n 'LiveBoston617', 'BostonGlobe',\n 'SunriseBoston', 'AyannaPressley',\n 'BostonTweet']\n importantWords = ['donate', 'protest',\n 'AM', 'PM', 'vigil', 'petition',\n 'meeting', 'demonstration',\n 'march', 'assembly', 'pledge',\n 'next week', 'today', 'tomorrow']\n\n badWords = 'alllivesmatter'\n\n # authentication keys for your personal Developer Twitter account:\n consumer_key = '#####'\n consumer_secret = '#####'\n\n # authentication keys for the account to tweet from:\n access_token = '#####'\n access_token_secret = '#####'\n\n # Set up OAuth and integrate with API\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api_tweet = tweepy.API(auth) # create the object to send out tweets\n\n # get the tweets\n for account in importantBostonAccts:\n print('Evaluating latest tweet for 
{0}'.format(account))\n to_retweet = False\n user = api_tweet.get_user(account)\n id_acct = user.id\n # get latest tweets\n tweets = api_tweet.user_timeline(id_acct)\n # latest tweet will be the first one\n latest_tweet = tweets[0].text\n latest_tweet = process_text(latest_tweet)\n\n\n # check to see if any of the relevant key words are inside the latest tweet\n for word in importantWords:\n if word in latest_tweet and badWords not in latest_tweet:\n to_retweet = True\n print('It has been evaluated to be an important tweet')\n\n if to_retweet:\n try:\n tweets[0].retweet()\n print('ReTweeted')\n\n except Exception as e:\n print('Tried but failed to retweet')\n print('The exception is : {0}'.format(str(e)))\n\n\n else:\n print('Not an important tweet, moving on to next account')\n\n print('==============================')\n print('EVALUATING TWEETS WITH HASHTAGS')\n # look at all recent tweets with hashtag blmprotest and see if any of them contain the word boston to narrow\n # down to specifically boston related things\n tweets = tweepy.Cursor(api_tweet.search, q=search_words, lang=\"en\").items(100)\n for tweet in range(100):\n to_retweet = False\n curr_tweet = tweets.next()\n curr_tweet_text = process_text(curr_tweet.text)\n\n # use our pre-trained SVM to determine whether this tweet contains actionable information\n if model.predict(vectorizer.transform([curr_tweet_text]))[0]:\n to_retweet = True\n\n if to_retweet:\n try:\n print('Important Tweet with hashtag')\n curr_tweet.retweet()\n print(\"Retweeted\")\n except Exception as e:\n print('Important but Failed ')\n print('Failed because: {0}'.format(str(e)))\n else:\n print('Tweet not important')\n\n print('DONE')\n\nsched = BlockingScheduler()\nsched.add_job(tweet_update, 'cron', minute='*/5')\nsched.start()\n","repo_name":"hermessuen/tweetForActivism","sub_path":"twitterBot.py","file_name":"twitterBot.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17319666149","text":"from enum import Enum\n\n\nclass msgCode(Enum):\n NO_COMMAND = (\"NO_COMMAND\", \"无该指令返回\")\n TARGET_USER_NOT_HAVE_CARD = (\"TARGET_USER_NOT_HAVE_CARD\", \"[{EXT1}]还没有选择好角色卡哦。\")\n SENDER_NOT_FROM_GROUP = (\"SENDER_NOT_FROM_GROUP\", \"发送者不是来源于群聊\")\n NO_ACHIEVE_CMD = (\"NO_ACHIEVE_CMD\", \"该指令功能尚未完成\")\n ILLEGAL_FORMAT = (\"ILLEGAL_FORMAT\", \"格式非法\")\n RD_ILLEGAL_FORMAT = (\"RD_ILLEGAL_FORMAT\", \"RD格式非法\")\n RD_RESULT = (\"RD_RESULT\", \"RD结果\")\n MAKE_CARD_COC7 = (\"MAKE_CARD_COC7\", \"角色卡作成\")\n NO_CARD = (\"NO_CARD\", \"[{USERNAME}]的包包中没有角色卡哦!\")\n SAVE_CARD_SUCCESS = (\"SAVE_CARD_SUCCESS\", \"成功保存角色卡\")\n UPDATE_CARD_SUCCESS = (\"UPDATE_CARD_SUCCESS\", \"成功更新角色卡\")\n SWITCH_CARD_SUCCESS = (\"SWITCH_CARD_SUCCESS\", \"成功切换角色卡\")\n NOT_FOUND_CARD = (\"NOT_FOUND_CARD\", \"从[{USERNAME}]的包包里找不到名为{RESULT}的卡哦(?)\")\n NOT_FOUND_CARD_PROP = (\"NOT_FOUND_CARD_PROP\", \"找不到该角色卡属性\")\n CARD_NAME_TOO_LONG = (\"CARD_NAME_TOO_LONG\", \"卡名过长\")\n CARD_NOW_USED_SO_CANT_REMOVE = (\"CARD_NOW_USED_SO_CANT_REMOVE\", \"不可删除当前正在使用的角色卡\")\n CARD_LOCKED_BY_THIS_GROUP = (\"CARD_LOCKED_BY_THIS_GROUP\", \"角色卡已在本群锁定\")\n CARD_LOCKED_BY_OTHER_GROUP = (\"CARD_LOCKED_BY_OTHER_GROUP\", \"角色卡[{PCNAME}]在{RESULT}中被锁定了\")\n ST_RM_SUCCESS = (\"ST_RM_SUCCESS\", \"[{USERNAME}]从的包包中将名为[{PCNAME}]的角色卡丢进了垃圾桶\")\n ST_LOCK_SUCCESS = (\"ST_LOCK_SUCCESS\", \"已将本群的角色卡锁定为:{PCNAME}\")\n ST_IS_LOCKED = (\"ST_IS_LOCKED\", \"本群的角色卡已锁定为{PCNAME},如需切换请先使用.st unlock解锁角色卡\")\n ST_UNLOCK = (\"ST_UNLOCK\", \"已解锁[{USERNAME}]当前角色卡\")\n 
ST_UNLOCK_TARGET_GROUP_NO_LOCK = (\n        \"ST_UNLOCK_TARGET_GROUP_NO_LOCK\", \"[{USERNAME}]解锁角色卡失败,目标群聊[{EXT1}]没有相关角色卡锁定信息\")\n    ROLL_FAIL = (\"ROLL_FAIL\", \"掷骰出错了\")\n    ROLL_CHECK_SUCCESS = (\"ROLL_CHECK_SUCCESS\", \"掷骰检定成功\")\n    ROLL_CHECK_HARD_SUCCESS = (\"ROLL_CHECK_HARD_SUCCESS\", \"掷骰检定困难成功\")\n    ROLL_CHECK_EXT_HARD_SUCCESS = (\"ROLL_CHECK_EXT_HARD_SUCCESS\", \"掷骰检定极难成功\")\n    ROLL_CHECK_GREAT_SUCCESS = (\"ROLL_CHECK_GREAT_SUCCESS\", \"掷骰检定大成功\")\n    ROLL_CHECK_FAIL = (\"ROLL_CHECK_FAIL\", \"掷骰检定失败\")\n    ROLL_CHECK_GREAT_FAIL = (\"ROLL_CHECK_GREAT_FAIL\", \"掷骰检定大失败\")\n    SC_CHECK_SUCCESS = (\"SC_CHECK_SUCCESS\", \"sc检定成功\")\n    SC_CHECK_GREAT_SUCCESS = (\"SC_CHECK_GREAT_SUCCESS\", \"sc检定大成功\")\n    SC_CHECK_FAIL = (\"SC_CHECK_FAIL\", \"sc检定失败\")\n    SC_CHECK_GREAT_FAIL = (\"SC_CHECK_GREAT_FAIL\", \"sc检定大失败\")\n    RD_BEFORE = (\"RD_BEFORE\", \"RD延迟骰预告\")\n    SHOW_CARD_INFO = (\"SHOW_CARD_INFO\", \"查看角色卡详情\")\n    SHOW_CARD_LIST = (\"SHOW_CARD_LIST\", \"查看角色卡列表\")\n    RH_TO_PRIVATE = (\"RH_TO_PRIVATE\", \"暗骰私聊消息\")\n    RH_TO_GROUP = (\"RH_TO_GROUP\", \"暗骰群聊消息\")\n    RP_OR_RB_FORMAT_FAIL = (\"RP_OR_RB_FORMAT_FAIL\", \"RP、RB格式错误\")\n    DICE_SET_NOT_ADMIN = (\"DICE_SET_NOT_ADMIN\", \"发送者不是群管理员\")\n    DICE_SET_HELP = (\"DICE_SET_HELP\", \"群设置帮助字符串\")\n    DICE_SET_ON = (\"DICE_SET_ON\", \"骰功能已开启\")\n    DICE_SET_OFF = (\"DICE_SET_OFF\", \"骰功能已关闭\")\n    DICE_SET_SECRET_ON = (\"DICE_SET_SECRET_ON\", \"已开启秘密团模式\")\n    DICE_SET_SECRET_OFF = (\"DICE_SET_SECRET_OFF\", \"已关闭秘密团模式\")\n    DICE_SET_ISNOTICE_ON = (\"DICE_SET_ISNOTICE_ON\", \"已开启本群为团贴公告扩散群\")\n    DICE_SET_ISNOTICE_OFF = (\"DICE_SET_ISNOTICE_OFF\", \"已关闭本群为团贴公告扩散群\")\n    DICE_SET_MODE = (\"DICE_SET_MODE\", \"设置DICE默认模式\")\n    DICE_SET_RULE = (\"DICE_SET_RULE\", \"设置本群默认房规\")\n    DICE_SET_DICETYPE = (\"DICE_SET_DICETYPE\", \"设置本群默认DICETYPE\")\n    ST_HELP = (\"ST_HELP\", \"获取ST相关的指令帮助\")\n    GROUP_NO_ONE = (\"GROUP_NO_ONE\", \"唔?你说的是谁?群里边有 {RESULT} 这个人吗?\")\n    TEAM_NO_ONE = (\"TEAM_NO_ONE\", \"唔?你说的是谁?队里边有 {RESULT} 这个人吗?\")\n    TEAM_ADD_SUCCESS = (\"TEAM_ADD_SUCCESS\", \"已将{RESULT}添加到本群队伍中\")\n    TEAM_RM_SUCCESS = (\"TEAM_RM_SUCCESS\", \"已将{RESULT}从本群队伍中移除\")\n    TEAM_IN_LOCK = (\"TEAM_IN_LOCK\", \"{EXT1}已锁定角色卡[{PCNAME}],请先解锁角色卡\")\n    TEAM_LIST = (\"TEAM_LIST\", \"当前队伍列表:{RESULT}\")\n    TEAM_SHOW = (\"TEAM_SHOW\", \"{PCNAME}的属性如下:\\n{RESULT}\")\n    TEAM_CLR = (\"TEAM_CLR\", \"{RESULT}的队伍已清空\")\n    TEAM_CALL = (\"TEAM_CALL\", \"{NICKNAME}正在帮助[{PCNAME}]释放鸽子大召唤术!\\n{RESULT}\")\n    TEAM_PROP = (\"TEAM_PROP\", \"将[{PCNAME}]的{EXT1}调整为:{RESULT}\")\n    TEAM_LOCK_SUCCESS = (\"TEAM_LOCK_SUCCESS\", \"已将队伍中的全部角色卡锁定\")\n    TEAM_UNLOCK_SUCCESS = (\"TEAM_UNLOCK_SUCCESS\", \"已将队伍中的全部角色卡解锁\")\n    LOGS_START_SUCCESS = (\"LOGS_START_SUCCESS\", \"已开启名为{result}的日志记录,记得使用.log off暂时关闭哦。\")\n    LOGS_START_FAIL = (\"LOGS_START_FAIL\", \"输入的logName包含非法字符\")\n    LOGS_OFF_SUCCESS = (\"LOGS_OFF_SUCCESS\", \"已暂时关闭名为{result}的日志记录,可以使用.log on/get {result}开启或获取哦。\")\n    LOGS_NOT_HAVE_NAME = (\"LOGS_NOT_HAVE_NAME\", \"必须告诉{NICKNAME}日志叫什么名字呀\")\n    LOGS_NOT_STARTED = (\"LOGS_NOT_STARTED\", \"目前没有名为{RESULT}的日志处于正在记录状态\")\n    LOGS_IS_PREPARING = (\"LOGS_IS_PREPARING\", \"{NICKNAME}正在整理、准备包裹,请等待...\")\n    LOGS_SEND_SUCCESS = (\"LOGS_SEND_SUCCESS\", \"{NICKNAME}已将包含日志文件的邮件包裹寄出给地址:[{result}]啦,请注意查收~\")\n","repo_name":"opaup/pamo-zhenxun-onedice","sub_path":"em/msgCode.py","file_name":"msgCode.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"69984525013","text":"import os\nimport glob\nimport random\nimport math\nimport time\nimport gc\nfrom optparse import 
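Editor's note — each msgCode member's value is a (code, message-template) pair. A minimal usage sketch; the code/render helpers and the trimmed stand-in enum are illustrative, not part of the original repo:

from enum import Enum

class DemoMsgCode(Enum):  # trimmed stand-in for the full enum above
    TEAM_LIST = ("TEAM_LIST", "当前队伍列表:{RESULT}")

    @property
    def code(self):
        return self.value[0]

    def render(self, **kwargs):
        # fill the {PLACEHOLDER} slots used throughout the templates above
        return self.value[1].format(**kwargs)

print(DemoMsgCode.TEAM_LIST.code)                         # TEAM_LIST
print(DemoMsgCode.TEAM_LIST.render(RESULT="Alice, Bob"))  # 当前队伍列表:Alice, Bob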
OptionParser\n\nclass ruleMining:\n\n def __init__(self):\n self.filename = \"FV3.txt\"\n self.cleanFile = \"cleanFV3.txt\"\n self.appearances = \"appearances.txt\"\n self.output_file = \"rules.txt\"\n\n #Make format to what is needed by Apriori and write it out to newFV3.txt\n def cleanUp(self):\n inf = open(self.filename,'r')\n linedump = inf.readlines()\n inf.close()\n ouf = open(self.cleanFile,'w')\n line = linedump[0]\n newData = []\n# for line in linedump:\n firstRecord = line[1:line.find(\">\")]\n# print \"first\",firstRecord\n newData = firstRecord[1:firstRecord.find(\">\")].split(\",\")\n secondRecord = line[line.find(\">\"):]\n# print \"second\",secondRecord\n newData.extend(secondRecord[1:secondRecord.find(\">\")].split(\",\"))\n thirdRecord = secondRecord[secondRecord.find(\">\"):]\n# print \"third\",thirdRecord\n newData.extend(thirdRecord[1:thirdRecord.find(\">\")].split(\",\"))\n# print \"newData\", newData\n newData1 = [a.strip(\" \") for a in newData]\n print (newData1)\n strippedData = [a.strip(\"'\") for a in newData1]\n print (strippedData)\n for word in strippedData:\n ouf.write(str(word))\n ouf.write(\" \")\n ouf.write(\"\\n\")\n ouf.close()\n\n #Call apriori script to mine rules\n def callApriori(self, cluster, sup, conf, num):\n #Write cluster to file infile\n ouf = open(\"infile.txt\",'w') \n for i in range(len(cluster)):\n ouf.write(cluster[i])\n ouf.write(\"\\n\")\n ouf.close()\n outfile = \"rules\" + str(num) + \".txt\"\n #cluster is a list of strings\n if len(cluster) == 1:\n return (cluster[0][cluster[0].rfind(\",\")+1:])\n elif (len(cluster) > 1 and len(cluster) < 10):\n return (\"defaultClassLabel0\")\n elif (len(cluster)<20):\n command = \"apriori.exe -k, -m2 -tr -o -s90 -c70\" + \" infile.txt \" + outfile \n elif (len(cluster)>=20 and len(cluster)<100):\n command = \"apriori.exe -k, -m2 -tr -o -s70 -c70\" + \" infile.txt \" + outfile\n else:\n command = \"apriori.exe -k, -m2 -tr -o -s\" + str(sup) + \" -c\" + str(conf) + \" infile.txt \" + outfile \n print (\"Running command: \", command)\n print (\"Cluster was size:\" + str(len(cluster)))\n os.system(command)\n #return list of rule file names \n return outfile\n \n #Creates the rule classifiers by pruning the rules and ordering them by confidence followed by support \n def generateClassifiers(self, ruleFiles, classes):\n ruleClassifiers = []\n for file in ruleFiles:\n if \".txt\" not in file:\n ruleClassifiers.append([file])\n continue\n inf = open(file, \"r\")\n linedump = inf.readlines()\n inf.close()\n if len(linedump) < 2: \n ruleClassifiers.append([\"defaultLabel0\"])\n continue\n if len(linedump[-1]) < 3: del linedump[-1] #Remove empty line\n \n #Prune unwanted rules\n classFrequencies = {}\n goodRules = []\n for aprioriRule in linedump:\n (isValid, rule) = self.isValidRule(aprioriRule, classes)\n if isValid: \n goodRules.append(rule)\n if rule[1] in classFrequencies: classFrequencies[rule[1]] += 1\n else: classFrequencies[rule[1]] = 1\n \n orderedRules = sorted(goodRules, key = lambda x: (x[3], x[2]), reverse=True)\n if len(classFrequencies) == 0: \n dominantClass = \"defaultLabel0\"\n else:\n dominantClass = max(classFrequencies, key=classFrequencies.get)\n orderedRules.append(dominantClass)\n ruleClassifiers.append(orderedRules)\n del linedump\n gc.collect()\n return ruleClassifiers\n \n #Checks to see if the rule generated from the Apriori rule is valid and prunes \n def isValidRule(self, rule, classes):\n arrow = rule.find(\"<\")\n paren = rule.find(\"(\", arrow)\n classification = 
rule[:arrow].replace(\" \",\"\")\n words = rule[arrow+2:paren].replace(\" \",\"\").split(\",\")\n support = float(rule[paren+1:rule.find(\",\", paren)])\n confidence = float(rule[rule.rfind(\",\")+1:rule.rfind(\")\")].strip(\" \"))\n \n if classification == '' or words[0] == '': return (False, [])\n if classification not in classes: return (False, []) #Remove if the consequent is a word and not a class\n for word in words: #If any classes are in the antecedent, remove them\n if word in classes: return (False, [])\n \n return (True, [words, classification, support, confidence])\n \n#Call kmeans progrmam to cluster\ndef callKMeans(trainingClusteringList, numClusters, distance):\n # return [[],[]]\n #Generate Weka .arff file\n createWekaFile(trainingClusteringList) #file name is reuters.arff\n # Run java file to cluster\n command = \"java -jar ./KMeansClustering.jar reuters.arff \"+ str(numClusters) + \" \" + str(distance);\n #command = \"javac KMeansClustering.jar reuters.arff \"+ str(numClusters) + \" \" + str(distance);\n os.system(command)\n # Read centroids from file and return list of lists\n centroids = []\n inf = open(\"centroids.txt\",'r')\n linedump = inf.readlines()\n inf.close()\n for line in linedump:\n sCents = line.strip(\"\\n\").split(\",\")\n cents = [float(a) for a in sCents]\n centroids.append(cents)\n del linedump\n gc.collect()\n return centroids\n #return cluster centroids in the form: list of lists\n #centroids = [[centroid1], ...., [centroidN]]\n\n#Creates the weka .arff file for clustering\ndef createWekaFile(clusterList):\n #Create header with the list of attributes\n header = \"@RELATION reuters\\n\\n\" \n for attribute in clusterList[0]:\n header = header + \"@ATTRIBUTE \" + attribute + \" NUMERIC\\n\"\n header = header + \"\\n@DATA\\n\"\n \n #Generate the data portion of the .arff file \n outf = open(\"reuters.arff\", \"w\")\n outf.write(header)\n for vector in clusterList[1:]:\n outf.write(vector[0])\n for datum in vector[1:]:\n outf.write(\",\"+datum)\n outf.write(\"\\n\") \n\n outf.close()\n\n#Set up command line options\ndef parseOptions():\n parser = OptionParser()\n sup = 20\n conf = 60\n numC = 1\n dist = 1\n sampling = 200000\n parser.add_option(\"-s\", \"--support\", dest=\"sup\", action=\"store\")\n parser.add_option(\"-c\", \"--confidence\", dest=\"conf\", action=\"store\")\n parser.add_option(\"-k\", \"--numOfClusters\", dest=\"numClusters\", action=\"store\")\n parser.add_option(\"-d\", \"--distance\", dest=\"distance\", action=\"store\")\n parser.add_option(\"-a\", \"--sampling\", dest=\"sample\", action=\"store\")\n (options, args) = parser.parse_args()\n if options.sup:\n sup = options.sup\n if options.conf:\n conf = options.conf\n if options.numClusters:\n numC = options.numClusters\n if options.distance:\n dist = options.distance\n if options.sample:\n sampling = options.sample\n return (int(sup), int(conf), int(numC), int(dist), int(sampling)) \n \n#Partitions the data into 5 different sets used for 10-fold cross validation\ndef partitionData(sampling):\n inf = open(\"FV3.txt\", \"r\")\n linedump = inf.readlines()\n inf.close()\n \n partitions = [[],[],[]]\n allClasses = set([]) #Keeps track of all the classes we have (topics and places)\n index = 0\n size = len(linedump)\n sample = 0\n \n #Randomly sample the data and place into partitions\n for num in range(size):\n randomIndex = random.randrange(0,len(linedump))\n line = linedump[randomIndex].replace(\"'\",\"\").rstrip(\"\\n\")\n del linedump[randomIndex]\n (hasNoClass, classes) = 
checkForClasses(line)\n if hasNoClass: #skip if there are no topics or places\n continue\n else:\n for word in classes: allClasses.add(word)\n sample += 1\n partitions[index].append(line)\n index = (index+1)%3\n if sample == sampling: break\n del linedump\n gc.collect()\n return (partitions, allClasses)\n\ndef checkForClasses(line):\n topics_pos = line.rfind(\"<\")\n topics = line[topics_pos+1:line.rfind(\">\")].replace(\" \",\"\").split(\",\")\n places = line[line[:topics_pos].rfind(\"<\")+1:topics_pos-1].replace(\" \",\"\").split(\",\")\n if len(topics) == 1 and topics[0] == '' and len(places) == 1 and places[0] == '':\n return (True, [])\n else:\n classes = []\n for c in topics:\n if c != '': classes.append(c)\n for c in places:\n if c != '': classes.append(c)\n return (False, classes)\n\n#Generates a list of the data to be clustered. From the trainingSet, this function\n#finds the corresponding feature fector in FV4.txt and appends it to the list.\n#The feature vector has the topics and places removed, the attributes converted\n#to binary, and the document ID number removed\ndef generateClusteringSet(trainingSet):\n inf = open(\"FV4.txt\", \"r\")\n linedump = inf.readlines()\n inf.close()\n\n clusteringSet = []\n topics = linedump[0]\n topics = topics[1:topics.find(\">\")].replace(\"'\",\"\").replace(\" \",\"\").split(\",\")\n clusteringSet.append(topics)\n for vector in trainingSet:\n position = int(vector[1:vector.find(\",\")])\n line = linedump[position]\n line = line[1:line.find(\">\")].replace(\" \",\"\").split(\",\")\n dataVector = []\n for datum in line[1:]:\n if datum != \"0\": dataVector.append(\"1\")\n else: dataVector.append(\"0\")\n clusteringSet.append(dataVector)\n del linedump\n gc.collect()\n return clusteringSet\n\n#Creates a training and test set from the partitions \ndef generateTrainingTestSets(partitions, num):\n testSet = partitions[num]\n trainingSet = []\n for k, partition in enumerate(partitions):\n if k == num: continue\n else: trainingSet = trainingSet + partition\n\n return (trainingSet, testSet)\n \n #From the training set and cluster centroids, this function groups the transactional\n #data into the closest centroid to be fed into the Apriori rule mining software\ndef generateRulePartitions(centroids, trainingSet, distanceMetric):\n inf = open(\"FV4.txt\", \"r\")\n linedump = inf.readlines()\n inf.close()\n \n clusters = []\n \n for num in range(len(centroids)):\n clusters.append([])\n \n for vector in trainingSet:\n position = int(vector[1:vector.find(\",\")])\n minDistanceCluster = findClosestCentroid(linedump[position], centroids, distanceMetric)\n clusters[minDistanceCluster].append(createAprioriDataVector(vector)) \n del linedump\n gc.collect() \n return clusters \n \n#Computes the distance between vector1 and vector2\ndef distanceBetweenVectors(vector1, vector2, distanceMetric):\n distance = 0\n if distanceMetric == 1: #Manhattan distance\n for num in range(len(vector1)):\n distance = distance + abs(vector1[num]-vector2[num])\n else: #Euclidean distance\n for num in range(len(vector1)):\n distance = distance + math.pow((vector1[num]-vector2[num]),2)\n distance = math.pow(distance, 0.5)\n return distance\n\n#Takes the transactional feature vector from the training set and converts it into a comma separated string\n#that can be used by the Apriori software\ndef createAprioriDataVector(vector):\n class_pos = vector.find(\"<\", 2)\n words = vector[vector.find(\",\")+1:class_pos-1].replace(\" \",\"\") + \",\" #Extract words\n places = 
vector[class_pos+1:vector.find(\">\",class_pos)].replace(\" \",\"\") + \",\" #Extract places\n if places == \",\": places = \"\"\n topics = vector[vector.rfind(\"<\")+1:vector.rfind(\">\")].replace(\" \",\"\") #Extract topics\n if topics == \"\": places = places[:-1]\n s = words + places + topics\n return s\n \n#Finds the closest cluster centroid the data belongs to \ndef findClosestCentroid(line, clusterCentroids, distanceMetric):\n line = line[1:line.find(\">\")].replace(\" \",\"\").split(\",\")\n dataVector = []\n for datum in line[1:]:\n if datum != \"0\": dataVector.append(1.0)\n else: dataVector.append(0.0)\n #dVector = [float(a) for a in line[1:]]\n minDistance = -1\n minDistanceCluster = 0\n for num, centroid in enumerate(clusterCentroids):\n distance = distanceBetweenVectors(centroid, dataVector, distanceMetric)\n if distance < minDistance or minDistance == -1:\n minDistance = distance\n minDistanceCluster = num\n return minDistanceCluster \n \n#Looks through the generated rules and assigns a class label to the point\ndef classify(ruleSet, vector):\n if len(ruleSet) == 1: return ruleSet[0]\n vectorWords = vector[1:vector.find(\">\")].replace(\" \",\"\").split(\",\")\n vectorWords = vectorWords[1:]\n vectorWordSet = set(vectorWords)\n for rule in ruleSet[:-1]:\n ruleSetWords = set(rule[0])\n for word in ruleSetWords:\n if word not in vectorWordSet: break\n else: #Executes when no break is encountered (means all the words in the rule set were found in the test feature vector)\n return rule[1]\n return ruleSet[-1]\n\n#Returns 1 if the classLabel matches the topics or places of the vector and returns 0 otherwise \ndef isCorrectClassification(classLabel, vector):\n class_pos = vector.find(\"<\", 2)\n places = vector[class_pos+1:vector.find(\">\",class_pos)].replace(\" \",\"\").split(\",\")\n topics = vector[vector.rfind(\"<\")+1:vector.rfind(\">\")].replace(\" \",\"\").split(\",\")\n if classLabel in places or classLabel in topics: return 1\n else: return 0\n \n#Prints results to a file \ndef printResults(fold_accuracies, totalTime, aprioriTime, clusteringTime, clusterNum, distanceMetric, support, confidence):\n outf = open(\"results.txt\", \"a\")\n s = \"\"\n if clusterNum == 1:\n s = \"Clustering: None\"\n else:\n s = \"Clustering: \" + str(clusterNum) + \" clusters,\"\n if distanceMetric == 1:\n s = s + \" Manhattan distance\"\n else:\n s = s + \" Euclidean distance\"\n s = s + \"\\nSupport: \" + str(support) + \"% Confidence: \" + str(confidence) + \"%\\n\"\n s = s + \"Accuracy: \" + str(sum(fold_accuracies)/float(len(fold_accuracies))) + \"\\n\"\n s = s + \"Total run time: \" + str(totalTime) + \" Clustering time: \" + str(clusteringTime) + \" Apriori Rule Creation Time: \" + str(aprioriTime) + \"\\n\\n\\n\"\n outf.write(s)\n outf.close()\n\n#Cleans up the extra clutter in the directory \ndef cleanDirectory():\n os.remove(\"infile.txt\")\n if os.path.exists(\"reuters.arff\"): os.remove(\"reuters.arff\")\n if os.path.exists(\"centroids.txt\"): os.remove(\"centroids.txt\")\n rulesFiles = glob.glob(\"rules*\")\n for file in rulesFiles:\n os.remove(file)\n \ndef main():\n (support, confidence, clusterNum, distanceMetric, sampling) = parseOptions()\n startTime = time.time()\n clusteringTime = 0\n aprioriTime = 0\n rm = ruleMining()\n (partitions, classes) = partitionData(sampling) \n fold_accuracies = []\n for num, partition in enumerate(partitions):\n (trainingSet, testSet) = generateTrainingTestSets(partitions, num) \n correct = 0\n if clusterNum > 1: #Perform Clustering\n 
trainingClusteringList = generateClusteringSet(trainingSet)\n cStartTime = time.time()\n clusterCentroids = callKMeans(trainingClusteringList, clusterNum, distanceMetric) \n cEndTime = time.time()\n clusteringTime += cEndTime - cStartTime\n clusters = generateRulePartitions(clusterCentroids, trainingSet, distanceMetric)\n rules = []\n for cNum, cluster in enumerate(clusters):\n aStartTime = time.time()\n rules.append(rm.callApriori(cluster, support, confidence, cNum+1)) \n aEndTime = time.time()\n aprioriTime += aEndTime - aStartTime\n classifiers = rm.generateClassifiers(rules, classes) \n \n #Test classifiers on the test set\n inf = open(\"FV4.txt\", \"r\")\n linedump = inf.readlines()\n inf.close()\n print (\"Classifying test set using generated rules...\")\n for vector in testSet:\n position = int(vector[1:vector.find(\",\")])\n closestCentroid = findClosestCentroid(linedump[position], clusterCentroids, distanceMetric)\n classLabel = classify(classifiers[closestCentroid], vector)\n correct = correct + isCorrectClassification(classLabel, vector)\n fold_accuracies.append(correct/float(len(testSet))) \n del linedump\n gc.collect()\n else: #No clustering\n cluster = []\n for vector in trainingSet:\n cluster.append(createAprioriDataVector(vector))\n aStartTime = time.time() \n outfile = rm.callApriori(cluster, support, confidence, -1)\n aEndTime = time.time()\n aprioriTime += aEndTime - aStartTime\n classifier = rm.generateClassifiers([outfile], classes)\n \n print (\"Classifying test set using generated rules...\")\n for vector in testSet:\n classLabel = classify(classifier[0], vector)\n correct = correct + isCorrectClassification(classLabel, vector)\n fold_accuracies.append(correct/float(len(testSet))) \n \n endTime = time.time()\n totalTime = endTime - startTime\n printResults(fold_accuracies, totalTime, aprioriTime, clusteringTime, clusterNum, distanceMetric, support, confidence)\n cleanDirectory() \n \nif __name__ == \"__main__\":\n main()\n \n","repo_name":"JoliJeremy/AssociationRuleMining","sub_path":"ruleMining.py","file_name":"ruleMining.py","file_ext":"py","file_size_in_byte":18026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71873549654","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\nimport logging\nfrom abc import ABC\nfrom typing import Optional, Set, List, Callable, Any, Tuple\n\nimport gym\nfrom flloat.semantics import PLInterpretation\nfrom gym.spaces import Discrete, MultiDiscrete\nfrom pythomata.base import Symbol, State\nfrom pythomata.dfa import DFA\n\nfrom temprl.automata import (\n TemporalLogicFormula,\n RewardDFA,\n RewardAutomatonSimulator\n)\n\nObservation = Any\nAction = Any\n\nlogger = logging.getLogger(__name__)\n\n\nclass TemporalGoal(ABC):\n \"\"\"Abstract class to represent a temporal goal.\"\"\"\n\n def __init__(\n self,\n formula: Optional[TemporalLogicFormula] = None,\n reward: float = 1.0,\n automaton: Optional[DFA] = None,\n labels: Optional[Set[Symbol]] = None,\n reward_shaping: bool = True,\n extract_fluents: Optional[Callable] = None,\n zero_terminal_state: bool = False,\n one_hot_encoding: bool = False,\n targets: list = []\n ):\n \"\"\"\n Initialize a temporal goal.\n\n :param formula: the formula to be satisfied. it will be ignored if automaton is set.\n :param automaton: the pythomata.DFA instance. 
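Editor's note — isValidRule() in the ruleMining script above slices rule lines of the form `consequent <- antecedent words (support, confidence)` produced by the apriori tool. A standalone walk-through of that slicing logic (the sample line and class set are invented for illustration):

rule = "earn <- cts, net (12.5, 87.5)\n"  # made-up example in the expected format
classes = {"earn", "acq"}

arrow = rule.find("<")
paren = rule.find("(", arrow)
classification = rule[:arrow].replace(" ", "")                            # 'earn'
words = rule[arrow + 2:paren].replace(" ", "").split(",")                 # ['cts', 'net']
support = float(rule[paren + 1:rule.find(",", paren)])                    # 12.5
confidence = float(rule[rule.rfind(",") + 1:rule.rfind(")")].strip(" "))  # 87.5

# valid: the consequent is a known class and no class leaked into the antecedent
assert classification in classes and not any(w in classes for w in words)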
it will be\n            | the preferred input against 'formula'.\n        :param reward: the reward associated to the temporal goal.\n        :param labels: the set of all possible fluents\n            | (used to generate the full automaton).\n        :param reward_shaping: whether to apply automaton-based reward shaping.\n        :param extract_fluents: a callable that takes an observation\n            | and an action, and returns a\n            | propositional interpretation with the active fluents.\n            | if None, the 'extract_fluents' method is taken.\n        :param zero_terminal_state: when reward_shaping is True, make the\n            | potential function at a terminal state equal to zero.\n        :param one_hot_encoding: use one-hot encoding for representing the\n            | automata dimensions.\n        \"\"\"\n        if formula is None and automaton is None:\n            raise ValueError(\"Provide either a formula or an automaton.\")\n\n        self._formula = formula\n        if automaton:\n            self._automaton = RewardDFA(automaton, reward)\n        else:\n            self._automaton = RewardDFA.from_formula(\n                self._formula,\n                reward,\n                alphabet=labels\n            )\n        self._simulator = RewardAutomatonSimulator(\n            self._automaton,\n            reward_shaping=reward_shaping,\n            zero_terminal_state=zero_terminal_state\n        )\n        self._reward = reward\n        self._one_hot_encoding = one_hot_encoding\n        if extract_fluents is not None:\n            setattr(self, \"extract_fluents\", extract_fluents)\n        \n        self.targets = targets\n\n    @property\n    def observation_space(self) -> Discrete:\n        \"\"\"Return the observation space of the temporal goal.\"\"\"\n        # we add one virtual state for the 'super' sink state\n        # - that is, when the symbol is not in the alphabet.\n        # This is going to be a temporary workaround due to\n        # Pythomata's lack of support for this corner case.\n        return Discrete(len(self._automaton.states) + 1)\n\n    @property\n    def formula(self):\n        \"\"\"Get the formula.\"\"\"\n        return self._formula\n\n    @property\n    def automaton(self):\n        \"\"\"Get the automaton.\"\"\"\n        return self._automaton\n\n    @property\n    def reward(self):\n        \"\"\"Get the reward.\"\"\"\n        return self._reward\n\n    def extract_fluents(self, obs, action, targets) -> PLInterpretation:\n        \"\"\"\n        Extract high-level features from the observation.\n\n        :return: the list of active fluents.\n        \"\"\"\n        raise NotImplementedError\n\n    def step(self, observation, action) -> Optional[State]:\n        \"\"\"Do a step in the simulation.\"\"\"\n        fluents = self.extract_fluents(observation, action, self.targets)\n        self._simulator.step(fluents)\n\n        result = self._simulator.cur_state if self._simulator.cur_state is not None\\\n            else len(self._simulator.dfa.states)\n        return result\n\n    def reset(self):\n        \"\"\"Reset the simulation.\"\"\"\n        self._simulator.reset()\n        return self._simulator.cur_state\n\n    def observe_reward(self, is_terminal_state: bool = False) -> float:\n        \"\"\"Observe the reward of the last transition.\"\"\"\n        return self._simulator.observe_reward(is_terminal_state)\n\n    def is_true(self):\n        \"\"\"Check if the simulation is in a final state.\"\"\"\n        return self._simulator.is_true()\n\n    def is_failed(self):\n        \"\"\"Check whether the simulation has failed.\"\"\"\n        return self._simulator.is_failed()\n\n\nclass TemporalGoalWrapper(gym.Wrapper):\n    \"\"\"Gym wrapper to include a temporal goal in the environment.\"\"\"\n\n    def __init__(\n        self,\n        env: gym.Env,\n        temp_goals: List[TemporalGoal],\n        feature_extractor: Optional[Callable[[Observation, Action], Any]] = None,\n        combine: Optional[Callable[[Observation, Tuple], Any]] = None,\n    ):\n        \"\"\"\n        Wrap a Gym environment with a temporal goal.\n\n        :param env: the Gym 
environment to wrap.\n :param temp_goals: the temporal goal to be learnt\n :param feature_extractor: (optional) extract feature\n | from the environment state\n :param combine: (optional) combine the agent state with\n | the temporal goal state.\n \"\"\"\n super().__init__(env)\n self.temp_goals = temp_goals\n self.feature_extractor = feature_extractor\\\n if feature_extractor is not None else (lambda obs, action: obs)\n self.combine = combine\\\n if combine is not None else (lambda obs, qs: tuple((obs, *qs)))\n\n self.observation_space = self._get_observation_space()\n\n def _get_observation_space(self) -> gym.spaces.Space:\n \"\"\"Return the observation space.\"\"\"\n if isinstance(self.env.observation_space, MultiDiscrete):\n env_shape = tuple(self.env.observation_space.nvec)\n else:\n env_shape = (self.env.observation_space.n, )\n temp_goals_shape = tuple(tg.observation_space.n for tg in self.temp_goals)\n\n combined_obs_space = env_shape + temp_goals_shape\n return MultiDiscrete(combined_obs_space)\n\n def step(self, action):\n \"\"\"Do a step in the Gym environment.\"\"\"\n obs, reward, done, info = super().step(action)\n features = self.feature_extractor(obs=obs, action=action)\n next_automata_states = tuple([tg.step(obs, action) for tg in self.temp_goals])\n\n # temp_goal_all_true = all(tg.is_true() for tg in self.temp_goals)\n # temp_goal_some_false = any(tg.is_failed() for tg in self.temp_goals)\n temp_goal_rewards = [\n tg.observe_reward(is_terminal_state=done)\n for tg in self.temp_goals\n ]\n total_goal_rewards = sum(temp_goal_rewards)\n\n if any(r != 0.0 for r in temp_goal_rewards):\n logger.debug(\"Non-zero goal rewards: {}\".format(temp_goal_rewards))\n\n obs_prime = self.combine(features, next_automata_states)\n reward_prime = reward + total_goal_rewards\n return obs_prime, reward_prime, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset the Gym environment.\"\"\"\n obs = super().reset()\n for tg in self.temp_goals:\n tg.reset()\n\n features = self.feature_extractor(obs, None)\n automata_states = tuple([tg.reset() for tg in self.temp_goals])\n new_observation = self.combine(features, automata_states)\n return new_observation\n","repo_name":"lavallone/RL_project","sub_path":"temprl/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":7855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5861888289","text":"\"\"\"\nRun the following commands first\npip3 install git+https://github.com/keras-team/keras-tuner.git@1.0.2rc1\npip3 install autokeras==1.0.5\n\nThis Script searches for a model for the wine dataset\nSource and Description of data:\n\"\"\"\nimport os\n\nimport pandas as pd\nimport tensorflow as tf\n\nimport autokeras as ak\n\ndataset_url = (\n \"https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n)\n\n# save data\ndata_file_path = tf.keras.utils.get_file(\n fname=os.path.basename(dataset_url), origin=dataset_url\n)\n\ncolumn_names = [\n \"Wine\",\n \"Alcohol\",\n \"Malic.acid\",\n \"Ash\",\n \"Acl\",\n \"Mg\",\n \"Phenols\",\n \"Flavanoids\",\n \"Nonflavanoid.phenols\",\n \"Proanth\",\n \"Color.int\",\n \"Hue\",\n \"OD\",\n \"Proline\",\n]\n\nfeature_names = column_names[1:]\nlabel_name = column_names[0] # Wine\n\ndata = pd.read_csv(data_file_path, header=0, names=column_names)\n# Shuffling\ndata = data.sample(frac=1)\n\nsplit_length = int(data.shape[0] * 0.8) # 141\n\n# train and test\ntrain_data = data.iloc[:split_length]\ntest_data = 
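Editor's note — a hedged usage sketch for the classes above. The environment id, formula, fluent name, and extractor are illustrative placeholders (they follow flloat's LTLf parser API and the old gym reset/step signatures this file targets), not code from the original repo:

import gym
from flloat.parser.ltlf import LTLfParser
from flloat.semantics import PLInterpretation
from temprl.wrapper import TemporalGoal, TemporalGoalWrapper

def extract_fluents(obs, action, targets):
    # mark the fluent "goal" true once the agent reaches the last grid cell
    return PLInterpretation({"goal"} if obs == 15 else set())

formula = LTLfParser()("F goal")  # "eventually reach the goal"
tg = TemporalGoal(formula=formula, reward=1.0, labels={"goal"},
                  extract_fluents=extract_fluents)
env = TemporalGoalWrapper(gym.make("FrozenLake-v0"), temp_goals=[tg])

obs = env.reset()  # combined (env feature, automaton state)
obs, reward, done, info = env.step(env.action_space.sample())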
data.iloc[split_length:]\n\n\n# Initialize the classifier.\nclf = ak.StructuredDataClassifier(max_trials=5)\n\n# Evaluate\nclf.fit(x=train_data[feature_names], y=train_data[label_name])\nprint(\n \"Accuracy: {accuracy}\".format(\n accuracy=clf.evaluate(\n x=test_data[feature_names], y=test_data[label_name]\n )\n )\n)\n","repo_name":"keras-team/autokeras","sub_path":"examples/wine.py","file_name":"wine.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":8985,"dataset":"github-code","pt":"67"} +{"seq_id":"22473256990","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport logging\nimport time\nimport sys\nimport os\nfrom .speedtest import speedtest\nfrom configloader import load_config\nfrom Munager.MuAPI.webapi_utils import WebApi\nclass Speedtest(object):\n\n def __init__(self):\n import threading\n self.event = threading.Event()\n self.has_stopped = False\n self.config = load_config()\n self.webapi = WebApi()\n\n def speedtest_thread(self):\n if self.event.wait(600):\n return\n\n logging.info(\"Speedtest starting...You can't stop right now!\")\n CTid = 0\n speedtest_ct = speedtest.Speedtest()\n speedtest_ct.get_servers()\n servers_list = []\n for _, servers in sorted(speedtest_ct.servers.items()):\n for server in servers:\n if server['country'].find(\n 'China') != -1 and server['sponsor'].find('Telecom') != -1:\n servers_list.append(server)\n speedtest_ct.get_best_server(servers_list)\n results_ct = speedtest_ct.results\n CTPing = str(results_ct.server['latency']) + ' ms'\n speedtest_ct.download()\n CTDLSpeed = str(\n round(\n (results_ct.download / 1000 / 1000),\n 2)) + \" Mbit/s\"\n speedtest_ct.upload()\n CTUpSpeed = str(\n round(\n (results_ct.upload / 1000 / 1000),\n 2)) + \" Mbit/s\"\n\n CUid = 0\n speedtest_cu = speedtest.Speedtest()\n speedtest_cu.get_servers()\n servers_list = []\n for _, servers in sorted(speedtest_cu.servers.items()):\n for server in servers:\n if server['country'].find(\n 'China') != -1 and server['sponsor'].find('Unicom') != -1:\n servers_list.append(server)\n speedtest_cu.get_best_server(servers_list)\n results_cu = speedtest_cu.results\n CUPing = str(results_cu.server['latency']) + ' ms'\n speedtest_cu.download()\n CUDLSpeed = str(\n round(\n (results_cu.download / 1000 / 1000),\n 2)) + \" Mbit/s\"\n speedtest_cu.upload()\n CUUpSpeed = str(\n round(\n (results_cu.upload / 1000 / 1000),\n 2)) + \" Mbit/s\"\n\n CMid = 0\n speedtest_cm = speedtest.Speedtest()\n speedtest_cm.get_servers()\n servers_list = []\n for _, servers in sorted(speedtest_cm.servers.items()):\n for server in servers:\n if server['country'].find(\n 'China') != -1 and server['sponsor'].find('Mobile') != -1:\n servers_list.append(server)\n speedtest_cm.get_best_server(servers_list)\n results_cm = speedtest_cm.results\n CMPing = str(results_cm.server['latency']) + ' ms'\n speedtest_cm.download()\n CMDLSpeed = str(\n round(\n (results_cm.download / 1000 / 1000),\n 2)) + \" Mbit/s\"\n speedtest_cm.upload()\n CMUpSpeed = str(\n round(\n (results_cm.upload / 1000 / 1000),\n 2)) + \" Mbit/s\"\n\n\n self.webapi.postApi('func/speedtest',\n {'node_id': self.config.get(\"node_id\",0)},\n {'data': [{'telecomping': CTPing,\n 'telecomeupload': CTUpSpeed,\n 'telecomedownload': CTDLSpeed,\n 'unicomping': CUPing,\n 'unicomupload': CUUpSpeed,\n 'unicomdownload': CUDLSpeed,\n 'cmccping': CMPing,\n 'cmccupload': CMUpSpeed,\n 'cmccdownload': CMDLSpeed}]})\n\n logging.info(\"Speedtest finished\")\n\n @staticmethod\n def thread_db(obj):\n config = 
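Editor's note — a possible follow-up once the search above finishes (a sketch): AutoKeras task objects expose predict() and export_model(), so the best pipeline can be reused outside the search loop; the save path here is our choice.

# Predict with the best model found and export it as a plain Keras model.
predictions = clf.predict(test_data[feature_names])
best_model = clf.export_model()  # a tf.keras.Model
best_model.save("wine_best_model", save_format="tf")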
load_config()\n if config.get(\"speedtest\",0)== 0:\n return\n global db_instance\n db_instance = obj()\n try:\n while True:\n try:\n db_instance.speedtest_thread()\n except Exception as e:\n import traceback\n trace = traceback.format_exc()\n logging.error(trace)\n #logging.warn('db thread except:%s' % e)\n if db_instance.event.wait(config.get(\"speedtest\") * 3600):\n break\n if db_instance.has_stopped:\n break\n except KeyboardInterrupt as e:\n pass\n db_instance = None\n\n @staticmethod\n def thread_db_stop():\n global db_instance\n db_instance.has_stopped = True\n db_instance.event.set()\n","repo_name":"pandoraes/v2ray-api","sub_path":"Munager/SpeedTestManager/speedtest_thread.py","file_name":"speedtest_thread.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"12141763236","text":"import streamlit as st\nimport pickle\nimport pandas as pd\nimport sklearn\nimport time\nfrom model import final_model\nfrom PIL import Image\n\nimg_lis_info = []\nfor i in range(1,9):\n img = Image.open(f\"投影片{i}.jpg\")\n img_lis_info.append(img)\n\nimg_lis_teach = []\nfor i in range(1,8):\n img = Image.open(f\"卫教_{i}.jpg\")\n img_lis_teach.append(img)\n\nmodel = final_model\n\ndef setup_quesion(title='title'):\n value = st.radio(title,['无','有'], horizontal=True, key=title)\n if value == '无':\n value = 0\n else:\n value = 1 \n return value\n\ndef predict():\n features = pd.DataFrame({\n '平均每周几天吃水果':[fruit_freq],\n '是否有胃溃疡或十二指肠溃疡':[stomach_problem],\n '是否有心脏病':[heart_problem],\n '和去年比较之健康状况':[compare],\n '是否有同居伴侣':[couple],\n 'bmi':[bmi],\n '失业中':[0],\n '是否曾嚼食槟榔':[chew],\n '是否有高血压':[high_blood_pressure],\n '是否有高血脂':[high_blood_oil], \n '是否有糖尿病':[high_blood_sugar], \n '是否有骨质疏松症':[bone_lack], \n '是否有肝脏疾病':[liver_problem], \n '是否有子宫卵巢疾病':[womon_proble],\n '是否关节疼痛':[joint_pain], \n '是否下背部疼痛或腰痛':[back_pain], \n '是否坐骨神经痛':[sit_pain], \n '是否头痛或偏头痛':[head_pain], \n '是否痛风':[gout], \n '是否使用慢性处方签':[longterm_drug],\n '是否曾吸烟':[smoke], \n '年龄':[age]})\n column_order = ['是否有同居伴侣', '和去年比较之健康状况', '是否有高血压', '是否有高血脂', '是否有心脏病', '是否有糖尿病',\n '是否有骨质疏松症', '是否有胃溃疡或十二指肠溃疡', '是否有肝脏疾病', '是否有子宫卵巢疾病', '是否关节疼痛',\n '是否下背部疼痛或腰痛', '是否坐骨神经痛', '是否头痛或偏头痛', '是否痛风', '是否使用慢性处方签', '是否曾吸烟',\n '是否曾嚼食槟榔', '平均每周几天吃水果', '年龄', 'bmi', '失业中']\n features = features.reindex(columns=column_order)\n result = model.predict(features)[0]\n prob = model.predict_proba(features)[0][1]\n if result:\n st.error(f'预测为可能有慢性肾脏病,患病几率为: {prob:.0%},表示您对于上述危险因子具有高度曝险不一定有患病,建议您到医院做进一步检查')\n hospital(region)\n health_info()\n else:\n st.success(f'预测为健康状态,健康几率为: {1-prob:.0%}')\n health_info()\n \ndef health_info():\n st.warning('卫教资讯(有效预防与延缓慢性肾脏病)')\n for img in img_lis_teach:\n st.image(img)\n\ndef hospital(region):\n lookup_table = {\n '基隆市':[('基隆长庚','https://www.cgmh.org.tw/tw/Services/DeptInfo/2/21000/21500'),\n ('基隆医院', 'https://www.kln.mohw.gov.tw/?aid=51&pid=114')],\n '台北市':[('台北医院', 'https://www.tph.mohw.gov.tw/?aid=51&pid=7'),\n ('台北马偕', 'https://www.mmh.org.tw/departmain.php?id=4')],\n '新北市':[('土城医院', 'https://www.cgmh.org.tw/tw/Services/DeptInfo/V/V1000/V1500'),\n ('亚东医院', 'https://www.femh.org.tw/doctor/doctor?t=2')],\n '桃园市':[('桃园荣总', 'https://www.tyvh.gov.tw/team/'),\n ('桃园医院', 'https://www.tygh.mohw.gov.tw/?aid=51&pid=132')],\n '新竹县':[('台大医院新竹分院', 'https://www.hch.gov.tw/?aid=51&pid=8'),\n ('中国医药大学新竹附设医院', 'https://www.cmu-hch.cmu.edu.tw/Doctor/Department?detail=7¤t=1&source=dep')],\n '新竹市':[('新竹马偕', 'https://www.hc.mmh.org.tw/departmain.php?depid=4#staff'),\n 
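Editor's note — a minimal launch sketch for the helpers above (the daemon-thread wiring is our assumption): thread_db instantiates the class it is given and loops until thread_db_stop() sets the stop event.

import threading

worker = threading.Thread(target=Speedtest.thread_db, args=(Speedtest,), daemon=True)
worker.start()
# ... later, to stop the loop cleanly:
Speedtest.thread_db_stop()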
('国军桃园总医院新竹分院', 'https://813.mnd.gov.tw/department/sn5/')],\n '苗栗县':[('苗栗医院', 'https://www.mil.mohw.gov.tw/?aid=51&pid=51')],\n '台中市':[('台中荣总','https://www.vghtc.gov.tw/APIPage/DoctorInfoList?WebMenuID=dc264319-1d78-4ad8-a7ba-647440dbad6b&SECTION_ID=NEPH&SECTION=%E8%85%8E%E8%87%9F%E7%97%85%E7%A7%91'),\n ('台中慈济医院','https://taichungsub.tzuchi.com.tw/12/doctors')],\n '彰化县':[('彰化基督教医院', 'https://www.cch.org.tw/NEPHRO/'),\n ('彰化医院', 'https://www.chhw.mohw.gov.tw/?aid=51&pid=31')],\n '云林县':[('台大医院云林分院', 'https://www.ylh.gov.tw/?aid=51&pid=35'),\n ('云林基督教医院', 'https://yl.cch.org.tw/doctor_1_detial.aspx?cID=74&key=0105')],\n '嘉义县':[('朴子医院', 'https://www.puzih.mohw.gov.tw/?aid=51&pid=41&page_name=list&pageNo=2'),\n ('嘉义长庚', 'https://www.cgmh.org.tw/tw/Systems/AreaInfo/9')],\n '嘉义市':[('嘉义医院', 'https://www.chyi.mohw.gov.tw/list.asp?orcaid={850945D1-5D12-4D84-AE32-8C8B0E26D923}'),\n ('台中荣总嘉义分院', 'https://www.vhcy.gov.tw/PageView/RowView?WebMenuID=25cadfcc-5a2d-4b5b-a1b5-eb11cbb21f7b')],\n '台南市':[('台南医院', 'https://www.tnhosp.mohw.gov.tw/list.asp?orcaid={850945D1-5D12-4D84-AE32-8C8B0E26D923}'),\n ('奇美医院', 'http://sub.chimei.org.tw/57340/index.php/members/')],\n '高雄市':[('高雄医学大学附设医院', 'https://www.kmuh.org.tw/KMUHInterWeb/InterWeb/InnerPage/1001124056'),\n ('高雄荣总', 'https://org.vghks.gov.tw/neph/')],\n '屏东县':[('屏东医院', 'https://www.pntn.mohw.gov.tw/?aid=51&pid=90'),\n ('屏东荣总', 'https://org.ptvgh.gov.tw/nephro/')],\n '南投县':[('竹山秀传医院', 'https://www.csshow.org.tw/new_cssummary/cssummary_04_01_08.asp'),\n ('南投医院', 'https://www.nant.mohw.gov.tw/?aid=51&pid=63')],\n '宜兰县':[('阳明交通大学附设医院', 'https://www.hosp.nycu.edu.tw/departments/health-care/internal/kidney/member.html'),\n ('罗东博爱医院', 'https://www.pohai.org.tw/doctor.php?Class1=7&Class2=17&Sort=2#sidebar_2')],\n \n '花莲县':[('花莲医院', 'https://webreg.hwln.mohw.gov.tw/OINetReg.WebRwd/Reg/Dept'),\n ('花莲慈济医院', 'https://hlm.tzuchi.com.tw/neph/')],\n '台东县':[('台东医院', 'https://www.tait.mohw.gov.tw/?aid=51&pid=41&page_name=detail&iid=659'),\n ('台东马偕', 'https://ttw3.mmh.org.tw/departmain.php?id=4')],\n '金门县':[('金门医院', 'https://netreg.kmhp.mohw.gov.tw/OINetReg.WebRwd/')],\n '澎湖县':[('三军总医院澎湖分院', 'https://wwwv.tsgh.ndmctsgh.edu.tw/Docdetaillist/195/40123/25218'),\n ('澎湖医院', 'https://www.pngh.mohw.gov.tw/?aid=51&pid=41')],\n '连江县':[('连江县立医院', 'https://ljc.matsuh.gov.tw/OINetReg/OINetReg.Reg/Reg_NetReg.aspx')]}\n\n data = lookup_table[region]\n st.info('在您地区提供肾脏检查之医院')\n for i, (name, ref) in enumerate(data,1):\n st.markdown(f\"{i}. 
[{name}]({ref})\")\n\n\n# app section\nst.title('成功逆转肾 :point_up:')\nst.warning('为达到较佳的使用体验,手机用户请使用浏览器开启此网页,如:chrome, safari 等,请勿使用社群平台内建之浏览器开启')\nst.caption('本预测模型是由“肾败难免”团队利用卫生福利部国民健康署 \\\n (前身:卫生署国民健康局) 于民国 91 年所施行的“台湾地区国民健康促进知识、态度与行为调查”作为原始资料进行机器学习训练而得\\\n ,预测结果仅为参考仍应以医师诊断结果为主,模型表现数据如下:')\n\nwith st.expander(\"查看更多关于模型表现\"):\n st.text(\"\"\"\n 原始资料经整理后共有 24392 个有效样本,其中患有肾脏病者为 1000 人,占有效样本数 4%\n 团队将资料进行 8:2 切割分成训练集与验证集,以下数据为模型训练完成后在验证集的表现\n (采 bootstrap 法建构之 95% 信赖区间)\n Accuracy(整体预测正确率): [0.7222, 0.7306]\n Sensitivity(实际有病者中预测为有病比率): [0.6222, 0.6667]\n Specificity(实际没病者中预测为没病比率): [0.7257, 0.7343]\n \"\"\")\n\n\nwith (st.expander(\"查看更多认识慢性肾脏病\")):\n for img in img_lis_info:\n st.image(img)\n\nst.write(\"---\")\n\nst.markdown('#### 以下共有 23 题,请依实际状况回答')\nst.markdown('##### 基本资料 (身高体重是为了算 bmi)')\n\nregion = st.selectbox('您目前居住在哪里',\n ['基隆市','台北市','新北市','桃园市','新竹县','新竹市','苗栗县','台中市',\n '彰化县','云林县','嘉义县','嘉义市','台南市','高雄市','屏东县','南投县'\n '宜兰县','花莲县','台东县','金门县','澎湖县','连江县'])\nage = st.number_input('您目前的年龄',\n min_value=1,max_value=100,value=25)\nheight = st.number_input('您目前身高(cm)',min_value=1,max_value=200,value=165)\nweight = st.number_input('您目前体重(kg)',min_value=1,max_value=200,value=60)\nbmi = weight / ((height/100)**2)\ncompare = st.selectbox('和去年比较之健康状况',\n ['比去年差很多', '比去年差一些', '和去年持平','比去年好一些','比去年好很多'],index=2)\nif compare == '比去年差很多':\n compare = -2\nelif compare =='比去年差一些':\n compare = -1\nelif compare =='和去年持平':\n compare = 0\nelif compare =='比去年好一些':\n compare = 1\nelif compare =='比去年好很多':\n compare = 2\nfruit_freq = st.selectbox('每周吃几天水果',\n ['不吃', '每周 1 天或以下', '每周 2-3 天','每周 4-5 天','几乎每天'],index=2)\nif fruit_freq == '不吃':\n fruit_freq = 0\nelif fruit_freq =='每周一天或以下':\n fruit_freq = 1\nelif fruit_freq =='每周 2-3 天':\n fruit_freq = 2\nelif fruit_freq =='每周 4-5 天':\n fruit_freq = 3\nelif fruit_freq =='几乎每天':\n fruit_freq = 4\n\nst.markdown('##### 三高病史 (须为经医师诊断)')\nhigh_blood_pressure = setup_quesion('您是否有高血压')\nhigh_blood_sugar = setup_quesion('您是否有糖尿病')\nhigh_blood_oil = setup_quesion('您是否有高血脂')\n\nst.markdown('##### 其他疾病 (须为经医师诊断)')\nheart_problem = setup_quesion('是否有心脏病')\nbone_lack = setup_quesion('您是否有骨质疏松')\nliver_problem = setup_quesion('您是否有肝脏疾病')\ngout = setup_quesion('您是否有痛风')\nstomach_problem = setup_quesion('是否有胃溃疡或十二指肠溃疡')\nwomon_proble = setup_quesion('您是否有子宫卵巢疾病(男性填无)')\n\n\nst.markdown('##### 身体不适症状 (自我评估)')\njoint_pain = setup_quesion('您是否关节疼痛')\nback_pain = setup_quesion('您是否有下背部疼痛或是腰痛')\nsit_pain = setup_quesion('您是否有坐骨神经痛')\nhead_pain = setup_quesion('您是否有头痛或偏头痛')\n\nst.markdown('##### 其他')\ncouple = setup_quesion('是否有同居伴侣 (不论已婚未婚)')\nlongterm_drug = setup_quesion('您是否有使用慢性处方签')\nsmoke = setup_quesion('您是否有或有过长期吸烟的经验(一年以上)')\nchew = setup_quesion('您是否有或有过长期吃槟榔的经验(一年以上)')\n\n\npred = st.button('Predict')\nif pred:\n my_bar = st.progress(0)\n time.sleep(0.1)\n for i in range(0,100, 10):\n if i < 5:\n time.sleep(0.1)\n elif i < 8:\n time.sleep(0.03)\n else:\n time.sleep(0.01)\n my_bar.progress(i + 10)\n time.sleep(0.01)\n result = predict()\n\n","repo_name":"ianchen0313/healthapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11217958048","text":"import numpy as np\nfrom scipy.sparse import dok_matrix\nfrom tqdm import tqdm\n\n\ndef from_text(path, encoding='utf-8', verbose=1, class2int=True):\n \"\"\"\n Reads a labelled colletion of documents.\n File fomart <0 or 1>\\t\\n\n\n :param path: path to the labelled 
collection\n :param encoding: the text encoding used to open the file\n :param verbose: if >0 (default) shows some progress information in standard output\n :return: a list of sentences, and a list of labels\n \"\"\"\n all_sentences, all_labels = [], []\n if verbose>0:\n file = tqdm(open(path, 'rt', encoding=encoding).readlines(), f'loading {path}')\n else:\n file = open(path, 'rt', encoding=encoding).readlines()\n for line in file:\n line = line.strip()\n if line:\n try:\n label, sentence = line.split('\\t')\n sentence = sentence.strip()\n if class2int:\n label = int(label)\n if sentence:\n all_sentences.append(sentence)\n all_labels.append(label)\n except ValueError:\n print(f'format error in {line}')\n return all_sentences, all_labels\n\n\ndef from_sparse(path):\n \"\"\"\n Reads a labelled collection of real-valued instances expressed in sparse format\n File format <-1 or 0 or 1>[\\s col(int):val(float)]\\n\n\n :param path: path to the labelled collection\n :return: a `csr_matrix` containing the instances (rows), and a ndarray containing the labels\n \"\"\"\n\n def split_col_val(col_val):\n col, val = col_val.split(':')\n col, val = int(col) - 1, float(val)\n return col, val\n\n all_documents, all_labels = [], []\n max_col = 0\n for line in tqdm(open(path, 'rt').readlines(), f'loading {path}'):\n parts = line.strip().split()\n if parts:\n all_labels.append(int(parts[0]))\n cols, vals = zip(*[split_col_val(col_val) for col_val in parts[1:]])\n cols, vals = np.asarray(cols), np.asarray(vals)\n max_col = max(max_col, cols.max())\n all_documents.append((cols, vals))\n n_docs = len(all_labels)\n X = dok_matrix((n_docs, max_col + 1), dtype=float)\n for i, (cols, vals) in tqdm(enumerate(all_documents), total=len(all_documents),\n desc=f'\\-- filling matrix of shape {X.shape}'):\n X[i, cols] = vals\n X = X.tocsr()\n y = np.asarray(all_labels) + 1\n return X, y\n\n\ndef from_csv(path, encoding='utf-8'):\n \"\"\"\n Reads a csv file in which columns are separated by ','.\n File format
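Editor's note — a worked example of the sparse line format from_sparse() above parses (the sample line is invented): the leading token is the label in {-1, 0, 1}, shifted by +1 on return, and each col:val pair uses 1-based column indices that are shifted to 0-based matrix columns.

line = "1 3:0.5 7:1.25"
parts = line.strip().split()
label = int(parts[0]) + 1                  # -> 2, matching y = all_labels + 1
pairs = [p.split(":") for p in parts[1:]]
cols = [int(c) - 1 for c, _ in pairs]      # 1-based file -> 0-based matrix: [2, 6]
vals = [float(v) for _, v in pairs]        # [0.5, 1.25]
print(label, cols, vals)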